Merge branch 'master' into custom-command-extra-volumes

Klaus Post 2024-07-23 03:11:45 -07:00 committed by GitHub
commit 6f39f0dc9f
132 changed files with 4313 additions and 2149 deletions

View file

@@ -21,8 +21,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: 1.22.4
check-latest: true
go-version: 1.22.5
- name: Get official govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
shell: bash
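The same scan the workflow runs can be reproduced locally once govulncheck is installed; a minimal Go wrapper sketch (the direct `govulncheck ./...` invocation is equivalent, exit handling here is illustrative):

package main

import (
	"os"
	"os/exec"
)

func main() {
	// Mirror the CI step: scan every package in the current module.
	cmd := exec.Command("govulncheck", "./...")
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		// Non-zero exit: findings were reported or the tool failed.
		os.Exit(1)
	}
}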

View file

@@ -2,6 +2,9 @@
extend-exclude = [
".git/",
"docs/",
"CREDITS",
"go.mod",
"go.sum",
]
ignore-hidden = false
@@ -16,6 +19,7 @@ extend-ignore-re = [
"MIIDBTCCAe2gAwIBAgIQWHw7h.*",
'http\.Header\{"X-Amz-Server-Side-Encryptio":',
"ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge",
"ERRO:",
]
[default.extend-words]

CREDITS
View file

@@ -6334,6 +6334,203 @@ SOFTWARE.
================================================================
github.com/go-ini/ini
https://github.com/go-ini/ini
----------------------------------------------------------------
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright 2014 Unknwon
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
================================================================
github.com/go-jose/go-jose/v4
https://github.com/go-jose/go-jose/v4
----------------------------------------------------------------
@@ -11223,33 +11420,29 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
github.com/gorilla/websocket
https://github.com/gorilla/websocket
----------------------------------------------------------------
Copyright (c) 2023 The Gorilla Authors. All rights reserved.
Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================
github.com/hashicorp/errwrap
@@ -13082,376 +13275,6 @@ Mozilla Public License, version 2.0
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version
of the License under which You originally received the Covered Software,
or under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a
modified version of this License if you rename the license and remove
any references to the name of the license steward (except to note that
such modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary
Licenses If You choose to distribute Source Code Form that is
Incompatible With Secondary Licenses under the terms of this version of
the License, the notice described in Exhibit B of this License must be
attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file,
then You may include the notice in a location (such as a LICENSE file in a
relevant directory) where a recipient would be likely to look for such a
notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - "Incompatible With Secondary Licenses" Notice
This Source Code Form is "Incompatible
With Secondary Licenses", as defined by
the Mozilla Public License, v. 2.0.
================================================================
github.com/hashicorp/golang-lru/v2
https://github.com/hashicorp/golang-lru/v2
----------------------------------------------------------------
Copyright (c) 2014 HashiCorp, Inc.
Mozilla Public License, version 2.0
1. Definitions
1.1. "Contributor"
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. "Contributor Version"
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. "Contribution"
means Covered Software of a particular Contributor.
1.4. "Covered Software"
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. "Incompatible With Secondary Licenses"
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of
version 1.1 or earlier of the License, but not also under the terms of
a Secondary License.
1.6. "Executable Form"
means any form of the work other than Source Code Form.
1.7. "Larger Work"
means a work that combines Covered Software with other material, in a
separate file or files, that is not Covered Software.
1.8. "License"
means this document.
1.9. "Licensable"
means having the right to grant, to the maximum extent possible, whether
at the time of the initial grant or subsequently, any and all of the
rights conveyed by this License.
1.10. "Modifications"
means any of the following:
a. any file in Source Code Form that results from an addition to,
deletion from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. "Patent Claims" of a Contributor
means any patent claim(s), including without limitation, method,
process, and apparatus claims, in any patent Licensable by such
Contributor that would be infringed, but for the grant of the License,
by the making, using, selling, offering for sale, having made, import,
or transfer of either its Contributions or its Contributor Version.
1.12. "Secondary License"
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. "Source Code Form"
means the form of the work preferred for making modifications.
1.14. "You" (or "Your")
means an individual or a legal entity exercising rights under this
License. For legal entities, "You" includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, "control" means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or
as part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its
Contributions or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution
become effective for each Contribution on the date the Contributor first
distributes such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under
this License. No additional rights or licenses will be implied from the
distribution or licensing of Covered Software under this License.
Notwithstanding Section 2.1(b) above, no patent license is granted by a
Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of
its Contributions.
This License does not grant any rights in the trademarks, service marks,
or logos of any Contributor (except as may be necessary to comply with
the notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this
License (see Section 10.2) or under the terms of a Secondary License (if
permitted under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its
Contributions are its original creation(s) or it has sufficient rights to
grant the rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under
applicable copyright doctrines of fair use, fair dealing, or other
equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under
the terms of this License. You must inform recipients that the Source
Code Form of the Covered Software is governed by the terms of this
License, and how they can obtain a copy of this License. You may not
attempt to alter or restrict the recipients' rights in the Source Code
Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this
License, or sublicense it under different terms, provided that the
license for the Executable Form does not attempt to limit or alter the
recipients' rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for
the Covered Software. If the Larger Work is a combination of Covered
Software with a work governed by one or more Secondary Licenses, and the
Covered Software is not Incompatible With Secondary Licenses, this
License permits You to additionally distribute such Covered Software
under the terms of such Secondary License(s), so that the recipient of
the Larger Work may, at their option, further distribute the Covered
Software under the terms of either this License or such Secondary
License(s).
3.4. Notices
You may not remove or alter the substance of any license notices
(including copyright notices, patent notices, disclaimers of warranty, or
limitations of liability) contained within the Source Code Form of the
Covered Software, except that You may alter any license notices to the
extent required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on
behalf of any Contributor. You must make it absolutely clear that any
such warranty, support, indemnity, or liability obligation is offered by
You alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute,
judicial order, or regulation then You must: (a) comply with the terms of
this License to the maximum extent possible; and (b) describe the
limitations and the code they affect. Such description must be placed in a
text file included with all distributions of the Covered Software under
this License. Except to the extent prohibited by statute or regulation,
such description must be sufficiently detailed for a recipient of ordinary
skill to be able to understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing
basis, if such Contributor fails to notify You of the non-compliance by
some reasonable means prior to 60 days after You have come back into
compliance. Moreover, Your grants from a particular Contributor are
reinstated on an ongoing basis if such Contributor notifies You of the
non-compliance by some reasonable means, this is the first time You have
received notice of non-compliance with this License from such
Contributor, and You become compliant prior to 30 days after Your receipt
of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions,
counter-claims, and cross-claims) alleging that a Contributor Version
directly or indirectly infringes any patent, then the rights granted to
You by any and all Contributors for the Covered Software under Section
2.1 of this License shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an "as is" basis,
without warranty of any kind, either expressed, implied, or statutory,
including, without limitation, warranties that the Covered Software is free
of defects, merchantable, fit for a particular purpose or non-infringing.
The entire risk as to the quality and performance of the Covered Software
is with You. Should any Covered Software prove defective in any respect,
You (not any Contributor) assume the cost of any necessary servicing,
repair, or correction. This disclaimer of warranty constitutes an essential
part of this License. No use of any Covered Software is authorized under
this License except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from
such party's negligence to the extent applicable law prohibits such
limitation. Some jurisdictions do not allow the exclusion or limitation of
incidental or consequential damages, so this exclusion and limitation may
not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts
of a jurisdiction where the defendant maintains its principal place of
business and such litigation shall be governed by laws of that
jurisdiction, without reference to its conflict-of-law provisions. Nothing
in this Section shall prevent a party's ability to bring cross-claims or
counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject
matter hereof. If any provision of this License is held to be
unenforceable, such provision shall be reformed only to the extent
necessary to make it enforceable. Any law or regulation which provides that
the language of a contract shall be construed against the drafter shall not
be used to construe this License against a Contributor.
10. Versions of the License
10.1. New Versions
@@ -24119,6 +23942,43 @@ SOFTWARE.
================================================================
github.com/munnerz/goautoneg
https://github.com/munnerz/goautoneg
----------------------------------------------------------------
Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================
github.com/nats-io/jwt/v2
https://github.com/nats-io/jwt/v2
----------------------------------------------------------------
@@ -33171,203 +33031,6 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
================================================================
gopkg.in/ini.v1
https://gopkg.in/ini.v1
----------------------------------------------------------------
[Apache License, Version 2.0 — text identical to the github.com/go-ini/ini entry above, including the Copyright 2014 Unknwon appendix notice.]
================================================================
gopkg.in/yaml.v2
https://gopkg.in/yaml.v2
----------------------------------------------------------------

View file

@@ -34,11 +34,14 @@ verifiers: lint check-gen
check-gen: ## check for updated autogenerated files
@go generate ./... >/dev/null
@go mod tidy -compat=1.21
@(! git diff --name-only | grep '_gen.go$$') || (echo "Non-committed changes in auto-generated code is detected, please commit them to proceed." && false)
@(! git diff --name-only | grep 'go.sum') || (echo "Non-committed changes in auto-generated go.sum is detected, please commit them to proceed." && false)
lint: getdeps ## runs golangci-lint suite of linters
@echo "Running $@ check"
@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml
@command typos && typos ./ || echo "typos binary is not found.. skipping.."
lint-fix: getdeps ## runs golangci-lint suite of linters with automatic fixes
@echo "Running $@ check"
@@ -86,9 +89,9 @@ test-race: verifiers build ## builds minio, runs linters, tests (race)
test-iam: install-race ## verify IAM (external IDP, etcd backends)
@echo "Running tests for IAM (external IDP, etcd backends)"
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue,dev -v -run TestIAM* ./cmd
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -timeout 15m -tags kqueue,dev -v -run TestIAM* ./cmd
@echo "Running tests for IAM (external IDP, etcd backends) with -race"
@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue,dev -v -run TestIAM* ./cmd
@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -timeout 15m -race -tags kqueue,dev -v -run TestIAM* ./cmd
test-iam-ldap-upgrade-import: install-race ## verify IAM (external LDAP IDP)
@echo "Running upgrade tests for IAM (LDAP backend)"

View file

@@ -101,7 +101,7 @@ function fail() {
}
function check_online() {
if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
if ! grep -q 'API:' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
fi
}

View file

@@ -78,7 +78,7 @@ function start_minio_3_node() {
}
function check_heal() {
if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
if ! grep -q 'API:' ${WORK_DIR}/dist-minio-*.log; then
return 1
fi

View file

@@ -216,6 +216,12 @@ func toAdminAPIErr(ctx context.Context, err error) APIError {
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
case errors.Is(err, errTierInvalidConfig):
apiErr = APIError{
Code: "XMinioAdminTierInvalidConfig",
Description: err.Error(),
HTTPStatusCode: http.StatusBadRequest,
}
default:
apiErr = errorCodes.ToAPIErrWithErr(toAdminAPIErrCode(ctx, err), err)
}
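The errors.Is comparison above matters because the sentinel may arrive wrapped in extra context; a self-contained sketch of the same mapping pattern (the sentinel value and Code string are taken from the hunk, everything else is simplified):

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// Sentinel and response type mirroring the pattern above.
var errTierInvalidConfig = errors.New("tier configuration is invalid")

type APIError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

// toAPIErr maps sentinel errors to API errors; errors.Is matches the
// sentinel even when it was wrapped with fmt.Errorf("...: %w", err).
func toAPIErr(err error) APIError {
	switch {
	case errors.Is(err, errTierInvalidConfig):
		return APIError{
			Code:           "XMinioAdminTierInvalidConfig",
			Description:    err.Error(),
			HTTPStatusCode: http.StatusBadRequest,
		}
	default:
		return APIError{
			Code:           "InternalError",
			Description:    err.Error(),
			HTTPStatusCode: http.StatusInternalServerError,
		}
	}
}

func main() {
	wrapped := fmt.Errorf("remote tier %q: %w", "WARM-1", errTierInvalidConfig)
	fmt.Println(toAPIErr(wrapped).Code) // XMinioAdminTierInvalidConfig
}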

View file

@@ -479,3 +479,180 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, encryptedData)
}
// ListAccessKeysLDAPBulk - GET /minio/admin/v3/idp/ldap/list-access-keys-bulk
func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
// Get current object layer instance.
objectAPI := newObjectLayerFn()
if objectAPI == nil || globalNotificationSys == nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
return
}
cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
dnList := r.Form["userDNs"]
isAll := r.Form.Get("all") == "true"
onlySelf := !isAll && len(dnList) == 0
if isAll && len(dnList) > 0 {
// This should be checked on client side, so return generic error
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
// Empty DN list and not self, list access keys for all users
if isAll {
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListUsersAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
} else if len(dnList) == 1 {
var dn string
foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(dnList[0])
if err == nil {
dn = foundResult.NormDN
}
if dn == cred.ParentUser || dnList[0] == cred.ParentUser {
onlySelf = true
}
}
if !globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.ListServiceAccountsAdminAction,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
DenyOnly: onlySelf,
}) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
if onlySelf && len(dnList) == 0 {
selfDN := cred.AccessKey
if cred.ParentUser != "" {
selfDN = cred.ParentUser
}
dnList = append(dnList, selfDN)
}
var ldapUserList []string
if isAll {
ldapUsers, err := globalIAMSys.ListLDAPUsers(ctx)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for user := range ldapUsers {
ldapUserList = append(ldapUserList, user)
}
} else {
for _, userDN := range dnList {
// Validate the userDN
foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if foundResult == nil {
continue
}
ldapUserList = append(ldapUserList, foundResult.NormDN)
}
}
listType := r.Form.Get("listType")
var listSTSKeys, listServiceAccounts bool
switch listType {
case madmin.AccessKeyListUsersOnly:
listSTSKeys = false
listServiceAccounts = false
case madmin.AccessKeyListSTSOnly:
listSTSKeys = true
listServiceAccounts = false
case madmin.AccessKeyListSvcaccOnly:
listSTSKeys = false
listServiceAccounts = true
case madmin.AccessKeyListAll:
listSTSKeys = true
listServiceAccounts = true
default:
err := errors.New("invalid list type")
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
return
}
accessKeyMap := make(map[string]madmin.ListAccessKeysLDAPResp)
for _, internalDN := range ldapUserList {
externalDN := globalIAMSys.LDAPConfig.DecodeDN(internalDN)
accessKeys := madmin.ListAccessKeysLDAPResp{}
if listSTSKeys {
stsKeys, err := globalIAMSys.ListSTSAccounts(ctx, internalDN)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for _, sts := range stsKeys {
expiryTime := sts.Expiration
accessKeys.STSKeys = append(accessKeys.STSKeys, madmin.ServiceAccountInfo{
AccessKey: sts.AccessKey,
Expiration: &expiryTime,
})
}
// if only STS keys, skip if user has no STS keys
if !listServiceAccounts && len(stsKeys) == 0 {
continue
}
}
if listServiceAccounts {
serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, internalDN)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
for _, svc := range serviceAccounts {
expiryTime := svc.Expiration
accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{
AccessKey: svc.AccessKey,
Expiration: &expiryTime,
})
}
// if only service accounts, skip if user has no service accounts
if !listSTSKeys && len(serviceAccounts) == 0 {
continue
}
}
accessKeyMap[externalDN] = accessKeys
}
data, err := json.Marshal(accessKeyMap)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
writeSuccessResponseJSON(w, encryptedData)
}
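The scoping rules at the top of the handler are easy to misread; a standalone sketch of the same decision (requestScope is a hypothetical helper written for illustration, not part of the handler):

package main

import "fmt"

// requestScope mirrors the bulk handler above: "all" and an explicit DN
// list are mutually exclusive, and an empty request defaults to listing
// only the caller's own access keys.
func requestScope(isAll bool, dnList []string) (onlySelf bool, err error) {
	if isAll && len(dnList) > 0 {
		return false, fmt.Errorf("cannot combine all=true with an explicit userDNs list")
	}
	return !isAll && len(dnList) == 0, nil
}

func main() {
	cases := []struct {
		all bool
		dns []string
	}{
		{false, nil},                  // no parameters: list only the caller's keys
		{true, nil},                   // all=true: requires ListUsersAdminAction
		{false, []string{"uid=app1"}}, // explicit DNs: requires ListServiceAccountsAdminAction
		{true, []string{"uid=app1"}},  // rejected with a generic invalid-request error
	}
	for _, tc := range cases {
		onlySelf, err := requestScope(tc.all, tc.dns)
		fmt.Printf("all=%v dns=%v -> onlySelf=%v err=%v\n", tc.all, tc.dns, onlySelf, err)
	}
}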

View file

@@ -374,6 +374,7 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request)
globalNotificationSys.StopRebalance(r.Context())
writeSuccessResponseHeadersOnly(w)
adminLogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
globalNotificationSys.LoadRebalanceMeta(ctx, false)
}
func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {

View file

@@ -120,9 +120,12 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
userReq := madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
}
if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil {
c.Fatalf("Unable to attach policy: %v", err)
}
accessKeys[i] = accessKey

View file

@@ -239,9 +239,12 @@ func (s *TestSuiteIAM) TestUserCreate(c *check) {
c.Assert(v.Status, madmin.AccountEnabled)
// 3. Associate policy and check that user can access
err = s.adm.SetPolicy(ctx, "readwrite", accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{"readwrite"},
User: accessKey,
})
if err != nil {
c.Fatalf("unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
client := s.getUserClient(c, accessKey, secretKey, "")
@@ -348,9 +351,12 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
if err != nil {
c.Fatalf("policy add error: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
// 2.3 check user has access to bucket
c.mustListObjects(ctx, uClient, bucket)
@@ -470,9 +476,12 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
c.mustNotListObjects(ctx, uClient, "testbucket")
// 3.2 associate policy to user
err = s.adm.SetPolicy(ctx, policy1, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy1},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
admClnt := s.getAdminClient(c, accessKey, secretKey, "")
@@ -490,10 +499,22 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
c.Fatalf("policy was missing!")
}
// 3.2 associate policy to user
err = s.adm.SetPolicy(ctx, policy2, accessKey, false)
// Detach policy1 to set up for policy2
_, err = s.adm.DetachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy1},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to detach policy: %v", err)
}
// 3.2 associate policy to user
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy2},
User: accessKey,
})
if err != nil {
c.Fatalf("unable to attach policy: %v", err)
}
// 3.3 check user can create service account implicitly.
@@ -571,9 +592,12 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
c.mustNotListObjects(ctx, uClient, bucket)
// 3.2 associate policy to user
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
// 3.3 check user has access to bucket
c.mustListObjects(ctx, uClient, bucket)
@@ -726,9 +750,12 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
c.mustNotListObjects(ctx, uClient, bucket)
// 3. Associate policy to group and check user got access.
err = s.adm.SetPolicy(ctx, policy, group, true)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
Group: group,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
// 3.1 check user has access to bucket
c.mustListObjects(ctx, uClient, bucket)
@@ -871,9 +898,12 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
// Create an madmin client with user creds
@@ -952,9 +982,12 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
// Create an madmin client with user creds
@@ -1031,9 +1064,12 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("unable to attach policy: %v", err)
}
// 1. Create a service account for the user
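All of the hunks above perform the same mechanical migration from the deprecated SetPolicy to the explicit policy-association API. A minimal sketch of the new call against a local test server (endpoint and credentials are placeholders):

package main

import (
	"context"
	"log"

	"github.com/minio/madmin-go/v3"
)

func main() {
	adm, err := madmin.New("localhost:9000", "minioadmin", "minioadmin", false)
	if err != nil {
		log.Fatalln(err)
	}
	ctx := context.Background()

	// Deprecated form replaced throughout the tests above:
	//   err = adm.SetPolicy(ctx, "readwrite", "newuser", false)
	// New explicit attach; the request names either a User or a Group.
	if _, err := adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
		Policies: []string{"readwrite"},
		User:     "newuser",
	}); err != nil {
		log.Fatalln(err)
	}
}

Note the behavioral difference visible in TestAddServiceAccountPerms: AttachPolicy accumulates mappings instead of overwriting, so swapping policies now requires an explicit DetachPolicy first.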

View file

@@ -2186,7 +2186,7 @@ func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseHeadersOnly(w)
}
// KMSKeyStatusHandler - GET /minio/admin/v3/kms/status
// KMSStatusHandler - GET /minio/admin/v3/kms/status
func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

View file

@@ -301,8 +301,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
// LDAP specific service accounts ops
adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp/ldap/add-service-account").HandlerFunc(adminMiddleware(adminAPI.AddServiceAccountLDAP))
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys").
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)).
Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}")
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)).Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}")
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys-bulk").
HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAPBulk)).Queries("listType", "{listType:.*}")
// LDAP IAM operations
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/ldap/policy-entities").HandlerFunc(adminMiddleware(adminAPI.ListLDAPPolicyMappingEntities))
@@ -340,6 +341,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/list-jobs").HandlerFunc(
adminMiddleware(adminAPI.ListBatchJobs))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/status-job").HandlerFunc(
adminMiddleware(adminAPI.BatchJobStatus))
adminRouter.Methods(http.MethodGet).Path(adminVersion + "/describe-job").HandlerFunc(
adminMiddleware(adminAPI.DescribeBatchJob))
adminRouter.Methods(http.MethodDelete).Path(adminVersion + "/cancel-job").HandlerFunc(

View file

@@ -946,10 +946,20 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {
// writeErrorResponse writes error headers
func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
if err.HTTPStatusCode == http.StatusServiceUnavailable {
// Set retry-after header to indicate user-agents to retry request after 120secs.
switch err.HTTPStatusCode {
case http.StatusServiceUnavailable:
// Set the Retry-After header to indicate to user-agents that they should retry the request after 60 seconds.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "120")
w.Header().Set(xhttp.RetryAfter, "60")
case http.StatusTooManyRequests:
_, deadline := globalAPIConfig.getRequestsPool()
if deadline <= 0 {
// Set the Retry-After header to indicate to user-agents that they should retry the request after 10 seconds.
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
w.Header().Set(xhttp.RetryAfter, "10")
} else {
w.Header().Set(xhttp.RetryAfter, strconv.Itoa(int(deadline.Seconds())))
}
}
switch err.Code {
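
The Retry-After value emitted here is advisory; a minimal client-side sketch of honoring it, assuming a locally running server (the endpoint and fallback delay are illustrative, not part of this change):

package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// retryAfter reads the advisory delay set above for 503 and 429 responses.
func retryAfter(resp *http.Response) time.Duration {
	secs, err := strconv.Atoi(resp.Header.Get("Retry-After"))
	if err != nil || secs <= 0 {
		return time.Second // fallback when the header is absent or malformed
	}
	return time.Duration(secs) * time.Second
}

func main() {
	resp, err := http.Get("http://127.0.0.1:9000/minio/health/live")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	switch resp.StatusCode {
	case http.StatusServiceUnavailable, http.StatusTooManyRequests:
		fmt.Println("retry in", retryAfter(resp))
	}
}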

View file

@ -88,6 +88,8 @@ type healingTracker struct {
ItemsSkipped uint64
BytesSkipped uint64
RetryAttempts uint64
// Add future tracking capabilities here.
// Be sure that they are included in toHealingDisk.
}
@ -363,7 +365,7 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
localDrives := cloneDrives(globalLocalDrives)
globalLocalDrivesMu.RUnlock()
for _, disk := range localDrives {
_, err := disk.GetDiskID()
_, err := disk.DiskInfo(context.Background(), DiskInfoOptions{})
if errors.Is(err, errUnformattedDisk) {
disksToHeal = append(disksToHeal, disk.Endpoint())
continue
@ -382,6 +384,8 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {
var newDiskHealingTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
var errRetryHealing = errors.New("some items failed to heal, we will retry healing this drive again")
func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint) error {
poolIdx, setIdx := endpoint.PoolIdx, endpoint.SetIdx
disk := getStorageViaEndpoint(endpoint)
@ -389,6 +393,17 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
return fmt.Errorf("Unexpected error disk must be initialized by now after formatting: %s", endpoint)
}
_, err := disk.DiskInfo(ctx, DiskInfoOptions{})
if err != nil {
if errors.Is(err, errDriveIsRoot) {
// This is a root drive, ignore and move on
return nil
}
if !errors.Is(err, errUnformattedDisk) {
return err
}
}
// Prevent parallel erasure set healing
locker := z.NewNSLock(minioMetaBucket, fmt.Sprintf("new-drive-healing/%d/%d", poolIdx, setIdx))
lkctx, err := locker.GetLock(ctx, newDiskHealingTimeout)
@ -451,8 +466,27 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
return err
}
healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
// if objects have failed healing, we attempt a retry to heal the drive up to 3 times before giving up.
if tracker.ItemsFailed > 0 && tracker.RetryAttempts < 4 {
tracker.RetryAttempts++
bugLogIf(ctx, tracker.update(ctx))
healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retrying %s time (healed: %d, skipped: %d, failed: %d).", disk,
humanize.Ordinal(int(tracker.RetryAttempts)), tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
return errRetryHealing
}
if tracker.ItemsFailed > 0 {
healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retried %d times (healed: %d, skipped: %d, failed: %d).", disk,
tracker.RetryAttempts-1, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
} else {
if tracker.RetryAttempts > 0 {
healingLogEvent(ctx, "Healing of drive '%s' is complete, retried %d times (healed: %d, skipped: %d).", disk,
tracker.RetryAttempts-1, tracker.ItemsHealed, tracker.ItemsSkipped)
} else {
healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped)
}
}
if serverDebugLog {
tracker.printTo(os.Stdout)
fmt.Printf("\n")
@ -524,7 +558,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
if err := healFreshDisk(ctx, z, disk); err != nil {
globalBackgroundHealState.setDiskHealingStatus(disk, false)
timedout := OperationTimedOut{}
if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) {
if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) && !errors.Is(err, errRetryHealing) {
printEndpointError(disk, err, false)
}
return
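
Reduced to its skeleton, the retry flow above is: heal, count failures, bump a persisted attempt counter, and hand a sentinel error back to the monitor so it reschedules quietly. A hedged, self-contained sketch (healDrive and the counts are stand-ins, not the real tracker):

package main

import (
	"errors"
	"fmt"
)

var errRetryHealing = errors.New("some items failed to heal, we will retry healing this drive again")

// healDrive stands in for one full healing pass; it reports how many
// items still failed afterwards. Here two passes leave failures behind.
func healDrive(pass int) (failed int) {
	if pass < 2 {
		return 1
	}
	return 0
}

func main() {
	const maxRetries = 4 // mirrors tracker.RetryAttempts < 4 above
	retries := 0
	for {
		if failed := healDrive(retries); failed > 0 && retries < maxRetries {
			retries++ // persisted via tracker.update() in the real code
			fmt.Println(errRetryHealing, "- attempt", retries)
			continue
		}
		break
	}
	fmt.Println("healing finished after", retries, "retries")
}

The monitor loop below treats errRetryHealing like context cancellation: it reschedules without printing an endpoint error.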

View file

@ -200,6 +200,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "BytesSkipped")
return
}
case "RetryAttempts":
z.RetryAttempts, err = dc.ReadUint64()
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
default:
err = dc.Skip()
if err != nil {
@ -213,9 +219,9 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
// EncodeMsg implements msgp.Encodable
func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 25
// map header, size 26
// write "ID"
err = en.Append(0xde, 0x0, 0x19, 0xa2, 0x49, 0x44)
err = en.Append(0xde, 0x0, 0x1a, 0xa2, 0x49, 0x44)
if err != nil {
return
}
@ -478,15 +484,25 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "BytesSkipped")
return
}
// write "RetryAttempts"
err = en.Append(0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
if err != nil {
return
}
err = en.WriteUint64(z.RetryAttempts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 25
// map header, size 26
// string "ID"
o = append(o, 0xde, 0x0, 0x19, 0xa2, 0x49, 0x44)
o = append(o, 0xde, 0x0, 0x1a, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "PoolIndex"
o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
@ -566,6 +582,9 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
// string "BytesSkipped"
o = append(o, 0xac, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
o = msgp.AppendUint64(o, z.BytesSkipped)
// string "RetryAttempts"
o = append(o, 0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
o = msgp.AppendUint64(o, z.RetryAttempts)
return
}
@ -763,6 +782,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "BytesSkipped")
return
}
case "RetryAttempts":
z.RetryAttempts, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "RetryAttempts")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
@ -785,6 +810,6 @@ func (z *healingTracker) Msgsize() (s int) {
for za0002 := range z.HealedBuckets {
s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
}
s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size
s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size + 14 + msgp.Uint64Size
return
}
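
The generated byte literals follow directly from the msgpack spec: adding a 26th field bumps the map16 header count from 0x19 to 0x1a, and the 13-byte key "RetryAttempts" gets the fixstr header 0xa0|13 = 0xad, which is also where the `14 + msgp.Uint64Size` term in Msgsize comes from (1 header byte + 13 key bytes). A quick sanity check, assuming github.com/tinylib/msgp is available:

package main

import (
	"fmt"

	"github.com/tinylib/msgp/msgp"
)

func main() {
	// 26 entries exceed a fixmap (max 15), so msgp emits a map16
	// header: 0xde followed by the big-endian count 0x00 0x1a.
	fmt.Printf("%x\n", msgp.AppendMapHeader(nil, 26)) // de001a

	// "RetryAttempts" is 13 bytes: fixstr header 0xad, then the key.
	key := msgp.AppendString(nil, "RetryAttempts")
	fmt.Printf("%x (len %d)\n", key[0], len(key)-1) // ad (len 13)
}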

View file

@ -36,6 +36,7 @@ import (
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/wildcard"
"github.com/minio/pkg/v3/workers"
"github.com/minio/pkg/v3/xtime"
"gopkg.in/yaml.v3"
)
@ -116,7 +117,7 @@ func (p BatchJobExpirePurge) Validate() error {
// BatchJobExpireFilter holds all the filters currently supported for batch expiration
type BatchJobExpireFilter struct {
line, col int
OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
OlderThan xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedBefore *time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`
Metadata []BatchJobKV `yaml:"metadata,omitempty" json:"metadata"`
@ -162,7 +163,7 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
if len(ef.Name) > 0 && !wildcard.Match(ef.Name, obj.Name) {
return false
}
if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan {
if ef.OlderThan > 0 && now.Sub(obj.ModTime) <= ef.OlderThan.D() {
return false
}
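
The type change above is what lets the YAML below say "7d10h" instead of "178h": stdlib time.ParseDuration has no day unit, while xtime.Duration (as used in this diff: YAML unmarshalling plus a D() accessor returning a plain time.Duration) accepts it. A hedged sketch:

package main

import (
	"fmt"
	"time"

	"github.com/minio/pkg/v3/xtime"
	"gopkg.in/yaml.v3"
)

type filter struct {
	OlderThan xtime.Duration `yaml:"olderThan"`
}

func main() {
	// stdlib rejects day suffixes outright.
	if _, err := time.ParseDuration("7d10h"); err != nil {
		fmt.Println("stdlib:", err)
	}

	var f filter
	if err := yaml.Unmarshal([]byte("olderThan: 7d10h"), &f); err != nil {
		fmt.Println(err)
		return
	}
	// D() yields a time.Duration for comparisons like
	// now.Sub(obj.ModTime) <= ef.OlderThan.D() above.
	fmt.Println("xtime:", f.OlderThan.D()) // 178h0m0s
}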
@ -514,7 +515,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
if err := ri.loadOrInit(ctx, api, job); err != nil {
return err
}
@ -552,22 +553,25 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
go func() {
saveTicker := time.NewTicker(10 * time.Second)
defer saveTicker.Stop()
for {
quit := false
after := time.Minute
for !quit {
select {
case <-saveTicker.C:
// persist in-memory state to disk after every 10secs.
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
case <-ctx.Done():
// persist in-memory state immediately before exiting due to context cancellation.
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return
quit = true
case <-saverQuitCh:
// persist in-memory state immediately to disk.
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return
quit = true
}
if quit {
// save immediately if we are quitting
after = 0
}
ctx, cancel := context.WithTimeout(GlobalContext, 30*time.Second) // independent context
batchLogIf(ctx, ri.updateAfter(ctx, api, after, job))
cancel()
}
}()
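
The reshaped loop has one subtle point: the final flush now runs on a fresh 30-second context derived from GlobalContext, so cancellation of the job's own ctx no longer cancels the very save that was meant to record it. A reduced, self-contained sketch of the pattern (persist is a stand-in for ri.updateAfter):

package main

import (
	"context"
	"fmt"
	"time"
)

// saver persists on a ticker and does one last immediate flush
// (after = 0) when asked to quit, instead of returning from inside
// the select and racing the cancelled context.
func saver(ctx context.Context, quit <-chan struct{}, persist func(after time.Duration)) {
	t := time.NewTicker(10 * time.Second)
	defer t.Stop()
	for stop := false; !stop; {
		after := time.Minute
		select {
		case <-t.C:
		case <-ctx.Done():
			stop = true
		case <-quit:
			stop = true
		}
		if stop {
			after = 0 // save immediately if we are quitting
		}
		persist(after)
	}
}

func main() {
	quit := make(chan struct{})
	done := make(chan struct{})
	go func() {
		saver(context.Background(), quit, func(d time.Duration) { fmt.Println("persist, after =", d) })
		close(done)
	}()
	close(quit)
	<-done
}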
@ -584,7 +588,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
versionsCount int
toDel []expireObjInfo
)
failed := true
failed := false
for result := range results {
if result.Err != nil {
failed = true

View file

@ -306,7 +306,7 @@ func (z *BatchJobExpireFilter) DecodeMsg(dc *msgp.Reader) (err error) {
}
switch msgp.UnsafeString(field) {
case "OlderThan":
z.OlderThan, err = dc.ReadDuration()
err = z.OlderThan.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@ -433,7 +433,7 @@ func (z *BatchJobExpireFilter) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
err = en.WriteDuration(z.OlderThan)
err = z.OlderThan.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@ -544,7 +544,11 @@ func (z *BatchJobExpireFilter) MarshalMsg(b []byte) (o []byte, err error) {
// map header, size 8
// string "OlderThan"
o = append(o, 0x88, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
o = msgp.AppendDuration(o, z.OlderThan)
o, err = z.OlderThan.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
// string "CreatedBefore"
o = append(o, 0xad, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x42, 0x65, 0x66, 0x6f, 0x72, 0x65)
if z.CreatedBefore == nil {
@ -613,7 +617,7 @@ func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
switch msgp.UnsafeString(field) {
case "OlderThan":
z.OlderThan, bts, err = msgp.ReadDurationBytes(bts)
bts, err = z.OlderThan.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@ -734,7 +738,7 @@ func (z *BatchJobExpireFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchJobExpireFilter) Msgsize() (s int) {
s = 1 + 10 + msgp.DurationSize + 14
s = 1 + 10 + z.OlderThan.Msgsize() + 14
if z.CreatedBefore == nil {
s += msgp.NilSize
} else {

View file

@ -20,7 +20,7 @@ package cmd
import (
"testing"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
)
func TestParseBatchJobExpire(t *testing.T) {
@ -32,7 +32,7 @@ expire: # Expire objects that match a condition
rules:
- type: object # regular objects with zero or more older versions
name: NAME # match object names that satisfy the wildcard expression.
olderThan: 70h # match objects older than this value
olderThan: 7d10h # match objects older than this value
createdBefore: "2006-01-02T15:04:05.00Z" # match objects created before "date"
tags:
- key: name
@ -64,7 +64,7 @@ expire: # Expire objects that match a condition
delay: 500ms # least amount of delay between each retry
`
var job BatchJobRequest
err := yaml.UnmarshalStrict([]byte(expireYaml), &job)
err := yaml.Unmarshal([]byte(expireYaml), &job)
if err != nil {
t.Fatal("Failed to parse batch-job-expire yaml", err)
}

View file

@ -28,6 +28,7 @@ import (
"math/rand"
"net/http"
"net/url"
"path/filepath"
"runtime"
"strconv"
"strings"
@ -57,6 +58,11 @@ import (
var globalBatchConfig batch.Config
const (
// Keep the completed/failed job stats 3 days before removing it
oldJobsExpiration = 3 * 24 * time.Hour
)
// BatchJobRequest this is an internal data structure not for external consumption.
type BatchJobRequest struct {
ID string `yaml:"-" json:"name"`
@ -262,7 +268,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
if err := ri.loadOrInit(ctx, api, job); err != nil {
return err
}
if ri.Complete {
@ -281,12 +287,12 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
isStorageClassOnly := len(r.Flags.Filter.Metadata) == 1 && strings.EqualFold(r.Flags.Filter.Metadata[0].Key, xhttp.AmzStorageClass)
skip := func(oi ObjectInfo) (ok bool) {
if r.Flags.Filter.OlderThan > 0 && time.Since(oi.ModTime) < r.Flags.Filter.OlderThan {
if r.Flags.Filter.OlderThan > 0 && time.Since(oi.ModTime) < r.Flags.Filter.OlderThan.D() {
// skip all objects that are newer than specified older duration
return true
}
if r.Flags.Filter.NewerThan > 0 && time.Since(oi.ModTime) >= r.Flags.Filter.NewerThan {
if r.Flags.Filter.NewerThan > 0 && time.Since(oi.ModTime) >= r.Flags.Filter.NewerThan.D() {
// skip all objects that are older than specified newer duration
return true
}
@ -722,54 +728,43 @@ const (
batchReplJobDefaultRetryDelay = 250 * time.Millisecond
)
func getJobReportPath(job BatchJobRequest) string {
var fileName string
switch {
case job.Replicate != nil:
fileName = batchReplName
case job.KeyRotate != nil:
fileName = batchKeyRotationName
case job.Expire != nil:
fileName = batchExpireName
}
return pathJoin(batchJobReportsPrefix, job.ID, fileName)
}
func getJobPath(job BatchJobRequest) string {
return pathJoin(batchJobPrefix, job.ID)
}
func (ri *batchJobInfo) load(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
var format, version uint16
switch {
case job.Replicate != nil:
version = batchReplVersionV1
format = batchReplFormat
case job.KeyRotate != nil:
version = batchKeyRotateVersionV1
format = batchKeyRotationFormat
case job.Expire != nil:
version = batchExpireVersionV1
format = batchExpireFormat
func (ri *batchJobInfo) getJobReportPath() (string, error) {
var fileName string
switch madmin.BatchJobType(ri.JobType) {
case madmin.BatchJobReplicate:
fileName = batchReplName
case madmin.BatchJobKeyRotate:
fileName = batchKeyRotationName
case madmin.BatchJobExpire:
fileName = batchExpireName
default:
return errors.New("no supported batch job request specified")
return "", fmt.Errorf("unknown job type: %v", ri.JobType)
}
data, err := readConfig(ctx, api, getJobReportPath(job))
if err != nil {
if errors.Is(err, errConfigNotFound) || isErrObjectNotFound(err) {
ri.Version = int(version)
return pathJoin(batchJobReportsPrefix, ri.JobID, fileName), nil
}
func (ri *batchJobInfo) loadOrInit(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
err := ri.load(ctx, api, job)
if errors.Is(err, errNoSuchJob) {
switch {
case job.Replicate != nil:
ri.Version = batchReplVersionV1
ri.RetryAttempts = batchReplJobDefaultRetries
if job.Replicate.Flags.Retry.Attempts > 0 {
ri.RetryAttempts = job.Replicate.Flags.Retry.Attempts
}
case job.KeyRotate != nil:
ri.Version = batchKeyRotateVersionV1
ri.RetryAttempts = batchKeyRotateJobDefaultRetries
if job.KeyRotate.Flags.Retry.Attempts > 0 {
ri.RetryAttempts = job.KeyRotate.Flags.Retry.Attempts
}
case job.Expire != nil:
ri.Version = batchExpireVersionV1
ri.RetryAttempts = batchExpireJobDefaultRetries
if job.Expire.Retry.Attempts > 0 {
ri.RetryAttempts = job.Expire.Retry.Attempts
@ -779,6 +774,39 @@ func (ri *batchJobInfo) load(ctx context.Context, api ObjectLayer, job BatchJobR
}
return err
}
func (ri *batchJobInfo) load(ctx context.Context, api ObjectLayer, job BatchJobRequest) error {
path, err := job.getJobReportPath()
if err != nil {
batchLogIf(ctx, err)
return err
}
return ri.loadByPath(ctx, api, path)
}
func (ri *batchJobInfo) loadByPath(ctx context.Context, api ObjectLayer, path string) error {
var format, version uint16
switch filepath.Base(path) {
case batchReplName:
version = batchReplVersionV1
format = batchReplFormat
case batchKeyRotationName:
version = batchKeyRotateVersionV1
format = batchKeyRotationFormat
case batchExpireName:
version = batchExpireVersionV1
format = batchExpireFormat
default:
return errors.New("no supported batch job request specified")
}
data, err := readConfig(ctx, api, path)
if err != nil {
if errors.Is(err, errConfigNotFound) || isErrObjectNotFound(err) {
return errNoSuchJob
}
return err
}
if len(data) == 0 {
// Seems to be empty; create a new batchRepl object.
return nil
@ -919,7 +947,12 @@ func (ri *batchJobInfo) updateAfter(ctx context.Context, api ObjectLayer, durati
if err != nil {
return err
}
return saveConfig(ctx, api, getJobReportPath(job), buf)
path, err := ri.getJobReportPath()
if err != nil {
batchLogIf(ctx, err)
return err
}
return saveConfig(ctx, api, path, buf)
}
ri.mu.Unlock()
return nil
@ -944,8 +977,10 @@ func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo,
ri.mu.Lock()
defer ri.mu.Unlock()
if success {
ri.Bucket = bucket
ri.Object = info.Name
}
ri.countItem(info.Size, info.DeleteMarker, success, attempt)
}
@ -971,7 +1006,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
if err := ri.loadOrInit(ctx, api, job); err != nil {
return err
}
if ri.Complete {
@ -987,12 +1022,12 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
selectObj := func(info FileInfo) (ok bool) {
if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan {
if r.Flags.Filter.OlderThan > 0 && time.Since(info.ModTime) < r.Flags.Filter.OlderThan.D() {
// skip all objects that are newer than specified older duration
return false
}
if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan {
if r.Flags.Filter.NewerThan > 0 && time.Since(info.ModTime) >= r.Flags.Filter.NewerThan.D() {
// skip all objects that are older than specified newer duration
return false
}
@ -1071,6 +1106,10 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)
retryAttempts := ri.RetryAttempts
retry := false
for attempts := 1; attempts <= retryAttempts; attempts++ {
attempts := attempts
var (
walkCh = make(chan itemOrErr[ObjectInfo], 100)
slowCh = make(chan itemOrErr[ObjectInfo], 100)
@ -1145,12 +1184,6 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
if walkQuorum == "" {
walkQuorum = "strict"
}
retryAttempts := ri.RetryAttempts
retry := false
for attempts := 1; attempts <= retryAttempts; attempts++ {
attempts := attempts
ctx, cancel := context.WithCancel(ctx)
// one of source/target is s3, skip delete marker and all versions under the same object name.
s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3
@ -1436,10 +1469,24 @@ func (j BatchJobRequest) Validate(ctx context.Context, o ObjectLayer) error {
}
func (j BatchJobRequest) delete(ctx context.Context, api ObjectLayer) {
deleteConfig(ctx, api, getJobReportPath(j))
deleteConfig(ctx, api, getJobPath(j))
}
func (j BatchJobRequest) getJobReportPath() (string, error) {
var fileName string
switch {
case j.Replicate != nil:
fileName = batchReplName
case j.KeyRotate != nil:
fileName = batchKeyRotationName
case j.Expire != nil:
fileName = batchExpireName
default:
return "", errors.New("unknown job type")
}
return pathJoin(batchJobReportsPrefix, j.ID, fileName), nil
}
func (j *BatchJobRequest) save(ctx context.Context, api ObjectLayer) error {
if j.Replicate == nil && j.KeyRotate == nil && j.Expire == nil {
return errInvalidArgument
@ -1520,6 +1567,9 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
writeErrorResponseJSON(ctx, w, toAPIError(ctx, result.Err), r.URL)
return
}
if strings.HasPrefix(result.Item.Name, batchJobReportsPrefix+slashSeparator) {
continue
}
req := &BatchJobRequest{}
if err := req.load(ctx, objectAPI, result.Item.Name); err != nil {
if !errors.Is(err, errNoSuchJob) {
@ -1542,6 +1592,55 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
batchLogIf(ctx, json.NewEncoder(w).Encode(&listResult))
}
// BatchJobStatus - returns the status of a batch job saved in the disk
func (a adminAPIHandlers) BatchJobStatus(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ListBatchJobsAction)
if objectAPI == nil {
return
}
jobID := r.Form.Get("jobId")
if jobID == "" {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, errInvalidArgument), r.URL)
return
}
req := BatchJobRequest{ID: jobID}
if i := strings.Index(jobID, "-"); i > 0 {
switch madmin.BatchJobType(jobID[:i]) {
case madmin.BatchJobReplicate:
req.Replicate = &BatchJobReplicateV1{}
case madmin.BatchJobKeyRotate:
req.KeyRotate = &BatchJobKeyRotateV1{}
case madmin.BatchJobExpire:
req.Expire = &BatchJobExpire{}
default:
writeErrorResponseJSON(ctx, w, toAPIError(ctx, errors.New("job ID format unrecognized")), r.URL)
return
}
}
ri := &batchJobInfo{}
if err := ri.load(ctx, objectAPI, req); err != nil {
if !errors.Is(err, errNoSuchJob) {
batchLogIf(ctx, err)
}
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
buf, err := json.Marshal(madmin.BatchJobStatus{LastMetric: ri.metric()})
if err != nil {
batchLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
w.Write(buf)
}
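
Tying this handler to the ID format change further down: job IDs are now minted as "<type>-<shortuuid><sep><node-index>", so the type can be recovered from everything before the first '-'. A hedged sketch (the sample ID and separator are hypothetical):

package main

import (
	"fmt"
	"strings"
)

// jobTypeOf mirrors the prefix parse in BatchJobStatus above; legacy
// IDs without a type prefix yield an empty string.
func jobTypeOf(jobID string) string {
	if i := strings.Index(jobID, "-"); i > 0 {
		return jobID[:i]
	}
	return ""
}

func main() {
	fmt.Println(jobTypeOf("expire-Kq3mX9rT2bVw:0")) // expire
	fmt.Println(jobTypeOf("legacyUUIDonly"))        // (empty, legacy format)
}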
var errNoSuchJob = errors.New("no such job")
// DescribeBatchJob returns the currently active batch job definition
@ -1633,7 +1732,7 @@ func (a adminAPIHandlers) StartBatchJob(w http.ResponseWriter, r *http.Request)
return
}
job.ID = fmt.Sprintf("%s%s%d", shortuuid.New(), getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
job.ID = fmt.Sprintf("%s-%s%s%d", job.Type(), shortuuid.New(), getKeySeparator(), GetProxyEndpointLocalIndex(globalProxyEndpoints))
job.User = user
job.Started = time.Now()
@ -1721,11 +1820,60 @@ func newBatchJobPool(ctx context.Context, o ObjectLayer, workers int) *BatchJobP
jobCancelers: make(map[string]context.CancelFunc),
}
jpool.ResizeWorkers(workers)
jpool.resume()
randomWait := func() time.Duration {
// randomWait scales with the number of nodes so that resume and cleanup sweeps are not triggered on every node at the same time.
return time.Duration(rand.Float64() * float64(time.Duration(globalEndpoints.NEndpoints())*time.Hour))
}
go func() {
jpool.resume(randomWait)
jpool.cleanupReports(randomWait)
}()
return jpool
}
func (j *BatchJobPool) resume() {
func (j *BatchJobPool) cleanupReports(randomWait func() time.Duration) {
t := time.NewTimer(randomWait())
defer t.Stop()
for {
select {
case <-GlobalContext.Done():
return
case <-t.C:
results := make(chan itemOrErr[ObjectInfo], 100)
ctx, cancel := context.WithCancel(j.ctx)
defer cancel()
if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobReportsPrefix, results, WalkOptions{}); err != nil {
batchLogIf(j.ctx, err)
t.Reset(randomWait())
continue
}
for result := range results {
if result.Err != nil {
batchLogIf(j.ctx, result.Err)
continue
}
ri := &batchJobInfo{}
if err := ri.loadByPath(ctx, j.objLayer, result.Item.Name); err != nil {
batchLogIf(ctx, err)
continue
}
if (ri.Complete || ri.Failed) && time.Since(ri.LastUpdate) > oldJobsExpiration {
deleteConfig(ctx, j.objLayer, result.Item.Name)
}
}
t.Reset(randomWait())
}
}
}
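
The jitter is uniform over one hour per endpoint, so sweeps across a cluster rarely coincide. A back-of-the-envelope sketch (the endpoint count is an assumption for illustration):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// With 4 endpoints, each node waits a uniform random duration in
	// [0, 4h) before its next cleanup sweep.
	nEndpoints := 4
	wait := time.Duration(rand.Float64() * float64(time.Duration(nEndpoints)*time.Hour))
	fmt.Println("next sweep in", wait)
}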
func (j *BatchJobPool) resume(randomWait func() time.Duration) {
time.Sleep(randomWait())
results := make(chan itemOrErr[ObjectInfo], 100)
ctx, cancel := context.WithCancel(j.ctx)
defer cancel()
@ -1738,6 +1886,9 @@ func (j *BatchJobPool) resume() {
batchLogIf(j.ctx, result.Err)
continue
}
if strings.HasPrefix(result.Item.Name, batchJobReportsPrefix+slashSeparator) {
continue
}
// ignore batch-replicate.bin and batch-rotate.bin entries
if strings.HasSuffix(result.Item.Name, slashSeparator) {
continue
@ -1988,7 +2139,7 @@ func (m *batchJobMetrics) purgeJobMetrics() {
var toDeleteJobMetrics []string
m.RLock()
for id, metrics := range m.metrics {
if time.Since(metrics.LastUpdate) > 24*time.Hour && (metrics.Complete || metrics.Failed) {
if time.Since(metrics.LastUpdate) > oldJobsExpiration && (metrics.Complete || metrics.Failed) {
toDeleteJobMetrics = append(toDeleteJobMetrics, id)
}
}

View file

@ -21,8 +21,8 @@ import (
"time"
miniogo "github.com/minio/minio-go/v7"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v3/xtime"
)
//go:generate msgp -file $GOFILE
@ -65,8 +65,8 @@ import (
// BatchReplicateFilter holds all the filters currently supported for batch replication
type BatchReplicateFilter struct {
NewerThan time.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
OlderThan time.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
NewerThan xtime.Duration `yaml:"newerThan,omitempty" json:"newerThan"`
OlderThan xtime.Duration `yaml:"olderThan,omitempty" json:"olderThan"`
CreatedAfter time.Time `yaml:"createdAfter,omitempty" json:"createdAfter"`
CreatedBefore time.Time `yaml:"createdBefore,omitempty" json:"createdBefore"`
Tags []BatchJobKV `yaml:"tags,omitempty" json:"tags"`

View file

@ -1409,13 +1409,13 @@ func (z *BatchReplicateFilter) DecodeMsg(dc *msgp.Reader) (err error) {
}
switch msgp.UnsafeString(field) {
case "NewerThan":
z.NewerThan, err = dc.ReadDuration()
err = z.NewerThan.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "NewerThan")
return
}
case "OlderThan":
z.OlderThan, err = dc.ReadDuration()
err = z.OlderThan.DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@ -1489,7 +1489,7 @@ func (z *BatchReplicateFilter) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
err = en.WriteDuration(z.NewerThan)
err = z.NewerThan.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "NewerThan")
return
@ -1499,7 +1499,7 @@ func (z *BatchReplicateFilter) EncodeMsg(en *msgp.Writer) (err error) {
if err != nil {
return
}
err = en.WriteDuration(z.OlderThan)
err = z.OlderThan.EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@ -1567,10 +1567,18 @@ func (z *BatchReplicateFilter) MarshalMsg(b []byte) (o []byte, err error) {
// map header, size 6
// string "NewerThan"
o = append(o, 0x86, 0xa9, 0x4e, 0x65, 0x77, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
o = msgp.AppendDuration(o, z.NewerThan)
o, err = z.NewerThan.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "NewerThan")
return
}
// string "OlderThan"
o = append(o, 0xa9, 0x4f, 0x6c, 0x64, 0x65, 0x72, 0x54, 0x68, 0x61, 0x6e)
o = msgp.AppendDuration(o, z.OlderThan)
o, err = z.OlderThan.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
}
// string "CreatedAfter"
o = append(o, 0xac, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x66, 0x74, 0x65, 0x72)
o = msgp.AppendTime(o, z.CreatedAfter)
@ -1619,13 +1627,13 @@ func (z *BatchReplicateFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
}
switch msgp.UnsafeString(field) {
case "NewerThan":
z.NewerThan, bts, err = msgp.ReadDurationBytes(bts)
bts, err = z.NewerThan.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "NewerThan")
return
}
case "OlderThan":
z.OlderThan, bts, err = msgp.ReadDurationBytes(bts)
bts, err = z.OlderThan.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "OlderThan")
return
@ -1694,7 +1702,7 @@ func (z *BatchReplicateFilter) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *BatchReplicateFilter) Msgsize() (s int) {
s = 1 + 10 + msgp.DurationSize + 10 + msgp.DurationSize + 13 + msgp.TimeSize + 14 + msgp.TimeSize + 5 + msgp.ArrayHeaderSize
s = 1 + 10 + z.NewerThan.Msgsize() + 10 + z.OlderThan.Msgsize() + 13 + msgp.TimeSize + 14 + msgp.TimeSize + 5 + msgp.ArrayHeaderSize
for za0001 := range z.Tags {
s += z.Tags[za0001].Msgsize()
}

100
cmd/batch-replicate_test.go Normal file
View file

@ -0,0 +1,100 @@
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"testing"
"gopkg.in/yaml.v3"
)
func TestParseBatchJobReplicate(t *testing.T) {
replicateYaml := `
replicate:
apiVersion: v1
# source of the objects to be replicated
source:
type: minio # valid values are "s3" or "minio"
bucket: mytest
prefix: object-prefix1 # 'PREFIX' is optional
# If your source is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
# Either the 'source' or 'target' *must* be the "local" deployment
# endpoint: "http://127.0.0.1:9000"
# # path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
# credentials:
# accessKey: minioadmin # Required
# secretKey: minioadmin # Required
# # sessionToken: SESSION-TOKEN # Optional, only available when rotating credentials are used
snowball: # automatically activated if the source is local
disable: true # optionally turn-off snowball archive transfer
# batch: 100 # up to this many objects per archive
# inmemory: true # indicates if the archive must be staged locally or in-memory
# compress: false # S2/Snappy compressed archive
# smallerThan: 5MiB # create archive for all objects smaller than 5MiB
# skipErrs: false # skips any source side read() errors
# target where the objects must be replicated
target:
type: minio # valid values are "s3" or "minio"
bucket: mytest
prefix: stage # 'PREFIX' is optional
# If your target is the 'local' alias specified to 'mc batch start', then the 'endpoint' and 'credentials' fields are optional and can be omitted
# Either the 'source' or 'target' *must* be the "local" deployment
endpoint: "http://127.0.0.1:9001"
# path: "on|off|auto" # "on" enables path-style bucket lookup. "off" enables virtual host (DNS)-style bucket lookup. Defaults to "auto"
credentials:
accessKey: minioadmin
secretKey: minioadmin
# sessionToken: SESSION-TOKEN # Optional, only available when rotating credentials are used
# NOTE: All flags are optional
# - filtering criteria only applies to source objects that match the criteria
# - configurable notification endpoints
# - configurable retries for the job (each retry skips previously successfully replicated objects)
flags:
filter:
newerThan: "7d10h31s" # match objects newer than this value (e.g. 7d10h31s)
olderThan: "7d" # match objects older than this value (e.g. 7d10h31s)
# createdAfter: "date" # match objects created after "date"
# createdBefore: "date" # match objects created before "date"
## NOTE: tags are not supported when "source" is remote.
tags:
- key: "name"
value: "pick*" # match objects with tag 'name', with all values starting with 'pick'
metadata:
- key: "content-type"
value: "image/*" # match objects with 'content-type', with all values starting with 'image/'
# notify:
# endpoint: "https://notify.endpoint" # notification endpoint to receive job status events
# token: "Bearer xxxxx" # optional authentication token for the notification endpoint
#
# retry:
# attempts: 10 # number of retries for the job before giving up
# delay: "500ms" # least amount of delay between each retry
`
var job BatchJobRequest
err := yaml.Unmarshal([]byte(replicateYaml), &job)
if err != nil {
t.Fatal("Failed to parse batch-job-replicate yaml", err)
}
}

View file

@ -257,7 +257,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
JobType: string(job.Type()),
StartTime: job.Started,
}
if err := ri.load(ctx, api, job); err != nil {
if err := ri.loadOrInit(ctx, api, job); err != nil {
return err
}
if ri.Complete {
@ -389,6 +389,17 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
stopFn(result, err)
batchLogIf(ctx, err)
success = false
if attempts >= retryAttempts {
auditOptions := AuditLogOptions{
Event: "KeyRotate",
APIName: "StartBatchJob",
Bucket: result.Bucket,
Object: result.Name,
VersionID: result.VersionID,
Error: err.Error(),
}
auditLogInternal(ctx, auditOptions)
}
} else {
stopFn(result, nil)
}

View file

@ -114,6 +114,9 @@ var skipEnvs = map[string]struct{}{
"MINIO_ROOT_PASSWORD": {},
"MINIO_ACCESS_KEY": {},
"MINIO_SECRET_KEY": {},
"MINIO_OPERATOR_VERSION": {},
"MINIO_VSPHERE_PLUGIN_VERSION": {},
"MINIO_CI_CD": {},
}
func getServerSystemCfg() *ServerSystemConfig {

View file

@ -1803,12 +1803,15 @@ var (
type ReplicationPool struct {
// atomic ops:
activeWorkers int32
activeLrgWorkers int32
activeMRFWorkers int32
objLayer ObjectLayer
ctx context.Context
priority string
maxWorkers int
maxLWorkers int
mu sync.RWMutex
mrfMU sync.Mutex
resyncer *replicationResyncer
@ -1882,9 +1885,13 @@ func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPool
if maxWorkers > 0 && failedWorkers > maxWorkers {
failedWorkers = maxWorkers
}
maxLWorkers := LargeWorkerCount
if opts.MaxLWorkers > 0 {
maxLWorkers = opts.MaxLWorkers
}
pool := &ReplicationPool{
workers: make([]chan ReplicationWorkerOperation, 0, workers),
lrgworkers: make([]chan ReplicationWorkerOperation, 0, LargeWorkerCount),
lrgworkers: make([]chan ReplicationWorkerOperation, 0, maxLWorkers),
mrfReplicaCh: make(chan ReplicationWorkerOperation, 100000),
mrfWorkerKillCh: make(chan struct{}, failedWorkers),
resyncer: newresyncer(),
@ -1894,9 +1901,10 @@ func NewReplicationPool(ctx context.Context, o ObjectLayer, opts replicationPool
objLayer: o,
priority: priority,
maxWorkers: maxWorkers,
maxLWorkers: maxLWorkers,
}
pool.AddLargeWorkers()
pool.ResizeLrgWorkers(maxLWorkers, 0)
pool.ResizeWorkers(workers, 0)
pool.ResizeFailedWorkers(failedWorkers)
go pool.resyncer.PersistToDisk(ctx, o)
@ -1975,23 +1983,8 @@ func (p *ReplicationPool) AddWorker(input <-chan ReplicationWorkerOperation, opT
}
}
// AddLargeWorkers adds a static number of workers to handle large uploads
func (p *ReplicationPool) AddLargeWorkers() {
for i := 0; i < LargeWorkerCount; i++ {
p.lrgworkers = append(p.lrgworkers, make(chan ReplicationWorkerOperation, 100000))
i := i
go p.AddLargeWorker(p.lrgworkers[i])
}
go func() {
<-p.ctx.Done()
for i := 0; i < LargeWorkerCount; i++ {
xioutil.SafeClose(p.lrgworkers[i])
}
}()
}
// AddLargeWorker adds a replication worker to the static pool for large uploads.
func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation) {
func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation, opTracker *int32) {
for {
select {
case <-p.ctx.Done():
@ -2002,11 +1995,23 @@ func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation
}
switch v := oi.(type) {
case ReplicateObjectInfo:
if opTracker != nil {
atomic.AddInt32(opTracker, 1)
}
globalReplicationStats.incQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
replicateObject(p.ctx, v, p.objLayer)
globalReplicationStats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
if opTracker != nil {
atomic.AddInt32(opTracker, -1)
}
case DeletedObjectReplicationInfo:
if opTracker != nil {
atomic.AddInt32(opTracker, 1)
}
replicateDelete(p.ctx, v, p.objLayer)
if opTracker != nil {
atomic.AddInt32(opTracker, -1)
}
default:
bugLogIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type")
}
@ -2014,6 +2019,30 @@ func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation
}
}
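
The opTracker threaded through here is just an atomic gauge: incremented when a worker picks up an operation, decremented when it finishes, and read via atomic.LoadInt32 in ActiveLrgWorkers below. A minimal sketch of the same pattern with illustrative names:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var active int32
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			atomic.AddInt32(&active, 1)        // operation picked up
			defer atomic.AddInt32(&active, -1) // operation finished
			// ... replicate one object ...
		}()
	}
	wg.Wait()
	fmt.Println("active workers:", atomic.LoadInt32(&active)) // 0 after Wait
}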
// ResizeLrgWorkers sets the replication workers pool for large transfers (>=128MiB) to a new size.
// checkOld can be set to an expected value; if the worker count
// changed while waiting for the lock, the resize is skipped.
func (p *ReplicationPool) ResizeLrgWorkers(n, checkOld int) {
p.mu.Lock()
defer p.mu.Unlock()
if (checkOld > 0 && len(p.lrgworkers) != checkOld) || n == len(p.lrgworkers) || n < 1 {
// Either already satisfied or worker count changed while we waited for the lock.
return
}
for len(p.lrgworkers) < n {
input := make(chan ReplicationWorkerOperation, 100000)
p.lrgworkers = append(p.lrgworkers, input)
go p.AddLargeWorker(input, &p.activeLrgWorkers)
}
for len(p.lrgworkers) > n {
worker := p.lrgworkers[len(p.lrgworkers)-1]
p.lrgworkers = p.lrgworkers[:len(p.lrgworkers)-1]
xioutil.SafeClose(worker)
}
}
// ActiveWorkers returns the number of active workers handling replication traffic.
func (p *ReplicationPool) ActiveWorkers() int {
return int(atomic.LoadInt32(&p.activeWorkers))
@ -2024,6 +2053,11 @@ func (p *ReplicationPool) ActiveMRFWorkers() int {
return int(atomic.LoadInt32(&p.activeMRFWorkers))
}
// ActiveLrgWorkers returns the number of active workers handling replication traffic for objects >= 128MiB.
func (p *ReplicationPool) ActiveLrgWorkers() int {
return int(atomic.LoadInt32(&p.activeLrgWorkers))
}
// ResizeWorkers sets the replication workers pool to a new size.
// checkOld can be set to an expected value; if the worker count
// changed while waiting for the lock, the resize is skipped.
@ -2049,7 +2083,7 @@ func (p *ReplicationPool) ResizeWorkers(n, checkOld int) {
}
// ResizeWorkerPriority sets replication failed workers pool size
func (p *ReplicationPool) ResizeWorkerPriority(pri string, maxWorkers int) {
func (p *ReplicationPool) ResizeWorkerPriority(pri string, maxWorkers, maxLWorkers int) {
var workers, mrfWorkers int
p.mu.Lock()
switch pri {
@ -2076,11 +2110,15 @@ func (p *ReplicationPool) ResizeWorkerPriority(pri string, maxWorkers int) {
if maxWorkers > 0 && mrfWorkers > maxWorkers {
mrfWorkers = maxWorkers
}
if maxLWorkers <= 0 {
maxLWorkers = LargeWorkerCount
}
p.priority = pri
p.maxWorkers = maxWorkers
p.mu.Unlock()
p.ResizeWorkers(workers, 0)
p.ResizeFailedWorkers(mrfWorkers)
p.ResizeLrgWorkers(maxLWorkers, 0)
}
// ResizeFailedWorkers sets replication failed workers pool size
@ -2127,6 +2165,15 @@ func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) {
case p.lrgworkers[h%LargeWorkerCount] <- ri:
default:
globalReplicationPool.queueMRFSave(ri.ToMRFEntry())
p.mu.RLock()
maxLWorkers := p.maxLWorkers
existing := len(p.lrgworkers)
p.mu.RUnlock()
maxLWorkers = min(maxLWorkers, LargeWorkerCount)
if p.ActiveLrgWorkers() < maxLWorkers {
workers := min(existing+1, maxLWorkers)
p.ResizeLrgWorkers(workers, existing)
}
}
return
}
@ -2231,6 +2278,7 @@ func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectReplicationInf
type replicationPoolOpts struct {
Priority string
MaxWorkers int
MaxLWorkers int
}
func initBackgroundReplication(ctx context.Context, objectAPI ObjectLayer) {

View file

@ -685,16 +685,6 @@ func loadEnvVarsFromFiles() {
}
}
if env.IsSet(kms.EnvKMSSecretKeyFile) {
kmsSecret, err := readFromSecret(env.Get(kms.EnvKMSSecretKeyFile, ""))
if err != nil {
logger.Fatal(err, "Unable to read the KMS secret key inherited from secret file")
}
if kmsSecret != "" {
os.Setenv(kms.EnvKMSSecretKey, kmsSecret)
}
}
if env.IsSet(config.EnvConfigEnvFile) {
ekvs, err := minioEnvironFromFile(env.Get(config.EnvConfigEnvFile, ""))
if err != nil && !os.IsNotExist(err) {
@ -834,7 +824,7 @@ func serverHandleEnvVars() {
}
}
globalDisableFreezeOnBoot = env.Get("_MINIO_DISABLE_API_FREEZE_ON_BOOT", "") == "true" || serverDebugLog
globalEnableSyncBoot = env.Get("MINIO_SYNC_BOOT", config.EnableOff) == config.EnableOn
}
func loadRootCredentials() {
@ -843,6 +833,7 @@ func loadRootCredentials() {
// Check both cases and authenticate them if correctly defined
var user, password string
var hasCredentials bool
var legacyCredentials bool
//nolint:gocritic
if env.IsSet(config.EnvRootUser) && env.IsSet(config.EnvRootPassword) {
user = env.Get(config.EnvRootUser, "")
@ -851,6 +842,7 @@ func loadRootCredentials() {
} else if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
user = env.Get(config.EnvAccessKey, "")
password = env.Get(config.EnvSecretKey, "")
legacyCredentials = true
hasCredentials = true
} else if globalServerCtxt.RootUser != "" && globalServerCtxt.RootPwd != "" {
user, password = globalServerCtxt.RootUser, globalServerCtxt.RootPwd
@ -859,8 +851,13 @@ func loadRootCredentials() {
if hasCredentials {
cred, err := auth.CreateCredentials(user, password)
if err != nil {
if legacyCredentials {
logger.Fatal(config.ErrInvalidCredentials(err),
"Unable to validate credentials inherited from the shell environment")
} else {
logger.Fatal(config.ErrInvalidRootUserCredentials(err),
"Unable to validate credentials inherited from the shell environment")
}
}
if env.IsSet(config.EnvAccessKey) && env.IsSet(config.EnvSecretKey) {
msg := fmt.Sprintf("WARNING: %s and %s are deprecated.\n"+
@ -874,6 +871,12 @@ func loadRootCredentials() {
} else {
globalActiveCred = auth.DefaultCredentials
}
var err error
globalNodeAuthToken, err = authenticateNode(globalActiveCred.AccessKey, globalActiveCred.SecretKey)
if err != nil {
logger.Fatal(err, "Unable to generate internode credentials")
}
}
// Initialize KMS global variable after validating and loading the configuration.

View file

@ -101,7 +101,7 @@ func initHelp() {
config.HelpKV{
Key: config.SubnetSubSys,
Type: "string",
Description: "register the cluster to MinIO SUBNET",
Description: "register Enterprise license for the cluster",
Optional: true,
},
config.HelpKV{

View file

@ -227,7 +227,9 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
binary.LittleEndian.PutUint64(tmp, cycleInfo.next)
tmp, _ = cycleInfo.MarshalMsg(tmp)
err = saveConfig(ctx, objAPI, dataUsageBloomNamePath, tmp)
scannerLogIf(ctx, err, dataUsageBloomNamePath)
if err != nil {
scannerLogIf(ctx, fmt.Errorf("%w, Object %s", err, dataUsageBloomNamePath))
}
}
}
}
@ -797,7 +799,9 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
}, madmin.HealItemObject)
stopFn(int(ver.Size))
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
scannerLogIf(ctx, err, fiv.Name)
if err != nil {
scannerLogIf(ctx, fmt.Errorf("%w, Object %s/%s/%s", err, bucket, fiv.Name, ver.VersionID))
}
}
if err == nil {
successVersions++
@ -1271,7 +1275,7 @@ func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer,
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return false
}
ilmLogIf(ctx, err)
ilmLogIf(ctx, fmt.Errorf("expireTransitionedObject(%s, %s): %w", obj.Bucket, obj.Name, err))
return false
}
timeILM(1)
@ -1324,7 +1328,7 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
return false
}
// Assume it is still there.
ilmLogOnceIf(ctx, err, "non-transition-expiry")
ilmLogOnceIf(ctx, fmt.Errorf("DeleteObject(%s, %s): %w", obj.Bucket, obj.Name, err), "non-transition-expiry"+obj.Name)
return false
}
if dobj.Name == "" {

View file

@ -61,10 +61,9 @@ func NewDummyDataGen(totalLength, skipOffset int64) io.ReadSeeker {
}
skipOffset %= int64(len(alphabets))
as := make([]byte, 2*len(alphabets))
copy(as, alphabets)
copy(as[len(alphabets):], alphabets)
b := as[skipOffset : skipOffset+int64(len(alphabets))]
const multiply = 100
as := bytes.Repeat(alphabets, multiply)
b := as[skipOffset : skipOffset+int64(len(alphabets)*(multiply-1))]
return &DummyDataGen{
length: totalLength,
b: b,

View file

@ -134,13 +134,18 @@ func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
SSES3SinglePartObjects := make(map[int]bool)
for i, object := range batch {
if kind, ok := crypto.IsEncrypted(object.UserDefined); ok && kind == crypto.S3 && !crypto.IsMultiPart(object.UserDefined) {
ETag, err := etag.Parse(object.ETag)
if err != nil {
continue
}
if ETag.IsEncrypted() {
SSES3SinglePartObjects[i] = true
metadata = append(metadata, object.UserDefined)
buckets = append(buckets, object.Bucket)
names = append(names, object.Name)
}
}
}
// If there are no SSE-S3 single-part objects
// we can skip the decryption process. However,
@ -190,7 +195,7 @@ func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
if err != nil {
return err
}
if SSES3SinglePartObjects[i] && ETag.IsEncrypted() {
if SSES3SinglePartObjects[i] {
ETag, err = etag.Decrypt(keys[0][:], ETag)
if err != nil {
return err

View file

@ -629,7 +629,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
}
for i, v := range result.Before.Drives {
if v.Endpoint == disk.String() {
if v.Endpoint == disk.Endpoint().String() {
result.After.Drives[i].State = madmin.DriveStateOk
}
}

View file

@ -358,7 +358,7 @@ func (p *poolMeta) validate(pools []*erasureSets) (bool, error) {
update = true
}
if ok && pi.completed {
return false, fmt.Errorf("pool(%s) = %s is decommissioned, please remove from server command line", humanize.Ordinal(pi.position+1), k)
logger.LogIf(GlobalContext, "decommission", fmt.Errorf("pool(%s) = %s is decommissioned, please remove from server command line", humanize.Ordinal(pi.position+1), k))
}
}

View file

@ -134,7 +134,7 @@ func TestPoolMetaValidate(t *testing.T) {
meta: nmeta1,
pools: pools,
name: "Invalid-Completed-Pool-Not-Removed",
expectedErr: true,
expectedErr: false,
expectedUpdate: false,
},
{

View file

@ -119,11 +119,8 @@ func (z *erasureServerPools) loadRebalanceMeta(ctx context.Context) error {
}
z.rebalMu.Lock()
if len(r.PoolStats) == len(z.serverPools) {
z.rebalMeta = r
} else {
z.updateRebalanceStats(ctx)
}
z.rebalMu.Unlock()
return nil
@ -147,24 +144,16 @@ func (z *erasureServerPools) updateRebalanceStats(ctx context.Context) error {
}
}
if ok {
lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName)
lkCtx, err := lock.GetLock(ctx, globalOperationTimeout)
if err != nil {
rebalanceLogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err))
return err
}
defer lock.Unlock(lkCtx)
ctx = lkCtx.Context()
noLockOpts := ObjectOptions{NoLock: true}
return z.rebalMeta.saveWithOpts(ctx, z.serverPools[0], noLockOpts)
return z.rebalMeta.save(ctx, z.serverPools[0])
}
return nil
}
func (z *erasureServerPools) findIndex(index int) int {
if z.rebalMeta == nil {
return 0
}
for i := 0; i < len(z.rebalMeta.PoolStats); i++ {
if i == index {
return index
@ -277,6 +266,10 @@ func (z *erasureServerPools) bucketRebalanceDone(bucket string, poolIdx int) {
z.rebalMu.Lock()
defer z.rebalMu.Unlock()
if z.rebalMeta == nil {
return
}
ps := z.rebalMeta.PoolStats[poolIdx]
if ps == nil {
return
@ -331,6 +324,10 @@ func (r *rebalanceMeta) loadWithOpts(ctx context.Context, store objectIO, opts O
}
func (r *rebalanceMeta) saveWithOpts(ctx context.Context, store objectIO, opts ObjectOptions) error {
if r == nil {
return nil
}
data := make([]byte, 4, r.Msgsize()+4)
// Initialize the header.
@ -353,8 +350,15 @@ func (z *erasureServerPools) IsRebalanceStarted() bool {
z.rebalMu.RLock()
defer z.rebalMu.RUnlock()
if r := z.rebalMeta; r != nil {
if r.StoppedAt.IsZero() {
r := z.rebalMeta
if r == nil {
return false
}
if !r.StoppedAt.IsZero() {
return false
}
for _, ps := range r.PoolStats {
if ps.Participating && ps.Info.Status != rebalCompleted {
return true
}
}
@ -369,7 +373,7 @@ func (z *erasureServerPools) IsPoolRebalancing(poolIndex int) bool {
if !r.StoppedAt.IsZero() {
return false
}
ps := z.rebalMeta.PoolStats[poolIndex]
ps := r.PoolStats[poolIndex]
return ps.Participating && ps.Info.Status == rebalStarted
}
return false
@ -794,8 +798,10 @@ func (z *erasureServerPools) saveRebalanceStats(ctx context.Context, poolIdx int
case rebalSaveStoppedAt:
r.StoppedAt = time.Now()
case rebalSaveStats:
if z.rebalMeta != nil {
r.PoolStats[poolIdx] = z.rebalMeta.PoolStats[poolIdx]
}
}
z.rebalMeta = r
return z.rebalMeta.saveWithOpts(ctx, z.serverPools[0], noLockOpts)

View file

@ -1526,15 +1526,13 @@ func (z *erasureServerPools) listObjectsGeneric(ctx context.Context, bucket, pre
loi.NextMarker = last.Name
}
if merged.lastSkippedEntry != "" {
if merged.lastSkippedEntry > loi.NextMarker {
// An object hidden by ILM was found during listing. Since the number of entries
// fetched from drives is limited, set IsTruncated to true to ask the s3 client
// to continue listing if it wishes in order to find if there is more objects.
loi.IsTruncated = true
if loi.IsTruncated && merged.lastSkippedEntry > loi.NextMarker {
// An object hidden by ILM was found during a truncated listing. Since the number of entries
// fetched from drives is limited by max-keys, we should use the last ILM filtered entry
// as a continuation token if it is lexically higher than the last visible object so that the
// next call of WalkDir() with the max-keys can reach new objects not seen previously.
loi.NextMarker = merged.lastSkippedEntry
}
}
if loi.NextMarker != "" {
loi.NextMarker = opts.encodeMarker(loi.NextMarker)
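
The fix above hinges on standard S3-style pagination: a truncated listing hands back a marker, and advancing that marker past ILM-hidden entries guarantees the next page makes progress. A self-contained sketch of the contract (page is a stand-in for one ListObjects call):

package main

import "fmt"

// page returns up to maxKeys names after marker, plus a continuation
// marker and whether the listing was truncated.
func page(all []string, marker string, maxKeys int) (out []string, next string, truncated bool) {
	for _, name := range all {
		if name <= marker {
			continue
		}
		if len(out) == maxKeys {
			return out, out[len(out)-1], true
		}
		out = append(out, name)
	}
	return out, "", false
}

func main() {
	objects := []string{"a", "b", "c", "d", "e"}
	marker := ""
	for {
		names, next, truncated := page(objects, marker, 2)
		fmt.Println(names)
		if !truncated {
			break
		}
		// The server may return a NextMarker lexically beyond the last
		// visible name (e.g. past ILM-skipped entries); the client
		// simply feeds it back in.
		marker = next
	}
}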
@ -2343,12 +2341,18 @@ func (z *erasureServerPools) HealObjects(ctx context.Context, bucket, prefix str
var poolErrs [][]error
for idx, erasureSet := range z.serverPools {
if opts.Pool != nil && *opts.Pool != idx {
continue
}
if z.IsSuspended(idx) {
continue
}
errs := make([]error, len(erasureSet.sets))
var wg sync.WaitGroup
for idx, set := range erasureSet.sets {
if opts.Set != nil && *opts.Set != idx {
continue
}
wg.Add(1)
go func(idx int, set *erasureObjects) {
defer wg.Done()
@ -2441,6 +2445,7 @@ const (
type HealthOptions struct {
Maintenance bool
DeploymentType string
NoLogging bool
}
// HealthResult returns the current state of the system, also
@ -2477,7 +2482,7 @@ func (hr HealthResult) String() string {
if i == 0 {
str.WriteString(")")
} else {
str.WriteString("), ")
str.WriteString(") | ")
}
}
return str.String()
@ -2600,7 +2605,7 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea
})
healthy := erasureSetUpCount[poolIdx][setIdx].online >= poolWriteQuorums[poolIdx]
if !healthy {
if !healthy && !opts.NoLogging {
storageLogIf(logger.SetReqInfo(ctx, reqInfo),
fmt.Errorf("Write quorum could not be established on pool: %d, set: %d, expected write quorum: %d, drives-online: %d",
poolIdx, setIdx, poolWriteQuorums[poolIdx], erasureSetUpCount[poolIdx][setIdx].online), logger.FatalKind)
@ -2608,7 +2613,7 @@ func (z *erasureServerPools) Health(ctx context.Context, opts HealthOptions) Hea
result.Healthy = result.Healthy && healthy
healthyRead := erasureSetUpCount[poolIdx][setIdx].online >= poolReadQuorums[poolIdx]
if !healthyRead {
if !healthyRead && !opts.NoLogging {
storageLogIf(logger.SetReqInfo(ctx, reqInfo),
fmt.Errorf("Read quorum could not be established on pool: %d, set: %d, expected read quorum: %d, drives-online: %d",
poolIdx, setIdx, poolReadQuorums[poolIdx], erasureSetUpCount[poolIdx][setIdx].online))

View file

@ -120,13 +120,6 @@ func connectEndpoint(endpoint Endpoint) (StorageAPI, *formatErasureV3, error) {
format, err := loadFormatErasure(disk, false)
if err != nil {
if errors.Is(err, errUnformattedDisk) {
info, derr := disk.DiskInfo(context.TODO(), DiskInfoOptions{})
if derr != nil && info.RootDisk {
disk.Close()
return nil, nil, fmt.Errorf("Drive: %s is a root drive", disk)
}
}
disk.Close()
return nil, nil, fmt.Errorf("Drive: %s returned %w", disk, err) // make sure to '%w' to wrap the error
}
@ -196,7 +189,7 @@ func (s *erasureSets) Legacy() (ok bool) {
// connectDisks - attempt to connect all the endpoints, loads format
// and re-arranges the disks in proper position.
func (s *erasureSets) connectDisks() {
func (s *erasureSets) connectDisks(log bool) {
defer func() {
s.lastConnectDisksOpTime = time.Now()
}()
@ -230,9 +223,11 @@ func (s *erasureSets) connectDisks() {
if err != nil {
if endpoint.IsLocal && errors.Is(err, errUnformattedDisk) {
globalBackgroundHealState.pushHealLocalDisks(endpoint)
} else {
} else if !errors.Is(err, errDriveIsRoot) {
if log {
printEndpointError(endpoint, err, true)
}
}
return
}
if disk.IsLocal() && disk.Healing() != nil {
@ -292,7 +287,7 @@ func (s *erasureSets) monitorAndConnectEndpoints(ctx context.Context, monitorInt
time.Sleep(time.Duration(r.Float64() * float64(time.Second)))
// Pre-emptively connect the disks if possible.
s.connectDisks()
s.connectDisks(false)
monitor := time.NewTimer(monitorInterval)
defer monitor.Stop()
@ -306,7 +301,7 @@ func (s *erasureSets) monitorAndConnectEndpoints(ctx context.Context, monitorInt
console.Debugln("running drive monitoring")
}
s.connectDisks()
s.connectDisks(true)
// Reset the timer for next interval
monitor.Reset(monitorInterval)

View file

@ -102,6 +102,8 @@ func diskErrToDriveState(err error) (state string) {
state = madmin.DriveStatePermission
case errors.Is(err, errFaultyDisk):
state = madmin.DriveStateFaulty
case errors.Is(err, errDriveIsRoot):
state = madmin.DriveStateRootMount
case err == nil:
state = madmin.DriveStateOk
default:

View file

@ -441,6 +441,8 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
continue
}
var versionHealed bool
res, err := er.HealObject(ctx, bucket, encodedEntryName,
version.VersionID, madmin.HealOpts{
ScanMode: scanMode,
@ -453,15 +455,22 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
versionNotFound++
continue
}
// If not deleted, assume they failed.
} else {
// Look for the healing results
if res.After.Drives[tracker.DiskIndex].State == madmin.DriveStateOk {
versionHealed = true
}
}
if versionHealed {
result = healEntrySuccess(uint64(version.Size))
} else {
result = healEntryFailure(uint64(version.Size))
if version.VersionID != "" {
healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s-v(%s): %w", bucket, version.Name, version.VersionID, err))
} else {
healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, version.Name, err))
}
} else {
result = healEntrySuccess(uint64(res.ObjectSize))
}
if !send(result) {
@ -509,7 +518,11 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
jt.Take()
go healEntry(bucket, *entry)
},
finished: nil,
finished: func(errs []error) {
if countErrs(errs, nil) != len(errs) {
retErr = fmt.Errorf("one or more errors reported during listing: %v", errors.Join(errs...))
}
},
})
jt.Wait() // synchronize all the concurrent heal jobs
if err != nil {
@ -517,7 +530,10 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
// we let the caller retry this disk again for the
// buckets it failed to list.
retErr = err
healingLogIf(ctx, fmt.Errorf("listing failed with: %v on bucket: %v", err, bucket))
}
if retErr != nil {
healingLogIf(ctx, fmt.Errorf("listing failed with: %v on bucket: %v", retErr, bucket))
continue
}

View file

@ -310,6 +310,7 @@ var (
globalBootTime = UTCNow()
globalActiveCred auth.Credentials
globalNodeAuthToken string
globalSiteReplicatorCred siteReplicatorCred
// Captures if root credentials are set via ENV.
@ -449,8 +450,8 @@ var (
// dynamic sleeper for multipart expiration routine
deleteMultipartCleanupSleeper = newDynamicSleeper(5, 25*time.Millisecond, false)
// Is _MINIO_DISABLE_API_FREEZE_ON_BOOT set?
globalDisableFreezeOnBoot bool
// Is MINIO_SYNC_BOOT set?
globalEnableSyncBoot bool
// Contains NIC interface name used for internode communication
globalInternodeInterface string

View file

@ -41,17 +41,19 @@ func initGlobalGrid(ctx context.Context, eps EndpointServerPools) error {
// Pass Dialer for websocket grid, make sure we do not
// provide any DriveOPTimeout() function, as that is not
// useful over persistent connections.
Dialer: grid.ContextDialer(xhttp.DialContextWithLookupHost(lookupHost, xhttp.NewInternodeDialContext(rest.DefaultTimeout, globalTCPOptions.ForWebsocket()))),
Local: local,
Hosts: hosts,
AddAuth: newCachedAuthToken(),
AuthRequest: storageServerRequestValidate,
BlockConnect: globalGridStart,
TLSConfig: &tls.Config{
Dialer: grid.ConnectWS(
grid.ContextDialer(xhttp.DialContextWithLookupHost(lookupHost, xhttp.NewInternodeDialContext(rest.DefaultTimeout, globalTCPOptions.ForWebsocket()))),
newCachedAuthToken(),
&tls.Config{
RootCAs: globalRootCAs,
CipherSuites: fips.TLSCiphers(),
CurvePreferences: fips.TLSCurveIDs(),
},
}),
Local: local,
Hosts: hosts,
AuthToken: validateStorageRequestToken,
AuthFn: newCachedAuthToken(),
BlockConnect: globalGridStart,
// Record incoming and outgoing bytes.
Incoming: globalConnStats.incInternodeInputBytes,
Outgoing: globalConnStats.incInternodeOutputBytes,
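
The grid setup above layers `grid.ConnectWS` over a context dialer so auth and TLS ride along with every internode dial. A generic sketch of that wrapper-composition style, with illustrative names rather than MinIO's actual grid API:

package main

import (
	"context"
	"net"
	"time"
)

// DialContextFn mirrors the shape of the dialers being composed above.
type DialContextFn func(ctx context.Context, network, addr string) (net.Conn, error)

// withTimeout wraps a dialer so every connection attempt is bounded; each
// layer accepts a dialer and returns a new one with extra behavior, the same
// composition style as grid.ConnectWS over grid.ContextDialer.
func withTimeout(next DialContextFn, d time.Duration) DialContextFn {
	return func(ctx context.Context, network, addr string) (net.Conn, error) {
		ctx, cancel := context.WithTimeout(ctx, d)
		defer cancel()
		return next(ctx, network, addr)
	}
}

func main() {
	base := (&net.Dialer{}).DialContext
	dial := withTimeout(base, 5*time.Second)
	_, _ = dial(context.Background(), "tcp", "127.0.0.1:9000")
}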

View file

@ -46,6 +46,7 @@ type apiConfig struct {
corsAllowOrigins []string
replicationPriority string
replicationMaxWorkers int
replicationMaxLWorkers int
transitionWorkers int
staleUploadsExpiry time.Duration
@ -170,11 +171,12 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int, legacy bool) {
}
t.listQuorum = listQuorum
if globalReplicationPool != nil &&
(cfg.ReplicationPriority != t.replicationPriority || cfg.ReplicationMaxWorkers != t.replicationMaxWorkers) {
globalReplicationPool.ResizeWorkerPriority(cfg.ReplicationPriority, cfg.ReplicationMaxWorkers)
(cfg.ReplicationPriority != t.replicationPriority || cfg.ReplicationMaxWorkers != t.replicationMaxWorkers || cfg.ReplicationMaxLWorkers != t.replicationMaxLWorkers) {
globalReplicationPool.ResizeWorkerPriority(cfg.ReplicationPriority, cfg.ReplicationMaxWorkers, cfg.ReplicationMaxLWorkers)
}
t.replicationPriority = cfg.ReplicationPriority
t.replicationMaxWorkers = cfg.ReplicationMaxWorkers
t.replicationMaxLWorkers = cfg.ReplicationMaxLWorkers
// N.B. api.transition_workers will be deprecated
if globalTransitionState != nil {
@ -383,12 +385,14 @@ func (t *apiConfig) getReplicationOpts() replicationPoolOpts {
return replicationPoolOpts{
Priority: "auto",
MaxWorkers: WorkerMaxLimit,
MaxLWorkers: LargeWorkerCount,
}
}
return replicationPoolOpts{
Priority: t.replicationPriority,
MaxWorkers: t.replicationMaxWorkers,
MaxLWorkers: t.replicationMaxLWorkers,
}
}
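
`ResizeWorkerPriority` lets the replication pool pick up new worker limits without a restart. A toy sketch of a resizable, semaphore-bounded pool; names and structure are illustrative, not MinIO's implementation:

package main

import (
	"fmt"
	"sync"
)

// pool is a toy worker pool bounded by a semaphore channel; Resize swaps the
// semaphore so the new limit applies to future jobs.
type pool struct {
	mu  sync.Mutex
	sem chan struct{}
}

func newPool(workers int) *pool { return &pool{sem: make(chan struct{}, workers)} }

func (p *pool) Resize(workers int) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.sem = make(chan struct{}, workers) // in-flight jobs drain the old semaphore
}

func (p *pool) Do(job func()) {
	p.mu.Lock()
	sem := p.sem
	p.mu.Unlock()
	sem <- struct{}{}
	go func() {
		defer func() { <-sem }()
		job()
	}()
}

func main() {
	p := newPool(2)
	p.Resize(8) // e.g. applied from a runtime config update
	done := make(chan struct{})
	p.Do(func() { fmt.Println("replicate object"); close(done) })
	<-done
}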

View file

@ -29,14 +29,35 @@ import (
const unavailable = "offline"
// ClusterCheckHandler returns if the server is ready for requests.
func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ClusterCheckHandler")
func checkHealth(w http.ResponseWriter) ObjectLayer {
objLayer := newObjectLayerFn()
if objLayer == nil {
w.Header().Set(xhttp.MinIOServerStatus, unavailable)
writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
return nil
}
if !globalBucketMetadataSys.Initialized() {
w.Header().Set(xhttp.MinIOServerStatus, "bucket-metadata-offline")
writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
return nil
}
if !globalIAMSys.Initialized() {
w.Header().Set(xhttp.MinIOServerStatus, "iam-offline")
writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
return nil
}
return objLayer
}
// ClusterCheckHandler returns if the server is ready for requests.
func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ClusterCheckHandler")
objLayer := checkHealth(w)
if objLayer == nil {
return
}
@ -72,10 +93,8 @@ func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) {
func ClusterReadCheckHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ClusterReadCheckHandler")
objLayer := newObjectLayerFn()
objLayer := checkHealth(w)
if objLayer == nil {
w.Header().Set(xhttp.MinIOServerStatus, unavailable)
writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
return
}
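
`checkHealth` factors the shared readiness probes out of both cluster health handlers. A self-contained sketch of the same shape; the subsystem probes and the exact status header are stand-ins, not MinIO's internals:

package main

import "net/http"

// Stubs standing in for the real probes: object layer, bucket metadata, IAM.
func objectLayerReady() bool    { return true }
func bucketMetadataReady() bool { return true }
func iamReady() bool            { return true }

// ready writes a 503 with a reason header and returns false if any dependent
// subsystem is still initializing; handlers bail out early on false.
func ready(w http.ResponseWriter) bool {
	checks := []struct {
		ok     bool
		reason string
	}{
		{objectLayerReady(), "offline"},
		{bucketMetadataReady(), "bucket-metadata-offline"},
		{iamReady(), "iam-offline"},
	}
	for _, c := range checks {
		if !c.ok {
			w.Header().Set("X-Minio-Server-Status", c.reason)
			w.WriteHeader(http.StatusServiceUnavailable)
			return false
		}
	}
	return true
}

func main() {
	http.HandleFunc("/minio/health/cluster", func(w http.ResponseWriter, r *http.Request) {
		if !ready(w) {
			return
		}
		w.WriteHeader(http.StatusOK)
	})
	_ = http.ListenAndServe(":9000", nil)
}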

View file

@ -439,23 +439,44 @@ func (iamOS *IAMObjectStore) listAllIAMConfigItems(ctx context.Context) (res map
return res, nil
}
const (
maxIAMLoadOpTime = 5 * time.Second
)
// Assumes cache is locked by caller.
func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache) error {
func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache, firstTime bool) error {
bootstrapTraceMsgFirstTime := func(s string) {
if firstTime {
bootstrapTraceMsg(s)
}
}
if iamOS.objAPI == nil {
return errServerNotInitialized
}
bootstrapTraceMsg("loading all IAM items")
bootstrapTraceMsgFirstTime("loading all IAM items")
setDefaultCannedPolicies(cache.iamPolicyDocsMap)
listStartTime := UTCNow()
listedConfigItems, err := iamOS.listAllIAMConfigItems(ctx)
if err != nil {
return fmt.Errorf("unable to list IAM data: %w", err)
}
if took := time.Since(listStartTime); took > maxIAMLoadOpTime {
var s strings.Builder
for k, v := range listedConfigItems {
s.WriteString(fmt.Sprintf(" %s: %d items\n", k, len(v)))
}
logger.Info("listAllIAMConfigItems took %.2fs with contents:\n%s", took.Seconds(), s.String())
}
// Loads things in the same order as `LoadIAMCache()`
bootstrapTraceMsg("loading policy documents")
bootstrapTraceMsgFirstTime("loading policy documents")
policyLoadStartTime := UTCNow()
policiesList := listedConfigItems[policiesListKey]
for _, item := range policiesList {
policyName := path.Dir(item)
@ -463,58 +484,88 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iam
return fmt.Errorf("unable to load the policy doc `%s`: %w", policyName, err)
}
}
setDefaultCannedPolicies(cache.iamPolicyDocsMap)
if took := time.Since(policyLoadStartTime); took > maxIAMLoadOpTime {
logger.Info("Policy docs load took %.2fs (for %d items)", took.Seconds(), len(policiesList))
}
if iamOS.usersSysType == MinIOUsersSysType {
bootstrapTraceMsg("loading regular IAM users")
bootstrapTraceMsgFirstTime("loading regular IAM users")
regUsersLoadStartTime := UTCNow()
regUsersList := listedConfigItems[usersListKey]
for _, item := range regUsersList {
userName := path.Dir(item)
if err := iamOS.loadUser(ctx, userName, regUser, cache.iamUsersMap); err != nil && err != errNoSuchUser {
return fmt.Errorf("unable to load the user `%s`: %w", userName, err)
return fmt.Errorf("unable to load the user: %w", err)
}
}
if took := time.Since(regUsersLoadStartTime); took > maxIAMLoadOpTime {
actualLoaded := len(cache.iamUsersMap)
logger.Info("Reg. users load took %.2fs (for %d items with %d expired items)", took.Seconds(),
len(regUsersList), len(regUsersList)-actualLoaded)
}
bootstrapTraceMsg("loading regular IAM groups")
bootstrapTraceMsgFirstTime("loading regular IAM groups")
groupsLoadStartTime := UTCNow()
groupsList := listedConfigItems[groupsListKey]
for _, item := range groupsList {
group := path.Dir(item)
if err := iamOS.loadGroup(ctx, group, cache.iamGroupsMap); err != nil && err != errNoSuchGroup {
return fmt.Errorf("unable to load the group `%s`: %w", group, err)
return fmt.Errorf("unable to load the group: %w", err)
}
}
if took := time.Since(groupsLoadStartTime); took > maxIAMLoadOpTime {
logger.Info("Groups load took %.2fs (for %d items)", took.Seconds(), len(groupsList))
}
}
bootstrapTraceMsg("loading user policy mapping")
bootstrapTraceMsgFirstTime("loading user policy mapping")
userPolicyMappingLoadStartTime := UTCNow()
userPolicyMappingsList := listedConfigItems[policyDBUsersListKey]
for _, item := range userPolicyMappingsList {
userName := strings.TrimSuffix(item, ".json")
if err := iamOS.loadMappedPolicy(ctx, userName, regUser, false, cache.iamUserPolicyMap); err != nil && !errors.Is(err, errNoSuchPolicy) {
return fmt.Errorf("unable to load the policy mapping for the user `%s`: %w", userName, err)
return fmt.Errorf("unable to load the policy mapping for the user: %w", err)
}
}
if took := time.Since(userPolicyMappingLoadStartTime); took > maxIAMLoadOpTime {
logger.Info("User policy mappings load took %.2fs (for %d items)", took.Seconds(), len(userPolicyMappingsList))
}
bootstrapTraceMsg("loading group policy mapping")
bootstrapTraceMsgFirstTime("loading group policy mapping")
groupPolicyMappingLoadStartTime := UTCNow()
groupPolicyMappingsList := listedConfigItems[policyDBGroupsListKey]
for _, item := range groupPolicyMappingsList {
groupName := strings.TrimSuffix(item, ".json")
if err := iamOS.loadMappedPolicy(ctx, groupName, regUser, true, cache.iamGroupPolicyMap); err != nil && !errors.Is(err, errNoSuchPolicy) {
return fmt.Errorf("unable to load the policy mapping for the group `%s`: %w", groupName, err)
return fmt.Errorf("unable to load the policy mapping for the group: %w", err)
}
}
if took := time.Since(groupPolicyMappingLoadStartTime); took > maxIAMLoadOpTime {
logger.Info("Group policy mappings load took %.2fs (for %d items)", took.Seconds(), len(groupPolicyMappingsList))
}
bootstrapTraceMsg("loading service accounts")
bootstrapTraceMsgFirstTime("loading service accounts")
svcAccLoadStartTime := UTCNow()
svcAccList := listedConfigItems[svcAccListKey]
svcUsersMap := make(map[string]UserIdentity, len(svcAccList))
for _, item := range svcAccList {
userName := path.Dir(item)
if err := iamOS.loadUser(ctx, userName, svcUser, svcUsersMap); err != nil && err != errNoSuchUser {
return fmt.Errorf("unable to load the service account `%s`: %w", userName, err)
return fmt.Errorf("unable to load the service account: %w", err)
}
}
if took := time.Since(svcAccLoadStartTime); took > maxIAMLoadOpTime {
logger.Info("Service accounts load took %.2fs (for %d items with %d expired items)", took.Seconds(),
len(svcAccList), len(svcAccList)-len(svcUsersMap))
}
bootstrapTraceMsg("loading STS account policy mapping")
stsPolicyMappingLoadStartTime := UTCNow()
var stsPolicyMappingsCount int
for _, svcAcc := range svcUsersMap {
svcParent := svcAcc.Credentials.ParentUser
if _, ok := cache.iamUsersMap[svcParent]; !ok {
stsPolicyMappingsCount++
// If a service account's parent user is not in iamUsersMap, the
// parent is an STS account. Such accounts may have a policy mapped
// on the parent user, so we load them. This is not needed for the
@ -529,10 +580,14 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iam
// OIDC/AssumeRoleWithCustomToken/AssumeRoleWithCertificate).
err := iamOS.loadMappedPolicy(ctx, svcParent, stsUser, false, cache.iamSTSPolicyMap)
if err != nil && !errors.Is(err, errNoSuchPolicy) {
return fmt.Errorf("unable to load the policy mapping for the STS user `%s`: %w", svcParent, err)
return fmt.Errorf("unable to load the policy mapping for the STS user: %w", err)
}
}
}
if took := time.Since(stsPolicyMappingLoadStartTime); took > maxIAMLoadOpTime {
logger.Info("STS policy mappings load took %.2fs (for %d items)", took.Seconds(), stsPolicyMappingsCount)
}
// Copy svcUsersMap to cache.iamUsersMap
for k, v := range svcUsersMap {
cache.iamUsersMap[k] = v
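
Each IAM load phase above is timed and logged only when it crosses `maxIAMLoadOpTime`, so routine fast loads stay quiet. The pattern in isolation:

package main

import (
	"log"
	"time"
)

const maxLoadOpTime = 5 * time.Second // same threshold idea as maxIAMLoadOpTime

// timeStep runs fn and logs only when it exceeds the threshold, keeping
// healthy loads silent while slow phases surface in the logs.
func timeStep(name string, items int, fn func()) {
	start := time.Now()
	fn()
	if took := time.Since(start); took > maxLoadOpTime {
		log.Printf("%s took %.2fs (for %d items)", name, took.Seconds(), items)
	}
}

func main() {
	docs := []string{"policy-a", "policy-b"}
	timeStep("Policy docs load", len(docs), func() {
		for range docs {
			// load one policy document here
		}
	})
}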

View file

@ -431,8 +431,41 @@ func (c *iamCache) policyDBGet(store *IAMStoreSys, name string, isGroup bool) ([
}
}
// returned policy could be empty
policies := mp.toSlice()
// returned policy could be empty; we use a set to de-duplicate.
policies := set.CreateStringSet(mp.toSlice()...)
for _, group := range u.Credentials.Groups {
if store.getUsersSysType() == MinIOUsersSysType {
g, ok := c.iamGroupsMap[group]
if !ok {
if err := store.loadGroup(context.Background(), group, c.iamGroupsMap); err != nil {
return nil, time.Time{}, err
}
g, ok = c.iamGroupsMap[group]
if !ok {
return nil, time.Time{}, errNoSuchGroup
}
}
// Group is disabled, so we return no policy - this
// ensures the request is denied.
if g.Status == statusDisabled {
return nil, time.Time{}, nil
}
}
policy, ok := c.iamGroupPolicyMap.Load(group)
if !ok {
if err := store.loadMappedPolicyWithRetry(context.TODO(), group, regUser, true, c.iamGroupPolicyMap, 3); err != nil && !errors.Is(err, errNoSuchPolicy) {
return nil, time.Time{}, err
}
policy, _ = c.iamGroupPolicyMap.Load(group)
}
for _, p := range policy.toSlice() {
policies.Add(p)
}
}
for _, group := range c.iamUserGroupMemberships[name].ToSlice() {
if store.getUsersSysType() == MinIOUsersSysType {
@ -462,10 +495,12 @@ func (c *iamCache) policyDBGet(store *IAMStoreSys, name string, isGroup bool) ([
policy, _ = c.iamGroupPolicyMap.Load(group)
}
policies = append(policies, policy.toSlice()...)
for _, p := range policy.toSlice() {
policies.Add(p)
}
}
return policies, mp.UpdatedAt, nil
return policies.ToSlice(), mp.UpdatedAt, nil
}
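
Switching from a slice to a `set.StringSet` de-duplicates policies that arrive via both direct user mappings and group mappings. A small sketch, assuming the minio-go set package this code appears to use:

package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/set"
)

func main() {
	// Direct user->policy mappings can overlap with group mappings; a
	// StringSet keeps each policy name exactly once before merging.
	policies := set.CreateStringSet("readwrite", "diagnostics")
	groupMappings := [][]string{{"readwrite"}, {"consoleAdmin"}}
	for _, gp := range groupMappings {
		for _, p := range gp {
			policies.Add(p)
		}
	}
	fmt.Println(policies.ToSlice()) // each policy appears once
}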
func (c *iamCache) updateUserWithClaims(key string, u UserIdentity) error {
@ -537,25 +572,25 @@ func setDefaultCannedPolicies(policies map[string]PolicyDoc) {
// LoadIAMCache reads all IAM items and populates a new iamCache object and
// replaces the in-memory cache object.
func (store *IAMStoreSys) LoadIAMCache(ctx context.Context, firstTime bool) error {
bootstrapTraceMsg := func(s string) {
bootstrapTraceMsgFirstTime := func(s string) {
if firstTime {
bootstrapTraceMsg(s)
}
}
bootstrapTraceMsg("loading IAM data")
bootstrapTraceMsgFirstTime("loading IAM data")
newCache := newIamCache()
loadedAt := time.Now()
if iamOS, ok := store.IAMStorageAPI.(*IAMObjectStore); ok {
err := iamOS.loadAllFromObjStore(ctx, newCache)
err := iamOS.loadAllFromObjStore(ctx, newCache, firstTime)
if err != nil {
return err
}
} else {
bootstrapTraceMsg("loading policy documents")
// Only non-object IAM store (i.e. only etcd backend).
bootstrapTraceMsgFirstTime("loading policy documents")
if err := store.loadPolicyDocs(ctx, newCache.iamPolicyDocsMap); err != nil {
return err
}
@ -564,29 +599,29 @@ func (store *IAMStoreSys) LoadIAMCache(ctx context.Context, firstTime bool) erro
setDefaultCannedPolicies(newCache.iamPolicyDocsMap)
if store.getUsersSysType() == MinIOUsersSysType {
bootstrapTraceMsg("loading regular users")
bootstrapTraceMsgFirstTime("loading regular users")
if err := store.loadUsers(ctx, regUser, newCache.iamUsersMap); err != nil {
return err
}
bootstrapTraceMsg("loading regular groups")
bootstrapTraceMsgFirstTime("loading regular groups")
if err := store.loadGroups(ctx, newCache.iamGroupsMap); err != nil {
return err
}
}
bootstrapTraceMsg("loading user policy mapping")
bootstrapTraceMsgFirstTime("loading user policy mapping")
// load policies mapped to users
if err := store.loadMappedPolicies(ctx, regUser, false, newCache.iamUserPolicyMap); err != nil {
return err
}
bootstrapTraceMsg("loading group policy mapping")
bootstrapTraceMsgFirstTime("loading group policy mapping")
// load policies mapped to groups
if err := store.loadMappedPolicies(ctx, regUser, true, newCache.iamGroupPolicyMap); err != nil {
return err
}
bootstrapTraceMsg("loading service accounts")
bootstrapTraceMsgFirstTime("loading service accounts")
// load service accounts
if err := store.loadUsers(ctx, svcUser, newCache.iamUsersMap); err != nil {
return err
@ -937,12 +972,7 @@ func (store *IAMStoreSys) GetGroupDescription(group string) (gd madmin.GroupDesc
}, nil
}
// ListGroups - lists groups. Since this is not going to be a frequent
// operation, we fetch this info from storage, and refresh the cache as well.
func (store *IAMStoreSys) ListGroups(ctx context.Context) (res []string, err error) {
cache := store.lock()
defer store.unlock()
func (store *IAMStoreSys) updateGroups(ctx context.Context, cache *iamCache) (res []string, err error) {
if store.getUsersSysType() == MinIOUsersSysType {
m := map[string]GroupInfo{}
err = store.loadGroups(ctx, m)
@ -970,7 +1000,16 @@ func (store *IAMStoreSys) ListGroups(ctx context.Context) (res []string, err err
})
}
return
return res, nil
}
// ListGroups - lists groups. Since this is not going to be a frequent
// operation, we fetch this info from storage, and refresh the cache as well.
func (store *IAMStoreSys) ListGroups(ctx context.Context) (res []string, err error) {
cache := store.lock()
defer store.unlock()
return store.updateGroups(ctx, cache)
}
// listGroups - lists groups - fetch groups from cache
@ -1445,16 +1484,51 @@ func filterPolicies(cache *iamCache, policyName string, bucketName string) (stri
return strings.Join(policies, ","), policy.MergePolicies(toMerge...)
}
// FilterPolicies - accepts a comma separated list of policy names as a string
// and bucket and returns only policies that currently exist in MinIO. If
// bucketName is non-empty, additionally filters policies matching the bucket.
// The first returned value is the list of currently existing policies, and the
// second is their combined policy definition.
func (store *IAMStoreSys) FilterPolicies(policyName string, bucketName string) (string, policy.Policy) {
cache := store.rlock()
defer store.runlock()
// MergePolicies - accepts a comma separated list of policy names as a string
// and returns only policies that currently exist in MinIO. It hot-loads
// policies that are not yet in memory.
func (store *IAMStoreSys) MergePolicies(policyName string) (string, policy.Policy) {
var policies []string
var missingPolicies []string
var toMerge []policy.Policy
return filterPolicies(cache, policyName, bucketName)
cache := store.rlock()
for _, policy := range newMappedPolicy(policyName).toSlice() {
if policy == "" {
continue
}
p, found := cache.iamPolicyDocsMap[policy]
if !found {
missingPolicies = append(missingPolicies, policy)
continue
}
policies = append(policies, policy)
toMerge = append(toMerge, p.Policy)
}
store.runlock()
if len(missingPolicies) > 0 {
m := make(map[string]PolicyDoc)
for _, policy := range missingPolicies {
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
_ = store.loadPolicyDoc(ctx, policy, m)
cancel()
}
cache := store.lock()
for policy, p := range m {
cache.iamPolicyDocsMap[policy] = p
}
store.unlock()
for policy, p := range m {
policies = append(policies, policy)
toMerge = append(toMerge, p.Policy)
}
}
return strings.Join(policies, ","), policy.MergePolicies(toMerge...)
}
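
`MergePolicies` collects hits under a read lock, loads any missing policy docs without holding the lock, then publishes them under a short write lock. The lock choreography in miniature, with illustrative types rather than the IAM store's:

package main

import (
	"fmt"
	"sync"
)

type store struct {
	mu    sync.RWMutex
	cache map[string]string // policy name -> policy document
}

// get resolves names against the cache under a read lock, loads misses
// outside any lock, then publishes them under a short write lock.
func (s *store) get(names []string) map[string]string {
	out := make(map[string]string)
	var missing []string

	s.mu.RLock()
	for _, n := range names {
		if doc, ok := s.cache[n]; ok {
			out[n] = doc
		} else {
			missing = append(missing, n)
		}
	}
	s.mu.RUnlock()

	if len(missing) > 0 {
		loaded := make(map[string]string)
		for _, n := range missing {
			loaded[n] = "loaded:" + n // stand-in for loadPolicyDoc
		}
		s.mu.Lock()
		for n, doc := range loaded {
			s.cache[n] = doc
			out[n] = doc
		}
		s.mu.Unlock()
	}
	return out
}

func main() {
	s := &store{cache: map[string]string{"readwrite": "{...}"}}
	fmt.Println(s.get([]string{"readwrite", "diagnostics"}))
}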
// GetBucketUsers - returns users (not STS or service accounts) that have access
@ -1907,6 +1981,11 @@ func (store *IAMStoreSys) GetAllParentUsers() map[string]ParentUserInfo {
cache := store.rlock()
defer store.runlock()
return store.getParentUsers(cache)
}
// assumes store is locked by caller.
func (store *IAMStoreSys) getParentUsers(cache *iamCache) map[string]ParentUserInfo {
res := map[string]ParentUserInfo{}
for _, ui := range cache.iamUsersMap {
cred := ui.Credentials
@ -1977,50 +2056,104 @@ func (store *IAMStoreSys) GetAllParentUsers() map[string]ParentUserInfo {
return res
}
// Assumes store is locked by caller. If users is empty, returns all user mappings.
func (store *IAMStoreSys) listUserPolicyMappings(cache *iamCache, users []string,
userPredicate func(string) bool,
// GetAllSTSUserMappings - Loads all STS user policy mappings from storage and
// returns them. Also gets any STS users that do not have policy mappings but have
// Service Accounts or STS keys (this is useful if the user is part of a group).
func (store *IAMStoreSys) GetAllSTSUserMappings(userPredicate func(string) bool) (map[string]string, error) {
cache := store.rlock()
defer store.runlock()
stsMap := make(map[string]string)
m := xsync.NewMapOf[string, MappedPolicy]()
if err := store.loadMappedPolicies(context.Background(), stsUser, false, m); err != nil {
return nil, err
}
m.Range(func(user string, mappedPolicy MappedPolicy) bool {
if userPredicate != nil && !userPredicate(user) {
return true
}
stsMap[user] = mappedPolicy.Policies
return true
})
for user := range store.getParentUsers(cache) {
if _, ok := stsMap[user]; !ok {
if userPredicate != nil && !userPredicate(user) {
continue
}
stsMap[user] = ""
}
}
return stsMap, nil
}
// Assumes store is locked by caller. If userMap is empty, returns all user mappings.
func (store *IAMStoreSys) listUserPolicyMappings(cache *iamCache, userMap map[string]set.StringSet,
userPredicate func(string) bool, decodeFunc func(string) string,
) []madmin.UserPolicyEntities {
stsMap := xsync.NewMapOf[string, MappedPolicy]()
resMap := make(map[string]madmin.UserPolicyEntities, len(userMap))
for user, groupSet := range userMap {
// Attempt to load parent user mapping for STS accounts
store.loadMappedPolicy(context.TODO(), user, stsUser, false, stsMap)
decodeUser := user
if decodeFunc != nil {
decodeUser = decodeFunc(user)
}
blankEntities := madmin.UserPolicyEntities{User: decodeUser}
if !groupSet.IsEmpty() {
blankEntities.MemberOfMappings = store.listGroupPolicyMappings(cache, groupSet, nil, decodeFunc)
}
resMap[user] = blankEntities
}
var r []madmin.UserPolicyEntities
usersSet := set.CreateStringSet(users...)
cache.iamUserPolicyMap.Range(func(user string, mappedPolicy MappedPolicy) bool {
if userPredicate != nil && !userPredicate(user) {
return true
}
if !usersSet.IsEmpty() && !usersSet.Contains(user) {
entitiesWithMemberOf, ok := resMap[user]
if !ok {
if len(userMap) > 0 {
return true
}
decodeUser := user
if decodeFunc != nil {
decodeUser = decodeFunc(user)
}
entitiesWithMemberOf = madmin.UserPolicyEntities{User: decodeUser}
}
ps := mappedPolicy.toSlice()
sort.Strings(ps)
r = append(r, madmin.UserPolicyEntities{
User: user,
Policies: ps,
})
entitiesWithMemberOf.Policies = ps
resMap[user] = entitiesWithMemberOf
return true
})
stsMap := xsync.NewMapOf[string, MappedPolicy]()
for _, user := range users {
// Attempt to load parent user mapping for STS accounts
store.loadMappedPolicy(context.TODO(), user, stsUser, false, stsMap)
}
stsMap.Range(func(user string, mappedPolicy MappedPolicy) bool {
if userPredicate != nil && !userPredicate(user) {
return true
}
entitiesWithMemberOf := resMap[user]
ps := mappedPolicy.toSlice()
sort.Strings(ps)
r = append(r, madmin.UserPolicyEntities{
User: user,
Policies: ps,
})
entitiesWithMemberOf.Policies = ps
resMap[user] = entitiesWithMemberOf
return true
})
for _, v := range resMap {
if v.Policies != nil || v.MemberOfMappings != nil {
r = append(r, v)
}
}
sort.Slice(r, func(i, j int) bool {
return r[i].User < r[j].User
})
@ -2029,11 +2162,11 @@ func (store *IAMStoreSys) listUserPolicyMappings(cache *iamCache, users []string
}
// Assumes store is locked by caller. If groups is empty, returns all group mappings.
func (store *IAMStoreSys) listGroupPolicyMappings(cache *iamCache, groups []string,
groupPredicate func(string) bool,
func (store *IAMStoreSys) listGroupPolicyMappings(cache *iamCache, groupsSet set.StringSet,
groupPredicate func(string) bool, decodeFunc func(string) string,
) []madmin.GroupPolicyEntities {
var r []madmin.GroupPolicyEntities
groupsSet := set.CreateStringSet(groups...)
cache.iamGroupPolicyMap.Range(func(group string, mappedPolicy MappedPolicy) bool {
if groupPredicate != nil && !groupPredicate(group) {
return true
@ -2043,10 +2176,15 @@ func (store *IAMStoreSys) listGroupPolicyMappings(cache *iamCache, groups []stri
return true
}
decodeGroup := group
if decodeFunc != nil {
decodeGroup = decodeFunc(group)
}
ps := mappedPolicy.toSlice()
sort.Strings(ps)
r = append(r, madmin.GroupPolicyEntities{
Group: group,
Group: decodeGroup,
Policies: ps,
})
return true
@ -2060,17 +2198,20 @@ func (store *IAMStoreSys) listGroupPolicyMappings(cache *iamCache, groups []stri
}
// Assumes store is locked by caller. If policies is empty, returns all policy mappings.
func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string,
userPredicate, groupPredicate func(string) bool,
func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, queryPolSet set.StringSet,
userPredicate, groupPredicate func(string) bool, decodeFunc func(string) string,
) []madmin.PolicyEntities {
queryPolSet := set.CreateStringSet(policies...)
policyToUsersMap := make(map[string]set.StringSet)
cache.iamUserPolicyMap.Range(func(user string, mappedPolicy MappedPolicy) bool {
if userPredicate != nil && !userPredicate(user) {
return true
}
decodeUser := user
if decodeFunc != nil {
decodeUser = decodeFunc(user)
}
commonPolicySet := mappedPolicy.policySet()
if !queryPolSet.IsEmpty() {
commonPolicySet = commonPolicySet.Intersection(queryPolSet)
@ -2078,9 +2219,9 @@ func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string,
for _, policy := range commonPolicySet.ToSlice() {
s, ok := policyToUsersMap[policy]
if !ok {
policyToUsersMap[policy] = set.CreateStringSet(user)
policyToUsersMap[policy] = set.CreateStringSet(decodeUser)
} else {
s.Add(user)
s.Add(decodeUser)
policyToUsersMap[policy] = s
}
}
@ -2094,6 +2235,11 @@ func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string,
continue
}
decodeUser := user
if decodeFunc != nil {
decodeUser = decodeFunc(user)
}
var mappedPolicy MappedPolicy
store.loadIAMConfig(context.Background(), &mappedPolicy, getMappedPolicyPath(user, stsUser, false))
@ -2104,9 +2250,9 @@ func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string,
for _, policy := range commonPolicySet.ToSlice() {
s, ok := policyToUsersMap[policy]
if !ok {
policyToUsersMap[policy] = set.CreateStringSet(user)
policyToUsersMap[policy] = set.CreateStringSet(decodeUser)
} else {
s.Add(user)
s.Add(decodeUser)
policyToUsersMap[policy] = s
}
}
@ -2121,6 +2267,11 @@ func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string,
return true
}
decodeUser := user
if decodeFunc != nil {
decodeUser = decodeFunc(user)
}
commonPolicySet := mappedPolicy.policySet()
if !queryPolSet.IsEmpty() {
commonPolicySet = commonPolicySet.Intersection(queryPolSet)
@ -2128,9 +2279,9 @@ func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string,
for _, policy := range commonPolicySet.ToSlice() {
s, ok := policyToUsersMap[policy]
if !ok {
policyToUsersMap[policy] = set.CreateStringSet(user)
policyToUsersMap[policy] = set.CreateStringSet(decodeUser)
} else {
s.Add(user)
s.Add(decodeUser)
policyToUsersMap[policy] = s
}
}
@ -2145,6 +2296,11 @@ func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string,
return true
}
decodeGroup := group
if decodeFunc != nil {
decodeGroup = decodeFunc(group)
}
commonPolicySet := mappedPolicy.policySet()
if !queryPolSet.IsEmpty() {
commonPolicySet = commonPolicySet.Intersection(queryPolSet)
@ -2152,9 +2308,9 @@ func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string,
for _, policy := range commonPolicySet.ToSlice() {
s, ok := policyToGroupsMap[policy]
if !ok {
policyToGroupsMap[policy] = set.CreateStringSet(group)
policyToGroupsMap[policy] = set.CreateStringSet(decodeGroup)
} else {
s.Add(group)
s.Add(decodeGroup)
policyToGroupsMap[policy] = s
}
}
@ -2194,24 +2350,24 @@ func (store *IAMStoreSys) listPolicyMappings(cache *iamCache, policies []string,
}
// ListPolicyMappings - return users/groups mapped to policies.
func (store *IAMStoreSys) ListPolicyMappings(q madmin.PolicyEntitiesQuery,
userPredicate, groupPredicate func(string) bool,
func (store *IAMStoreSys) ListPolicyMappings(q cleanEntitiesQuery,
userPredicate, groupPredicate func(string) bool, decodeFunc func(string) string,
) madmin.PolicyEntitiesResult {
cache := store.rlock()
defer store.runlock()
var result madmin.PolicyEntitiesResult
isAllPoliciesQuery := len(q.Users) == 0 && len(q.Groups) == 0 && len(q.Policy) == 0
isAllPoliciesQuery := len(q.Users) == 0 && len(q.Groups) == 0 && len(q.Policies) == 0
if len(q.Users) > 0 {
result.UserMappings = store.listUserPolicyMappings(cache, q.Users, userPredicate)
result.UserMappings = store.listUserPolicyMappings(cache, q.Users, userPredicate, decodeFunc)
}
if len(q.Groups) > 0 {
result.GroupMappings = store.listGroupPolicyMappings(cache, q.Groups, groupPredicate)
result.GroupMappings = store.listGroupPolicyMappings(cache, q.Groups, groupPredicate, decodeFunc)
}
if len(q.Policy) > 0 || isAllPoliciesQuery {
result.PolicyMappings = store.listPolicyMappings(cache, q.Policy, userPredicate, groupPredicate)
if len(q.Policies) > 0 || isAllPoliciesQuery {
result.PolicyMappings = store.listPolicyMappings(cache, q.Policies, userPredicate, groupPredicate, decodeFunc)
}
return result
}
@ -2638,6 +2794,18 @@ func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error
}
}
load := len(cache.iamGroupsMap) == 0
if store.getUsersSysType() == LDAPUsersSysType && cache.iamGroupPolicyMap.Size() == 0 {
load = true
}
if load {
if _, err = store.updateGroups(ctx, cache); err != nil {
return "done", err
}
}
cache.buildUserGroupMemberships()
return "done", err
})

View file

@ -217,6 +217,9 @@ func (sys *IAMSys) Load(ctx context.Context, firstTime bool) error {
if firstTime {
bootstrapTraceMsg(fmt.Sprintf("globalIAMSys.Load(): (duration: %s)", loadDuration))
if globalIsDistErasure {
logger.Info("IAM load(startup) finished. (duration: %s)", loadDuration)
}
}
select {
@ -315,6 +318,24 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
break
}
cache := sys.store.lock()
setDefaultCannedPolicies(cache.iamPolicyDocsMap)
sys.store.unlock()
// Load RoleARNs
sys.rolesMap = make(map[arn.ARN]string)
// From OpenID
if riMap := sys.OpenIDConfig.GetRoleInfo(); riMap != nil {
sys.validateAndAddRolePolicyMappings(ctx, riMap)
}
// From AuthN plugin if enabled.
if authn := newGlobalAuthNPluginFn(); authn != nil {
riMap := authn.GetRoleInfo()
sys.validateAndAddRolePolicyMappings(ctx, riMap)
}
// Load IAM data from storage.
for {
if err := sys.Load(retryCtx, true); err != nil {
@ -334,20 +355,6 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
go sys.periodicRoutines(ctx, refreshInterval)
// Load RoleARNs
sys.rolesMap = make(map[arn.ARN]string)
// From OpenID
if riMap := sys.OpenIDConfig.GetRoleInfo(); riMap != nil {
sys.validateAndAddRolePolicyMappings(ctx, riMap)
}
// From AuthN plugin if enabled.
if authn := newGlobalAuthNPluginFn(); authn != nil {
riMap := authn.GetRoleInfo()
sys.validateAndAddRolePolicyMappings(ctx, riMap)
}
sys.printIAMRoles()
bootstrapTraceMsg("finishing IAM loading")
@ -396,12 +403,12 @@ func (sys *IAMSys) periodicRoutines(ctx context.Context, baseInterval time.Durat
// Load all IAM items (except STS creds) periodically.
refreshStart := time.Now()
if err := sys.Load(ctx, false); err != nil {
iamLogIf(ctx, fmt.Errorf("Failure in periodic refresh for IAM (took %.2fs): %v", time.Since(refreshStart).Seconds(), err), logger.WarningKind)
iamLogIf(ctx, fmt.Errorf("Failure in periodic refresh for IAM (duration: %s): %v", time.Since(refreshStart), err), logger.WarningKind)
} else {
took := time.Since(refreshStart).Seconds()
if took > maxDurationSecondsForLog {
// Log if we took a lot of time to load.
logger.Info("IAM refresh took %.2fs", took)
logger.Info("IAM refresh took (duration: %s)", took)
}
}
@ -436,7 +443,7 @@ func (sys *IAMSys) validateAndAddRolePolicyMappings(ctx context.Context, m map[a
// running server by creating the policies after start up.
for arn, rolePolicies := range m {
specifiedPoliciesSet := newMappedPolicy(rolePolicies).policySet()
validPolicies, _ := sys.store.FilterPolicies(rolePolicies, "")
validPolicies, _ := sys.store.MergePolicies(rolePolicies)
knownPoliciesSet := newMappedPolicy(validPolicies).policySet()
unknownPoliciesSet := specifiedPoliciesSet.Difference(knownPoliciesSet)
if len(unknownPoliciesSet) > 0 {
@ -672,7 +679,7 @@ func (sys *IAMSys) CurrentPolicies(policyName string) string {
return ""
}
policies, _ := sys.store.FilterPolicies(policyName, "")
policies, _ := sys.store.MergePolicies(policyName)
return policies
}
@ -786,11 +793,15 @@ func (sys *IAMSys) ListLDAPUsers(ctx context.Context) (map[string]madmin.UserInf
select {
case <-sys.configLoaded:
ldapUsers := make(map[string]madmin.UserInfo)
for user, policy := range sys.store.GetUsersWithMappedPolicies() {
stsMap, err := sys.store.GetAllSTSUserMappings(sys.LDAPConfig.IsLDAPUserDN)
if err != nil {
return nil, err
}
ldapUsers := make(map[string]madmin.UserInfo, len(stsMap))
for user, policy := range stsMap {
ldapUsers[user] = madmin.UserInfo{
PolicyName: policy,
Status: madmin.AccountEnabled,
Status: statusEnabled,
}
}
return ldapUsers, nil
@ -799,6 +810,57 @@ func (sys *IAMSys) ListLDAPUsers(ctx context.Context) (map[string]madmin.UserInf
}
}
type cleanEntitiesQuery struct {
Users map[string]set.StringSet
Groups set.StringSet
Policies set.StringSet
}
// createCleanEntitiesQuery - maps users to their groups and normalizes user or group DNs when LDAP is in use.
func (sys *IAMSys) createCleanEntitiesQuery(q madmin.PolicyEntitiesQuery, ldap bool) cleanEntitiesQuery {
cleanQ := cleanEntitiesQuery{
Users: make(map[string]set.StringSet),
Groups: set.CreateStringSet(q.Groups...),
Policies: set.CreateStringSet(q.Policy...),
}
if ldap {
// Validate and normalize users, then fetch and normalize their groups
// Also include unvalidated users for backward compatibility.
for _, user := range q.Users {
lookupRes, actualGroups, _ := sys.LDAPConfig.GetValidatedDNWithGroups(user)
if lookupRes != nil {
groupSet := set.CreateStringSet(actualGroups...)
// duplicates can be overwritten; fetched groups should be identical.
cleanQ.Users[lookupRes.NormDN] = groupSet
}
// Search for non-normalized DN as well for backward compatibility.
if _, ok := cleanQ.Users[user]; !ok {
cleanQ.Users[user] = nil
}
}
// Validate and normalize groups.
for _, group := range q.Groups {
lookupRes, underDN, _ := sys.LDAPConfig.GetValidatedGroupDN(nil, group)
if lookupRes != nil && underDN {
cleanQ.Groups.Add(lookupRes.NormDN)
}
}
} else {
for _, user := range q.Users {
info, err := sys.store.GetUserInfo(user)
var groupSet set.StringSet
if err == nil {
groupSet = set.CreateStringSet(info.MemberOf...)
}
cleanQ.Users[user] = groupSet
}
}
return cleanQ
}
// QueryLDAPPolicyEntities - queries policy associations for LDAP users/groups/policies.
func (sys *IAMSys) QueryLDAPPolicyEntities(ctx context.Context, q madmin.PolicyEntitiesQuery) (*madmin.PolicyEntitiesResult, error) {
if !sys.Initialized() {
@ -811,7 +873,8 @@ func (sys *IAMSys) QueryLDAPPolicyEntities(ctx context.Context, q madmin.PolicyE
select {
case <-sys.configLoaded:
pe := sys.store.ListPolicyMappings(q, sys.LDAPConfig.IsLDAPUserDN, sys.LDAPConfig.IsLDAPGroupDN)
cleanQuery := sys.createCleanEntitiesQuery(q, true)
pe := sys.store.ListPolicyMappings(cleanQuery, sys.LDAPConfig.IsLDAPUserDN, sys.LDAPConfig.IsLDAPGroupDN, sys.LDAPConfig.DecodeDN)
pe.Timestamp = UTCNow()
return &pe, nil
case <-ctx.Done():
@ -885,6 +948,7 @@ func (sys *IAMSys) QueryPolicyEntities(ctx context.Context, q madmin.PolicyEntit
select {
case <-sys.configLoaded:
cleanQuery := sys.createCleanEntitiesQuery(q, false)
var userPredicate, groupPredicate func(string) bool
if sys.LDAPConfig.Enabled() {
userPredicate = func(s string) bool {
@ -894,7 +958,7 @@ func (sys *IAMSys) QueryPolicyEntities(ctx context.Context, q madmin.PolicyEntit
return !sys.LDAPConfig.IsLDAPGroupDN(s)
}
}
pe := sys.store.ListPolicyMappings(q, userPredicate, groupPredicate)
pe := sys.store.ListPolicyMappings(cleanQuery, userPredicate, groupPredicate, nil)
pe.Timestamp = UTCNow()
return &pe, nil
case <-ctx.Done():
@ -1510,11 +1574,11 @@ func (sys *IAMSys) NormalizeLDAPAccessKeypairs(ctx context.Context, accessKeyMap
// server and is under a configured base DN.
validatedParent, isUnderBaseDN, err := sys.LDAPConfig.GetValidatedUserDN(conn, parent)
if err != nil {
collectedErrors = append(collectedErrors, fmt.Errorf("could not validate `%s` exists in LDAP directory: %w", parent, err))
collectedErrors = append(collectedErrors, fmt.Errorf("could not validate parent exists in LDAP directory: %w", err))
continue
}
if validatedParent == nil || !isUnderBaseDN {
err := fmt.Errorf("DN `%s` was not found in the LDAP directory", parent)
err := fmt.Errorf("DN parent was not found in the LDAP directory")
collectedErrors = append(collectedErrors, err)
continue
}
@ -1529,11 +1593,11 @@ func (sys *IAMSys) NormalizeLDAPAccessKeypairs(ctx context.Context, accessKeyMap
// configured base DN.
validatedGroup, _, err := sys.LDAPConfig.GetValidatedGroupDN(conn, group)
if err != nil {
collectedErrors = append(collectedErrors, fmt.Errorf("could not validate `%s` exists in LDAP directory: %w", group, err))
collectedErrors = append(collectedErrors, fmt.Errorf("could not validate group exists in LDAP directory: %w", err))
continue
}
if validatedGroup == nil {
err := fmt.Errorf("DN `%s` was not found in the LDAP directory", group)
err := fmt.Errorf("DN group was not found in the LDAP directory")
collectedErrors = append(collectedErrors, err)
continue
}
@ -1623,7 +1687,7 @@ func (sys *IAMSys) NormalizeLDAPMappingImport(ctx context.Context, isGroup bool,
continue
}
if validatedDN == nil || !underBaseDN {
err := fmt.Errorf("DN `%s` was not found in the LDAP directory", k)
err := fmt.Errorf("DN was not found in the LDAP directory")
collectedErrors = append(collectedErrors, err)
continue
}
@ -1966,7 +2030,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
if dnResult == nil {
// dn not found - still attempt to detach if provided user is a DN.
if !isAttach && sys.LDAPConfig.IsLDAPUserDN(r.User) {
dn = r.User
dn = sys.LDAPConfig.QuickNormalizeDN(r.User)
} else {
err = errNoSuchUser
return
@ -1983,7 +2047,7 @@ func (sys *IAMSys) PolicyDBUpdateLDAP(ctx context.Context, isAttach bool,
}
if dnResult == nil || !underBaseDN {
if !isAttach {
dn = r.Group
dn = sys.LDAPConfig.QuickNormalizeDN(r.Group)
} else {
err = errNoSuchGroup
return
@ -2118,7 +2182,7 @@ func (sys *IAMSys) IsAllowedServiceAccount(args policy.Args, parentUser string)
var combinedPolicy policy.Policy
// Policies were found, evaluate all of them.
if !isOwnerDerived {
availablePoliciesStr, c := sys.store.FilterPolicies(strings.Join(svcPolicies, ","), "")
availablePoliciesStr, c := sys.store.MergePolicies(strings.Join(svcPolicies, ","))
if availablePoliciesStr == "" {
return false
}
@ -2210,22 +2274,16 @@ func (sys *IAMSys) IsAllowedSTS(args policy.Args, parentUser string) bool {
// 2. Combine the mapped policies into a single combined policy.
var combinedPolicy policy.Policy
// Policies were found, evaluate all of them.
if !isOwnerDerived {
var err error
combinedPolicy, err = sys.store.GetPolicy(strings.Join(policies, ","))
if errors.Is(err, errNoSuchPolicy) {
for _, pname := range policies {
_, err := sys.store.GetPolicy(pname)
if errors.Is(err, errNoSuchPolicy) {
availablePoliciesStr, c := sys.store.MergePolicies(strings.Join(policies, ","))
if availablePoliciesStr == "" {
// all policies presented in the claim should exist
iamLogIf(GlobalContext, fmt.Errorf("expected policy (%s) missing from the JWT claim %s, rejecting the request", pname, iamPolicyClaimNameOpenID()))
return false
}
}
iamLogIf(GlobalContext, fmt.Errorf("all policies were unexpectedly present!"))
return false
}
iamLogIf(GlobalContext, fmt.Errorf("expected policy (%s) missing from the JWT claim %s, rejecting the request", policies, iamPolicyClaimNameOpenID()))
return false
}
combinedPolicy = c
}
// 3. If an inline session-policy is present, evaluate it.
@ -2346,7 +2404,7 @@ func isAllowedBySessionPolicy(args policy.Args) (hasSessionPolicy bool, isAllowe
// GetCombinedPolicy returns a combined policy combining all policies
func (sys *IAMSys) GetCombinedPolicy(policies ...string) policy.Policy {
_, policy := sys.store.FilterPolicies(strings.Join(policies, ","), "")
_, policy := sys.store.MergePolicies(strings.Join(policies, ","))
return policy
}

View file

@ -24,10 +24,8 @@ import (
jwtgo "github.com/golang-jwt/jwt/v4"
jwtreq "github.com/golang-jwt/jwt/v4/request"
"github.com/hashicorp/golang-lru/v2/expirable"
"github.com/minio/minio/internal/auth"
xjwt "github.com/minio/minio/internal/jwt"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v3/policy"
)
@ -37,8 +35,8 @@ const (
// Default JWT token for web handlers is one day.
defaultJWTExpiry = 24 * time.Hour
// Inter-node JWT token expiry is 15 minutes.
defaultInterNodeJWTExpiry = 15 * time.Minute
// Inter-node JWT token expiry is approximately 100 years.
defaultInterNodeJWTExpiry = 100 * 365 * 24 * time.Hour
)
var (
@ -50,17 +48,10 @@ var (
errMalformedAuth = errors.New("Malformed authentication input")
)
type cacheKey struct {
accessKey, secretKey, audience string
}
var cacheLRU = expirable.NewLRU[cacheKey, string](1000, nil, 15*time.Second)
func authenticateNode(accessKey, secretKey, audience string) (string, error) {
func authenticateNode(accessKey, secretKey string) (string, error) {
claims := xjwt.NewStandardClaims()
claims.SetExpiry(UTCNow().Add(defaultInterNodeJWTExpiry))
claims.SetAccessKey(accessKey)
claims.SetAudience(audience)
jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, claims)
return jwt.SignedString([]byte(secretKey))
@ -141,27 +132,9 @@ func metricsRequestAuthenticate(req *http.Request) (*xjwt.MapClaims, []string, b
return claims, groups, owner, nil
}
// newCachedAuthToken returns a token that is cached up to 15 seconds.
// If globalActiveCred is updated it is reflected at once.
func newCachedAuthToken() func(audience string) string {
fn := func(accessKey, secretKey, audience string) (s string, err error) {
k := cacheKey{accessKey: accessKey, secretKey: secretKey, audience: audience}
var ok bool
s, ok = cacheLRU.Get(k)
if !ok {
s, err = authenticateNode(accessKey, secretKey, audience)
if err != nil {
return "", err
}
cacheLRU.Add(k, s)
}
return s, nil
}
return func(audience string) string {
cred := globalActiveCred
token, err := fn(cred.AccessKey, cred.SecretKey, audience)
logger.CriticalIf(GlobalContext, err)
return token
// newCachedAuthToken returns the cached token.
func newCachedAuthToken() func() string {
return func() string {
return globalNodeAuthToken
}
}
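
With the token now minted once at startup and cached in `globalNodeAuthToken`, the expiry can be effectively unbounded. A sketch of signing such a token with `golang-jwt/jwt/v4`; key names and claims here are placeholders:

package main

import (
	"fmt"
	"time"

	jwtgo "github.com/golang-jwt/jwt/v4"
)

func main() {
	const interNodeExpiry = 100 * 365 * 24 * time.Hour // effectively non-expiring

	claims := jwtgo.RegisteredClaims{
		Subject:   "node-access-key",
		ExpiresAt: jwtgo.NewNumericDate(time.Now().UTC().Add(interNodeExpiry)),
	}
	token := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, claims)
	signed, err := token.SignedString([]byte("node-secret-key"))
	if err != nil {
		panic(err)
	}
	fmt.Println(signed[:20], "...") // cache this once; hand it out on demand
}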

View file

@ -107,7 +107,7 @@ func BenchmarkParseJWTStandardClaims(b *testing.B) {
}
creds := globalActiveCred
token, err := authenticateNode(creds.AccessKey, creds.SecretKey, "")
token, err := authenticateNode(creds.AccessKey, creds.SecretKey)
if err != nil {
b.Fatal(err)
}
@ -138,7 +138,7 @@ func BenchmarkParseJWTMapClaims(b *testing.B) {
}
creds := globalActiveCred
token, err := authenticateNode(creds.AccessKey, creds.SecretKey, "")
token, err := authenticateNode(creds.AccessKey, creds.SecretKey)
if err != nil {
b.Fatal(err)
}
@ -176,7 +176,7 @@ func BenchmarkAuthenticateNode(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
fn(creds.AccessKey, creds.SecretKey, "aud")
fn(creds.AccessKey, creds.SecretKey)
}
})
b.Run("cached", func(b *testing.B) {
@ -184,7 +184,7 @@ func BenchmarkAuthenticateNode(b *testing.B) {
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
fn("aud")
fn()
}
})
}

View file

@ -24,6 +24,7 @@ import (
"github.com/minio/kms-go/kes"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v3/policy"
@ -56,7 +57,7 @@ func (a kmsAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Request)
writeSuccessResponseJSON(w, resp)
}
// KMSMetricsHandler - POST /minio/kms/v1/metrics
// KMSMetricsHandler - GET /minio/kms/v1/metrics
func (a kmsAPIHandlers) KMSMetricsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSMetrics")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
@ -83,7 +84,7 @@ func (a kmsAPIHandlers) KMSMetricsHandler(w http.ResponseWriter, r *http.Request
}
}
// KMSAPIsHandler - POST /minio/kms/v1/apis
// KMSAPIsHandler - GET /minio/kms/v1/apis
func (a kmsAPIHandlers) KMSAPIsHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSAPIs")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
@ -114,7 +115,7 @@ type versionResponse struct {
Version string `json:"version"`
}
// KMSVersionHandler - POST /minio/kms/v1/version
// KMSVersionHandler - GET /minio/kms/v1/version
func (a kmsAPIHandlers) KMSVersionHandler(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "KMSVersion")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
@ -159,7 +160,20 @@ func (a kmsAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Reque
return
}
if err := GlobalKMS.CreateKey(ctx, &kms.CreateKeyRequest{Name: r.Form.Get("key-id")}); err != nil {
keyID := r.Form.Get("key-id")
// Ensure policy allows the user to create this key name
cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
if !checkKMSActionAllowed(r, owner, cred, policy.KMSCreateKeyAction, keyID) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
if err := GlobalKMS.CreateKey(ctx, &kms.CreateKeyRequest{Name: keyID}); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -171,6 +185,9 @@ func (a kmsAPIHandlers) KMSListKeysHandler(w http.ResponseWriter, r *http.Reques
ctx := newContext(r, w, "KMSListKeys")
defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r))
// This only checks whether the action (kms:ListKeys) is allowed; it does not check
// each key name against the policy's Resources. We check that below, once
// we have the list of key names from the KMS.
objectAPI, _ := validateAdminReq(ctx, w, r, policy.KMSListKeysAction)
if objectAPI == nil {
return
@ -180,7 +197,7 @@ func (a kmsAPIHandlers) KMSListKeysHandler(w http.ResponseWriter, r *http.Reques
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrKMSNotConfigured), r.URL)
return
}
names, _, err := GlobalKMS.ListKeyNames(ctx, &kms.ListRequest{
allKeyNames, _, err := GlobalKMS.ListKeyNames(ctx, &kms.ListRequest{
Prefix: r.Form.Get("pattern"),
})
if err != nil {
@ -188,8 +205,24 @@ func (a kmsAPIHandlers) KMSListKeysHandler(w http.ResponseWriter, r *http.Reques
return
}
values := make([]kes.KeyInfo, 0, len(names))
for _, name := range names {
// Get the cred and owner for checking authz below.
cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
// Now that we have all the key names, check each one against the policy to
// determine whether the user is allowed to list it.
keyNames := []string{}
for _, name := range allKeyNames {
if checkKMSActionAllowed(r, owner, cred, policy.KMSListKeysAction, name) {
keyNames = append(keyNames, name)
}
}
values := make([]kes.KeyInfo, 0, len(keyNames))
for _, name := range keyNames {
values = append(values, kes.KeyInfo{
Name: name,
})
@ -224,6 +257,17 @@ func (a kmsAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Reque
KeyID: keyID,
}
// Ensure policy allows the user to get this key's status
cred, owner, s3Err := validateAdminSignature(ctx, r, "")
if s3Err != ErrNone {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
if !checkKMSActionAllowed(r, owner, cred, policy.KMSKeyStatusAction, keyID) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
return
}
kmsContext := kms.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{Name: keyID, AssociatedData: kmsContext})
@ -274,3 +318,16 @@ func (a kmsAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Reque
}
writeSuccessResponseJSON(w, resp)
}
// checkKMSActionAllowed checks for authorization for a specific action on a resource.
func checkKMSActionAllowed(r *http.Request, owner bool, cred auth.Credentials, action policy.KMSAction, resource string) bool {
return globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.Action(action),
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
Claims: cred.Claims,
BucketName: resource, // overloading BucketName as that's what the policy engine uses to assemble a Resource.
})
}
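
For reference, a full policy document of the shape the tests below exercise; the Resource ARN scopes KMS actions to matching key names. This is assembled from the statement fragments used in the tests, so treat it as illustrative:

{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["kms:KeyStatus", "kms:ListKeys"],
      "Resource": ["arn:minio:kms:::abc-test-*"]
    }
  ]
}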

851
cmd/kms-handlers_test.go Normal file
View file

@ -0,0 +1,851 @@
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/kms"
"github.com/minio/pkg/v3/policy"
)
const (
// KMS API paths
// For example: /minio/kms/v1/key/list?pattern=*
kmsURL = kmsPathPrefix + kmsAPIVersionPrefix
kmsStatusPath = kmsURL + "/status"
kmsMetricsPath = kmsURL + "/metrics"
kmsAPIsPath = kmsURL + "/apis"
kmsVersionPath = kmsURL + "/version"
kmsKeyCreatePath = kmsURL + "/key/create"
kmsKeyListPath = kmsURL + "/key/list"
kmsKeyStatusPath = kmsURL + "/key/status"
// Admin API paths
// For example: /minio/admin/v3/kms/status
adminURL = adminPathPrefix + adminAPIVersionPrefix
kmsAdminStatusPath = adminURL + "/kms/status"
kmsAdminKeyStatusPath = adminURL + "/kms/key/status"
kmsAdminKeyCreate = adminURL + "/kms/key/create"
)
const (
userAccessKey = "miniofakeuseraccesskey"
userSecretKey = "miniofakeusersecret"
)
type kmsTestCase struct {
name string
method string
path string
query map[string]string
// User credentials and policy for request
policy string
asRoot bool
// Wanted in response.
wantStatusCode int
wantKeyNames []string
wantResp []string
}
func TestKMSHandlersCreateKey(t *testing.T) {
adminTestBed, tearDown := setupKMSTest(t, true)
defer tearDown()
tests := []kmsTestCase{
// Create key test
{
name: "create key as user with no policy want forbidden",
method: http.MethodPost,
path: kmsKeyCreatePath,
query: map[string]string{"key-id": "new-test-key"},
asRoot: false,
wantStatusCode: http.StatusForbidden,
wantResp: []string{"AccessDenied"},
},
{
name: "create key as user with no resources specified want success",
method: http.MethodPost,
path: kmsKeyCreatePath,
query: map[string]string{"key-id": "new-test-key"},
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:CreateKey"] }`,
wantStatusCode: http.StatusOK,
},
{
name: "create key as user set policy to allow want success",
method: http.MethodPost,
path: kmsKeyCreatePath,
query: map[string]string{"key-id": "second-new-test-key"},
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:CreateKey"],
"Resource": ["arn:minio:kms:::second-new-test-*"] }`,
wantStatusCode: http.StatusOK,
},
{
name: "create key as user set policy to non matching resource want forbidden",
method: http.MethodPost,
path: kmsKeyCreatePath,
query: map[string]string{"key-id": "third-new-test-key"},
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:CreateKey"],
"Resource": ["arn:minio:kms:::non-matching-key-name"] }`,
wantStatusCode: http.StatusForbidden,
wantResp: []string{"AccessDenied"},
},
}
for testNum, test := range tests {
t.Run(fmt.Sprintf("%d %s", testNum+1, test.name), func(t *testing.T) {
execKMSTest(t, test, adminTestBed)
})
}
}
func TestKMSHandlersKeyStatus(t *testing.T) {
adminTestBed, tearDown := setupKMSTest(t, true)
defer tearDown()
tests := []kmsTestCase{
{
name: "create a first key root user",
method: http.MethodPost,
path: kmsKeyCreatePath,
query: map[string]string{"key-id": "abc-test-key"},
asRoot: true,
wantStatusCode: http.StatusOK,
},
{
name: "key status as root want success",
method: http.MethodGet,
path: kmsKeyStatusPath,
query: map[string]string{"key-id": "abc-test-key"},
asRoot: true,
wantStatusCode: http.StatusOK,
wantResp: []string{"abc-test-key"},
},
{
name: "key status as user no policy want forbidden",
method: http.MethodGet,
path: kmsKeyStatusPath,
query: map[string]string{"key-id": "abc-test-key"},
asRoot: false,
wantStatusCode: http.StatusForbidden,
wantResp: []string{"AccessDenied"},
},
{
name: "key status as user legacy no resources specified want success",
method: http.MethodGet,
path: kmsKeyStatusPath,
query: map[string]string{"key-id": "abc-test-key"},
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:KeyStatus"] }`,
wantStatusCode: http.StatusOK,
wantResp: []string{"abc-test-key"},
},
{
name: "key status as user set policy to allow only one key",
method: http.MethodGet,
path: kmsKeyStatusPath,
query: map[string]string{"key-id": "abc-test-key"},
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:KeyStatus"],
"Resource": ["arn:minio:kms:::abc-test-*"] }`,
wantStatusCode: http.StatusOK,
wantResp: []string{"abc-test-key"},
},
{
name: "key status as user set policy to allow non-matching key",
method: http.MethodGet,
path: kmsKeyStatusPath,
query: map[string]string{"key-id": "abc-test-key"},
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:KeyStatus"],
"Resource": ["arn:minio:kms:::xyz-test-key"] }`,
wantStatusCode: http.StatusForbidden,
wantResp: []string{"AccessDenied"},
},
}
for testNum, test := range tests {
t.Run(fmt.Sprintf("%d %s", testNum+1, test.name), func(t *testing.T) {
execKMSTest(t, test, adminTestBed)
})
}
}
func TestKMSHandlersAPIs(t *testing.T) {
adminTestBed, tearDown := setupKMSTest(t, true)
defer tearDown()
tests := []kmsTestCase{
// Version test
{
name: "version as root want success",
method: http.MethodGet,
path: kmsVersionPath,
asRoot: true,
wantStatusCode: http.StatusOK,
wantResp: []string{"version"},
},
{
name: "version as user with no policy want forbidden",
method: http.MethodGet,
path: kmsVersionPath,
asRoot: false,
wantStatusCode: http.StatusForbidden,
wantResp: []string{"AccessDenied"},
},
{
name: "version as user with policy ignores resource want success",
method: http.MethodGet,
path: kmsVersionPath,
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:Version"],
"Resource": ["arn:minio:kms:::does-not-matter-it-is-ignored"] }`,
wantStatusCode: http.StatusOK,
wantResp: []string{"version"},
},
// APIs test
{
name: "apis as root want success",
method: http.MethodGet,
path: kmsAPIsPath,
asRoot: true,
wantStatusCode: http.StatusOK,
wantResp: []string{"stub/path"},
},
{
name: "apis as user with no policy want forbidden",
method: http.MethodGet,
path: kmsAPIsPath,
asRoot: false,
wantStatusCode: http.StatusForbidden,
wantResp: []string{"AccessDenied"},
},
{
name: "apis as user with policy ignores resource want success",
method: http.MethodGet,
path: kmsAPIsPath,
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:API"],
"Resource": ["arn:minio:kms:::does-not-matter-it-is-ignored"] }`,
wantStatusCode: http.StatusOK,
wantResp: []string{"stub/path"},
},
// Metrics test
{
name: "metrics as root want success",
method: http.MethodGet,
path: kmsMetricsPath,
asRoot: true,
wantStatusCode: http.StatusOK,
wantResp: []string{"kms"},
},
{
name: "metrics as user with no policy want forbidden",
method: http.MethodGet,
path: kmsMetricsPath,
asRoot: false,
wantStatusCode: http.StatusForbidden,
wantResp: []string{"AccessDenied"},
},
{
name: "metrics as user with policy ignores resource want success",
method: http.MethodGet,
path: kmsMetricsPath,
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:Metrics"],
"Resource": ["arn:minio:kms:::does-not-matter-it-is-ignored"] }`,
wantStatusCode: http.StatusOK,
wantResp: []string{"kms"},
},
// Status tests
{
name: "status as root want success",
method: http.MethodGet,
path: kmsStatusPath,
asRoot: true,
wantStatusCode: http.StatusOK,
wantResp: []string{"MinIO builtin"},
},
{
name: "status as user with no policy want forbidden",
method: http.MethodGet,
path: kmsStatusPath,
asRoot: false,
wantStatusCode: http.StatusForbidden,
wantResp: []string{"AccessDenied"},
},
{
name: "status as user with policy ignores resource want success",
method: http.MethodGet,
path: kmsStatusPath,
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:Status"],
"Resource": ["arn:minio:kms:::does-not-matter-it-is-ignored"]}`,
wantStatusCode: http.StatusOK,
wantResp: []string{"MinIO builtin"},
},
}
for testNum, test := range tests {
t.Run(fmt.Sprintf("%d %s", testNum+1, test.name), func(t *testing.T) {
execKMSTest(t, test, adminTestBed)
})
}
}
func TestKMSHandlersListKeys(t *testing.T) {
adminTestBed, tearDown := setupKMSTest(t, true)
defer tearDown()
tests := []kmsTestCase{
{
name: "create a first key root user",
method: http.MethodPost,
path: kmsKeyCreatePath,
query: map[string]string{"key-id": "abc-test-key"},
asRoot: true,
wantStatusCode: http.StatusOK,
},
{
name: "create a second key root user",
method: http.MethodPost,
path: kmsKeyCreatePath,
query: map[string]string{"key-id": "xyz-test-key"},
asRoot: true,
wantStatusCode: http.StatusOK,
},
// List keys tests
{
name: "list keys as root want all to be returned",
method: http.MethodGet,
path: kmsKeyListPath,
query: map[string]string{"pattern": "*"},
asRoot: true,
wantStatusCode: http.StatusOK,
wantKeyNames: []string{"default-test-key", "abc-test-key", "xyz-test-key"},
},
{
name: "list keys as user with no policy want forbidden",
method: http.MethodGet,
path: kmsKeyListPath,
query: map[string]string{"pattern": "*"},
asRoot: false,
wantStatusCode: http.StatusForbidden,
wantResp: []string{"AccessDenied"},
},
{
name: "list keys as user with no resources specified want success",
method: http.MethodGet,
path: kmsKeyListPath,
query: map[string]string{"pattern": "*"},
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:ListKeys"]
}`,
wantStatusCode: http.StatusOK,
wantKeyNames: []string{"default-test-key", "abc-test-key", "xyz-test-key"},
},
{
name: "list keys as user set policy resource to allow only one key",
method: http.MethodGet,
path: kmsKeyListPath,
query: map[string]string{"pattern": "*"},
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:ListKeys"],
"Resource": ["arn:minio:kms:::abc*"]}`,
wantStatusCode: http.StatusOK,
wantKeyNames: []string{"abc-test-key"},
},
{
name: "list keys as user set policy to allow only one key, use pattern that includes correct key",
method: http.MethodGet,
path: kmsKeyListPath,
query: map[string]string{"pattern": "abc*"},
policy: `{"Effect": "Allow",
"Action": ["kms:ListKeys"],
"Resource": ["arn:minio:kms:::abc*"]}`,
wantStatusCode: http.StatusOK,
wantKeyNames: []string{"abc-test-key"},
},
{
name: "list keys as user set policy to allow only one key, use pattern that excludes correct key",
method: http.MethodGet,
path: kmsKeyListPath,
query: map[string]string{"pattern": "xyz*"},
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:ListKeys"],
"Resource": ["arn:minio:kms:::abc*"]}`,
wantStatusCode: http.StatusOK,
wantKeyNames: []string{},
},
{
name: "list keys as user set policy that has no matching key resources",
method: http.MethodGet,
path: kmsKeyListPath,
query: map[string]string{"pattern": "*"},
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["kms:ListKeys"],
"Resource": ["arn:minio:kms:::nonematch*"]}`,
wantStatusCode: http.StatusOK,
wantKeyNames: []string{},
},
{
name: "list keys as user set policy that allows listing but denies specific keys",
method: http.MethodGet,
path: kmsKeyListPath,
query: map[string]string{"pattern": "*"},
asRoot: false,
// At first glance this should allow listing any key other than "default-test-key";
// however, the policy engine matches all Deny statements first, without regard
// to Resources (for KMS). This preserves backwards compatibility, since KMS
// statements historically ignored Resources.
policy: `{
"Effect": "Allow",
"Action": ["kms:ListKeys"]
},{
"Effect": "Deny",
"Action": ["kms:ListKeys"],
"Resource": ["arn:minio:kms:::default-test-key"]
}`,
wantStatusCode: http.StatusForbidden,
wantResp: []string{"AccessDenied"},
},
}
for testNum, test := range tests {
t.Run(fmt.Sprintf("%d %s", testNum+1, test.name), func(t *testing.T) {
execKMSTest(t, test, adminTestBed)
})
}
}
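// Editorial sketch (not part of this change): the Deny-first behavior exercised
// by the last case above reduces to the following evaluation order, written
// with hypothetical types rather than the real policy engine API. For KMS
// actions, any matching Deny statement rejects the request before Allow
// statements or Resource patterns are consulted.
//
//	type stmt struct {
//		deny    bool
//		actions map[string]bool // action name -> matched
//	}
//
//	func kmsAllowed(stmts []stmt, action string) bool {
//		for _, s := range stmts { // Deny statements match first, Resource ignored
//			if s.deny && s.actions[action] {
//				return false
//			}
//		}
//		for _, s := range stmts { // then Allow statements
//			if !s.deny && s.actions[action] {
//				return true
//			}
//		}
//		return false // default deny
//	}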
func TestKMSHandlerAdminAPI(t *testing.T) {
adminTestBed, tearDown := setupKMSTest(t, true)
defer tearDown()
tests := []kmsTestCase{
// Create key tests
{
name: "create a key root user",
method: http.MethodPost,
path: kmsAdminKeyCreate,
query: map[string]string{"key-id": "abc-test-key"},
asRoot: true,
wantStatusCode: http.StatusOK,
},
{
name: "create key as user with no policy want forbidden",
method: http.MethodPost,
path: kmsAdminKeyCreate,
query: map[string]string{"key-id": "new-test-key"},
asRoot: false,
wantStatusCode: http.StatusForbidden,
wantResp: []string{"AccessDenied"},
},
{
name: "create key as user with no resources specified want success",
method: http.MethodPost,
path: kmsAdminKeyCreate,
query: map[string]string{"key-id": "new-test-key"},
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["admin:KMSCreateKey"] }`,
wantStatusCode: http.StatusOK,
},
{
name: "create key as user set policy to non matching resource want success",
method: http.MethodPost,
path: kmsAdminKeyCreate,
query: map[string]string{"key-id": "third-new-test-key"},
asRoot: false,
// Admin actions ignore Resources
policy: `{"Effect": "Allow",
"Action": ["admin:KMSCreateKey"],
"Resource": ["arn:minio:kms:::this-is-disregarded"] }`,
wantStatusCode: http.StatusOK,
},
// Status tests
{
name: "status as root want success",
method: http.MethodPost,
path: kmsAdminStatusPath,
asRoot: true,
wantStatusCode: http.StatusOK,
wantResp: []string{"MinIO builtin"},
},
{
name: "status as user with no policy want forbidden",
method: http.MethodPost,
path: kmsAdminStatusPath,
asRoot: false,
wantStatusCode: http.StatusForbidden,
wantResp: []string{"AccessDenied"},
},
{
name: "status as user with policy ignores resource want success",
method: http.MethodPost,
path: kmsAdminStatusPath,
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["admin:KMSKeyStatus"],
"Resource": ["arn:minio:kms:::does-not-matter-it-is-ignored"] }`,
wantStatusCode: http.StatusOK,
wantResp: []string{"MinIO builtin"},
},
// Key status tests
{
name: "key status as root want success",
method: http.MethodGet,
path: kmsAdminKeyStatusPath,
asRoot: true,
wantStatusCode: http.StatusOK,
wantResp: []string{"key-id"},
},
{
name: "key status as user with no policy want forbidden",
method: http.MethodGet,
path: kmsAdminKeyStatusPath,
asRoot: false,
wantStatusCode: http.StatusForbidden,
wantResp: []string{"AccessDenied"},
},
{
name: "key status as user with policy ignores resource want success",
method: http.MethodGet,
path: kmsAdminKeyStatusPath,
asRoot: false,
policy: `{"Effect": "Allow",
"Action": ["admin:KMSKeyStatus"],
"Resource": ["arn:minio:kms:::does-not-matter-it-is-ignored"] }`,
wantStatusCode: http.StatusOK,
wantResp: []string{"key-id"},
},
}
for testNum, test := range tests {
t.Run(fmt.Sprintf("%d %s", testNum+1, test.name), func(t *testing.T) {
execKMSTest(t, test, adminTestBed)
})
}
}
// execKMSTest runs a single test case for KMS handlers
func execKMSTest(t *testing.T, test kmsTestCase, adminTestBed *adminErasureTestBed) {
var accessKey, secretKey string
if test.asRoot {
accessKey, secretKey = globalActiveCred.AccessKey, globalActiveCred.SecretKey
} else {
setupKMSUser(t, userAccessKey, userSecretKey, test.policy)
accessKey = userAccessKey
secretKey = userSecretKey
}
req := buildKMSRequest(t, test.method, test.path, accessKey, secretKey, test.query)
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
t.Logf("HTTP req: %s, resp code: %d, resp body: %s", req.URL.String(), rec.Code, rec.Body.String())
// Check status code
if rec.Code != test.wantStatusCode {
t.Errorf("want status code %d, got %d", test.wantStatusCode, rec.Code)
}
// Check returned key list is correct
if test.wantKeyNames != nil {
gotKeyNames := keyNamesFromListKeysResp(t, rec.Body.Bytes())
if len(test.wantKeyNames) != len(gotKeyNames) {
t.Fatalf("want keys len: %d, got len: %d", len(test.wantKeyNames), len(gotKeyNames))
}
for i, wantKeyName := range test.wantKeyNames {
if gotKeyNames[i] != wantKeyName {
t.Fatalf("want key name %s, in position %d, got %s", wantKeyName, i, gotKeyNames[i])
}
}
}
// Check generic text in the response
if test.wantResp != nil {
for _, want := range test.wantResp {
if !strings.Contains(rec.Body.String(), want) {
t.Fatalf("want response to contain %s, got %s", want, rec.Body.String())
}
}
}
}
// TestKMSHandlerNotConfiguredOrInvalidCreds tests KMS handlers for situations where KMS is not configured
// or invalid credentials are provided.
func TestKMSHandlerNotConfiguredOrInvalidCreds(t *testing.T) {
adminTestBed, tearDown := setupKMSTest(t, false)
defer tearDown()
tests := []struct {
name string
method string
path string
query map[string]string
}{
{
name: "GET status",
method: http.MethodGet,
path: kmsStatusPath,
},
{
name: "GET metrics",
method: http.MethodGet,
path: kmsMetricsPath,
},
{
name: "GET apis",
method: http.MethodGet,
path: kmsAPIsPath,
},
{
name: "GET version",
method: http.MethodGet,
path: kmsVersionPath,
},
{
name: "POST key create",
method: http.MethodPost,
path: kmsKeyCreatePath,
query: map[string]string{"key-id": "master-key-id"},
},
{
name: "GET key list",
method: http.MethodGet,
path: kmsKeyListPath,
query: map[string]string{"pattern": "*"},
},
{
name: "GET key status",
method: http.MethodGet,
path: kmsKeyStatusPath,
query: map[string]string{"key-id": "master-key-id"},
},
}
// Test when the GlobalKMS is not configured
for _, test := range tests {
t.Run(test.name+" not configured", func(t *testing.T) {
req := buildKMSRequest(t, test.method, test.path, "", "", test.query)
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusNotImplemented {
t.Errorf("want status code %d, got %d", http.StatusNotImplemented, rec.Code)
}
})
}
// Test when the GlobalKMS is configured but the credentials are invalid
GlobalKMS = kms.NewStub("default-test-key")
for _, test := range tests {
t.Run(test.name+" invalid credentials", func(t *testing.T) {
req := buildKMSRequest(t, test.method, test.path, userAccessKey, userSecretKey, test.query)
rec := httptest.NewRecorder()
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusForbidden {
t.Errorf("want status code %d, got %d", http.StatusForbidden, rec.Code)
}
})
}
}
func setupKMSTest(t *testing.T, enableKMS bool) (*adminErasureTestBed, func()) {
adminTestBed, err := prepareAdminErasureTestBed(context.Background())
if err != nil {
t.Fatal(err)
}
registerKMSRouter(adminTestBed.router)
if enableKMS {
GlobalKMS = kms.NewStub("default-test-key")
}
tearDown := func() {
adminTestBed.TearDown()
GlobalKMS = nil
}
return adminTestBed, tearDown
}
func buildKMSRequest(t *testing.T, method, path, accessKey, secretKey string, query map[string]string) *http.Request {
if len(query) > 0 {
queryVal := url.Values{}
for k, v := range query {
queryVal.Add(k, v)
}
path = path + "?" + queryVal.Encode()
}
if accessKey == "" && secretKey == "" {
accessKey = globalActiveCred.AccessKey
secretKey = globalActiveCred.SecretKey
}
req, err := newTestSignedRequestV4(method, path, 0, nil, accessKey, secretKey, nil)
if err != nil {
t.Fatal(err)
}
return req
}
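// For example, execKMSTest builds a signed ListKeys request as root like so
// (the values shown are the ones used by the tests above):
//
//	req := buildKMSRequest(t, http.MethodGet, kmsKeyListPath,
//		globalActiveCred.AccessKey, globalActiveCred.SecretKey,
//		map[string]string{"pattern": "*"})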
// setupKMSUser is a test helper that creates a new user with the provided access key and secret key
// and applies the given policy to the user.
func setupKMSUser(t *testing.T, accessKey, secretKey, p string) {
ctx := context.Background()
createUserParams := madmin.AddOrUpdateUserReq{
SecretKey: secretKey,
Status: madmin.AccountEnabled,
}
_, err := globalIAMSys.CreateUser(ctx, accessKey, createUserParams)
if err != nil {
t.Fatal(err)
}
testKMSPolicyName := "testKMSPolicy"
if p != "" {
p = `{"Version":"2012-10-17","Statement":[` + p + `]}`
policyData, err := policy.ParseConfig(strings.NewReader(p))
if err != nil {
t.Fatal(err)
}
_, err = globalIAMSys.SetPolicy(ctx, testKMSPolicyName, *policyData)
if err != nil {
t.Fatal(err)
}
_, err = globalIAMSys.PolicyDBSet(ctx, accessKey, testKMSPolicyName, regUser, false)
if err != nil {
t.Fatal(err)
}
} else {
err = globalIAMSys.DeletePolicy(ctx, testKMSPolicyName, false)
if err != nil {
t.Fatal(err)
}
_, err = globalIAMSys.PolicyDBSet(ctx, accessKey, "", regUser, false)
if err != nil {
t.Fatal(err)
}
}
}
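// For example, given the single-statement fragment used by the ListKeys tests,
// setupKMSUser assembles and applies the full policy document:
//
//	{"Version":"2012-10-17","Statement":[{"Effect": "Allow",
//	 "Action": ["kms:ListKeys"],
//	 "Resource": ["arn:minio:kms:::abc*"]}]}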
func keyNamesFromListKeysResp(t *testing.T, b []byte) []string {
var keyInfos []madmin.KMSKeyInfo
err := json.Unmarshal(b, &keyInfos)
if err != nil {
t.Fatalf("cannot unmarshal '%s', err: %v", b, err)
}
var gotKeyNames []string
for _, keyInfo := range keyInfos {
gotKeyNames = append(gotKeyNames, keyInfo.Name)
}
return gotKeyNames
}
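// keyNamesFromListKeysResp expects the response body to be a JSON array of
// madmin.KMSKeyInfo. A minimal body it can decode would look like this
// (assuming the usual lower-case JSON field tags):
//
//	[{"name":"abc-test-key"},{"name":"xyz-test-key"}]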
View file
@ -1,117 +0,0 @@
// Copyright (c) 2015-2023 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"context"
"fmt"
"math/rand"
"time"
"github.com/tidwall/gjson"
)
const (
licUpdateCycle = 24 * time.Hour * 30
licRenewPath = "/api/cluster/renew-license"
)
// initLicenseUpdateJob starts the periodic license update job in the background.
func initLicenseUpdateJob(ctx context.Context, objAPI ObjectLayer) {
go func() {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
// Leader node (that successfully acquires the lock inside licenceUpdaterLoop)
// will keep performing the license update. If the leader goes down for some
// reason, the lock will be released and another node will acquire it and
// take over because of this loop.
for {
licenceUpdaterLoop(ctx, objAPI)
// license update stopped for some reason.
// sleep for some time and try again.
duration := time.Duration(r.Float64() * float64(time.Hour))
if duration < time.Second {
// Make sure to sleep at least a second to avoid high CPU ticks.
duration = time.Second
}
time.Sleep(duration)
}
}()
}
func licenceUpdaterLoop(ctx context.Context, objAPI ObjectLayer) {
ctx, cancel := globalLeaderLock.GetLock(ctx)
defer cancel()
licenseUpdateTimer := time.NewTimer(licUpdateCycle)
defer licenseUpdateTimer.Stop()
for {
select {
case <-ctx.Done():
return
case <-licenseUpdateTimer.C:
if globalSubnetConfig.Registered() {
performLicenseUpdate(ctx, objAPI)
}
// Reset the timer for next cycle.
licenseUpdateTimer.Reset(licUpdateCycle)
}
}
}
func performLicenseUpdate(ctx context.Context, objectAPI ObjectLayer) {
// the SUBNET license renewal API renews the license only
// if required, e.g. when it is expiring soon
url := globalSubnetConfig.BaseURL + licRenewPath
resp, err := globalSubnetConfig.Post(url, nil)
if err != nil {
subnetLogIf(ctx, fmt.Errorf("error from %s: %w", url, err))
return
}
r := gjson.Parse(resp).Get("license_v2")
if r.Index == 0 {
internalLogIf(ctx, fmt.Errorf("license not found in response from %s", url))
return
}
lic := r.String()
if lic == globalSubnetConfig.License {
// license hasn't changed.
return
}
kv := "subnet license=" + lic
result, err := setConfigKV(ctx, objectAPI, []byte(kv))
if err != nil {
internalLogIf(ctx, fmt.Errorf("error setting subnet license config: %w", err))
return
}
if result.Dynamic {
if err := applyDynamicConfigForSubSys(GlobalContext, objectAPI, result.Cfg, result.SubSys); err != nil {
subnetLogIf(ctx, fmt.Errorf("error applying subnet dynamic config: %w", err))
return
}
globalNotificationSys.SignalConfigReload(result.SubSys)
}
}
View file
@ -44,7 +44,7 @@ func createLockTestServer(ctx context.Context, t *testing.T) (string, *lockRESTS
},
}
creds := globalActiveCred
token, err := authenticateNode(creds.AccessKey, creds.SecretKey, "")
token, err := authenticateNode(creds.AccessKey, creds.SecretKey)
if err != nil {
t.Fatal(err)
}
View file
@ -184,10 +184,6 @@ func etcdLogOnceIf(ctx context.Context, err error, id string, errKind ...interfa
logger.LogOnceIf(ctx, "etcd", err, id, errKind...)
}
func subnetLogIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogIf(ctx, "subnet", err, errKind...)
}
func metricsLogIf(ctx context.Context, err error, errKind ...interface{}) {
logger.LogIf(ctx, "metrics", err, errKind...)
}
View file
@ -532,6 +532,9 @@ func (m *metaCacheEntriesSorted) fileInfoVersions(bucket, prefix, delimiter, aft
}
for _, version := range fiVersions {
if !version.VersionPurgeStatus().Empty() {
continue
}
versioned := vcfg != nil && vcfg.Versioned(entry.name)
versions = append(versions, version.ToObjectInfo(bucket, entry.name, versioned))
}
@ -593,7 +596,7 @@ func (m *metaCacheEntriesSorted) fileInfos(bucket, prefix, delimiter string) (ob
}
fi, err := entry.fileInfo(bucket)
if err == nil {
if err == nil && fi.VersionPurgeStatus().Empty() {
versioned := vcfg != nil && vcfg.Versioned(entry.name)
objects = append(objects, fi.ToObjectInfo(bucket, entry.name, versioned))
}
View file
@ -153,19 +153,7 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) (
} else {
// Continue listing
o.ID = c.id
go func(meta metacache) {
// Continuously update while we wait.
t := time.NewTicker(metacacheMaxClientWait / 10)
defer t.Stop()
select {
case <-ctx.Done():
// Request is done, stop updating.
return
case <-t.C:
meta.lastHandout = time.Now()
meta, _ = rpc.UpdateMetacacheListing(ctx, meta)
}
}(*c)
go c.keepAlive(ctx, rpc)
}
}
}
@ -219,6 +207,9 @@ func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) (
o.ID = ""
}
if contextCanceled(ctx) {
return entries, ctx.Err()
}
// Do listing in-place.
// Create output for our results.
// Create filter for results.
@ -449,5 +440,10 @@ func (z *erasureServerPools) listAndSave(ctx context.Context, o *listPathOptions
xioutil.SafeClose(saveCh)
}()
return filteredResults()
entries, err = filteredResults()
if err == nil {
// Check if listing recorded an error.
err = meta.getErr()
}
return entries, err
}
View file
@ -805,6 +805,17 @@ func (m *metaCacheRPC) setErr(err string) {
*m.meta = meta
}
// getErr will return an error if the listing failed.
// The error is not type safe.
func (m *metaCacheRPC) getErr() error {
m.mu.Lock()
defer m.mu.Unlock()
if m.meta.status == scanStateError {
return errors.New(m.meta.error)
}
return nil
}
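// Editorial note: listAndSave (above) consults getErr only after the result
// channel has drained cleanly, so an error the lister recorded asynchronously
// via setErr still surfaces to the caller instead of being silently dropped.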
func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCacheRPC, entries <-chan metaCacheEntry) (err error) {
o := mc.o
o.debugf(color.Green("saveMetaCacheStream:")+" with options: %#v", o)
View file
@ -24,6 +24,8 @@ import (
"path"
"strings"
"time"
"github.com/minio/pkg/v3/console"
)
type scanStatus uint8
@ -97,6 +99,37 @@ func (m *metacache) worthKeeping() bool {
return true
}
// keepAlive will continuously update lastHandout until ctx is canceled.
func (m metacache) keepAlive(ctx context.Context, rpc *peerRESTClient) {
// we intentionally operate on a copy of m, so we can update without locks.
t := time.NewTicker(metacacheMaxClientWait / 10)
defer t.Stop()
for {
select {
case <-ctx.Done():
// Request is done, stop updating.
return
case <-t.C:
m.lastHandout = time.Now()
if m2, err := rpc.UpdateMetacacheListing(ctx, m); err == nil {
if m2.status != scanStateStarted {
if serverDebugLog {
console.Debugln("returning", m.id, "due to scan state", m2.status, time.Now().Format(time.RFC3339))
}
return
}
m = m2
if serverDebugLog {
console.Debugln("refreshed", m.id, time.Now().Format(time.RFC3339))
}
} else if serverDebugLog {
console.Debugln("error refreshing", m.id, time.Now().Format(time.RFC3339))
}
}
}
}
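// Editorial note: keepAlive takes a value receiver on purpose, so the loop
// mutates a private copy of the metacache and needs no locking; refreshing at
// metacacheMaxClientWait/10 keeps lastHandout well inside the client-wait
// window for as long as the listing context stays alive.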
// baseDirFromPrefix will return the base directory given an object path.
// For example an object with name prefix/folder/object.ext will return `prefix/folder/`.
func baseDirFromPrefix(prefix string) string {
@ -116,13 +149,17 @@ func baseDirFromPrefix(prefix string) string {
// update cache with new status.
// The updates are conditional so multiple callers can update with different states.
func (m *metacache) update(update metacache) {
m.lastUpdate = UTCNow()
now := UTCNow()
m.lastUpdate = now
if m.lastHandout.After(m.lastHandout) {
m.lastHandout = UTCNow()
if update.lastHandout.After(m.lastHandout) {
m.lastHandout = update.lastUpdate
if m.lastHandout.After(now) {
m.lastHandout = now
}
}
if m.status == scanStateStarted && update.status == scanStateSuccess {
m.ended = UTCNow()
m.ended = now
}
if m.status == scanStateStarted && update.status != scanStateStarted {
@ -138,7 +175,7 @@ func (m *metacache) update(update metacache) {
if m.error == "" && update.error != "" {
m.error = update.error
m.status = scanStateError
m.ended = UTCNow()
m.ended = now
}
m.fileNotFound = m.fileNotFound || update.fileNotFound
}
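// Editorial note: clamping m.lastHandout to now (above) keeps a peer whose
// clock runs ahead from recording a handout time in the local future, which
// would otherwise extend the cache's perceived freshness.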
View file
@ -39,6 +39,7 @@ const (
// Standard env prometheus auth type
const (
EnvPrometheusAuthType = "MINIO_PROMETHEUS_AUTH_TYPE"
EnvPrometheusOpenMetrics = "MINIO_PROMETHEUS_OPEN_METRICS"
)
type prometheusAuthType string
@ -58,14 +59,15 @@ func registerMetricsRouter(router *mux.Router) {
if authType == prometheusPublic {
auth = NoAuthMiddleware
}
metricsRouter.Handle(prometheusMetricsPathLegacy, auth(metricsHandler()))
metricsRouter.Handle(prometheusMetricsV2ClusterPath, auth(metricsServerHandler()))
metricsRouter.Handle(prometheusMetricsV2BucketPath, auth(metricsBucketHandler()))
metricsRouter.Handle(prometheusMetricsV2NodePath, auth(metricsNodeHandler()))
metricsRouter.Handle(prometheusMetricsV2ResourcePath, auth(metricsResourceHandler()))
// Metrics v3!
metricsV3Server := newMetricsV3Server(authType)
// Metrics v3
metricsV3Server := newMetricsV3Server(auth)
// Register metrics v3 handler. It also accepts an optional query
// parameter `?list` - see handler for details.
View file
@ -139,7 +139,7 @@ var (
)
// loadClusterUsageBucketMetrics - `MetricsLoaderFn` to load bucket usage metrics.
func loadClusterUsageBucketMetrics(ctx context.Context, m MetricValues, c *metricsCache, buckets []string) error {
func loadClusterUsageBucketMetrics(ctx context.Context, m MetricValues, c *metricsCache) error {
dataUsageInfo, err := c.dataUsageInfo.Get()
if err != nil {
metricsLogIf(ctx, err)
@ -153,11 +153,7 @@ func loadClusterUsageBucketMetrics(ctx context.Context, m MetricValues, c *metri
m.Set(usageSinceLastUpdateSeconds, float64(time.Since(dataUsageInfo.LastUpdate)))
for _, bucket := range buckets {
usage, ok := dataUsageInfo.BucketsUsage[bucket]
if !ok {
continue
}
for bucket, usage := range dataUsageInfo.BucketsUsage {
quota, err := globalBucketQuotaSys.Get(ctx, bucket)
if err != nil {
// Log and continue if we are unable to retrieve metrics for this
View file
@ -23,9 +23,12 @@ import (
"net/http"
"slices"
"strings"
"sync"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/mcontext"
"github.com/minio/mux"
"github.com/minio/pkg/v3/env"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
@ -33,41 +36,39 @@ import (
type promLogger struct{}
func (p promLogger) Println(v ...interface{}) {
s := make([]string, 0, len(v))
for _, val := range v {
s = append(s, fmt.Sprintf("%v", val))
}
err := fmt.Errorf("metrics handler error: %v", strings.Join(s, " "))
metricsLogIf(GlobalContext, err)
metricsLogIf(GlobalContext, fmt.Errorf("metrics handler error: %v", v))
}
type metricsV3Server struct {
registry *prometheus.Registry
opts promhttp.HandlerOpts
authFn func(http.Handler) http.Handler
auth func(http.Handler) http.Handler
metricsData *metricsV3Collection
}
func newMetricsV3Server(authType prometheusAuthType) *metricsV3Server {
var (
globalMetricsV3CollectorPaths []collectorPath
globalMetricsV3Once sync.Once
)
func newMetricsV3Server(auth func(h http.Handler) http.Handler) *metricsV3Server {
registry := prometheus.NewRegistry()
authFn := AuthMiddleware
if authType == prometheusPublic {
authFn = NoAuthMiddleware
}
metricGroups := newMetricGroups(registry)
globalMetricsV3Once.Do(func() {
globalMetricsV3CollectorPaths = metricGroups.collectorPaths
})
return &metricsV3Server{
registry: registry,
opts: promhttp.HandlerOpts{
ErrorLog: promLogger{},
ErrorHandling: promhttp.HTTPErrorOnError,
ErrorHandling: promhttp.ContinueOnError,
Registry: registry,
MaxRequestsInFlight: 2,
EnableOpenMetrics: env.Get(EnvPrometheusOpenMetrics, config.EnableOff) == config.EnableOn,
ProcessStartTime: globalBootTime,
},
authFn: authFn,
auth: auth,
metricsData: metricGroups,
}
}
@ -163,7 +164,7 @@ func (h *metricsV3Server) handle(path string, isListingRequest bool, buckets []s
http.Error(w, "Metrics Resource Not found", http.StatusNotFound)
})
// Require that metrics path has at least component.
// Require that the metrics path has at least one component.
if path == "/" {
return notFoundHandler
}
@ -221,7 +222,7 @@ func (h *metricsV3Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
pathComponents := mux.Vars(r)["pathComps"]
isListingRequest := r.Form.Has("list")
buckets := []string{}
var buckets []string
if strings.HasPrefix(pathComponents, "/bucket/") {
// bucket specific metrics, extract the bucket name from the path.
// it's the last part of the path. e.g. /bucket/api/<bucket-name>
@ -246,5 +247,5 @@ func (h *metricsV3Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
})
// Add authentication
h.authFn(tracedHandler).ServeHTTP(w, r)
h.auth(tracedHandler).ServeHTTP(w, r)
}
View file
@ -35,8 +35,8 @@ type collectorPath string
// converted to snake-case (by replacing '/' and '-' with '_') and prefixed with
// `minio_`.
func (cp collectorPath) metricPrefix() string {
s := strings.TrimPrefix(string(cp), "/")
s = strings.ReplaceAll(s, "/", "_")
s := strings.TrimPrefix(string(cp), SlashSeparator)
s = strings.ReplaceAll(s, SlashSeparator, "_")
s = strings.ReplaceAll(s, "-", "_")
return "minio_" + s
}
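// Example (hypothetical path): collectorPath("/cluster/usage-buckets").metricPrefix()
// trims the leading slash and maps both '/' and '-' to '_', yielding
// "minio_cluster_usage_buckets".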
@ -56,8 +56,8 @@ func (cp collectorPath) isDescendantOf(arg string) bool {
if len(arg) >= len(descendant) {
return false
}
if !strings.HasSuffix(arg, "/") {
arg += "/"
if !strings.HasSuffix(arg, SlashSeparator) {
arg += SlashSeparator
}
return strings.HasPrefix(descendant, arg)
}
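// Example (hypothetical paths): collectorPath("/cluster/usage/buckets").isDescendantOf("/cluster")
// is true, while isDescendantOf("/cluster/usage/buckets") on the same path is
// false: the length check above means a path never counts as its own descendant.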
@ -72,10 +72,11 @@ const (
GaugeMT
// HistogramMT - represents a histogram metric.
HistogramMT
// rangeL - represents a range label.
rangeL = "range"
)
// rangeL - represents a range label.
const rangeL = "range"
func (mt MetricType) String() string {
switch mt {
case CounterMT:
View file
@ -270,7 +270,7 @@ func newMetricGroups(r *prometheus.Registry) *metricsV3Collection {
loadClusterUsageObjectMetrics,
)
clusterUsageBucketsMG := NewBucketMetricsGroup(clusterUsageBucketsCollectorPath,
clusterUsageBucketsMG := NewMetricsGroup(clusterUsageBucketsCollectorPath,
[]MetricDescriptor{
usageSinceLastUpdateSecondsMD,
usageBucketTotalBytesMD,
View file
@ -501,8 +501,8 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj
reader *GetObjectReader
perr error
)
// avoid proxying if version is a delete marker
if !isErrMethodNotAllowed(err) && !(gr != nil && gr.ObjInfo.DeleteMarker) {
if (isErrObjectNotFound(err) || isErrVersionNotFound(err) || isErrReadQuorum(err)) && !(gr != nil && gr.ObjInfo.DeleteMarker) {
proxytgts := getProxyTargets(ctx, bucket, object, opts)
if !proxytgts.Empty() {
globalReplicationStats.incProxy(bucket, getObjectAPI, false)
@ -1028,7 +1028,7 @@ func (api objectAPIHandlers) headObjectHandler(ctx context.Context, objectAPI Ob
objInfo, err := getObjectInfo(ctx, bucket, object, opts)
var proxy proxyResult
if err != nil && !objInfo.DeleteMarker && !isErrMethodNotAllowed(err) {
if err != nil && !objInfo.DeleteMarker && (isErrObjectNotFound(err) || isErrVersionNotFound(err) || isErrReadQuorum(err)) {
// proxy HEAD to replication target if active-active replication configured on bucket
proxytgts := getProxyTargets(ctx, bucket, object, opts)
if !proxytgts.Empty() {
View file
@ -692,8 +692,8 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str
{"small-1", []int64{509}, make(map[string]string)},
{"small-2", []int64{5 * oneMiB}, make(map[string]string)},
// cases 4-7: multipart part objects
{"mp-0", []int64{5 * oneMiB, 1}, make(map[string]string)},
{"mp-1", []int64{5*oneMiB + 1, 1}, make(map[string]string)},
{"mp-0", []int64{5 * oneMiB, 10}, make(map[string]string)},
{"mp-1", []int64{5*oneMiB + 1, 10}, make(map[string]string)},
{"mp-2", []int64{5487701, 5487799, 3}, make(map[string]string)},
{"mp-3", []int64{10499807, 10499963, 7}, make(map[string]string)},
// cases 8-11: small single part objects with encryption
@ -702,17 +702,13 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str
{"enc-small-1", []int64{509}, mapCopy(metaWithSSEC)},
{"enc-small-2", []int64{5 * oneMiB}, mapCopy(metaWithSSEC)},
// cases 12-15: multipart part objects with encryption
{"enc-mp-0", []int64{5 * oneMiB, 1}, mapCopy(metaWithSSEC)},
{"enc-mp-1", []int64{5*oneMiB + 1, 1}, mapCopy(metaWithSSEC)},
{"enc-mp-0", []int64{5 * oneMiB, 10}, mapCopy(metaWithSSEC)},
{"enc-mp-1", []int64{5*oneMiB + 1, 10}, mapCopy(metaWithSSEC)},
{"enc-mp-2", []int64{5487701, 5487799, 3}, mapCopy(metaWithSSEC)},
{"enc-mp-3", []int64{10499807, 10499963, 7}, mapCopy(metaWithSSEC)},
}
// SSEC can't be used with compression
globalCompressConfigMu.Lock()
globalCompressEnabled := globalCompressConfig.Enabled
globalCompressConfigMu.Unlock()
if globalCompressEnabled {
objectInputs = objectInputs[0:8]
if testing.Short() {
objectInputs = append(objectInputs[0:5], objectInputs[8:11]...)
}
// iterate through the above set of inputs and upload the object.
for _, input := range objectInputs {
@ -768,6 +764,7 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str
readers = append(readers, NewDummyDataGen(p, cumulativeSum))
cumulativeSum += p
}
refReader := io.LimitReader(ioutilx.NewSkipReader(io.MultiReader(readers...), off), length)
if ok, msg := cmpReaders(refReader, rec.Body); !ok {
t.Fatalf("(%s) Object: %s Case %d ByteRange: %s --> data mismatch! (msg: %s)", instanceType, oi.objectName, i+1, byteRange, msg)
View file
@ -19,6 +19,7 @@ package cmd
import (
"crypto/subtle"
"encoding/hex"
"io"
"net/http"
"net/url"
@ -33,6 +34,7 @@ import (
"github.com/minio/minio/internal/auth"
levent "github.com/minio/minio/internal/config/lambda/event"
"github.com/minio/minio/internal/hash/sha256"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
)
@ -77,16 +79,13 @@ func getLambdaEventData(bucket, object string, cred auth.Credentials, r *http.Re
return levent.Event{}, err
}
token, err := authenticateNode(cred.AccessKey, cred.SecretKey, u.RawQuery)
if err != nil {
return levent.Event{}, err
}
ckSum := sha256.Sum256([]byte(cred.AccessKey + u.RawQuery))
eventData := levent.Event{
GetObjectContext: &levent.GetObjectContext{
InputS3URL: u.String(),
OutputRoute: shortuuid.New(),
OutputToken: token,
OutputToken: hex.EncodeToString(ckSum[:]),
},
UserRequest: levent.UserRequest{
URL: r.URL.String(),
@ -199,7 +198,7 @@ func fwdStatusToAPIError(resp *http.Response) *APIError {
return nil
}
// GetObjectLamdbaHandler - GET Object with transformed data via lambda functions
// GetObjectLambdaHandler - GET Object with transformed data via lambda functions
// ----------
// This implementation of the GET operation applies lambda functions and returns the
// response generated via the lambda functions. To use this API, you must have READ access
View file
@ -559,21 +559,17 @@ func execExtended(t *testing.T, fn func(t *testing.T, init func(), bucketOptions
t.Run("default", func(t *testing.T) {
fn(t, nil, MakeBucketOptions{})
})
t.Run("defaultVerioned", func(t *testing.T) {
t.Run("default+versioned", func(t *testing.T) {
fn(t, nil, MakeBucketOptions{VersioningEnabled: true})
})
if testing.Short() {
return
}
t.Run("compressed", func(t *testing.T) {
fn(t, func() {
resetCompressEncryption()
enableCompression(t, false, []string{"*"}, []string{"*"})
}, MakeBucketOptions{})
})
t.Run("compressedVerioned", func(t *testing.T) {
t.Run("compressed+versioned", func(t *testing.T) {
fn(t, func() {
resetCompressEncryption()
enableCompression(t, false, []string{"*"}, []string{"*"})
@ -588,7 +584,7 @@ func execExtended(t *testing.T, fn func(t *testing.T, init func(), bucketOptions
enableEncryption(t)
}, MakeBucketOptions{})
})
t.Run("encryptedVerioned", func(t *testing.T) {
t.Run("encrypted+versioned", func(t *testing.T) {
fn(t, func() {
resetCompressEncryption()
enableEncryption(t)
@ -603,7 +599,7 @@ func execExtended(t *testing.T, fn func(t *testing.T, init func(), bucketOptions
enableCompression(t, true, []string{"*"}, []string{"*"})
}, MakeBucketOptions{})
})
t.Run("compressed+encryptedVerioned", func(t *testing.T) {
t.Run("compressed+encrypted+versioned", func(t *testing.T) {
fn(t, func() {
resetCompressEncryption()
enableCompression(t, true, []string{"*"}, []string{"*"})
View file
@ -364,7 +364,7 @@ func checkPostPolicy(formValues http.Header, postPolicyForm PostPolicyForm) erro
for key := range checkHeader {
logKeys = append(logKeys, key)
}
return fmt.Errorf("Each form field that you specify in a form (except %s) must appear in the list of conditions.", strings.Join(logKeys, ", "))
return fmt.Errorf("Each form field that you specify in a form must appear in the list of policy conditions. %q not specified in the policy.", strings.Join(logKeys, ", "))
}
return nil
View file
@ -166,13 +166,13 @@ func connectLoadInitFormats(verboseLogging bool, firstDisk bool, storageDisks []
if err != nil && !errors.Is(err, errXLBackend) && !errors.Is(err, errUnformattedDisk) {
if errors.Is(err, errDiskNotFound) && verboseLogging {
if globalEndpoints.NEndpoints() > 1 {
logger.Error("Unable to connect to %s: %v", endpoints[i], isServerResolvable(endpoints[i], time.Second))
logger.Info("Unable to connect to %s: %v, will be retried", endpoints[i], isServerResolvable(endpoints[i], time.Second))
} else {
logger.Fatal(err, "Unable to connect to %s: %v", endpoints[i], isServerResolvable(endpoints[i], time.Second))
}
} else {
if globalEndpoints.NEndpoints() > 1 {
logger.Error("Unable to use the drive %s: %v", endpoints[i], err)
logger.Info("Unable to use the drive %s: %v, will be retried", endpoints[i], err)
} else {
logger.Fatal(errInvalidArgument, "Unable to use the drive %s: %v", endpoints[i], err)
}
View file
@ -39,7 +39,7 @@ func registerDistErasureRouters(router *mux.Router, endpointServerPools Endpoint
registerLockRESTHandlers()
// Add grid to router
router.Handle(grid.RoutePath, adminMiddleware(globalGrid.Load().Handler(), noGZFlag, noObjLayerFlag))
router.Handle(grid.RoutePath, adminMiddleware(globalGrid.Load().Handler(storageServerRequestValidate), noGZFlag, noObjLayerFlag))
}
// List of some generic middlewares which are applied for all incoming requests.
View file
@ -841,13 +841,14 @@ func serverMain(ctx *cli.Context) {
// Verify kernel release and version.
if oldLinux() {
warnings = append(warnings, color.YellowBold("- Detected Linux kernel version older than 4.0.0 release, there are some known potential performance problems with this kernel version. MinIO recommends a minimum of 4.x.x linux kernel version for best performance"))
warnings = append(warnings, color.YellowBold("Detected Linux kernel version older than 4.0 release, there are some known potential performance problems with this kernel version. MinIO recommends a minimum of 4.x linux kernel version for best performance"))
}
maxProcs := runtime.GOMAXPROCS(0)
cpuProcs := runtime.NumCPU()
if maxProcs < cpuProcs {
warnings = append(warnings, color.YellowBold("- Detected GOMAXPROCS(%d) < NumCPU(%d), please make sure to provide all PROCS to MinIO for optimal performance", maxProcs, cpuProcs))
warnings = append(warnings, color.YellowBold("Detected GOMAXPROCS(%d) < NumCPU(%d), please make sure to provide all PROCS to MinIO for optimal performance",
maxProcs, cpuProcs))
}
// Initialize grid
@ -897,7 +898,7 @@ func serverMain(ctx *cli.Context) {
})
}
if !globalDisableFreezeOnBoot {
if globalEnableSyncBoot {
// Freeze the services until the bucket notification subsystem gets initialized.
bootstrapTrace("freezeServices", freezeServices)
}
@ -921,16 +922,18 @@ func serverMain(ctx *cli.Context) {
}
bootstrapTrace("waitForQuorum", func() {
result := newObject.Health(context.Background(), HealthOptions{})
result := newObject.Health(context.Background(), HealthOptions{NoLogging: true})
for !result.HealthyRead {
if debugNoExit {
logger.Info("Not waiting for quorum since we are debugging.. possible cause unhealthy sets (%s)", result)
logger.Info("Not waiting for quorum since we are debugging.. possible cause unhealthy sets")
logger.Info(result.String())
break
}
d := time.Duration(r.Float64() * float64(time.Second))
logger.Info("Waiting for quorum READ healthcheck to succeed.. possible cause unhealthy sets (%s), retrying in %s", result, d)
logger.Info("Waiting for quorum READ healthcheck to succeed retrying in %s.. possible cause unhealthy sets", d)
logger.Info(result.String())
time.Sleep(d)
result = newObject.Health(context.Background(), HealthOptions{})
result = newObject.Health(context.Background(), HealthOptions{NoLogging: true})
}
})
@ -953,11 +956,11 @@ func serverMain(ctx *cli.Context) {
}
if !globalServerCtxt.StrictS3Compat {
warnings = append(warnings, color.YellowBold("- Strict AWS S3 compatible incoming PUT, POST content payload validation is turned off, caution is advised do not use in production"))
warnings = append(warnings, color.YellowBold("Strict AWS S3 compatible incoming PUT, POST content payload validation is turned off, caution is advised do not use in production"))
}
})
if globalActiveCred.Equal(auth.DefaultCredentials) {
msg := fmt.Sprintf("- Detected default credentials '%s', we recommend that you change these values with 'MINIO_ROOT_USER' and 'MINIO_ROOT_PASSWORD' environment variables",
msg := fmt.Sprintf("Detected default credentials '%s', we recommend that you change these values with 'MINIO_ROOT_USER' and 'MINIO_ROOT_PASSWORD' environment variables",
globalActiveCred)
warnings = append(warnings, color.YellowBold(msg))
}
@ -1000,10 +1003,11 @@ func serverMain(ctx *cli.Context) {
}()
go func() {
if !globalDisableFreezeOnBoot {
if globalEnableSyncBoot {
defer bootstrapTrace("unfreezeServices", unfreezeServices)
t := time.AfterFunc(5*time.Minute, func() {
warnings = append(warnings, color.YellowBold("- Initializing the config subsystem is taking longer than 5 minutes. Please set '_MINIO_DISABLE_API_FREEZE_ON_BOOT=true' to not freeze the APIs"))
warnings = append(warnings,
color.YellowBold("- Initializing the config subsystem is taking longer than 5 minutes. Please remove 'MINIO_SYNC_BOOT=on' to not freeze the APIs"))
})
defer t.Stop()
}
@ -1029,16 +1033,6 @@ func serverMain(ctx *cli.Context) {
globalTransitionState.Init(newObject)
})
// Initialize batch job pool.
bootstrapTrace("newBatchJobPool", func() {
globalBatchJobPool = newBatchJobPool(GlobalContext, newObject, 100)
})
// Initialize the license update job
bootstrapTrace("initLicenseUpdateJob", func() {
initLicenseUpdateJob(GlobalContext, newObject)
})
go func() {
// Initialize transition tier configuration manager
bootstrapTrace("globalTierConfigMgr.Init", func() {
@ -1103,22 +1097,21 @@ func serverMain(ctx *cli.Context) {
})
}
// Initialize batch job pool.
bootstrapTrace("newBatchJobPool", func() {
globalBatchJobPool = newBatchJobPool(GlobalContext, newObject, 100)
})
// Prints the formatted startup message, if err is not nil then it prints additional information as well.
printStartupMessage(getAPIEndpoints(), err)
// Print a warning at the end of the startup banner so it is more noticeable
if newObject.BackendInfo().StandardSCParity == 0 {
warnings = append(warnings, color.YellowBold("- The standard parity is set to 0. This can lead to data loss."))
if newObject.BackendInfo().StandardSCParity == 0 && !globalIsErasureSD {
warnings = append(warnings, color.YellowBold("The standard parity is set to 0. This can lead to data loss."))
}
objAPI := newObjectLayerFn()
if objAPI != nil {
printStorageInfo(objAPI.StorageInfo(GlobalContext, true))
}
if len(warnings) > 0 {
logger.Info(color.Yellow("STARTUP WARNINGS:"))
for _, warn := range warnings {
logger.Info(warn)
}
logger.Warning(warn)
}
}()
View file
@ -82,11 +82,7 @@ func setMaxResources(ctx serverCtxt) (err error) {
}
if ctx.MemLimit > 0 {
maxLimit = ctx.MemLimit
}
if maxLimit > 0 {
debug.SetMemoryLimit(int64(maxLimit))
debug.SetMemoryLimit(int64(ctx.MemLimit))
}
// Do not use RLIMIT_AS as that is not useful and at times on systems < 4Gi
View file
@ -23,7 +23,6 @@ import (
"net/url"
"strings"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/logger"
xnet "github.com/minio/pkg/v3/net"
@ -37,7 +36,11 @@ func getFormatStr(strLen int, padding int) string {
// Prints the formatted startup message.
func printStartupMessage(apiEndpoints []string, err error) {
logger.Info(color.Bold(MinioBannerName))
banner := strings.Repeat("-", len(MinioBannerName))
if globalIsDistErasure {
logger.Startup(color.Bold(banner))
}
logger.Startup(color.Bold(MinioBannerName))
if err != nil {
if globalConsoleSys != nil {
globalConsoleSys.Send(GlobalContext, fmt.Sprintf("Server startup failed with '%v', some features may be missing", err))
@ -47,7 +50,7 @@ func printStartupMessage(apiEndpoints []string, err error) {
if !globalSubnetConfig.Registered() {
var builder strings.Builder
startupBanner(&builder)
logger.Info(builder.String())
logger.Startup(builder.String())
}
strippedAPIEndpoints := stripStandardPorts(apiEndpoints, globalMinioHost)
@ -61,6 +64,9 @@ func printStartupMessage(apiEndpoints []string, err error) {
// Prints documentation message.
printObjectAPIMsg()
if globalIsDistErasure {
logger.Startup(color.Bold(banner))
}
}
// Returns true if input is IPv6
@ -113,21 +119,21 @@ func printServerCommonMsg(apiEndpoints []string) {
apiEndpointStr := strings.TrimSpace(strings.Join(apiEndpoints, " "))
// Colorize the message and print.
logger.Info(color.Blue("API: ") + color.Bold(fmt.Sprintf("%s ", apiEndpointStr)))
logger.Startup(color.Blue("API: ") + color.Bold(fmt.Sprintf("%s ", apiEndpointStr)))
if color.IsTerminal() && (!globalServerCtxt.Anonymous && !globalServerCtxt.JSON && globalAPIConfig.permitRootAccess()) {
logger.Info(color.Blue(" RootUser: ") + color.Bold("%s ", cred.AccessKey))
logger.Info(color.Blue(" RootPass: ") + color.Bold("%s \n", cred.SecretKey))
logger.Startup(color.Blue(" RootUser: ") + color.Bold("%s ", cred.AccessKey))
logger.Startup(color.Blue(" RootPass: ") + color.Bold("%s \n", cred.SecretKey))
if region != "" {
logger.Info(color.Blue(" Region: ") + color.Bold("%s", fmt.Sprintf(getFormatStr(len(region), 2), region)))
logger.Startup(color.Blue(" Region: ") + color.Bold("%s", fmt.Sprintf(getFormatStr(len(region), 2), region)))
}
}
if globalBrowserEnabled {
consoleEndpointStr := strings.Join(stripStandardPorts(getConsoleEndpoints(), globalMinioConsoleHost), " ")
logger.Info(color.Blue("WebUI: ") + color.Bold(fmt.Sprintf("%s ", consoleEndpointStr)))
logger.Startup(color.Blue("WebUI: ") + color.Bold(fmt.Sprintf("%s ", consoleEndpointStr)))
if color.IsTerminal() && (!globalServerCtxt.Anonymous && !globalServerCtxt.JSON && globalAPIConfig.permitRootAccess()) {
logger.Info(color.Blue(" RootUser: ") + color.Bold("%s ", cred.AccessKey))
logger.Info(color.Blue(" RootPass: ") + color.Bold("%s ", cred.SecretKey))
logger.Startup(color.Blue(" RootUser: ") + color.Bold("%s ", cred.AccessKey))
logger.Startup(color.Blue(" RootPass: ") + color.Bold("%s ", cred.SecretKey))
}
}
@ -137,7 +143,7 @@ func printServerCommonMsg(apiEndpoints []string) {
// Prints startup message for Object API access, prints link to our SDK documentation.
func printObjectAPIMsg() {
logger.Info(color.Blue("\nDocs: ") + "https://min.io/docs/minio/linux/index.html")
logger.Startup(color.Blue("\nDocs: ") + "https://min.io/docs/minio/linux/index.html")
}
func printLambdaTargets() {
@ -149,7 +155,7 @@ func printLambdaTargets() {
for _, arn := range globalLambdaTargetList.List(globalSite.Region()) {
arnMsg += color.Bold(fmt.Sprintf("%s ", arn))
}
logger.Info(arnMsg + "\n")
logger.Startup(arnMsg + "\n")
}
// Prints bucket notification configurations.
@ -168,7 +174,7 @@ func printEventNotifiers() {
arnMsg += color.Bold(fmt.Sprintf("%s ", arn))
}
logger.Info(arnMsg + "\n")
logger.Startup(arnMsg + "\n")
}
// Prints startup message for command line access. Prints link to our documentation
@ -181,35 +187,9 @@ func printCLIAccessMsg(endPoint string, alias string) {
// Configure 'mc', following block prints platform specific information for minio client.
if color.IsTerminal() && (!globalServerCtxt.Anonymous && globalAPIConfig.permitRootAccess()) {
logger.Info(color.Blue("\nCLI: ") + mcQuickStartGuide)
logger.Startup(color.Blue("\nCLI: ") + mcQuickStartGuide)
mcMessage := fmt.Sprintf("$ mc alias set '%s' '%s' '%s' '%s'", alias,
endPoint, cred.AccessKey, cred.SecretKey)
logger.Info(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
}
}
// Get formatted disk/storage info message.
func getStorageInfoMsg(storageInfo StorageInfo) string {
var msg string
var mcMessage string
onlineDisks, offlineDisks := getOnlineOfflineDisksStats(storageInfo.Disks)
if storageInfo.Backend.Type == madmin.Erasure {
if offlineDisks.Sum() > 0 {
mcMessage = "Use `mc admin info` to look for latest server/drive info\n"
}
diskInfo := fmt.Sprintf(" %d Online, %d Offline. ", onlineDisks.Sum(), offlineDisks.Sum())
msg += color.Blue("Status:") + fmt.Sprintf(getFormatStr(len(diskInfo), 8), diskInfo)
if len(mcMessage) > 0 {
msg = fmt.Sprintf("%s %s", mcMessage, msg)
}
}
return msg
}
// Prints startup message of storage capacity and erasure information.
func printStorageInfo(storageInfo StorageInfo) {
if msg := getStorageInfoMsg(storageInfo); msg != "" {
logger.Info(msg)
logger.Startup(fmt.Sprintf(getFormatStr(len(mcMessage), 3), mcMessage))
}
}
View file
@ -21,32 +21,9 @@ import (
"context"
"os"
"reflect"
"strings"
"testing"
"github.com/minio/madmin-go/v3"
)
// Tests if we generate storage info.
func TestStorageInfoMsg(t *testing.T) {
infoStorage := StorageInfo{}
infoStorage.Disks = []madmin.Disk{
{Endpoint: "http://127.0.0.1:9000/data/1/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9000/data/2/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9000/data/3/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9000/data/4/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9001/data/1/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9001/data/2/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9001/data/3/", State: madmin.DriveStateOk},
{Endpoint: "http://127.0.0.1:9001/data/4/", State: madmin.DriveStateOffline},
}
infoStorage.Backend.Type = madmin.Erasure
if msg := getStorageInfoMsg(infoStorage); !strings.Contains(msg, "7 Online, 1 Offline") {
t.Fatal("Unexpected storage info message, found:", msg)
}
}
// Tests stripping standard ports from apiEndpoints.
func TestStripStandardPorts(t *testing.T) {
apiEndpoints := []string{"http://127.0.0.1:9000", "http://127.0.0.2:80", "https://127.0.0.3:443"}
View file
@ -35,6 +35,7 @@ import (
"time"
"github.com/dustin/go-humanize"
jwtgo "github.com/golang-jwt/jwt/v4"
"github.com/minio/minio-go/v7/pkg/set"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/pkg/v3/policy"
@ -122,6 +123,9 @@ func runAllTests(suite *TestSuiteCommon, c *check) {
suite.TestObjectMultipartListError(c)
suite.TestObjectValidMD5(c)
suite.TestObjectMultipart(c)
suite.TestMetricsV3Handler(c)
suite.TestBucketSQSNotificationWebHook(c)
suite.TestBucketSQSNotificationAMQP(c)
suite.TearDownSuite(c)
}
@ -189,6 +193,36 @@ func (s *TestSuiteCommon) TearDownSuite(c *check) {
s.testServer.Stop()
}
const (
defaultPrometheusJWTExpiry = 100 * 365 * 24 * time.Hour
)
func (s *TestSuiteCommon) TestMetricsV3Handler(c *check) {
jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.StandardClaims{
ExpiresAt: time.Now().UTC().Add(defaultPrometheusJWTExpiry).Unix(),
Subject: s.accessKey,
Issuer: "prometheus",
})
token, err := jwt.SignedString([]byte(s.secretKey))
c.Assert(err, nil)
for _, cpath := range globalMetricsV3CollectorPaths {
request, err := newTestSignedRequest(http.MethodGet, s.endPoint+minioReservedBucketPath+metricsV3Path+string(cpath),
0, nil, s.accessKey, s.secretKey, s.signer)
c.Assert(err, nil)
request.Header.Set("Authorization", "Bearer "+token)
// execute the request.
response, err := s.client.Do(request)
c.Assert(err, nil)
// assert the http response status code.
c.Assert(response.StatusCode, http.StatusOK)
}
}
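// Editorial note: globalMetricsV3CollectorPaths is captured via sync.Once in
// newMetricsV3Server (see the metrics-v3 change in this commit), which is what
// lets this suite probe every registered v3 collector endpoint without
// maintaining a duplicate list.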
func (s *TestSuiteCommon) TestBucketSQSNotificationWebHook(c *check) {
// Sample bucket notification.
bucketNotificationBuf := `<NotificationConfiguration><QueueConfiguration><Event>s3:ObjectCreated:Put</Event><Filter><S3Key><FilterRule><Name>prefix</Name><Value>images/</Value></FilterRule></S3Key></Filter><Id>1</Id><Queue>arn:minio:sqs:us-east-1:444455556666:webhook</Queue></QueueConfiguration></NotificationConfiguration>`
View file
@ -161,11 +161,13 @@ internalAuth:
return nil, errNoSuchUser
}
if caPublicKey != nil {
if caPublicKey != nil && pass == nil {
err := validateKey(c, key)
if err != nil {
return nil, errAuthentication
}
} else {
// Temporary credentials are not allowed.

View file

@ -194,9 +194,12 @@ func (s *TestSuiteIAM) SFTPInvalidServiceAccountPassword(c *check) {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, "readwrite", accessKey, false)
if err != nil {
c.Fatalf("unable to set policy: %v", err)
userReq := madmin.PolicyAssociationReq{
Policies: []string{"readwrite"},
User: accessKey,
}
if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil {
c.Fatalf("Unable to attach policy: %v", err)
}
newSSHCon := newSSHConnMock(accessKey + "=svc")
@ -222,9 +225,12 @@ func (s *TestSuiteIAM) SFTPServiceAccountLogin(c *check) {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, "readwrite", accessKey, false)
if err != nil {
c.Fatalf("unable to set policy: %v", err)
userReq := madmin.PolicyAssociationReq{
Policies: []string{"readwrite"},
User: accessKey,
}
if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil {
c.Fatalf("Unable to attach policy: %v", err)
}
newSSHCon := newSSHConnMock(accessKey + "=svc")
@ -270,9 +276,12 @@ func (s *TestSuiteIAM) SFTPValidLDAPLoginWithPassword(c *check) {
}
userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
err = s.adm.SetPolicy(ctx, policy, userDN, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
userReq := madmin.PolicyAssociationReq{
Policies: []string{policy},
User: userDN,
}
if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil {
c.Fatalf("Unable to attach policy: %v", err)
}
newSSHCon := newSSHConnMock("dillon=ldap")
View file
@ -2250,10 +2250,18 @@ func (c *SiteReplicationSys) toErrorFromErrMap(errMap map[string]error, actionNa
return nil
}
// Get ordered list of keys of errMap
keys := []string{}
for d := range errMap {
keys = append(keys, d)
}
sort.Strings(keys)
var success int
msgs := []string{}
for d, err := range errMap {
for _, d := range keys {
name := c.state.Peers[d].Name
err := errMap[d]
if err == nil {
msgs = append(msgs, fmt.Sprintf("'%s' on site %s (%s): succeeded", actionName, name, d))
success++
@ -2261,7 +2269,7 @@ func (c *SiteReplicationSys) toErrorFromErrMap(errMap map[string]error, actionNa
msgs = append(msgs, fmt.Sprintf("'%s' on site %s (%s): failed(%v)", actionName, name, d, err))
}
}
if success == len(errMap) {
if success == len(keys) {
return nil
}
return fmt.Errorf("Site replication error(s): \n%s", strings.Join(msgs, "\n"))
@ -5225,7 +5233,7 @@ func (c *SiteReplicationSys) healBucketReplicationConfig(ctx context.Context, ob
}
if replMismatch {
replLogIf(ctx, c.annotateErr(configureReplication, c.PeerBucketConfigureReplHandler(ctx, bucket)))
replLogOnceIf(ctx, c.annotateErr(configureReplication, c.PeerBucketConfigureReplHandler(ctx, bucket)), "heal-bucket-replication-config")
}
return nil
}
@ -5318,7 +5326,10 @@ func (c *SiteReplicationSys) healPolicies(ctx context.Context, objAPI ObjectLaye
UpdatedAt: lastUpdate,
})
if err != nil {
replLogIf(ctx, fmt.Errorf("Unable to heal IAM policy %s from peer site %s -> site %s : %w", policy, latestPeerName, peerName, err))
replLogOnceIf(
ctx,
fmt.Errorf("Unable to heal IAM policy %s from peer site %s -> site %s : %w", policy, latestPeerName, peerName, err),
fmt.Sprintf("heal-policy-%s", policy))
}
}
return nil
@ -5379,7 +5390,8 @@ func (c *SiteReplicationSys) healUserPolicies(ctx context.Context, objAPI Object
UpdatedAt: lastUpdate,
})
if err != nil {
replLogIf(ctx, fmt.Errorf("Unable to heal IAM user policy mapping for %s from peer site %s -> site %s : %w", user, latestPeerName, peerName, err))
replLogOnceIf(ctx, fmt.Errorf("Unable to heal IAM user policy mapping from peer site %s -> site %s : %w", latestPeerName, peerName, err),
fmt.Sprintf("heal-user-policy-%s", user))
}
}
return nil
@ -5442,7 +5454,9 @@ func (c *SiteReplicationSys) healGroupPolicies(ctx context.Context, objAPI Objec
UpdatedAt: lastUpdate,
})
if err != nil {
replLogIf(ctx, fmt.Errorf("Unable to heal IAM group policy mapping for %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err))
replLogOnceIf(ctx,
fmt.Errorf("Unable to heal IAM group policy mapping for from peer site %s -> site %s : %w", latestPeerName, peerName, err),
fmt.Sprintf("heal-group-policy-%s", group))
}
}
return nil
@ -5503,13 +5517,17 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer,
if creds.IsServiceAccount() {
claims, err := globalIAMSys.GetClaimsForSvcAcc(ctx, creds.AccessKey)
if err != nil {
replLogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
replLogOnceIf(ctx,
fmt.Errorf("Unable to heal service account from peer site %s -> %s : %w", latestPeerName, peerName, err),
fmt.Sprintf("heal-user-%s", user))
continue
}
_, policy, err := globalIAMSys.GetServiceAccount(ctx, creds.AccessKey)
if err != nil {
replLogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
replLogOnceIf(ctx,
fmt.Errorf("Unable to heal service account from peer site %s -> %s : %w", latestPeerName, peerName, err),
fmt.Sprintf("heal-user-%s", user))
continue
}
@ -5517,7 +5535,9 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer,
if policy != nil {
policyJSON, err = json.Marshal(policy)
if err != nil {
replLogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
replLogOnceIf(ctx,
fmt.Errorf("Unable to heal service account from peer site %s -> %s : %w", latestPeerName, peerName, err),
fmt.Sprintf("heal-user-%s", user))
continue
}
}
@ -5540,7 +5560,9 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer,
},
UpdatedAt: lastUpdate,
}); err != nil {
replLogIf(ctx, fmt.Errorf("Unable to heal service account %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
replLogOnceIf(ctx,
fmt.Errorf("Unable to heal service account from peer site %s -> %s : %w", latestPeerName, peerName, err),
fmt.Sprintf("heal-user-%s", user))
}
continue
}
@ -5553,7 +5575,9 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer,
// policy. The session token will contain info about policy to
// be applied.
if !errors.Is(err, errNoSuchUser) {
replLogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
replLogOnceIf(ctx,
fmt.Errorf("Unable to heal temporary credentials from peer site %s -> %s : %w", latestPeerName, peerName, err),
fmt.Sprintf("heal-user-%s", user))
continue
}
} else {
@ -5571,7 +5595,9 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer,
},
UpdatedAt: lastUpdate,
}); err != nil {
replLogIf(ctx, fmt.Errorf("Unable to heal temporary credentials %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
replLogOnceIf(ctx,
fmt.Errorf("Unable to heal temporary credentials from peer site %s -> %s : %w", latestPeerName, peerName, err),
fmt.Sprintf("heal-user-%s", user))
}
continue
}
@ -5587,7 +5613,9 @@ func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer,
},
UpdatedAt: lastUpdate,
}); err != nil {
replLogIf(ctx, fmt.Errorf("Unable to heal user %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err))
replLogOnceIf(ctx,
fmt.Errorf("Unable to heal user from peer site %s -> %s : %w", latestPeerName, peerName, err),
fmt.Sprintf("heal-user-%s", user))
}
}
return nil
@ -5651,7 +5679,9 @@ func (c *SiteReplicationSys) healGroups(ctx context.Context, objAPI ObjectLayer,
},
UpdatedAt: lastUpdate,
}); err != nil {
replLogIf(ctx, fmt.Errorf("Unable to heal group %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err))
replLogOnceIf(ctx,
fmt.Errorf("Unable to heal group from peer site %s -> site %s : %w", latestPeerName, peerName, err),
fmt.Sprintf("heal-group-%s", group))
}
}
return nil
View file
@ -109,6 +109,21 @@ func (s *storageRESTServer) writeErrorResponse(w http.ResponseWriter, err error)
// DefaultSkewTime - skew time is 15 minutes between minio peers.
const DefaultSkewTime = 15 * time.Minute
// validateStorageRequestToken will validate the token against the provided audience.
func validateStorageRequestToken(token string) error {
claims := xjwt.NewStandardClaims()
if err := xjwt.ParseWithStandardClaims(token, claims, []byte(globalActiveCred.SecretKey)); err != nil {
return errAuthentication
}
owner := claims.AccessKey == globalActiveCred.AccessKey || claims.Subject == globalActiveCred.AccessKey
if !owner {
return errAuthentication
}
return nil
}
// Authenticates storage client's requests and validates for skewed time.
func storageServerRequestValidate(r *http.Request) error {
token, err := jwtreq.AuthorizationHeaderExtractor.ExtractToken(r)
@ -119,30 +134,23 @@ func storageServerRequestValidate(r *http.Request) error {
return errMalformedAuth
}
claims := xjwt.NewStandardClaims()
if err = xjwt.ParseWithStandardClaims(token, claims, []byte(globalActiveCred.SecretKey)); err != nil {
return errAuthentication
if err = validateStorageRequestToken(token); err != nil {
return err
}
owner := claims.AccessKey == globalActiveCred.AccessKey || claims.Subject == globalActiveCred.AccessKey
if !owner {
return errAuthentication
}
if claims.Audience != r.URL.RawQuery {
return errAuthentication
}
requestTimeStr := r.Header.Get("X-Minio-Time")
requestTime, err := time.Parse(time.RFC3339, requestTimeStr)
nanoTime, err := strconv.ParseInt(r.Header.Get("X-Minio-Time"), 10, 64)
if err != nil {
return errMalformedAuth
}
utcNow := UTCNow()
delta := requestTime.Sub(utcNow)
localTime := UTCNow()
remoteTime := time.Unix(0, nanoTime)
delta := remoteTime.Sub(localTime)
if delta < 0 {
delta *= -1
}
if delta > DefaultSkewTime {
return errSkewedAuthTime
}
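The wire change here is easy to miss: `X-Minio-Time` now carries Unix nanoseconds instead of an RFC3339 timestamp, and the absolute clock difference against local UTC time is bounded by `DefaultSkewTime`. Below is a minimal standalone sketch of that check — `checkSkew` is a hypothetical helper name, and only standard-library calls are used:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"strconv"
	"time"
)

// checkSkew mirrors the validation above: it parses X-Minio-Time as Unix
// nanoseconds and rejects requests whose clock differs from ours by more
// than the allowed skew.
func checkSkew(r *http.Request, skew time.Duration) error {
	nanoTime, err := strconv.ParseInt(r.Header.Get("X-Minio-Time"), 10, 64)
	if err != nil {
		return errors.New("malformed auth: bad X-Minio-Time")
	}
	delta := time.Now().UTC().Sub(time.Unix(0, nanoTime))
	if delta < 0 {
		delta = -delta
	}
	if delta > skew {
		return errors.New("skewed auth time")
	}
	return nil
}

func main() {
	req, _ := http.NewRequest(http.MethodGet, "http://example/", nil)
	// A client would stamp the header like this before sending the request.
	req.Header.Set("X-Minio-Time", strconv.FormatInt(time.Now().UnixNano(), 10))
	fmt.Println(checkSkew(req, 15*time.Minute)) // <nil>
}
```

An integer nanosecond value is unambiguous and cheaper to produce and parse than an RFC3339 string, which fits a header that is checked on every storage request.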

View file

@ -315,6 +315,7 @@ func newStorageRESTHTTPServerClient(t testing.TB) *storageRESTClient {
url.Path = t.TempDir()
globalMinioHost, globalMinioPort = mustSplitHostPort(url.Host)
globalNodeAuthToken, _ = authenticateNode(globalActiveCred.AccessKey, globalActiveCred.SecretKey)
endpoint, err := NewEndpoint(url.String())
if err != nil {

View file

@ -116,9 +116,12 @@ func (s *TestSuiteIAM) TestSTSServiceAccountsWithUsername(c *check) {
c.Fatalf("policy add error: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, "dillon", false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: "dillon",
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("Unable to attach policy: %v", err)
}
assumeRole := cr.STSAssumeRole{
@ -231,9 +234,12 @@ func (s *TestSuiteIAM) TestSTSWithDenyDeleteVersion(c *check) {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("Unable to attach policy: %v", err)
}
// confirm that the user is able to access the bucket
@ -332,9 +338,12 @@ func (s *TestSuiteIAM) TestSTSWithTags(c *check) {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("Unable to attach policy: %v", err)
}
// confirm that the user is able to access the bucket
@ -420,9 +429,12 @@ func (s *TestSuiteIAM) TestSTS(c *check) {
c.Fatalf("Unable to set user: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, accessKey, false)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: accessKey,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("Unable to attach policy: %v", err)
}
// confirm that the user is able to access the bucket
@ -515,9 +527,12 @@ func (s *TestSuiteIAM) TestSTSWithGroupPolicy(c *check) {
c.Fatalf("unable to add user to group: %v", err)
}
err = s.adm.SetPolicy(ctx, policy, "test-group", true)
_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
Group: "test-group",
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
c.Fatalf("Unable to attach policy: %v", err)
}
// confirm that the user is able to access the bucket - permission comes
@ -718,6 +733,7 @@ func TestIAMWithLDAPServerSuite(t *testing.T) {
suite.SetUpSuite(c)
suite.SetUpLDAP(c, ldapServer)
suite.TestLDAPSTS(c)
suite.TestLDAPPolicyEntitiesLookup(c)
suite.TestLDAPUnicodeVariations(c)
suite.TestLDAPSTSServiceAccounts(c)
suite.TestLDAPSTSServiceAccountsWithUsername(c)
@ -749,6 +765,7 @@ func TestIAMWithLDAPNonNormalizedBaseDNConfigServerSuite(t *testing.T) {
suite.SetUpSuite(c)
suite.SetUpLDAPWithNonNormalizedBaseDN(c, ldapServer)
suite.TestLDAPSTS(c)
suite.TestLDAPPolicyEntitiesLookup(c)
suite.TestLDAPUnicodeVariations(c)
suite.TestLDAPSTSServiceAccounts(c)
suite.TestLDAPSTSServiceAccountsWithUsername(c)
@ -984,6 +1001,7 @@ func (s *TestSuiteIAM) TestIAMExport(c *check, caseNum int, content iamTestConte
}
for userDN, policies := range content.ldapUserPolicyMappings {
// No need to detach, we are starting from a clean slate after exporting.
_, err := s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
Policies: policies,
User: userDN,
@ -1194,14 +1212,21 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) {
// Attempting to set a non-existent policy should fail.
userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
err = s.adm.SetPolicy(ctx, policy+"x", userDN, false)
_, err = s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy + "x"},
User: userDN,
})
if err == nil {
c.Fatalf("should not be able to set non-existent policy")
c.Fatalf("should not be able to attach non-existent policy")
}
err = s.adm.SetPolicy(ctx, policy, userDN, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
userReq := madmin.PolicyAssociationReq{
Policies: []string{policy},
User: userDN,
}
if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil {
c.Fatalf("Unable to attach user policy: %v", err)
}
value, err := ldapID.Retrieve()
@ -1240,10 +1265,8 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) {
c.Fatalf("unexpected non-access-denied err: %v", err)
}
// Remove the policy assignment on the user DN:
err = s.adm.SetPolicy(ctx, "", userDN, false)
if err != nil {
c.Fatalf("Unable to remove policy setting: %v", err)
if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil {
c.Fatalf("Unable to detach user policy: %v", err)
}
_, err = ldapID.Retrieve()
@ -1253,9 +1276,13 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) {
// Set policy via group and validate policy assignment.
groupDN := "cn=projectb,ou=groups,ou=swengg,dc=min,dc=io"
err = s.adm.SetPolicy(ctx, policy, groupDN, true)
if err != nil {
c.Fatalf("Unable to set group policy: %v", err)
groupReq := madmin.PolicyAssociationReq{
Policies: []string{policy},
Group: groupDN,
}
if _, err = s.adm.AttachPolicyLDAP(ctx, groupReq); err != nil {
c.Fatalf("Unable to attach group policy: %v", err)
}
value, err = ldapID.Retrieve()
@ -1278,6 +1305,10 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) {
// Validate that the client cannot remove any objects
err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{})
c.Assert(err.Error(), "Access Denied.")
if _, err = s.adm.DetachPolicyLDAP(ctx, groupReq); err != nil {
c.Fatalf("Unable to detach group policy: %v", err)
}
}
func (s *TestSuiteIAM) TestLDAPUnicodeVariationsLegacyAPI(c *check) {
@ -1490,12 +1521,13 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariations(c *check) {
// \uFE52 is the unicode dot SMALL FULL STOP used below:
userDNWithUnicodeDot := "uid=svc﹒algorithm,OU=swengg,DC=min,DC=io"
_, err = s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
userReq := madmin.PolicyAssociationReq{
Policies: []string{policy},
User: userDNWithUnicodeDot,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil {
c.Fatalf("Unable to attach user policy: %v", err)
}
value, err := ldapID.Retrieve()
@ -1534,12 +1566,9 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariations(c *check) {
}
// Remove the policy assignment on the user DN:
_, err = s.adm.DetachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
Policies: []string{policy},
User: userDNWithUnicodeDot,
})
if err != nil {
c.Fatalf("Unable to remove policy setting: %v", err)
if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil {
c.Fatalf("Unable to detach user policy: %v", err)
}
_, err = ldapID.Retrieve()
@ -1550,11 +1579,12 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariations(c *check) {
// Set policy via group and validate policy assignment.
actualGroupDN := mustNormalizeDN("cn=project.c,ou=groups,ou=swengg,dc=min,dc=io")
groupDNWithUnicodeDot := "cn=project﹒c,ou=groups,ou=swengg,dc=min,dc=io"
_, err = s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
groupReq := madmin.PolicyAssociationReq{
Policies: []string{policy},
Group: groupDNWithUnicodeDot,
})
if err != nil {
}
if _, err = s.adm.AttachPolicyLDAP(ctx, groupReq); err != nil {
c.Fatalf("Unable to attach group policy: %v", err)
}
@ -1594,6 +1624,10 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariations(c *check) {
// Validate that the client cannot remove any objects
err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{})
c.Assert(err.Error(), "Access Denied.")
if _, err = s.adm.DetachPolicyLDAP(ctx, groupReq); err != nil {
c.Fatalf("Unable to detach group policy: %v", err)
}
}
func (s *TestSuiteIAM) TestLDAPSTSServiceAccounts(c *check) {
@ -1630,9 +1664,13 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccounts(c *check) {
}
userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
err = s.adm.SetPolicy(ctx, policy, userDN, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
userReq := madmin.PolicyAssociationReq{
Policies: []string{policy},
User: userDN,
}
if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil {
c.Fatalf("Unable to attach user policy: %v", err)
}
ldapID := cr.LDAPIdentity{
@ -1687,6 +1725,11 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccounts(c *check) {
// 6. Check that service account cannot be created for some other user.
c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
// Detach the policy from the user
if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil {
c.Fatalf("Unable to detach user policy: %v", err)
}
}
func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithUsername(c *check) {
@ -1723,9 +1766,14 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithUsername(c *check) {
}
userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
err = s.adm.SetPolicy(ctx, policy, userDN, false)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
userReq := madmin.PolicyAssociationReq{
Policies: []string{policy},
User: userDN,
}
if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil {
c.Fatalf("Unable to attach user policy: %v", err)
}
ldapID := cr.LDAPIdentity{
@ -1776,6 +1824,10 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithUsername(c *check) {
// 3. Check S3 access for download
c.mustDownload(ctx, svcClient, bucket)
if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil {
c.Fatalf("Unable to detach user policy: %v", err)
}
}
// In this test, the parent users gets their permissions from a group, rather
@ -1814,9 +1866,13 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithGroups(c *check) {
}
groupDN := "cn=projecta,ou=groups,ou=swengg,dc=min,dc=io"
err = s.adm.SetPolicy(ctx, policy, groupDN, true)
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
groupReq := madmin.PolicyAssociationReq{
Policies: []string{policy},
Group: groupDN,
}
if _, err = s.adm.AttachPolicyLDAP(ctx, groupReq); err != nil {
c.Fatalf("Unable to attach group policy: %v", err)
}
ldapID := cr.LDAPIdentity{
@ -1871,18 +1927,24 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithGroups(c *check) {
// 6. Check that service account cannot be created for some other user.
c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
// Detach the group policy
if _, err = s.adm.DetachPolicyLDAP(ctx, groupReq); err != nil {
c.Fatalf("Unable to detach group policy: %v", err)
}
}
func (s *TestSuiteIAM) TestLDAPCyrillicUser(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
_, err := s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
userReq := madmin.PolicyAssociationReq{
Policies: []string{"readwrite"},
User: "uid=Пользователь,ou=people,ou=swengg,dc=min,dc=io",
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
if _, err := s.adm.AttachPolicyLDAP(ctx, userReq); err != nil {
c.Fatalf("Unable to attach user policy: %v", err)
}
cases := []struct {
@ -1940,6 +2002,10 @@ func (s *TestSuiteIAM) TestLDAPCyrillicUser(c *check) {
c.Fatalf("Test %d: unexpected dn claim: %s", i+1, dnClaim)
}
}
if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil {
c.Fatalf("Unable to detach user policy: %v", err)
}
}
func (s *TestSuiteIAM) TestLDAPAttributesLookup(c *check) {
@ -1947,12 +2013,13 @@ func (s *TestSuiteIAM) TestLDAPAttributesLookup(c *check) {
defer cancel()
groupDN := "cn=projectb,ou=groups,ou=swengg,dc=min,dc=io"
_, err := s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
groupReq := madmin.PolicyAssociationReq{
Policies: []string{"readwrite"},
Group: groupDN,
})
if err != nil {
c.Fatalf("Unable to set policy: %v", err)
}
if _, err := s.adm.AttachPolicyLDAP(ctx, groupReq); err != nil {
c.Fatalf("Unable to attach user policy: %v", err)
}
cases := []struct {
@ -2025,6 +2092,90 @@ func (s *TestSuiteIAM) TestLDAPAttributesLookup(c *check) {
c.Fatalf("Test %d: unexpected sshPublicKey type: %s", i+1, parts[0])
}
}
if _, err = s.adm.DetachPolicyLDAP(ctx, groupReq); err != nil {
c.Fatalf("Unable to detach group policy: %v", err)
}
}
func (s *TestSuiteIAM) TestLDAPPolicyEntitiesLookup(c *check) {
ctx, cancel := context.WithTimeout(context.Background(), testDefaultTimeout)
defer cancel()
groupDN := "cn=projectb,ou=groups,ou=swengg,dc=min,dc=io"
groupPolicy := "readwrite"
groupReq := madmin.PolicyAssociationReq{
Policies: []string{groupPolicy},
Group: groupDN,
}
_, err := s.adm.AttachPolicyLDAP(ctx, groupReq)
if err != nil {
c.Fatalf("Unable to attach group policy: %v", err)
}
type caseTemplate struct {
inDN string
expectedOutDN string
expectedGroupDN string
expectedGroupPolicy string
}
cases := []caseTemplate{
{
inDN: "uid=dillon,ou=people,ou=swengg,dc=min,dc=io",
expectedOutDN: "uid=dillon,ou=people,ou=swengg,dc=min,dc=io",
expectedGroupDN: groupDN,
expectedGroupPolicy: groupPolicy,
},
}
policy := "readonly"
for _, testCase := range cases {
userReq := madmin.PolicyAssociationReq{
Policies: []string{policy},
User: testCase.inDN,
}
_, err := s.adm.AttachPolicyLDAP(ctx, userReq)
if err != nil {
c.Fatalf("Unable to attach policy: %v", err)
}
entities, err := s.adm.GetLDAPPolicyEntities(ctx, madmin.PolicyEntitiesQuery{
Users: []string{testCase.inDN},
Policy: []string{policy},
})
if err != nil {
c.Fatalf("Unable to fetch policy entities: %v", err)
}
// Validate every expected mapping; the first failing check aborts the test.
switch {
case len(entities.UserMappings) != 1:
c.Fatalf("Expected to find exactly one user mapping")
case entities.UserMappings[0].User != testCase.expectedOutDN:
c.Fatalf("Expected user DN `%s`, found `%s`", testCase.expectedOutDN, entities.UserMappings[0].User)
case len(entities.UserMappings[0].Policies) != 1:
c.Fatalf("Expected exactly one policy attached to user")
case entities.UserMappings[0].Policies[0] != policy:
c.Fatalf("Expected attached policy `%s`, found `%s`", policy, entities.UserMappings[0].Policies[0])
case len(entities.UserMappings[0].MemberOfMappings) != 1:
c.Fatalf("Expected exactly one group attached to user")
case entities.UserMappings[0].MemberOfMappings[0].Group != testCase.expectedGroupDN:
c.Fatalf("Expected attached group `%s`, found `%s`", testCase.expectedGroupDN, entities.UserMappings[0].MemberOfMappings[0].Group)
case len(entities.UserMappings[0].MemberOfMappings[0].Policies) != 1:
c.Fatalf("Expected exactly one policy attached to group")
case entities.UserMappings[0].MemberOfMappings[0].Policies[0] != testCase.expectedGroupPolicy:
c.Fatalf("Expected attached policy `%s`, found `%s`", testCase.expectedGroupPolicy, entities.UserMappings[0].MemberOfMappings[0].Policies[0])
}
_, err = s.adm.DetachPolicyLDAP(ctx, userReq)
if err != nil {
c.Fatalf("Unable to detach policy: %v", err)
}
}
_, err = s.adm.DetachPolicyLDAP(ctx, groupReq)
if err != nil {
c.Fatalf("Unable to detach group policy: %v", err)
}
}
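All of the test churn above follows one pattern: the deprecated `SetPolicy` call becomes an explicit attach, and each test now detaches on exit so later suites start from a clean slate. A minimal standalone sketch of that round trip, assuming a hypothetical local server and placeholder credentials:

```go
package main

import (
	"context"
	"log"

	"github.com/minio/madmin-go/v3"
)

func main() {
	// Placeholder endpoint and credentials, purely for illustration.
	adm, err := madmin.New("localhost:9000", "minioadmin", "minioadmin", false)
	if err != nil {
		log.Fatalln(err)
	}
	req := madmin.PolicyAssociationReq{
		Policies: []string{"readonly"},
		User:     "uid=dillon,ou=people,ou=swengg,dc=min,dc=io",
	}
	ctx := context.Background()
	// Attach, then detach to restore the clean slate the tests rely on.
	if _, err := adm.AttachPolicyLDAP(ctx, req); err != nil {
		log.Fatalln(err)
	}
	if _, err := adm.DetachPolicyLDAP(ctx, req); err != nil {
		log.Fatalln(err)
	}
}
```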
func (s *TestSuiteIAM) TestOpenIDSTS(c *check) {

View file

@ -83,6 +83,8 @@ func TestMain(m *testing.M) {
SecretKey: auth.DefaultSecretKey,
}
globalNodeAuthToken, _ = authenticateNode(auth.DefaultAccessKey, auth.DefaultSecretKey)
// disable ENVs which interfere with tests.
for _, env := range []string{
crypto.EnvKMSAutoEncryption,
@ -100,7 +102,7 @@ func TestMain(m *testing.M) {
// Disable printing console messages during tests.
color.Output = io.Discard
// Disable Error logging in testing.
logger.DisableErrorLog = true
logger.DisableLog = true
// Uncomment the following line to see trace logs during unit tests.
// logger.AddTarget(console.New())

View file

@ -64,6 +64,12 @@ var (
Message: "Specified remote backend is not empty",
StatusCode: http.StatusBadRequest,
}
errTierInvalidConfig = AdminError{
Code: "XMinioAdminTierInvalidConfig",
Message: "Unable to setup remote tier, check tier configuration",
StatusCode: http.StatusBadRequest,
}
)
const (

View file

@ -98,12 +98,6 @@ func TestReleaseTagToNFromTimeConversion(t *testing.T) {
}
func TestDownloadURL(t *testing.T) {
sci := globalIsCICD
globalIsCICD = false
defer func() {
globalIsCICD = sci
}()
minioVersion1 := releaseTimeToReleaseTag(UTCNow())
durl := getDownloadURL(minioVersion1)
if IsDocker() {
@ -164,9 +158,6 @@ func TestUserAgent(t *testing.T) {
}
for i, testCase := range testCases {
sci := globalIsCICD
globalIsCICD = false
if testCase.envName != "" {
t.Setenv(testCase.envName, testCase.envValue)
if testCase.envName == "MESOS_CONTAINER_NAME" {
@ -182,7 +173,6 @@ func TestUserAgent(t *testing.T) {
if !strings.Contains(str, expectedStr) {
t.Errorf("Test %d: expected: %s, got: %s", i+1, expectedStr, str)
}
globalIsCICD = sci
os.Unsetenv("MARATHON_APP_LABEL_DCOS_PACKAGE_VERSION")
os.Unsetenv(testCase.envName)
}
@ -190,12 +180,6 @@ func TestUserAgent(t *testing.T) {
// Tests if the environment we are running is in DCOS.
func TestIsDCOS(t *testing.T) {
sci := globalIsCICD
globalIsCICD = false
defer func() {
globalIsCICD = sci
}()
t.Setenv("MESOS_CONTAINER_NAME", "mesos-1111")
dcos := IsDCOS()
if !dcos {
@ -210,12 +194,6 @@ func TestIsDCOS(t *testing.T) {
// Tests if the environment we are running is in kubernetes.
func TestIsKubernetes(t *testing.T) {
sci := globalIsCICD
globalIsCICD = false
defer func() {
globalIsCICD = sci
}()
t.Setenv("KUBERNETES_SERVICE_HOST", "10.11.148.5")
kubernetes := IsKubernetes()
if !kubernetes {

View file

@ -144,7 +144,8 @@ func newWarmBackend(ctx context.Context, tier madmin.TierConfig, probe bool) (d
return nil, errTierTypeUnsupported
}
if err != nil {
return nil, errTierTypeUnsupported
tierLogIf(ctx, err)
return nil, errTierInvalidConfig
}
if probe {

View file

@ -236,31 +236,18 @@ func newXLStorage(ep Endpoint, cleanUp bool) (s *xlStorage, err error) {
return s, err
}
info, err := disk.GetInfo(s.drivePath, true)
info, rootDrive, err := getDiskInfo(s.drivePath)
if err != nil {
return s, err
}
s.major = info.Major
s.minor = info.Minor
s.fsType = info.FSType
if !globalIsCICD && !globalIsErasureSD {
var rootDrive bool
if globalRootDiskThreshold > 0 {
// Use MINIO_ROOTDISK_THRESHOLD_SIZE to figure out if
// this disk is a root disk. treat those disks with
// size less than or equal to the threshold as rootDrives.
rootDrive = info.Total <= globalRootDiskThreshold
} else {
rootDrive, err = disk.IsRootDisk(s.drivePath, SlashSeparator)
if err != nil {
return nil, err
}
}
if rootDrive {
return s, errDriveIsRoot
}
}
// Sanitize before setting it
if info.NRRequests > 0 {
@ -333,10 +320,11 @@ func newXLStorage(ep Endpoint, cleanUp bool) (s *xlStorage, err error) {
s.diskInfoCache.InitOnce(time.Second, cachevalue.Opts{},
func(ctx context.Context) (DiskInfo, error) {
dcinfo := DiskInfo{}
di, err := getDiskInfo(s.drivePath)
di, root, err := getDiskInfo(s.drivePath)
if err != nil {
return dcinfo, err
}
dcinfo.RootDisk = root
dcinfo.Major = di.Major
dcinfo.Minor = di.Minor
dcinfo.Total = di.Total
@ -345,6 +333,10 @@ func newXLStorage(ep Endpoint, cleanUp bool) (s *xlStorage, err error) {
dcinfo.UsedInodes = di.Files - di.Ffree
dcinfo.FreeInodes = di.Ffree
dcinfo.FSType = di.FSType
if root {
return dcinfo, errDriveIsRoot
}
diskID, err := s.GetDiskID()
// Healing is 'true' when
// - if we found an unformatted disk (no 'format.json')
@ -360,10 +352,22 @@ func newXLStorage(ep Endpoint, cleanUp bool) (s *xlStorage, err error) {
}
// getDiskInfo returns given disk information.
func getDiskInfo(drivePath string) (di disk.Info, err error) {
func getDiskInfo(drivePath string) (di disk.Info, rootDrive bool, err error) {
if err = checkPathLength(drivePath); err == nil {
di, err = disk.GetInfo(drivePath, false)
if !globalIsCICD && !globalIsErasureSD {
if globalRootDiskThreshold > 0 {
// Use MINIO_ROOTDISK_THRESHOLD_SIZE to figure out if
// this disk is a root disk. Treat those disks with
// size less than or equal to the threshold as rootDrives.
rootDrive = di.Total <= globalRootDiskThreshold
} else {
rootDrive, err = disk.IsRootDisk(drivePath, SlashSeparator)
}
}
}
switch {
case osIsNotExist(err):
err = errDiskNotFound
@ -373,7 +377,7 @@ func getDiskInfo(drivePath string) (di disk.Info, err error) {
err = errFaultyDisk
}
return di, err
return
}
// Implements stringer compatible interface.

View file

@ -196,7 +196,7 @@ func TestXLStorageGetDiskInfo(t *testing.T) {
// Check test cases.
for _, testCase := range testCases {
if _, err := getDiskInfo(testCase.diskPath); err != testCase.expectedErr {
if _, _, err := getDiskInfo(testCase.diskPath); err != testCase.expectedErr {
t.Fatalf("expected: %s, got: %s", testCase.expectedErr, err)
}
}

View file

@ -178,7 +178,7 @@ When an object has only one version as a delete marker, the latter can be automa
{
"ID": "Removing all delete markers",
"Expiration": {
"DeleteMarker": true
"ExpiredObjectDeleteMarker": true
},
"Status": "Enabled"
}
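For completeness, a hedged sketch of applying the corrected rule through the minio-go v7 client; the method and field names below are recalled from that client rather than taken from this commit, and the endpoint, credentials, and bucket are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio-go/v7/pkg/lifecycle"
)

func main() {
	client, err := minio.New("localhost:9000", &minio.Options{
		Creds: credentials.NewStaticV4("minioadmin", "minioadmin", ""),
	})
	if err != nil {
		log.Fatalln(err)
	}
	config := lifecycle.NewConfiguration()
	config.Rules = []lifecycle.Rule{{
		ID:     "Removing all delete markers",
		Status: "Enabled",
		// Serializes as "ExpiredObjectDeleteMarker", matching the corrected doc above.
		Expiration: lifecycle.Expiration{DeleteMarker: lifecycle.ExpireDeleteMarker(true)},
	}}
	if err := client.SetBucketLifecycle(context.Background(), "testbucket", config); err != nil {
		log.Fatalln(err)
	}
}
```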

View file

@ -52,7 +52,9 @@ export MINIO_ROOT_USER="minioadmin"
export MINIO_ROOT_PASSWORD="minioadmin"
./minio server --address ":9001" /tmp/xl/1/{1...4}/ 2>&1 >/tmp/dc1.log &
pid1=$!
./minio server --address ":9002" /tmp/xl/2/{1...4}/ 2>&1 >/tmp/dc2.log &
pid2=$!
sleep 3
@ -69,6 +71,8 @@ export MC_HOST_myminio2=http://minioadmin:minioadmin@localhost:9002
./mc replicate add myminio1/testbucket --remote-bucket http://minioadmin:minioadmin@localhost:9002/testbucket/ --priority 1
# Test replication of delete markers and permanent deletes
./mc cp README.md myminio1/testbucket/dir/file
./mc cp README.md myminio1/testbucket/dir/file
@ -111,5 +115,33 @@ if [ $ret -ne 0 ]; then
exit 1
fi
# Test listing of non replicated permanent deletes
set -x
./mc mb myminio1/foobucket/ myminio2/foobucket/ --with-versioning
./mc replicate add myminio1/foobucket --remote-bucket http://minioadmin:minioadmin@localhost:9002/foobucket/ --priority 1
./mc cp README.md myminio1/foobucket/dir/file
versionId="$(./mc ls --json --versions myminio1/foobucket/dir/ | jq -r .versionId)"
kill ${pid2} && wait ${pid2} || true
aws s3api --endpoint-url http://localhost:9001 --profile minioadmin delete-object --bucket foobucket --key dir/file --version-id "$versionId"
out="$(./mc ls myminio1/foobucket/dir/)"
if [ "$out" != "" ]; then
echo "BUG: non versioned listing should not show pending/failed replicated delete:"
echo "$out"
exit 1
fi
out="$(./mc ls --versions myminio1/foobucket/dir/)"
if [ "$out" != "" ]; then
echo "BUG: versioned listing should not show pending/failed replicated deletes:"
echo "$out"
exit 1
fi
echo "Success"
catch

View file

@ -43,8 +43,8 @@ unset MINIO_KMS_KES_KEY_FILE
unset MINIO_KMS_KES_ENDPOINT
unset MINIO_KMS_KES_KEY_NAME
wget -q -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x mc
go install -v github.com/minio/mc@master
cp -a $(go env GOPATH)/bin/mc ./mc
if [ ! -f mc.RELEASE.2021-03-12T03-36-59Z ]; then
wget -q -O mc.RELEASE.2021-03-12T03-36-59Z https://dl.minio.io/client/mc/release/linux-amd64/archive/mc.RELEASE.2021-03-12T03-36-59Z &&

View file

@ -74,7 +74,7 @@ pools:
- Each pool expects a minimum of 2 nodes per pool, and unique non-repeating hosts for each argument.
- Each pool expects each host in this pool has the same number of drives specified as any other host.
- Mixing `local-path` and `distributed-path` is not allowed, doing so would cause MinIO to refuse starting the server.
- Ellipses notation (e.g. `{1...10}`) or bracket notations are fully allowed (e.g. `{a,c,f}`) to have multiple entries in one line.
- Ellipses and bracket notation (e.g. `{1...10}`) are allowed.
> NOTE: MinIO environmental variables still take precedence over the `config.yaml` file, however `config.yaml` is preferred over MinIO internal config KV settings via `mc admin config set alias/ <sub-system>`.
@ -88,3 +88,4 @@ In subsequent releases we are planning to extend this to provide things like
and decommissioning to provide a functionality that smaller deployments
care about.
- Fully allow bracket notation (e.g. `{a,c,f}`) to have multiple entries on one line.

View file

@ -22,6 +22,12 @@ export MINIO_CI_CD=1
if [ ! -f ./mc ]; then
os="$(uname -s)"
arch="$(uname -m)"
case "${arch}" in
"x86_64")
arch="amd64"
;;
esac
wget -O mc https://dl.minio.io/client/mc/release/${os,,}-${arch,,}/mc &&
chmod +x mc
fi

View file

@ -8,10 +8,8 @@ pkill minio
pkill kes
rm -rf /tmp/xl
if [ ! -f ./mc ]; then
wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x mc
fi
go install -v github.com/minio/mc@master
cp -a $(go env GOPATH)/bin/mc ./mc
if [ ! -f ./kes ]; then
wget --quiet -O kes https://github.com/minio/kes/releases/latest/download/kes-linux-amd64 &&
@ -39,37 +37,37 @@ export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/"
(minio server http://localhost:9000/tmp/xl/{1...10}/disk{0...1} 2>&1 >/dev/null) &
pid=$!
./mc ready myminio
mc ready myminio
./mc admin user add myminio/ minio123 minio123
mc admin user add myminio/ minio123 minio123
./mc admin policy create myminio/ deny-non-sse-kms-pol ./docs/iam/policies/deny-non-sse-kms-objects.json
./mc admin policy create myminio/ deny-invalid-sse-kms-pol ./docs/iam/policies/deny-objects-with-invalid-sse-kms-key-id.json
mc admin policy create myminio/ deny-non-sse-kms-pol ./docs/iam/policies/deny-non-sse-kms-objects.json
mc admin policy create myminio/ deny-invalid-sse-kms-pol ./docs/iam/policies/deny-objects-with-invalid-sse-kms-key-id.json
./mc admin policy attach myminio deny-non-sse-kms-pol --user minio123
./mc admin policy attach myminio deny-invalid-sse-kms-pol --user minio123
./mc admin policy attach myminio consoleAdmin --user minio123
mc admin policy attach myminio deny-non-sse-kms-pol --user minio123
mc admin policy attach myminio deny-invalid-sse-kms-pol --user minio123
mc admin policy attach myminio consoleAdmin --user minio123
./mc mb -l myminio/test-bucket
./mc mb -l myminio/multi-key-poc
mc mb -l myminio/test-bucket
mc mb -l myminio/multi-key-poc
export MC_HOST_myminio1="http://minio123:minio123@localhost:9000/"
./mc cp /etc/issue myminio1/test-bucket
mc cp /etc/issue myminio1/test-bucket
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: PutObject to bucket: test-bucket should succeed. Failed"
exit 1
fi
./mc cp /etc/issue myminio1/multi-key-poc | grep -q "Insufficient permissions to access this path"
mc cp /etc/issue myminio1/multi-key-poc | grep -q "Insufficient permissions to access this path"
ret=$?
if [ $ret -eq 0 ]; then
echo "BUG: PutObject to bucket: multi-key-poc without sse-kms should fail. Succedded"
exit 1
fi
./mc cp /etc/hosts myminio1/multi-key-poc/hosts --enc-kms "myminio1/multi-key-poc/hosts=minio-default-key"
mc cp /etc/hosts myminio1/multi-key-poc/hosts --enc-kms "myminio1/multi-key-poc/hosts=minio-default-key"
ret=$?
if [ $ret -ne 0 ]; then
echo "BUG: PutObject to bucket: multi-key-poc with valid sse-kms should succeed. Failed"

View file

@ -2,7 +2,7 @@ version: '3.7'
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: quay.io/minio/minio:RELEASE.2024-06-22T05-26-45Z
image: quay.io/minio/minio:RELEASE.2024-07-16T23-46-41Z
command: server --console-address ":9001" http://minio{1...4}/data{1...2}
expose:
- "9000"

View file

@ -31,8 +31,6 @@ MINIO_IDENTITY_OPENID_CLAIM_USERINFO (on|off) Enable fetching claims f
MINIO_IDENTITY_OPENID_KEYCLOAK_REALM (string) Specify Keycloak 'realm' name, only honored if vendor was set to 'keycloak' as value, if no realm is specified 'master' is default
MINIO_IDENTITY_OPENID_KEYCLOAK_ADMIN_URL (string) Specify Keycloak 'admin' REST API endpoint e.g. http://localhost:8080/auth/admin/
MINIO_IDENTITY_OPENID_REDIRECT_URI_DYNAMIC (on|off) Enable 'Host' header based dynamic redirect URI (default: 'off')
MINIO_IDENTITY_OPENID_CLAIM_PREFIX (string) [DEPRECATED use 'claim_name'] JWT claim namespace prefix e.g. "customer1/"
MINIO_IDENTITY_OPENID_REDIRECT_URI (string) [DEPRECATED use env 'MINIO_BROWSER_REDIRECT_URL'] Configure custom redirect_uri for OpenID login flow callback
MINIO_IDENTITY_OPENID_COMMENT (sentence) optionally add a comment to this setting
```

26
docs/tuning/README.md Normal file
View file

@ -0,0 +1,26 @@
# How to enable 'minio' performance profile with tuned?
## Prerequisites
Please make sure the following packages are already installed via `dnf` or `apt`:
- `tuned`
- `curl`
### Install `tuned.conf` performance profile
#### Step 1 - download `tuned.conf` from the referenced link
```
wget https://raw.githubusercontent.com/minio/minio/master/docs/tuning/tuned.conf
```
#### Step 2 - install tuned.conf as supported performance profile on all nodes
```
sudo mkdir -p /usr/lib/tuned/minio/
sudo mv tuned.conf /usr/lib/tuned/minio
```
#### Step 3 - to enable minio performance profile on all the nodes
```
sudo tuned-adm profile minio
```

83
docs/tuning/tuned.conf Normal file
View file

@ -0,0 +1,83 @@
[main]
summary=Maximum server performance for MinIO
[vm]
transparent_hugepage=madvise
[sysfs]
/sys/kernel/mm/transparent_hugepage/defrag=defer+madvise
/sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none=0
[cpu]
force_latency=1
governor=performance
energy_perf_bias=performance
min_perf_pct=100
[sysctl]
fs.xfs.xfssyncd_centisecs=72000
net.core.busy_read=50
net.core.busy_poll=50
kernel.numa_balancing=1
# Do not use swap at all
vm.swappiness=0
vm.vfs_cache_pressure=50
# Start writeback at 3% memory
vm.dirty_background_ratio=3
# Force writeback at 10% memory
vm.dirty_ratio=10
# Quite a few memory map
# areas may be consumed
vm.max_map_count=524288
# Default is 500000 = 0.5ms
kernel.sched_migration_cost_ns=5000000
# stalled hdd io threads
kernel.hung_task_timeout_secs=85
# network tuning for bigger throughput
net.core.netdev_max_backlog=250000
net.core.somaxconn=16384
net.ipv4.tcp_syncookies=0
net.ipv4.tcp_max_syn_backlog=16384
net.core.wmem_max=4194304
net.core.rmem_max=4194304
net.core.rmem_default=4194304
net.core.wmem_default=4194304
net.ipv4.tcp_rmem="4096 87380 4194304"
net.ipv4.tcp_wmem="4096 65536 4194304"
# Reduce CPU utilization
net.ipv4.tcp_timestamps=0
# Increase throughput
net.ipv4.tcp_sack=1
# Low latency mode for TCP
net.ipv4.tcp_low_latency=1
# The following variable is used to tell the kernel how
# much of the socket buffer space should be used for TCP
# window size, and how much to save for an application buffer.
net.ipv4.tcp_adv_win_scale=1
# disable RFC2861 behavior
net.ipv4.tcp_slow_start_after_idle = 0
# Fix faulty network setups
net.ipv4.tcp_mtu_probing=1
net.ipv4.tcp_base_mss=1280
# Disable ipv6
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1
[bootloader]
# Avoid firing timers for all CPUs at the same time. This is irrelevant for
# full nohz systems
cmdline=skew_tick=1

76
go.mod
View file

@ -1,6 +1,6 @@
module github.com/minio/minio
go 1.21
go 1.22
require (
cloud.google.com/go/storage v1.42.0
@ -32,7 +32,6 @@ require (
github.com/golang-jwt/jwt/v4 v4.5.0
github.com/gomodule/redigo v1.9.2
github.com/google/uuid v1.6.0
github.com/hashicorp/golang-lru/v2 v2.0.7
github.com/inconshreveable/mousetrap v1.1.0
github.com/json-iterator/go v1.1.12
github.com/klauspost/compress v1.17.9
@ -40,22 +39,22 @@ require (
github.com/klauspost/filepathx v1.1.1
github.com/klauspost/pgzip v1.2.6
github.com/klauspost/readahead v1.4.0
github.com/klauspost/reedsolomon v1.12.1
github.com/klauspost/reedsolomon v1.12.3
github.com/lib/pq v1.10.9
github.com/lithammer/shortuuid/v4 v4.0.0
github.com/miekg/dns v1.1.59
github.com/miekg/dns v1.1.61
github.com/minio/cli v1.24.2
github.com/minio/console v1.6.0
github.com/minio/console v1.6.3
github.com/minio/csvparser v1.0.0
github.com/minio/dnscache v0.1.1
github.com/minio/dperf v0.5.3
github.com/minio/highwayhash v1.0.2
github.com/minio/highwayhash v1.0.3
github.com/minio/kms-go/kes v0.3.0
github.com/minio/kms-go/kms v0.4.0
github.com/minio/madmin-go/v3 v3.0.55
github.com/minio/minio-go/v7 v7.0.72-0.20240610154810-fa174cbf14b0
github.com/minio/madmin-go/v3 v3.0.58
github.com/minio/minio-go/v7 v7.0.73
github.com/minio/mux v1.9.0
github.com/minio/pkg/v3 v3.0.2
github.com/minio/pkg/v3 v3.0.9
github.com/minio/selfupdate v0.6.0
github.com/minio/simdjson-go v0.4.5
github.com/minio/sio v0.4.0
@ -63,27 +62,26 @@ require (
github.com/minio/zipindex v0.3.0
github.com/mitchellh/go-homedir v1.1.0
github.com/nats-io/nats-server/v2 v2.9.23
github.com/nats-io/nats.go v1.35.0
github.com/nats-io/nats.go v1.36.0
github.com/nats-io/stan.go v0.10.4
github.com/ncw/directio v1.0.5
github.com/nsqio/go-nsq v1.1.0
github.com/philhofer/fwd v1.1.2
github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986
github.com/pierrec/lz4 v2.6.1+incompatible
github.com/pkg/errors v0.9.1
github.com/pkg/sftp v1.13.6
github.com/pkg/xattr v0.4.9
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/client_model v0.6.1
github.com/prometheus/common v0.54.0
github.com/prometheus/common v0.55.0
github.com/prometheus/procfs v0.15.1
github.com/puzpuzpuz/xsync/v3 v3.1.0
github.com/puzpuzpuz/xsync/v3 v3.2.0
github.com/rabbitmq/amqp091-go v1.10.0
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475
github.com/rs/cors v1.11.0
github.com/secure-io/sio-go v0.3.1
github.com/shirou/gopsutil/v3 v3.24.5
github.com/tidwall/gjson v1.17.1
github.com/tinylib/msgp v1.1.9
github.com/tinylib/msgp v1.2.0
github.com/valyala/bytebufferpool v1.0.0
github.com/xdg/scram v1.0.5
github.com/zeebo/xxh3 v1.0.2
@ -93,13 +91,13 @@ require (
go.uber.org/zap v1.27.0
goftp.io/server/v2 v2.0.1
golang.org/x/crypto v0.24.0
golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
golang.org/x/oauth2 v0.21.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.21.0
golang.org/x/term v0.21.0
golang.org/x/time v0.5.0
google.golang.org/api v0.184.0
google.golang.org/api v0.187.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
)
@ -108,10 +106,10 @@ require (
aead.dev/mem v0.2.0 // indirect
aead.dev/minisign v0.3.0 // indirect
cloud.google.com/go v0.115.0 // indirect
cloud.google.com/go/auth v0.5.1 // indirect
cloud.google.com/go/auth v0.6.1 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect
cloud.google.com/go/compute/metadata v0.3.0 // indirect
cloud.google.com/go/iam v1.1.8 // indirect
cloud.google.com/go/compute/metadata v0.4.0 // indirect
cloud.google.com/go/iam v1.1.10 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
github.com/Azure/azure-pipeline-go v0.2.3 // indirect
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
@ -127,7 +125,7 @@ require (
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/charmbracelet/bubbles v0.18.0 // indirect
github.com/charmbracelet/bubbletea v0.26.4 // indirect
github.com/charmbracelet/bubbletea v0.26.6 // indirect
github.com/charmbracelet/lipgloss v0.11.0 // indirect
github.com/charmbracelet/x/ansi v0.1.2 // indirect
github.com/charmbracelet/x/input v0.1.2 // indirect
@ -145,6 +143,7 @@ require (
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/frankban/quicktest v1.14.4 // indirect
github.com/go-asn1-ber/asn1-ber v1.5.7 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-jose/go-jose/v4 v4.0.2 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
@ -165,12 +164,12 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba // indirect
github.com/google/pprof v0.0.0-20240625030939-27f56978b8b0 // indirect
github.com/google/s2a-go v0.1.7 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.4 // indirect
github.com/gorilla/websocket v1.5.2 // indirect
github.com/googleapis/gax-go/v2 v2.12.5 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-hclog v1.2.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
@ -183,7 +182,7 @@ require (
github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect
github.com/jcmturner/rpc/v2 v2.0.3 // indirect
github.com/jedib0t/go-pretty/v6 v6.5.9 // indirect
github.com/jessevdk/go-flags v1.5.0 // indirect
github.com/jessevdk/go-flags v1.6.1 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/juju/ratelimit v1.0.2 // indirect
github.com/kr/fs v0.1.0 // indirect
@ -204,7 +203,7 @@ require (
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/minio/colorjson v1.0.8 // indirect
github.com/minio/filepath v1.0.0 // indirect
github.com/minio/mc v0.0.0-20240612143403-e7c9a733c680 // indirect
github.com/minio/mc v0.0.0-20240702213905-74032bc16a3f // indirect
github.com/minio/md5-simd v1.1.2 // indirect
github.com/minio/pkg/v2 v2.0.19 // indirect
github.com/minio/websocket v1.6.0 // indirect
@ -216,8 +215,9 @@ require (
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/reflow v0.3.0 // indirect
github.com/muesli/termenv v0.15.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nats-io/jwt/v2 v2.5.0 // indirect
github.com/nats-io/nats-streaming-server v0.24.3 // indirect
github.com/nats-io/nats-streaming-server v0.24.6 // indirect
github.com/nats-io/nkeys v0.4.7 // indirect
github.com/nats-io/nuid v1.0.1 // indirect
github.com/oklog/ulid v1.3.1 // indirect
@ -229,34 +229,34 @@ require (
github.com/rivo/uniseg v0.4.7 // indirect
github.com/rjeczalik/notify v0.9.3 // indirect
github.com/rs/xid v1.5.0 // indirect
github.com/safchain/ethtool v0.3.0 // indirect
github.com/safchain/ethtool v0.4.1 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/tidwall/gjson v1.17.1 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tklauser/go-sysconf v0.3.14 // indirect
github.com/tklauser/numcpus v0.8.0 // indirect
github.com/unrolled/secure v1.14.0 // indirect
github.com/unrolled/secure v1.15.0 // indirect
github.com/vbauerster/mpb/v8 v8.7.3 // indirect
github.com/xdg/stringprep v1.0.3 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
github.com/yusufpapurcu/wmi v1.2.4 // indirect
go.etcd.io/etcd/client/pkg/v3 v3.5.14 // indirect
go.mongodb.org/mongo-driver v1.15.0 // indirect
go.mongodb.org/mongo-driver v1.16.0 // indirect
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 // indirect
go.opentelemetry.io/otel v1.27.0 // indirect
go.opentelemetry.io/otel/metric v1.27.0 // indirect
go.opentelemetry.io/otel/trace v1.27.0 // indirect
go.opentelemetry.io/otel v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/trace v1.28.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/mod v0.18.0 // indirect
golang.org/x/net v0.26.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/tools v0.22.0 // indirect
google.golang.org/genproto v0.0.0-20240610135401-a8a62080eff3 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240610135401-a8a62080eff3 // indirect
google.golang.org/grpc v1.64.0 // indirect
google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 // indirect
google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
)

164
go.sum
View file

@ -6,16 +6,16 @@ aead.dev/minisign v0.3.0/go.mod h1:NLvG3Uoq3skkRMDuc3YHpWUTMTrSExqm+Ij73W13F6Y=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.115.0 h1:CnFSK6Xo3lDYRoBKEcAtia6VSC837/ZkJuRduSFnr14=
cloud.google.com/go v0.115.0/go.mod h1:8jIM5vVgoAEoiVxQ/O4BFTfHqulPZgs/ufEzMcFMdWU=
cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw=
cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s=
cloud.google.com/go/auth v0.6.1 h1:T0Zw1XM5c1GlpN2HYr2s+m3vr1p2wy+8VN+Z1FKxW38=
cloud.google.com/go/auth v0.6.1/go.mod h1:eFHG7zDzbXHKmjJddFG/rBlcGp6t25SwRUiEQSlO4x4=
cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4=
cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q=
cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc=
cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k=
cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0=
cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE=
cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU=
cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng=
cloud.google.com/go/compute/metadata v0.4.0 h1:vHzJCWaM4g8XIcm8kopr3XmDA4Gy/lblD3EhhSux05c=
cloud.google.com/go/compute/metadata v0.4.0/go.mod h1:SIQh1Kkb4ZJ8zJ874fqVkslA29PRXuleyj6vOzlbK7M=
cloud.google.com/go/iam v1.1.10 h1:ZSAr64oEhQSClwBL670MsJAW5/RLiC6kfw3Bqmd5ZDI=
cloud.google.com/go/iam v1.1.10/go.mod h1:iEgMq62sg8zx446GCaijmA2Miwg5o3UbO+nI47WHJps=
cloud.google.com/go/longrunning v0.5.8 h1:QThI5BFSlYlS7K0wnABCdmKsXbG/htLc3nTPzrfOgeU=
cloud.google.com/go/longrunning v0.5.8/go.mod h1:oJDErR/mm5h44gzsfjQlxd6jyjFvuBPOxR1TLy2+cQk=
cloud.google.com/go/storage v1.42.0 h1:4QtGpplCVt1wz6g5o1ifXd656P5z+yNgzdw1tVfp0cU=
cloud.google.com/go/storage v1.42.0/go.mod h1:HjMXRFq65pGKFn6hxj6x3HCyR41uSB72Z0SO/Vn6JFQ=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
@ -89,8 +89,8 @@ github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UF
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/charmbracelet/bubbles v0.18.0 h1:PYv1A036luoBGroX6VWjQIE9Syf2Wby2oOl/39KLfy0=
github.com/charmbracelet/bubbles v0.18.0/go.mod h1:08qhZhtIwzgrtBjAcJnij1t1H0ZRjwHyGsy6AL11PSw=
github.com/charmbracelet/bubbletea v0.26.4 h1:2gDkkzLZaTjMl/dQBpNVtnvcCxsh/FCkimep7FC9c40=
github.com/charmbracelet/bubbletea v0.26.4/go.mod h1:P+r+RRA5qtI1DOHNFn0otoNwB4rn+zNAzSj/EXz6xU0=
github.com/charmbracelet/bubbletea v0.26.6 h1:zTCWSuST+3yZYZnVSvbXwKOPRSNZceVeqpzOLN2zq1s=
github.com/charmbracelet/bubbletea v0.26.6/go.mod h1:dz8CWPlfCCGLFbBlTY4N7bjLiyOGDJEnd2Muu7pOWhk=
github.com/charmbracelet/lipgloss v0.11.0 h1:UoAcbQ6Qml8hDwSWs0Y1cB5TEQuZkDPH/ZqwWWYTG4g=
github.com/charmbracelet/lipgloss v0.11.0/go.mod h1:1UdRTH9gYgpcdNN5oBtjbu/IzNKtzVtb7sqN1t9LNn8=
github.com/charmbracelet/x/ansi v0.1.2 h1:6+LR39uG8DE6zAmbu023YlqjJHkYXDF1z36ZwzO4xZY=
@ -180,6 +180,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/go-asn1-ber/asn1-ber v1.5.5/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-asn1-ber/asn1-ber v1.5.7 h1:DTX+lbVTWaTw1hQ+PbZPlnDZPEIs0SS/GCZAl535dDk=
github.com/go-asn1-ber/asn1-ber v1.5.7/go.mod h1:hEBeB/ic+5LoWskz+yKT7vGhhPYkProFKoKdwZRWMe0=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-jose/go-jose/v4 v4.0.2 h1:R3l3kkBds16bO7ZFAEEcofK0MkrAJt3jlJznWZG0nvk=
github.com/go-jose/go-jose/v4 v4.0.2/go.mod h1:WVf9LFMHh/QVrmqrOfqun0C45tMe3RoiKJMPvgWwLfY=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@ -275,8 +277,8 @@ github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/
github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
github.com/google/pprof v0.0.0-20240227163752-401108e1b7e7/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba h1:ql1qNgCyOB7iAEk8JTNM+zJrgIbnyCKX/wdlyPufP5g=
github.com/google/pprof v0.0.0-20240528025155-186aa0362fba/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/pprof v0.0.0-20240625030939-27f56978b8b0 h1:e+8XbKB6IMn8A4OAyZccO4pYfB3s7bt6azNIPE7AnPg=
github.com/google/pprof v0.0.0-20240625030939-27f56978b8b0/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
@ -288,13 +290,13 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs=
github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0=
github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg=
github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI=
github.com/googleapis/gax-go/v2 v2.12.5 h1:8gw9KZK8TiVKB6q3zHY3SBzLnrGp6HQjyfYBYGmXdxA=
github.com/googleapis/gax-go/v2 v2.12.5/go.mod h1:BUDKcWo+RaKq5SC9vVYL0wLADa3VcfswbOMMRmB9H3E=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v1.5.2 h1:qoW6V1GT3aZxybsbC6oLnailWnB+qTMVwMreOso9XUw=
github.com/gorilla/websocket v1.5.2/go.mod h1:0n9H61RBAcf5/38py2MCYbxzPIY9rOkpvvMT24Rqs30=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@ -320,11 +322,9 @@ github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/raft v1.3.6 h1:v5xW5KzByoerQlN/o31VJrFNiozgzGyDoMgDJgXpsto=
github.com/hashicorp/raft v1.3.6/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM=
github.com/hashicorp/raft v1.3.9 h1:9yuo1aR0bFTr1cw7pj3S2Bk6MhJCsnr2NAxvIBrP2x4=
github.com/hashicorp/raft v1.3.9/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM=
github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@ -343,8 +343,8 @@ github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZ
github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=
github.com/jedib0t/go-pretty/v6 v6.5.9 h1:ACteMBRrrmm1gMsXe9PSTOClQ63IXDUt03H5U+UV8OU=
github.com/jedib0t/go-pretty/v6 v6.5.9/go.mod h1:zbn98qrYlh95FIhwwsbIip0LYpwSG8SUOScs+v9/t0E=
github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc=
github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4=
github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc=
github.com/jlaffaye/ftp v0.0.0-20190624084859-c1312a7102bf/go.mod h1:lli8NYPQOFy3O++YmYbqVgOcQ1JPCwdOy+5zSjKJ9qY=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
@ -371,8 +371,8 @@ github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/klauspost/readahead v1.4.0 h1:w4hQ3BpdLjBnRQkZyNi+nwdHU7eGP9buTexWK9lU7gY=
github.com/klauspost/readahead v1.4.0/go.mod h1:7bolpMKhT5LKskLwYXGSDOyA2TYtMFgdgV0Y8gy7QhA=
github.com/klauspost/reedsolomon v1.12.1 h1:NhWgum1efX1x58daOBGCFWcxtEhOhXKKl1HAPQUp03Q=
github.com/klauspost/reedsolomon v1.12.1/go.mod h1:nEi5Kjb6QqtbofI6s+cbG/j1da11c96IBYBSnVGtuBs=
github.com/klauspost/reedsolomon v1.12.3 h1:tzUznbfc3OFwJaTebv/QdhnFf2Xvb7gZ24XaHLBPmdc=
github.com/klauspost/reedsolomon v1.12.3/go.mod h1:3K5rXwABAvzGeR01r6pWZieUALXO/Tq7bFKGIb4m4WI=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
@ -434,14 +434,14 @@ github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/miekg/dns v1.1.59 h1:C9EXc/UToRwKLhK5wKU/I4QVsBUc8kE6MkHBkeypWZs=
github.com/miekg/dns v1.1.59/go.mod h1:nZpewl5p6IvctfgrckopVx2OlSEHPRO/U4SYkRklrEk=
github.com/miekg/dns v1.1.61 h1:nLxbwF3XxhwVSm8g9Dghm9MHPaUZuqhPiGL+675ZmEs=
github.com/miekg/dns v1.1.61/go.mod h1:mnAarhS3nWaW+NVP2wTkYVIZyHNJ098SJZUki3eykwQ=
github.com/minio/cli v1.24.2 h1:J+fCUh9mhPLjN3Lj/YhklXvxj8mnyE/D6FpFduXJ2jg=
github.com/minio/cli v1.24.2/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY=
github.com/minio/colorjson v1.0.8 h1:AS6gEQ1dTRYHmC4xuoodPDRILHP/9Wz5wYUGDQfPLpg=
github.com/minio/colorjson v1.0.8/go.mod h1:wrs39G/4kqNlGjwqHvPlAnXuc2tlPszo6JKdSBCLN8w=
github.com/minio/console v1.6.0 h1:G3mjhGV2Pox1Sqjwp/jRbRY7WiKsVyCLaZkxoIOaMCU=
github.com/minio/console v1.6.0/go.mod h1:XJ3HKHmigs1MgjaNjUwpyuOAJnwqlSMB+QnZCZ+BROY=
github.com/minio/console v1.6.3 h1:XGI/Oyq3J2vs+a1cobE87m4L059jr3q1Scej7hrEcbM=
github.com/minio/console v1.6.3/go.mod h1:yFhhM3Y3uT4N1WtphcYr3QAd7WYLU8CEuTcIiDpksWs=
github.com/minio/csvparser v1.0.0 h1:xJEHcYK8ZAjeW4hNV9Zu30u+/2o4UyPnYgyjWp8b7ZU=
github.com/minio/csvparser v1.0.0/go.mod h1:lKXskSLzPgC5WQyzP7maKH7Sl1cqvANXo9YCto8zbtM=
github.com/minio/dnscache v0.1.1 h1:AMYLqomzskpORiUA1ciN9k7bZT1oB3YZN4cEIi88W5o=
@ -450,27 +450,28 @@ github.com/minio/dperf v0.5.3 h1:D58ZrMfxrRw83EvAhr4FggvRT0DwWXsWrvsM8Xne+EM=
github.com/minio/dperf v0.5.3/go.mod h1:WrI7asRe/kv5zmnZ4XwHY74PV8OyUN+efeKINRgk5UI=
github.com/minio/filepath v1.0.0 h1:fvkJu1+6X+ECRA6G3+JJETj4QeAYO9sV43I79H8ubDY=
github.com/minio/filepath v1.0.0/go.mod h1:/nRZA2ldl5z6jT9/KQuvZcQlxZIMQoFFQPvEXx9T/Bw=
github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
github.com/minio/highwayhash v1.0.3 h1:kbnuUMoHYyVl7szWjSxJnxw11k2U709jqFPPmIUyD6Q=
github.com/minio/highwayhash v1.0.3/go.mod h1:GGYsuwP/fPD6Y9hMiXuapVvlIUEhFhMTh0rxU3ik1LQ=
github.com/minio/kms-go/kes v0.3.0 h1:SU8VGVM/Hk9w1OiSby3OatkcojooUqIdDHl6dtM6NkY=
github.com/minio/kms-go/kes v0.3.0/go.mod h1:w6DeVT878qEOU3nUrYVy1WOT5H1Ig9hbDIh698NYJKY=
github.com/minio/kms-go/kms v0.4.0 h1:cLPZceEp+05xHotVBaeFJrgL7JcXM4lBy6PU0idkE7I=
github.com/minio/kms-go/kms v0.4.0/go.mod h1:q12CehiIy2qgBnDKq6Q7wmPi2PHSyRVug5DKp0HAVeE=
github.com/minio/madmin-go/v3 v3.0.55 h1:Vm5AWS0kFoWwoJX4epskjVwmmS64xMNORMZaGR3cbK8=
github.com/minio/madmin-go/v3 v3.0.55/go.mod h1:IFAwr0XMrdsLovxAdCcuq/eoL4nRuMVQQv0iubJANQw=
github.com/minio/mc v0.0.0-20240612143403-e7c9a733c680 h1:Ns5mhSm86qJx6a9GJ1kzHkZMjRMZrQGsptakVRmq4QA=
github.com/minio/mc v0.0.0-20240612143403-e7c9a733c680/go.mod h1:21/cb+wUd+lLRsdX7ACqyO8DzPNSpXftp1bOkQlIbh8=
github.com/minio/madmin-go/v3 v3.0.58 h1:CUhb6FsBvgPfP1iOWvMGqlrB1epYpJw0i/yGXPH12WQ=
github.com/minio/madmin-go/v3 v3.0.58/go.mod h1:IFAwr0XMrdsLovxAdCcuq/eoL4nRuMVQQv0iubJANQw=
github.com/minio/mc v0.0.0-20240702213905-74032bc16a3f h1:UN7hxbfLhBssFfoqS4zNIBDMC57qgLpbym6v0XYLe2s=
github.com/minio/mc v0.0.0-20240702213905-74032bc16a3f/go.mod h1:kJaOnJZfmThdTEUR/9GlLbKYiqx+a5oFQac8wWaDogA=
github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=
github.com/minio/minio-go/v6 v6.0.46/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=
github.com/minio/minio-go/v7 v7.0.72-0.20240610154810-fa174cbf14b0 h1:7e4w0tbj1NpxxyiGB7CutxpKBnXus/RU1CwN3Sm4gDY=
github.com/minio/minio-go/v7 v7.0.72-0.20240610154810-fa174cbf14b0/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo=
github.com/minio/minio-go/v7 v7.0.73 h1:qr2vi96Qm7kZ4v7LLebjte+MQh621fFWnv93p12htEo=
github.com/minio/minio-go/v7 v7.0.73/go.mod h1:qydcVzV8Hqtj1VtEocfxbmVFa2siu6HGa+LDEPogjD8=
github.com/minio/mux v1.9.0 h1:dWafQFyEfGhJvK6AwLOt83bIG5bxKxKJnKMCi0XAaoA=
github.com/minio/mux v1.9.0/go.mod h1:1pAare17ZRL5GpmNL+9YmqHoWnLmMZF9C/ioUCfy0BQ=
github.com/minio/pkg/v2 v2.0.19 h1:r187/k/oVH9H0DDwvLY5WipkJaZ4CLd4KI3KgIUExR0=
github.com/minio/pkg/v2 v2.0.19/go.mod h1:luK9LAhQlAPzSuF6F326XSCKjMc1G3Tbh+a9JYwqh8M=
github.com/minio/pkg/v3 v3.0.2 h1:PX0HhnCdndHxCJ2rF2Cy3HocAyQR97fj9CRMixh5n8M=
github.com/minio/pkg/v3 v3.0.2/go.mod h1:53gkSUVHcfYoskOs5YAJ3D99nsd2SKru90rdE9whlXU=
github.com/minio/pkg/v3 v3.0.9 h1:LFmPKkmqWYGs8Y689zs0EKkJ/9l6rnBcLtjWNLG0lEI=
github.com/minio/pkg/v3 v3.0.9/go.mod h1:7I+o1o3vbrxVKBiFE5ifUADQMUnhiKdhqmQiq65ylm8=
github.com/minio/selfupdate v0.6.0 h1:i76PgT0K5xO9+hjzKcacQtO7+MjJ4JKA8Ak8XQ9DDwU=
github.com/minio/selfupdate v0.6.0/go.mod h1:bO02GTIPCMQFTEvE5h4DjYB58bCoZ35XLeBf0buTDdM=
github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
@ -506,20 +507,23 @@ github.com/muesli/reflow v0.3.0 h1:IFsN6K9NfGtjeggFP+68I4chLZV2yIKsXJFNZ+eWh6s=
github.com/muesli/reflow v0.3.0/go.mod h1:pbwTDkVPibjO2kyvBQRBxTWEEGDGq0FlB1BIKtnHY/8=
github.com/muesli/termenv v0.15.2 h1:GohcuySI0QmI3wN8Ok9PtKGkgkFIk7y6Vpb5PvrY+Wo=
github.com/muesli/termenv v0.15.2/go.mod h1:Epx+iuz8sNs7mNKhxzH4fWXGNpZwUaJKRS1noLXviQ8=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nats-io/jwt/v2 v2.2.1-0.20220113022732-58e87895b296/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
github.com/nats-io/jwt/v2 v2.2.1-0.20220330180145-442af02fd36a/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k=
github.com/nats-io/jwt/v2 v2.5.0 h1:WQQ40AAlqqfx+f6ku+i0pOVm+ASirD4fUh+oQsiE9Ak=
github.com/nats-io/jwt/v2 v2.5.0/go.mod h1:24BeQtRwxRV8ruvC4CojXlx/WQ/VjuwlYiH+vu/+ibI=
github.com/nats-io/nats-server/v2 v2.7.4/go.mod h1:1vZ2Nijh8tcyNe8BDVyTviCd9NYzRbubQYiEHsvOQWc=
github.com/nats-io/nats-server/v2 v2.8.2/go.mod h1:vIdpKz3OG+DCg4q/xVPdXHoztEyKDWRtykQ4N7hd7C4=
github.com/nats-io/nats-server/v2 v2.9.23 h1:6Wj6H6QpP9FMlpCyWUaNu2yeZ/qGj+mdRkZ1wbikExU=
github.com/nats-io/nats-server/v2 v2.9.23/go.mod h1:wEjrEy9vnqIGE4Pqz4/c75v9Pmaq7My2IgFmnykc4C0=
github.com/nats-io/nats-streaming-server v0.24.3 h1:uZez8jBkXscua++jaDsK7DhpSAkizdetar6yWbPMRco=
github.com/nats-io/nats-streaming-server v0.24.3/go.mod h1:rqWfyCbxlhKj//fAp8POdQzeADwqkVhZcoWlbhkuU5w=
github.com/nats-io/nats-streaming-server v0.24.6 h1:iIZXuPSznnYkiy0P3L0AP9zEN9Etp+tITbbX1KKeq4Q=
github.com/nats-io/nats-streaming-server v0.24.6/go.mod h1:tdKXltY3XLeBJ21sHiZiaPl+j8sK3vcCKBWVyxeQs10=
github.com/nats-io/nats.go v1.13.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nats.go v1.13.1-0.20220308171302-2f2f6968e98d/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nats.go v1.14.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nats.go v1.15.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
github.com/nats-io/nats.go v1.22.1/go.mod h1:tLqubohF7t4z3du1QDPYJIQQyhb4wl6DhjxEajSI7UA=
github.com/nats-io/nats.go v1.35.0 h1:XFNqNM7v5B+MQMKqVGAyHwYhyKb48jrenXNxIU20ULk=
github.com/nats-io/nats.go v1.35.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nats.go v1.36.0 h1:suEUPuWzTSse/XhESwqLxXGuj8vGRuPRoG7MoRN/qyU=
github.com/nats-io/nats.go v1.36.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
@ -540,8 +544,8 @@ github.com/orisano/pixelmatch v0.0.0-20220722002657-fb0b55479cde/go.mod h1:nZgzb
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw=
github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0=
github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986 h1:jYi87L8j62qkXzaYHAQAhEapgukhenIMZRBKTNRLHJ4=
github.com/philhofer/fwd v1.1.3-0.20240612014219-fbbf4953d986/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=
github.com/pierrec/lz4 v2.6.1+incompatible h1:9UY3+iC23yxF0UfGaYrGplQ+79Rg+h/q9FV9ix19jjM=
github.com/pierrec/lz4 v2.6.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ=
@ -577,8 +581,8 @@ github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQy
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.54.0 h1:ZlZy0BgJhTwVZUn7dLOkwCZHUkrAqd3WYtcFCWnM1D8=
github.com/prometheus/common v0.54.0/go.mod h1:/TQgMJP5CuVYveyT7n/0Ix8yLNNXy9yRSkhnLTHPDIQ=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@ -588,8 +592,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/prometheus/prom2json v1.3.3 h1:IYfSMiZ7sSOfliBoo89PcufjWO4eAR0gznGcETyaUgo=
github.com/prometheus/prom2json v1.3.3/go.mod h1:Pv4yIPktEkK7btWsrUTWDDDrnpUrAELaOCj+oFwlgmc=
github.com/puzpuzpuz/xsync/v3 v3.1.0 h1:EewKT7/LNac5SLiEblJeUu8z5eERHrmRLnMQL2d7qX4=
github.com/puzpuzpuz/xsync/v3 v3.1.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/puzpuzpuz/xsync/v3 v3.2.0 h1:9AzuUeF88YC5bK8u2vEG1Fpvu4wgpM1wfPIExfaaDxQ=
github.com/puzpuzpuz/xsync/v3 v3.2.0/go.mod h1:VjzYrABPabuM4KyBh1Ftq6u8nhwY5tBPKP9jpmh0nnA=
github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM=
@ -608,8 +612,8 @@ github.com/rs/cors v1.11.0/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU
github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc=
github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/safchain/ethtool v0.3.0 h1:gimQJpsI6sc1yIqP/y8GYgiXn/NjgvpM0RNoWLVVmP0=
github.com/safchain/ethtool v0.3.0/go.mod h1:SA9BwrgyAqNo7M+uaL6IYbxpm5wk3L7Mm6ocLW+CJUs=
github.com/safchain/ethtool v0.4.1 h1:S6mEleTADqgynileXoiapt/nKnatyR6bmIHoF+h2ADo=
github.com/safchain/ethtool v0.4.1/go.mod h1:XLLnZmy4OCRTkksP/UiMjij96YmIsBfmBQcs7H6tA48=
github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg=
github.com/secure-io/sio-go v0.3.1 h1:dNvY9awjabXTYGsTF1PiCySl9Ltofk9GA3VdWlo7rRc=
github.com/secure-io/sio-go v0.3.1/go.mod h1:+xbkjDzPjwh4Axd07pRKSNriS9SCiYksWnZqdnfpQxs=
@ -655,16 +659,16 @@ github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhso
github.com/tidwall/pretty v1.2.1 h1:qjsOFOWWQl+N3RsoF5/ssm1pHmJJwhjlSbZ51I6wMl4=
github.com/tidwall/pretty v1.2.1/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU=
github.com/tinylib/msgp v1.1.6/go.mod h1:75BAfg2hauQhs3qedfdDZmWAPcFMAvJE5b9rGOMufyw=
github.com/tinylib/msgp v1.1.9 h1:SHf3yoO2sGA0veCJeCBYLHuttAVFHGm2RHgNodW7wQU=
github.com/tinylib/msgp v1.1.9/go.mod h1:BCXGB54lDD8qUEPmiG0cQQUANC4IUQyB2ItS2UDlO/k=
github.com/tinylib/msgp v1.2.0 h1:0uKB/662twsVBpYUPbokj4sTSKhWFKB7LopO2kWK8lY=
github.com/tinylib/msgp v1.2.0/go.mod h1:2vIGs3lcUo8izAATNobrCHevYZC/LMsJtw4JPiYPHro=
github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/unrolled/secure v1.14.0 h1:u9vJTU/pR4Bny0ntLUMxdfLtmIRGvQf2sEFuA0TG9AE=
github.com/unrolled/secure v1.14.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40=
github.com/unrolled/secure v1.15.0 h1:q7x+pdp8jAHnbzxu6UheP8fRlG/rwYTb8TPuQ3rn9Og=
github.com/unrolled/secure v1.15.0/go.mod h1:BmF5hyM6tXczk3MpQkFf1hpKSRqCyhqcbiQtiAF7+40=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
github.com/vbauerster/mpb/v8 v8.7.3 h1:n/mKPBav4FFWp5fH4U0lPpXfiOmCEgl5Yx/NM3tKJA0=
@ -693,22 +697,22 @@ go.etcd.io/etcd/client/pkg/v3 v3.5.14 h1:SaNH6Y+rVEdxfpA2Jr5wkEvN6Zykme5+YnbCkxv
go.etcd.io/etcd/client/pkg/v3 v3.5.14/go.mod h1:8uMgAokyG1czCtIdsq+AGyYQMvpIKnSvPjFMunkgeZI=
go.etcd.io/etcd/client/v3 v3.5.14 h1:CWfRs4FDaDoSz81giL7zPpZH2Z35tbOrAJkkjMqOupg=
go.etcd.io/etcd/client/v3 v3.5.14/go.mod h1:k3XfdV/VIHy/97rqWjoUzrj9tk7GgJGH9J8L4dNXmAk=
go.mongodb.org/mongo-driver v1.15.0 h1:rJCKC8eEliewXjZGf0ddURtl7tTVy1TK3bfl0gkUSLc=
go.mongodb.org/mongo-driver v1.15.0/go.mod h1:Vzb0Mk/pa7e6cWw85R4F/endUC3u0U9jGcNU603k65c=
go.mongodb.org/mongo-driver v1.16.0 h1:tpRsfBJMROVHKpdGyc1BBEzzjDUWjItxbVSZ8Ls4BQ4=
go.mongodb.org/mongo-driver v1.16.0/go.mod h1:oB6AhJQvFQL4LEHyXi6aJzQJtBiTQHiAd83l0GdFaiw=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0 h1:vS1Ao/R55RNV4O7TA2Qopok8yN+X0LIP6RVWLFkprck=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.52.0/go.mod h1:BMsdeOxN04K0L5FNUBfjFdvwWGNe/rkmSwH4Aelu/X0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0 h1:9l89oX4ba9kHbBol3Xin3leYJ+252h0zszDtBwyKe2A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.52.0/go.mod h1:XLZfZboOJWHNKUv7eH0inh0E9VV6eWDFB/9yJyTLPp0=
go.opentelemetry.io/otel v1.27.0 h1:9BZoF3yMK/O1AafMiQTVu0YDj5Ea4hPhxCs7sGva+cg=
go.opentelemetry.io/otel v1.27.0/go.mod h1:DMpAK8fzYRzs+bi3rS5REupisuqTheUlSZJ1WnZaPAQ=
go.opentelemetry.io/otel/metric v1.27.0 h1:hvj3vdEKyeCi4YaYfNjv2NUje8FqKqUY8IlF0FxV/ik=
go.opentelemetry.io/otel/metric v1.27.0/go.mod h1:mVFgmRlhljgBiuk/MP/oKylr4hs85GZAylncepAX/ak=
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
go.opentelemetry.io/otel/trace v1.27.0 h1:IqYb813p7cmbHk0a5y6pD5JPakbVfftRXABGt5/Rscw=
go.opentelemetry.io/otel/trace v1.27.0/go.mod h1:6RiD1hkAprV4/q+yd2ln1HG9GoPx39SuvvstaLBl+l4=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@ -733,8 +737,7 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211209193657-4570a0811e8b/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220112180741-5e0467b6c7ce/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220307211146-efcb8507fb70/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
@ -745,8 +748,8 @@ golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOM
golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 h1:LoYXNGAShUG3m/ehNk4iFctuhGX/+R1ZpfJ4/ia80JM=
golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY=
golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@ -822,14 +825,13 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210228012217-479acdf4ea46/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220111092808-5a964db01320/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220307203707-22a9840ba4d7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220408201424-a24fb2fb8a0f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -891,26 +893,26 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 h1:+cNy6SZtPcJQH3LJVLOSmiC7MMxXNOb3PU/VUEz+EhU=
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
google.golang.org/api v0.184.0 h1:dmEdk6ZkJNXy1JcDhn/ou0ZUq7n9zropG2/tR4z+RDg=
google.golang.org/api v0.184.0/go.mod h1:CeDTtUEiYENAf8PPG5VZW2yNp2VM3VWbCeTioAZBTBA=
google.golang.org/api v0.187.0 h1:Mxs7VATVC2v7CY+7Xwm4ndkX71hpElcvx0D1Ji/p1eo=
google.golang.org/api v0.187.0/go.mod h1:KIHlTc4x7N7gKKuVsdmfBXN13yEEWXWFURWY6SBp2gk=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20240610135401-a8a62080eff3 h1:8RTI1cmuvdY9J7q/jpJWEj5UfgWjhV5MCoXaYmwLBYQ=
google.golang.org/genproto v0.0.0-20240610135401-a8a62080eff3/go.mod h1:qb66gsewNb7Ghv1enkhJiRfYGWUklv3n6G8UvprOhzA=
google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 h1:QW9+G6Fir4VcRXVH8x3LilNAb6cxBGLa6+GM4hRwexE=
google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3/go.mod h1:kdrSS/OiLkPrNUpzD4aHgCq2rVuC/YRxok32HXZ4vRE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240610135401-a8a62080eff3 h1:9Xyg6I9IWQZhRVfCWjKK+l6kI0jHcPesVlMnT//aHNo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240610135401-a8a62080eff3/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094 h1:6whtk83KtD3FkGrVb2hFXuQ+ZMbCNdakARIn/aHMmG8=
google.golang.org/genproto v0.0.0-20240701130421-f6361c86f094/go.mod h1:Zs4wYw8z1zr6RNF4cwYb31mvN/EGaKAdQjNCF3DW6K4=
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094 h1:0+ozOGcrp+Y8Aq8TLNN2Aliibms5LEzsq99ZZmAGYm0=
google.golang.org/genproto/googleapis/api v0.0.0-20240701130421-f6361c86f094/go.mod h1:fJ/e3If/Q67Mj99hin0hMhiNyCRmt6BQ2aWIJshUSJw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094 h1:BwIjyKYGsK9dMCBOorzRri8MQwmi7mT9rGHsCEinZkA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240701130421-f6361c86f094/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -928,8 +930,6 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View file

@ -74,12 +74,16 @@ func (r *MonitoredReader) Read(buf []byte) (n int, err error) {
need = int(math.Min(float64(b), float64(need)))
tokens = need
}
// reduce tokens requested according to availability
av := int(r.throttle.Tokens())
if av < tokens && av > 0 {
tokens = av
need = int(math.Min(float64(tokens), float64(need)))
}
err = r.throttle.WaitN(r.ctx, tokens)
if err != nil {
return
}
n, err = r.r.Read(buf[:need])
if err != nil {
r.lastErr = err
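
The hunk above clamps the token request to what the limiter currently holds before calling WaitN, so a large read drains a partially filled bucket instead of stalling until the full request can be satisfied. Below is a minimal, self-contained sketch of that idea using a golang.org/x/time/rate limiter; throttledReader and its fields are illustrative names, not MinIO's MonitoredReader.

package main

import (
	"context"
	"fmt"
	"io"
	"strings"

	"golang.org/x/time/rate"
)

type throttledReader struct {
	r        io.Reader
	ctx      context.Context
	throttle *rate.Limiter
}

func (t *throttledReader) Read(buf []byte) (int, error) {
	need := len(buf)
	if b := t.throttle.Burst(); need > b {
		need = b // never request more tokens than the bucket can ever hold
	}
	tokens := need
	// Reduce tokens requested according to availability (the idea in the
	// hunk above): take what the bucket has now rather than waiting for
	// the full amount.
	if av := int(t.throttle.Tokens()); av > 0 && av < tokens {
		tokens = av
		need = tokens
	}
	if err := t.throttle.WaitN(t.ctx, tokens); err != nil {
		return 0, err
	}
	return t.r.Read(buf[:need])
}

func main() {
	tr := &throttledReader{
		r:        strings.NewReader("hello, throttled world"),
		ctx:      context.Background(),
		throttle: rate.NewLimiter(rate.Limit(16), 8), // 16 B/s, burst of 8
	}
	out, err := io.ReadAll(tr)
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	fmt.Println(string(out))
}

The av > 0 guard matters: Tokens() can report zero or negative when the bucket is drained, and clamping to that would degenerate into zero-byte requests, so in that case the reader simply waits for the original amount.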

View file

@ -572,6 +572,7 @@ func FilterObjectLockMetadata(metadata map[string]string, filterRetention, filte
dst := metadata
var copied bool
delKey := func(key string) {
key = strings.ToLower(key)
if _, ok := metadata[key]; !ok {
return
}
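
The added strings.ToLower normalizes the lookup key before the membership check, presumably because keys in this metadata map are stored lowercased; without it, a mixed-case caller key would never match and the corresponding headers would survive the filter. A hedged sketch of the surrounding copy-on-write pattern follows; filterKeys and the sample keys are hypothetical, not MinIO's FilterObjectLockMetadata.

package main

import (
	"fmt"
	"strings"
)

// filterKeys deletes the given keys from a metadata map whose keys are
// stored lowercase, copying the map only if a deletion actually happens.
func filterKeys(metadata map[string]string, keys ...string) map[string]string {
	dst := metadata
	copied := false
	for _, key := range keys {
		key = strings.ToLower(key) // normalize, as the added line above does
		if _, ok := metadata[key]; !ok {
			continue // key absent: no deletion, and no copy, required
		}
		if !copied {
			// Copy once, on the first real deletion, so callers holding
			// the input map never see it mutated.
			dst = make(map[string]string, len(metadata))
			for k, v := range metadata {
				dst[k] = v
			}
			copied = true
		}
		delete(dst, key)
	}
	return dst
}

func main() {
	meta := map[string]string{
		"x-amz-object-lock-mode": "GOVERNANCE",
		"etag":                   "abc",
	}
	out := filterKeys(meta, "X-Amz-Object-Lock-Mode") // mixed case still matches
	fmt.Println(out)       // map[etag:abc]
	fmt.Println(len(meta)) // original map untouched: 2
}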

Some files were not shown because too many files have changed in this diff.