mirror of https://github.com/minio/minio.git
Compare commits
285 Commits
RELEASE.20...
master
Author | SHA1 | Date |
---|---|---|
Harshavardhana | ba54b39c02 | |
Anis Eleuch | 2a75225569 | |
Klaus Post | e72429c79c | |
Klaus Post | c5b3f5553f | |
Klaus Post | d3ae0aaad3 | |
Klaus Post | d67bccf861 | |
Anis Eleuch | 1277ad69a6 | |
Harshavardhana | 8f93e81afb | |
Harshavardhana | 4af31e654b | |
Harshavardhana | aad50579ba | |
Harshavardhana | 38d059b0ae | |
Klaus Post | bd4eeb4522 | |
jiuker | 03e3493288 | |
Harshavardhana | 64baedf5a4 | |
Minio Trusted | 2f64d5f77e | |
Anis Eleuch | f79a4ef4d0 | |
Taran Pelkey | 2d53854b19 | |
Harshavardhana | e5c83535af | |
jiuker | c904ef966e | |
Minio Trusted | 8f266e0772 | |
Harshavardhana | e0fe7cc391 | |
Harshavardhana | 9d20dec56a | |
Harshavardhana | 597a785253 | |
Harshavardhana | 7d75b1e758 | |
Aditya Manthramurthy | 5f78691fcf | |
Shireesh Anjal | a591e06ae5 | |
Harshavardhana | 443c93c634 | |
Shireesh Anjal | 5659cddc84 | |
Shireesh Anjal | 2a03a34bde | |
Shubhendu | 1654a9b7e6 | |
Shireesh Anjal | 673a521711 | |
Harshavardhana | 2e23076688 | |
Klaus Post | b92ac55250 | |
Shireesh Anjal | 7981509cc8 | |
Krishnan Parthasarathi | 6d5bc045bc | |
Harshavardhana | d38e020b29 | |
Poorna | 7d29030292 | |
Shubhendu | 7c7650b7c3 | |
Harshavardhana | ca80eced24 | |
Anis Eleuch | d0e0b81d8e | |
jiuker | 391baa1c9a | |
Harshavardhana | ae14681c3e | |
Klaus Post | 4d698841f4 | |
jiuker | 9906b3ade9 | |
Anis Eleuch | bf1769d3e0 | |
Harshavardhana | 63e1ad9f29 | |
Klaus Post | 2c7bcee53f | |
Harshavardhana | 1fd90c93ff | |
Poorna | e947a844c9 | |
Poorna | 4e2d39293a | |
Krishnan Parthasarathi | 1228d6bf1a | |
Shireesh Anjal | fc4561c64c | |
Klaus Post | 3b7747b42b | |
Harshavardhana | e432e79324 | |
Harshavardhana | 08d74819b6 | |
Poorna | aa3fde1784 | |
Harshavardhana | 0b3eb7f218 | |
Anis Eleuch | 69c9496c71 | |
Klaus Post | b792b36495 | |
Harshavardhana | d3db7d31a3 | |
Shireesh Anjal | c05ca63158 | |
Klaus Post | 6d3e0c7db6 | |
Shireesh Anjal | 0e59e50b39 | |
Klaus Post | d4b391de1b | |
Shubhendu | de4d3dac00 | |
Olli Janatuinen | 534e7161df | |
Harshavardhana | 9b219cd646 | |
Shireesh Anjal | 3bab4822f3 | |
coderwander | 3c5f2d8916 | |
Shireesh Anjal | 5808190398 | |
Shireesh Anjal | b2a82248b1 | |
dependabot[bot] | 4e5fcca8b9 | |
Klaus Post | c36eaedb93 | |
Poorna | 7752b03add | |
jiuker | 01bfc78535 | |
Shireesh Anjal | 074d70112d | |
Harshavardhana | e8d14c0d90 | |
Shireesh Anjal | 60d7e8143a | |
Klaus Post | 9667a170de | |
Shubhendu | abae30f9e1 | |
Minio Trusted | f9311bc9d1 | |
Harshavardhana | b598402738 | |
Harshavardhana | bd026b913f | |
Harshavardhana | 72ff69d9bb | |
Harshavardhana | f30417d9a8 | |
jiuker | 47a4ad3cd7 | |
Klaus Post | 2f7a10ab31 | |
Harshavardhana | b534dc69ab | |
Harshavardhana | 7b7d2ea7d4 | |
Aditya Manthramurthy | e00de1c302 | |
Harshavardhana | 3549e583a6 | |
Andi | f5e3eedf34 | |
Harshavardhana | 519dbfebf6 | |
Harshavardhana | 9a267f9270 | |
Anis Eleuch | 67bd71b7a5 | |
Klaus Post | ec49fff583 | |
Andreas Auernhammer | 8b660e18f2 | |
Harshavardhana | 981497799a | |
Minio Trusted | b9bdc17465 | |
Olli Janatuinen | b413ff9fdb | |
Harshavardhana | 6a15580817 | |
Cesar N | 39633a5581 | |
Alex | 1e83f15e2f | |
Harshavardhana | 888d2bb1d8 | |
Klaus Post | 847ee5ac45 | |
jiuker | 9a9a49aa84 | |
Harshavardhana | a03ca80269 | |
Harshavardhana | 523bd769f1 | |
Harshavardhana | 8ff70ea5a9 | |
Harshavardhana | da3e7747ca | |
Klaus Post | 4afb59e63f | |
Harshavardhana | 1526e7ece3 | |
Krishnan Parthasarathi | 6c07bfee8a | |
Poorna | 446c760820 | |
Shireesh Anjal | 04f92f1291 | |
Klaus Post | 4a60a7794d | |
Bala FA | e5b16adb1c | |
Harshavardhana | 402a3ac719 | |
Aditya Manthramurthy | f3d61c51fc | |
Klaus Post | 0cde17ae5d | |
Harshavardhana | 8c1bba681b | |
Klaus Post | dbfb5e797b | |
Harshavardhana | 08ff702434 | |
Klaus Post | 0e2148264a | |
Minio Trusted | a75f42344b | |
Krishnan Parthasarathi | 7926401cbd | |
Harshavardhana | 8161411c5d | |
Klaus Post | f64dea2aac | |
Shubhendu | 6579304d8c | |
jiuker | 6bb10a81a6 | |
Klaus Post | 3cf8a7c888 | |
Minio Trusted | 2e38bb5175 | |
Harshavardhana | a372c6a377 | |
Harshavardhana | 93b2f8a0c5 | |
opencmit2 | 1a6568a25d | |
Poorna | 9e95703efc | |
Anis Eleuch | d8e05aca81 | |
Praveen raj Mani | 410a1ac040 | |
Shireesh Anjal | 4caa3422bd | |
Dennis Marttinen | a658b976f5 | |
Anis Eleuch | 135874ebdc | |
Harshavardhana | f4f1c42cba | |
Poorna | e7aa26dc29 | |
Harshavardhana | c54ffde568 | |
Anis Eleuch | 9a3c992d7a | |
Aditya Manthramurthy | 0c855638de | |
Cesar N | 943d815783 | |
Ramon de Klein | 4c0acba62d | |
Aditya Manthramurthy | 62c3cdee75 | |
Aditya Manthramurthy | 3212d0c8cd | |
Harshavardhana | 1d03bea965 | |
Klaus Post | fbfeb59658 | |
Ramon de Klein | 701da1282a | |
jiuker | df93ff92ba | |
Shireesh Anjal | 77d5331e85 | |
Bala FA | 14cdadfb56 | |
Harshavardhana | f3a52cc195 | |
Aditya Manthramurthy | 7640cd24c9 | |
Shireesh Anjal | f7b665347e | |
Harshavardhana | 9693c382a8 | |
jiuker | ee1047bd52 | |
Seiya | 5ea5ab162b | |
Klaus Post | b5a09ff96b | |
Harshavardhana | 95c65f4e8f | |
Harshavardhana | 6bfff7532e | |
Harshavardhana | 1aa8896ad6 | |
Krishnan Parthasarathi | 3e32ceb39f | |
dependabot[bot] | ca1350b092 | |
jiuker | 9205434ed3 | |
Harshavardhana | cd50e9b4bc | |
Klaus Post | ec816f3840 | |
Klaus Post | 5f774951b1 | |
Harshavardhana | 2ca9befd2a | |
Harshavardhana | 72f5cb577e | |
Robert Lützner | 928c0181bf | |
Harshavardhana | 03767d26da | |
Sveinn | 108e6f92d4 | |
Harshavardhana | d653a59fc0 | |
Minio Trusted | 01bfdf949a | |
Aditya Manthramurthy | 98f7821eb3 | |
jiuker | 2d3898e0d5 | |
Aditya Manthramurthy | ae46ce9937 | |
Anis Eleuch | dfc112c06b | |
Shireesh Anjal | ca5fab8656 | |
Shireesh Anjal | 6df76ca73c | |
Harshavardhana | f65dd3e5a2 | |
Harshavardhana | a8d601b64a | |
Viktor Szépe | 73b4794cf7 | |
Klaus Post | e2709ea129 | |
Allan Roger Reid | 740ec80819 | |
Harshavardhana | d95e054282 | |
Allan Roger Reid | 7c1f9667d1 | |
Klaus Post | 9246990496 | |
Markus Wagner | 0cf3d93360 | |
Harshavardhana | cb06aee5ac | |
Shubhendu | 1c70e9ed1b | |
jiuker | f3d6a2dd37 | |
Harshavardhana | d1c58fc2eb | |
Allan Roger Reid | b8f05b1471 | |
Klaus Post | e7baf78ee8 | |
guangwu | 87299eba10 | |
Shubhendu | d3a07c29ba | |
Aditya Manthramurthy | 8d39b715dc | |
Harshavardhana | 7e3166475d | |
Klaus Post | 5206c0e883 | |
Harshavardhana | 41ec038523 | |
Shireesh Anjal | 08d3d06a06 | |
Harshavardhana | 074febd9e1 | |
Harshavardhana | aa8d25797b | |
Alex | 8d7d4adb91 | |
Poorna | ffa91f9794 | |
Harshavardhana | 0c31e61343 | |
Harshavardhana | 9b926f7dbe | |
Harshavardhana | 35d8728990 | |
Allan Roger Reid | f7ed9a75ba | |
jiuker | 9496c17e13 | |
jiuker | ed64e91f06 | |
jiuker | a481825ae1 | |
Harshavardhana | 7bb0f32332 | |
Anis Eleuch | c6f8dc431e | |
Alexander Thaller | 78f177b8ee | |
Anis Eleuch | 787c44c39d | |
Anis Eleuch | f06fee0364 | |
Harshavardhana | c957e0d426 | |
Harshavardhana | 04101d472f | |
Minio Trusted | 51fc145161 | |
Taran Pelkey | 9d63bb1b41 | |
Aditya Manthramurthy | 8ff2a7a2b9 | |
Harshavardhana | 91f91d8f47 | |
Harshavardhana | a207bd6790 | |
Harshavardhana | 96d226c0b1 | |
Krishnan Parthasarathi | a86d98826d | |
Harshavardhana | 1bb670ecba | |
Aditya Manthramurthy | c9e9a8e2b9 | |
jiuker | 272367ccd2 | |
Anis Eleuch | 95bf4a57b6 | |
Harshavardhana | 2228eb61cb | |
Alexander Thaller | 5f07eb2d17 | |
Shubhendu | d96d696841 | |
Harshavardhana | e18c0ab9bf | |
Andreas Auernhammer | faeb2b7e79 | |
Anis Eleuch | 97ce11cb6b | |
Harshavardhana | d7daae4762 | |
jiuker | 3d86ae12bc | |
Sveinn | ba46ee5dfa | |
Klaus Post | 912bbb2f1d | |
Harshavardhana | 4f660a8eb7 | |
Praveen raj Mani | ae4fb1b72e | |
Klaus Post | b435806d91 | |
Minio Trusted | 06929258bc | |
Harshavardhana | cb577835d9 | |
Harshavardhana | 7f35f74f14 | |
Klaus Post | 3d6194e93c | |
Harshavardhana | 72c7845f7e | |
Harshavardhana | 1c99597a06 | |
Harshavardhana | feb9d8480b | |
Aditya Manthramurthy | 4e670458b8 | |
Aditya Manthramurthy | 48deccdc40 | |
Andi Bräu | 2eee744e34 | |
Kaan Kabalak | 3f72439b8a | |
Shubhendu | 468a9fae83 | |
Shubhendu | d87f91720b | |
Klaus Post | aa0eec16ab | |
Shubhendu | d63e603040 | |
jiuker | 8222a640ac | |
Aditya Manthramurthy | 7e45d84ace | |
Harshavardhana | 139a606f0a | |
Harshavardhana | 289223b6de | |
Harshavardhana | c61dd16a1e | |
Harshavardhana | 3e38fa54a5 | |
jiuker | 4a02189ba0 | |
Shubhendu | 3d4fc28ec9 | |
jiuker | ec3a3bb10d | |
Harshavardhana | 364d3a0ac9 | |
Harshavardhana | cb536a73eb | |
Minio Trusted | 428155add9 | |
Poorna | 8bce123bba | |
Harshavardhana | 0a56dbde2f | |
Klaus Post | 7ff4164d65 | |
Shubhendu | 53a14c7301 | |
Harshavardhana | dc45a5010d | |
jiuker | 4b9192034c | |
Harshavardhana | deeadd1a37 | |
Sveinn | 1fc4203c19 | |
Minio Trusted | 15b930be1f |
@@ -9,6 +9,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: 'Dependency Review'
uses: actions/dependency-review-action@v1
uses: actions/dependency-review-action@v4
@@ -3,12 +3,11 @@ name: Crosscompile
on:
pull_request:
branches:
- master
- next
- master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true

@@ -21,11 +20,11 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true
@@ -3,8 +3,7 @@ name: FIPS Build Test
on:
pull_request:
branches:
- master
- next
- master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.

@@ -21,11 +20,11 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
@@ -3,8 +3,7 @@ name: Healing Functional Tests
on:
pull_request:
branches:
- master
- next
- master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.

@@ -21,11 +20,11 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true
@@ -3,12 +3,11 @@ name: Linters and Tests
on:
pull_request:
branches:
- master
- next
- master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true

@@ -21,23 +20,24 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
os: [ubuntu-latest, windows-latest]
go-version: [1.22.x]
os: [ubuntu-latest, Windows]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true
- name: Build on ${{ matrix.os }}
if: matrix.os == 'windows-latest'
if: matrix.os == 'Windows'
env:
CGO_ENABLED: 0
GO111MODULE: on
run: |
Set-MpPreference -DisableRealtimeMonitoring $true
netsh int ipv4 set dynamicport tcp start=60000 num=61000
go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
go test -v --timeout 50m ./...
go test -v --timeout 120m ./...
- name: Build on ${{ matrix.os }}
if: matrix.os == 'ubuntu-latest'
env:
@@ -3,8 +3,7 @@ name: Functional Tests
on:
pull_request:
branches:
- master
- next
- master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.

@@ -21,11 +20,11 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true
@@ -3,8 +3,7 @@ name: Helm Chart linting
on:
pull_request:
branches:
- master
- next
- master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.

@@ -20,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4

- name: Install Helm
uses: azure/setup-helm@v3
@@ -3,8 +3,7 @@ name: IAM integration
on:
pull_request:
branches:
- master
- next
- master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.

@@ -62,7 +61,7 @@ jobs:
# are turned off - i.e. if ldap="", then ldap server is not enabled for
# the tests.
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
ldap: ["", "localhost:389"]
etcd: ["", "http://localhost:2379"]
openid: ["", "http://127.0.0.1:5556/dex"]

@@ -76,8 +75,8 @@ jobs:
openid: "http://127.0.0.1:5556/dex"

steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true

@@ -112,6 +111,12 @@ jobs:
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
go run docs/iam/access-manager-plugin.go &
make test-iam
- name: Test MinIO Old Version data to IAM import current version
if: matrix.ldap == 'ldaphost:389'
env:
_MINIO_LDAP_TEST_SERVER: ${{ matrix.ldap }}
run: |
make test-iam-ldap-upgrade-import
- name: Test LDAP for automatic site replication
if: matrix.ldap == 'localhost:389'
run: |
@@ -4,7 +4,6 @@ on:
pull_request:
branches:
- master
- next

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.

@@ -25,12 +24,12 @@ jobs:
sudo -S rm -rf ${GITHUB_WORKSPACE}
mkdir ${GITHUB_WORKSPACE}
- name: checkout-step
uses: actions/checkout@v3
uses: actions/checkout@v4

- name: setup-go-step
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: 1.21.x
go-version: 1.22.x

- name: github sha short
id: vars

@@ -56,6 +55,10 @@ jobs:
run: |
${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "erasure" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"

- name: resiliency
run: |
${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "resiliency" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"

- name: The job must cleanup
if: ${{ always() }}
run: |
@@ -0,0 +1,78 @@
version: '3.7'

# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: quay.io/minio/minio:${JOB_NAME}
command: server --console-address ":9001" http://minio{1...4}/rdata{1...2}
expose:
- "9000"
- "9001"
environment:
MINIO_CI_CD: "on"
MINIO_ROOT_USER: "minio"
MINIO_ROOT_PASSWORD: "minio123"
MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
MINIO_DRIVE_MAX_TIMEOUT: "5s"
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5

# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
minio1:
<<: *minio-common
hostname: minio1
volumes:
- rdata1-1:/rdata1
- rdata1-2:/rdata2

minio2:
<<: *minio-common
hostname: minio2
volumes:
- rdata2-1:/rdata1
- rdata2-2:/rdata2

minio3:
<<: *minio-common
hostname: minio3
volumes:
- rdata3-1:/rdata1
- rdata3-2:/rdata2

minio4:
<<: *minio-common
hostname: minio4
volumes:
- rdata4-1:/rdata1
- rdata4-2:/rdata2

nginx:
image: nginx:1.19.2-alpine
hostname: nginx
volumes:
- ./nginx-4-node.conf:/etc/nginx/nginx.conf:ro
ports:
- "9000:9000"
- "9001:9001"
depends_on:
- minio1
- minio2
- minio3
- minio4

## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
rdata1-1:
rdata1-2:
rdata2-1:
rdata2-2:
rdata3-1:
rdata3-2:
rdata4-1:
rdata4-2:
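A minimal usage sketch for the compose file above, assuming it is saved as minio-resiliency.yaml (the name run-mint.sh below resolves for MODE=resiliency) and that JOB_NAME points at an available minio image tag; both values here are illustrative, not part of the original change:

    export JOB_NAME=edge                           # hypothetical tag; the compose file pulls quay.io/minio/minio:${JOB_NAME}
    docker-compose -f minio-resiliency.yaml up -d  # starts minio1..minio4 plus the nginx front end on port 9000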
@@ -23,10 +23,9 @@ http {
# include /etc/nginx/conf.d/*.conf;

upstream minio {
server minio1:9000;
server minio2:9000;
server minio3:9000;
server minio4:9000;
server minio1:9000 max_fails=1 fail_timeout=10s;
server minio2:9000 max_fails=1 fail_timeout=10s;
server minio3:9000 max_fails=1 fail_timeout=10s;
}

upstream console {
@@ -23,14 +23,14 @@ http {
# include /etc/nginx/conf.d/*.conf;

upstream minio {
server minio1:9000;
server minio2:9000;
server minio3:9000;
server minio4:9000;
server minio5:9000;
server minio6:9000;
server minio7:9000;
server minio8:9000;
server minio1:9000 max_fails=1 fail_timeout=10s;
server minio2:9000 max_fails=1 fail_timeout=10s;
server minio3:9000 max_fails=1 fail_timeout=10s;
server minio4:9000 max_fails=1 fail_timeout=10s;
server minio5:9000 max_fails=1 fail_timeout=10s;
server minio6:9000 max_fails=1 fail_timeout=10s;
server minio7:9000 max_fails=1 fail_timeout=10s;
server minio8:9000 max_fails=1 fail_timeout=10s;
}

upstream console {
@@ -23,10 +23,10 @@ http {
# include /etc/nginx/conf.d/*.conf;

upstream minio {
server minio1:9000;
server minio2:9000;
server minio3:9000;
server minio4:9000;
server minio1:9000 max_fails=1 fail_timeout=10s;
server minio2:9000 max_fails=1 fail_timeout=10s;
server minio3:9000 max_fails=1 fail_timeout=10s;
server minio4:9000 max_fails=1 fail_timeout=10s;
}

upstream console {
@@ -24,11 +24,6 @@ if [ ! -f ./mc ]; then
chmod +x mc
fi

(
cd ./docs/debugging/s3-check-md5
go install -v
)

export RELEASE=RELEASE.2023-08-29T23-07-35Z

docker-compose -f docker-compose-site1.yaml up -d

@@ -48,10 +43,10 @@ sleep 30s

sleep 5

s3-check-md5 -h
./s3-check-md5 -h

failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)

if [ $failed_count_site1 -ne 0 ]; then
echo "failed with multipart on site1 uploads"

@@ -67,8 +62,8 @@ fi

sleep 5

failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)

## we do not need to fail here, since we are going to test
## upgrading to master, healing and being able to recover

@@ -96,8 +91,8 @@ for i in $(seq 1 10); do
./mc admin heal -r --remove --json site2/ 2>&1 >/dev/null
done

failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)

if [ $failed_count_site1 -ne 0 ]; then
echo "failed with multipart on site1 uploads"

@@ -109,6 +104,43 @@ if [ $failed_count_site2 -ne 0 ]; then
exit 1
fi

# Add user group test
./mc admin user add site1 site-replication-issue-user site-replication-issue-password
./mc admin group add site1 site-replication-issue-group site-replication-issue-user

max_wait_attempts=30
wait_interval=5

attempt=1
while true; do
diff <(./mc admin group info site1 site-replication-issue-group) <(./mc admin group info site2 site-replication-issue-group)

if [[ $? -eq 0 ]]; then
echo "Outputs are consistent."
break
fi

remaining_attempts=$((max_wait_attempts - attempt))
if ((attempt >= max_wait_attempts)); then
echo "Outputs remain inconsistent after $max_wait_attempts attempts. Exiting with error."
exit 1
else
echo "Outputs are inconsistent. Waiting for $wait_interval seconds (attempt $attempt/$max_wait_attempts)."
sleep $wait_interval
fi

((attempt++))
done

status=$(./mc admin group info site1 site-replication-issue-group --json | jq .groupStatus | tr -d '"')

if [[ $status == "enabled" ]]; then
echo "Success"
else
echo "Expected status: enabled, actual status: $status"
exit 1
fi

cleanup

## change working directory
@@ -3,8 +3,7 @@ name: MinIO advanced tests
on:
pull_request:
branches:
- master
- next
- master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.

@@ -22,11 +21,11 @@ jobs:

strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]

steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true

@@ -36,6 +35,18 @@ jobs:
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-decom

- name: Test ILM
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-ilm

- name: Test PBAC
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-pbac

- name: Test Config File
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
@@ -3,8 +3,7 @@ name: Root lockdown tests
on:
pull_request:
branches:
- master
- next
- master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.

@@ -21,12 +20,12 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]

steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true
@@ -16,7 +16,7 @@ docker volume rm $(docker volume ls -f dangling=true) || true
cd .github/workflows/mint

docker-compose -f minio-${MODE}.yaml up -d
sleep 30s
sleep 1m

docker system prune -f || true
docker volume prune -f || true

@@ -26,6 +26,9 @@ docker volume rm $(docker volume ls -q -f dangling=true) || true
[ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio2
[ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio6

# Pause one node, to check that all S3 calls work while one node goes wrong
[ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml pause minio4

docker run --rm --net=mint_default \
--name="mint-${MODE}-${JOB_NAME}" \
-e SERVER_ENDPOINT="nginx:9000" \

@@ -35,6 +38,18 @@ docker run --rm --net=mint_default \
-e MINT_MODE="${MINT_MODE}" \
docker.io/minio/mint:edge

# FIXME: enable this after fixing aws-sdk-java-v2 tests
# # unpause the node, to check that all S3 calls work while one node goes wrong
# [ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml unpause minio4
# [ "${MODE}" == "resiliency" ] && docker run --rm --net=mint_default \
# --name="mint-${MODE}-${JOB_NAME}" \
# -e SERVER_ENDPOINT="nginx:9000" \
# -e ACCESS_KEY="${ACCESS_KEY}" \
# -e SECRET_KEY="${SECRET_KEY}" \
# -e ENABLE_HTTPS=0 \
# -e MINT_MODE="${MINT_MODE}" \
# docker.io/minio/mint:edge

docker-compose -f minio-${MODE}.yaml down || true
sleep 10s
@@ -3,8 +3,7 @@ name: Shell formatting checks
on:
pull_request:
branches:
- master
- next
- master

permissions:
contents: read

@@ -14,7 +13,7 @@ jobs:
name: runner / shfmt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: luizm/action-sh-checker@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -1,5 +1,5 @@
---
name: Test GitHub Action
name: Spelling
on: [pull_request]

jobs:
@@ -3,8 +3,7 @@ name: Upgrade old version tests
on:
pull_request:
branches:
- master
- next
- master

# This ensures that previous jobs for the PR are canceled when the PR is
# updated.

@@ -21,12 +20,12 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]

steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true
@@ -17,15 +17,15 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v3
uses: actions/setup-go@v5
with:
go-version: 1.21.8
go-version: 1.22.3
check-latest: true
- name: Get official govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
shell: bash
- name: Run govulncheck
run: govulncheck ./...
run: govulncheck -show verbose ./...
shell: bash
@@ -43,4 +43,13 @@ docs/debugging/inspect/inspect
docs/debugging/pprofgoparser/pprofgoparser
docs/debugging/reorder-disks/reorder-disks
docs/debugging/populate-hard-links/populate-hardlinks
docs/debugging/xattr/xattr
docs/debugging/xattr/xattr
hash-set
healing-bin
inspect
pprofgoparser
reorder-disks
s3-check-md5
s3-verify
xattr
xl-meta
17
.typos.toml

@@ -12,20 +12,27 @@ extend-ignore-re = [
"[0-9A-Za-z/+=]{64}",
"ZXJuZXQxDjAMBgNVBA-some-junk-Q4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF",
"eyJmb28iOiJiYXIifQ",
"eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.*",
"MIIDBTCCAe2gAwIBAgIQWHw7h.*",
'http\.Header\{"X-Amz-Server-Side-Encryptio":',
'sessionToken',
"ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge",
]

[default.extend-words]
"encrypter" = "encrypter"
"kms" = "kms"
"requestor" = "requestor"

[default.extend-identifiers]
"bui" = "bui"
"toi" = "toi"
"ot" = "ot"
"dm2nd" = "dm2nd"
"HashiCorp" = "HashiCorp"

[type.go.extend-identifiers]
"bui" = "bui"
"dm2nd" = "dm2nd"
"ot" = "ot"
"ParseND" = "ParseND"
"ParseNDStream" = "ParseNDStream"
"pn" = "pn"
"TestGetPartialObjectMisAligned" = "TestGetPartialObjectMisAligned"
"thr" = "thr"
"toi" = "toi"
@@ -21,6 +21,11 @@ RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
chmod +x /go/bin/mc

RUN if [ "$TARGETARCH" = "amd64" ]; then \
curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
chmod +x /go/bin/curl; \
fi

# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

@@ -49,6 +54,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/mc /usr/bin/mc
COPY --from=build /go/bin/cur* /usr/bin/

COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
@@ -21,6 +21,11 @@ RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
chmod +x /go/bin/mc

RUN if [ "$TARGETARCH" = "amd64" ]; then \
curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
chmod +x /go/bin/curl; \
fi

# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

@@ -49,6 +54,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/mc /usr/bin/mc
COPY --from=build /go/bin/cur* /usr/bin/

COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
@@ -16,6 +16,11 @@ RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archiv
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /go/bin/minio.minisig && \
chmod +x /go/bin/minio

RUN if [ "$TARGETARCH" = "amd64" ]; then \
curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
chmod +x /go/bin/curl; \
fi

# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

@@ -41,6 +46,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \

COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/cur* /usr/bin/

COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
@@ -21,6 +21,11 @@ RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
chmod +x /go/bin/mc

RUN if [ "$TARGETARCH" = "amd64" ]; then \
curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
chmod +x /go/bin/curl; \
fi

# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav

@@ -49,6 +54,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/mc /usr/bin/mc
COPY --from=build /go/bin/cur* /usr/bin/

COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE
49
Makefile

@@ -45,7 +45,7 @@ lint-fix: getdeps ## runs golangci-lint suite of linters with automatic fixes
@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml --fix

check: test
test: verifiers build build-debugging ## builds minio, runs linters, tests
test: verifiers build ## builds minio, runs linters, tests
@echo "Running unit tests"
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue ./...

@@ -53,12 +53,21 @@ test-root-disable: install-race
@echo "Running minio root lockdown tests"
@env bash $(PWD)/buildscripts/disable-root.sh

test-ilm: install-race
@echo "Running ILM tests"
@env bash $(PWD)/docs/bucket/replication/setup_ilm_expiry_replication.sh

test-pbac: install-race
@echo "Running bucket policies tests"
@env bash $(PWD)/docs/iam/policies/pbac-tests.sh

test-decom: install-race
@echo "Running minio decom tests"
@env bash $(PWD)/docs/distributed/decom.sh
@env bash $(PWD)/docs/distributed/decom-encrypted.sh
@env bash $(PWD)/docs/distributed/decom-encrypted-sse-s3.sh
@env bash $(PWD)/docs/distributed/decom-compressed-sse-s3.sh
@env bash $(PWD)/docs/distributed/decom-encrypted-kes.sh

test-versioning: install-race
@echo "Running minio versioning tests"

@@ -81,6 +90,10 @@ test-iam: build ## verify IAM (external IDP, etcd backends)
@echo "Running tests for IAM (external IDP, etcd backends) with -race"
@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd

test-iam-ldap-upgrade-import: build ## verify IAM (external LDAP IDP)
@echo "Running upgrade tests for IAM (LDAP backend)"
@env bash $(PWD)/buildscripts/minio-iam-ldap-upgrade-import-test.sh

test-sio-error:
@(env bash $(PWD)/docs/bucket/replication/sio-error.sh)

@@ -107,37 +120,39 @@ test-site-replication-oidc: install-race ## verify automatic site replication
test-site-replication-minio: install-race ## verify automatic site replication
@echo "Running tests for automatic site replication of IAM (with MinIO IDP)"
@(env bash $(PWD)/docs/site-replication/run-multi-site-minio-idp.sh)
@echo "Running tests for automatic site replication of SSE-C objects"
@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication.sh)
@echo "Running tests for automatic site replication of SSE-C objects with SSE-KMS enabled for bucket"
@(env bash $(PWD)/docs/site-replication/run-sse-kms-object-replication.sh)
@echo "Running tests for automatic site replication of SSE-C objects with compression enabled for site"
@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication-with-compression.sh)

verify: ## verify minio various setups
verify: install-race ## verify minio various setups
@echo "Verifying build with race"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-build.sh)

verify-healing: ## verify healing and replacing disks with minio binary
verify-healing: install-race ## verify healing and replacing disks with minio binary
@echo "Verify healing build with race"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-healing.sh)
@(env bash $(PWD)/buildscripts/verify-healing-empty-erasure-set.sh)
@(env bash $(PWD)/buildscripts/heal-inconsistent-versions.sh)

verify-healing-with-root-disks: ## verify healing root disks
verify-healing-with-root-disks: install-race ## verify healing root disks
@echo "Verify healing with root drives"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-healing-with-root-disks.sh)

verify-healing-with-rewrite: ## verify healing to rewrite old xl.meta -> new xl.meta
verify-healing-with-rewrite: install-race ## verify healing to rewrite old xl.meta -> new xl.meta
@echo "Verify healing with rewrite"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/rewrite-old-new.sh)

verify-healing-inconsistent-versions: ## verify resolving inconsistent versions
verify-healing-inconsistent-versions: install-race ## verify resolving inconsistent versions
@echo "Verify resolving inconsistent versions build with race"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)

build-debugging:
@(env bash $(PWD)/docs/debugging/build.sh)

build: checks ## builds minio to $(PWD)
build: checks build-debugging ## builds minio to $(PWD)
@echo "Building minio binary to './minio'"
@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null

@@ -147,9 +162,9 @@ hotfix-vars:
$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))

hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
@wget -q -c https://github.com/minio/pkger/releases/download/v2.2.1/pkger_2.2.1_linux_amd64.deb
@wget -q -c https://github.com/minio/pkger/releases/download/v2.2.9/pkger_2.2.9_linux_amd64.deb
@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.0.1/linux-systemd/distributed/minio.service
@sudo apt install ./pkger_2.2.1_linux_amd64.deb --yes
@sudo apt install ./pkger_2.2.9_linux_amd64.deb --yes
@mkdir -p minio-release/$(GOOS)-$(GOARCH)/archive
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)

@@ -176,15 +191,15 @@ docker: build ## builds minio docker container
@echo "Building minio docker image '$(TAG)'"
@docker build -q --no-cache -t $(TAG) . -f Dockerfile

install-race: checks ## builds minio to $(PWD)
install-race: checks build-debugging ## builds minio to $(PWD)
@echo "Building minio binary with -race to './minio'"
@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@echo "Installing minio binary with -race to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio

install: build ## builds minio and installs it to $GOPATH/bin.
@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio
@echo "Installation successful. To learn more, try \"minio --help\"."

clean: ## cleanup all generated assets
@@ -210,10 +210,6 @@ For deployments behind a load balancer, proxy, or ingress rule where the MinIO h

For example, consider a MinIO deployment behind a proxy `https://minio.example.net`, `https://console.minio.example.net` with rules for forwarding traffic on port :9000 and :9001 to MinIO and the MinIO Console respectively on the internal network. Set `MINIO_BROWSER_REDIRECT_URL` to `https://console.minio.example.net` to ensure the browser receives a valid reachable URL.

Similarly, if your TLS certificates do not have the IP SAN for the MinIO server host, the MinIO Console may fail to validate the connection to the server. Use the `MINIO_SERVER_URL` environment variable and specify the proxy-accessible hostname of the MinIO server to allow the Console to use the MinIO server API using the TLS certificate.

For example: `export MINIO_SERVER_URL="https://minio.example.net"`

| Dashboard | Creating a bucket |
| ------------- | ------------- |
| ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic1.png?raw=true) | ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic2.png?raw=true) |
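A minimal sketch that combines the two settings described in the README excerpt above, using its example hostnames (illustrative values, not defaults):

    export MINIO_BROWSER_REDIRECT_URL="https://console.minio.example.net"  # console URL handed to the browser behind the proxy
    export MINIO_SERVER_URL="https://minio.example.net"                    # proxy-accessible server hostname covered by the TLS certificate
    minio server /data --console-address ":9001"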
@@ -32,6 +32,7 @@ fi
set +e

export MC_HOST_minioadm=http://minioadmin:minioadmin@localhost:9100/
./mc ready minioadm

./mc ls minioadm/

@@ -56,7 +57,7 @@ done

set +e

sleep 10
./mc ready minioadm/

./mc ls minioadm/
if [ $? -ne 0 ]; then

@@ -81,11 +82,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &

sleep 20s

export MC_HOST_sitea=http://minioadmin:minioadmin@127.0.0.1:9001
export MC_HOST_siteb=http://minioadmin:minioadmin@127.0.0.1:9004

./mc ready sitea
./mc ready siteb

./mc admin replicate add sitea siteb

./mc admin user add sitea foobar foo12345

@@ -109,11 +111,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &

sleep 20s

export MC_HOST_sitea=http://foobar:foo12345@127.0.0.1:9001
export MC_HOST_siteb=http://foobar:foo12345@127.0.0.1:9004

./mc ready sitea
./mc ready siteb

./mc admin user add sitea foobar-admin foo12345

sleep 2s
@@ -0,0 +1,127 @@
#!/bin/bash

# This script is used to test the migration of IAM content from old minio
# instance to new minio instance.
#
# To run it locally, start the LDAP server in github.com/minio/minio-iam-testing
# repo (e.g. make podman-run), and then run this script.
#
# This script assumes that LDAP server is at:
#
# `localhost:1389`
#
# if this is not the case, set the environment variable
# `_MINIO_LDAP_TEST_SERVER`.

OLD_VERSION=RELEASE.2024-03-26T22-10-45Z
OLD_BINARY_LINK=https://dl.min.io/server/minio/release/linux-amd64/archive/minio.${OLD_VERSION}

__init__() {
if which curl &>/dev/null; then
echo "curl is already installed"
else
echo "Installing curl:"
sudo apt install curl -y
fi

export GOPATH=/tmp/gopath
export PATH="${PATH}":"${GOPATH}"/bin

if which mc &>/dev/null; then
echo "mc is already installed"
else
echo "Installing mc:"
go install github.com/minio/mc@latest
fi

if [ ! -x ./minio.${OLD_VERSION} ]; then
echo "Downloading minio.${OLD_VERSION} binary"
curl -o minio.${OLD_VERSION} ${OLD_BINARY_LINK}
chmod +x minio.${OLD_VERSION}
fi

if [ -z "$_MINIO_LDAP_TEST_SERVER" ]; then
export _MINIO_LDAP_TEST_SERVER=localhost:1389
echo "Using default LDAP endpoint: $_MINIO_LDAP_TEST_SERVER"
fi

rm -rf /tmp/data
}

create_iam_content_in_old_minio() {
echo "Creating IAM content in old minio instance."

MINIO_CI_CD=1 ./minio.${OLD_VERSION} server /tmp/data/{1...4} &
sleep 5

set -x
mc alias set old-minio http://localhost:9000 minioadmin minioadmin
mc ready old-minio
mc idp ldap add old-minio \
server_addr=localhost:1389 \
server_insecure=on \
lookup_bind_dn=cn=admin,dc=min,dc=io \
lookup_bind_password=admin \
user_dn_search_base_dn=dc=min,dc=io \
user_dn_search_filter="(uid=%s)" \
group_search_base_dn=ou=swengg,dc=min,dc=io \
group_search_filter="(&(objectclass=groupOfNames)(member=%d))"
mc admin service restart old-minio

mc idp ldap policy attach old-minio readwrite --user=UID=dillon,ou=people,ou=swengg,dc=min,dc=io
mc idp ldap policy attach old-minio readwrite --group=CN=project.c,ou=groups,ou=swengg,dc=min,dc=io

mc idp ldap policy entities old-minio

mc admin cluster iam export old-minio
set +x

mc admin service stop old-minio
}

import_iam_content_in_new_minio() {
echo "Importing IAM content in new minio instance."
# Assume current minio binary exists.
MINIO_CI_CD=1 ./minio server /tmp/data/{1...4} &
sleep 5

set -x
mc alias set new-minio http://localhost:9000 minioadmin minioadmin
echo "BEFORE IMPORT mappings:"
mc ready new-minio
mc idp ldap policy entities new-minio
mc admin cluster iam import new-minio ./old-minio-iam-info.zip
echo "AFTER IMPORT mappings:"
mc idp ldap policy entities new-minio
set +x

# mc admin service stop new-minio
}

verify_iam_content_in_new_minio() {
output=$(mc idp ldap policy entities new-minio --json)

groups=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .groups[]')
if [ "$groups" != "cn=project.c,ou=groups,ou=swengg,dc=min,dc=io" ]; then
echo "Failed to verify groups: $groups"
exit 1
fi

users=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .users[]')
if [ "$users" != "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" ]; then
echo "Failed to verify users: $users"
exit 1
fi

mc admin service stop new-minio
}

main() {
create_iam_content_in_old_minio

import_iam_content_in_new_minio

verify_iam_content_in_new_minio
}

(__init__ "$@" && main "$@")
@@ -55,6 +55,11 @@ __init__() {

go install github.com/minio/mc@latest

## this is needed because github actions don't have
## docker-compose on all runners
go install github.com/docker/compose/v2/cmd@latest
mv -v /tmp/gopath/bin/cmd /tmp/gopath/bin/docker-compose

TAG=minio/minio:dev make docker

MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \
@@ -45,7 +45,8 @@ function verify_rewrite() {
"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10

"${WORK_DIR}/mc" ready minio/

if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"

@@ -77,7 +78,8 @@ function verify_rewrite() {
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10

"${WORK_DIR}/mc" ready minio/

if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"

@@ -87,17 +89,12 @@ function verify_rewrite() {
exit 1
fi

(
cd ./docs/debugging/s3-check-md5
go install -v
)

if ! s3-check-md5 \
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
-endpoint "http://127.0.0.1:${start_port}/" 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"

@@ -117,7 +114,7 @@ function verify_rewrite() {
go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123"
sleep 1

if ! s3-check-md5 \
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \
@@ -15,13 +15,14 @@ WORK_DIR="$PWD/.verify-$RANDOM"
export MINT_MODE=core
export MINT_DATA_DIR="$WORK_DIR/data"
export SERVER_ENDPOINT="127.0.0.1:9000"
export MC_HOST_verify="http://minio:minio123@${SERVER_ENDPOINT}/"
export MC_HOST_verify_ipv6="http://minio:minio123@[::1]:9000/"
export ACCESS_KEY="minio"
export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOGC=25
export ENABLE_ADMIN=1

export MINIO_CI_CD=1

MINIO_CONFIG_DIR="$WORK_DIR/.minio"

@@ -36,18 +37,21 @@ function start_minio_fs() {
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
sleep 10

"${WORK_DIR}/mc" ready verify
}

function start_minio_erasure() {
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
sleep 15

"${WORK_DIR}/mc" ready verify
}

function start_minio_erasure_sets() {
export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
"${MINIO[@]}" server >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
sleep 15

"${WORK_DIR}/mc" ready verify
}

function start_minio_pool_erasure_sets() {

@@ -57,7 +61,7 @@ function start_minio_pool_erasure_sets() {
"${MINIO[@]}" server --address ":9000" >"$WORK_DIR/pool-minio-9000.log" 2>&1 &
"${MINIO[@]}" server --address ":9001" >"$WORK_DIR/pool-minio-9001.log" 2>&1 &

sleep 40
"${WORK_DIR}/mc" ready verify
}

function start_minio_pool_erasure_sets_ipv6() {

@@ -67,7 +71,7 @@ function start_minio_pool_erasure_sets_ipv6() {
"${MINIO[@]}" server --address="[::1]:9000" >"$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
"${MINIO[@]}" server --address="[::1]:9001" >"$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &

sleep 40
"${WORK_DIR}/mc" ready verify_ipv6
}

function start_minio_dist_erasure() {

@@ -78,7 +82,7 @@ function start_minio_dist_erasure() {
"${MINIO[@]}" server --address ":900${i}" >"$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
done

sleep 40
"${WORK_DIR}/mc" ready verify
}

function run_test_fs() {

@@ -222,7 +226,7 @@ function __init__() {
exit 1
fi

(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
(cd "${MC_BUILD_DIR}" && go build -o "${WORK_DIR}/mc")

# remove mc source.
purge "${MC_BUILD_DIR}"
@ -0,0 +1,166 @@
#!/bin/bash -e
#

set -E
set -o pipefail

if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi

WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)

function start_minio_3_node() {
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
export MINIO_CI_CD=1

start_port=$1
args=""
for i in $(seq 1 3); do
args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
done

"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
pid1=$!
disown ${pid1}

"${MINIO[@]}" --address ":$((start_port + 2))" $args >"${WORK_DIR}/dist-minio-server2.log" 2>&1 &
pid2=$!
disown $pid2

"${MINIO[@]}" --address ":$((start_port + 3))" $args >"${WORK_DIR}/dist-minio-server3.log" 2>&1 &
pid3=$!
disown $pid3

export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"

/tmp/mc ready myminio

# Wait for all drives to be online and formatted
while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].state | select(. != "ok")' | wc -l) -gt 0 ]; do sleep 1; done
# Wait for all drives to be healed
while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].healing | select(. != null) | select(. == true)' | wc -l) -gt 0 ]; do sleep 1; done

# Wait for Status: in MinIO output
while true; do
rv=$(check_online)
if [ "$rv" != "1" ]; then
# success
break
fi

# Check if we should retry
retry=$((retry + 1))
if [ $retry -le 20 ]; then
sleep 5
continue
fi

# Failure
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
pkill -9 minio
echo "FAILED"
purge "$WORK_DIR"
exit 1
done

if ! ps -p $pid1 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/dist-minio-server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi

if ! ps -p $pid2 1>&2 >/dev/null; then
echo "server2 log:"
cat "${WORK_DIR}/dist-minio-server2.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi

if ! ps -p $pid3 1>&2 >/dev/null; then
echo "server3 log:"
cat "${WORK_DIR}/dist-minio-server3.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi

if ! pkill minio; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi

sleep 1
if pgrep minio; then
# forcibly killing, to proceed further properly.
if ! pkill -9 minio; then
echo "no minio process running anymore, proceed."
fi
fi
}

function check_online() {
if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
fi
}

function purge() {
echo rm -rf "$1"
}

function __init__() {
echo "Initializing environment"
mkdir -p "$WORK_DIR"
mkdir -p "$MINIO_CONFIG_DIR"

## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"

if [ ! -f /tmp/mc ]; then
wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x /tmp/mc
fi
}

function perform_test() {
start_minio_3_node $2

echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"

rm -rf ${WORK_DIR}/${1}/*/

set -x
start_minio_3_node $2
}

function main() {
# use same ports for all tests
start_port=$(shuf -i 10000-65000 -n 1)

perform_test "2" ${start_port}
perform_test "1" ${start_port}
perform_test "3" ${start_port}
}

(__init__ "$@" && main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"
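The jq loops in `start_minio_3_node` above wait until every drive reports state `ok` and no drive is still flagged as healing. A rough Go equivalent using the public madmin-go client is sketched below; the `ServerInfo` call and the `Disks`/`State`/`Healing` fields are assumed from madmin-go v3, and the endpoint and credentials simply mirror the script's test defaults.

```go
// Sketch: poll the admin API until all drives are online and not healing.
// Treat the madmin field names as assumptions, not the project's own tooling.
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/minio/madmin-go/v3"
)

func allDrivesReady(info madmin.InfoMessage) bool {
	for _, srv := range info.Servers {
		for _, d := range srv.Disks {
			if d.State != "ok" || d.Healing {
				return false
			}
		}
	}
	return true
}

func main() {
	client, err := madmin.New("127.0.0.1:9000", "minio", "minio123", false)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()
	for {
		info, err := client.ServerInfo(ctx)
		if err == nil && allDrivesReady(info) {
			break
		}
		time.Sleep(time.Second)
	}
	fmt.Println("all drives online and healed")
}
```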
@ -12,17 +12,26 @@ fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
GOPATH=/tmp/gopath

function start_minio_3_node() {
for i in $(seq 1 3); do
rm "${WORK_DIR}/dist-minio-server$i.log"
done

export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
export MINIO_CI_CD=1

start_port=$2
first_time=$(find ${WORK_DIR}/ | grep format.json | wc -l)

start_port=$1
args=""
for i in $(seq 1 3); do
args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
for d in $(seq 1 3 5); do
args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
d=$((d + 1))
args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
done

"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &

@ -37,7 +46,11 @@ function start_minio_3_node() {
pid3=$!
disown $pid3

sleep "$1"
export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"
/tmp/mc ready myminio

[ ${first_time} -eq 0 ] && upload_objects
[ ${first_time} -ne 0 ] && sleep 120

if ! ps -p $pid1 1>&2 >/dev/null; then
echo "server1 log:"

@ -82,10 +95,23 @@ function start_minio_3_node() {
fi
}

function check_online() {
function check_heal() {
if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
return 1
fi

for ((i = 0; i < 20; i++)); do
test -f ${WORK_DIR}/$1/1/.minio.sys/format.json
v1=$?
nextInES=$(($1 + 1)) && [ $nextInES -gt 3 ] && nextInES=1
foundFiles1=$(find ${WORK_DIR}/$1/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
foundFiles2=$(find ${WORK_DIR}/$nextInES/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
test $foundFiles1 -eq $foundFiles2
v2=$?
[ $v1 == 0 -a $v2 == 0 ] && return 0
sleep 10
done
return 1
}

function purge() {

@ -99,20 +125,35 @@ function __init__() {

## version is purposefully set to '3' for minio to migrate configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"

if [ ! -f /tmp/mc ]; then
wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x /tmp/mc
fi
}

function upload_objects() {
/tmp/mc mb myminio/testbucket/
for ((i = 0; i < 20; i++)); do
echo "my content" | /tmp/mc pipe myminio/testbucket/file-$i
done
}

function perform_test() {
start_minio_3_node 120 $2
start_port=$2

start_minio_3_node $start_port

echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"
echo "Remove the contents of the disks belonging to '${1}' node"

rm -rf ${WORK_DIR}/${1}/*/

set -x
start_minio_3_node 120 $2
start_minio_3_node $start_port

rv=$(check_online)
check_heal ${1}
rv=$?
if [ "$rv" == "1" ]; then
for i in $(seq 1 3); do
echo "server$i log:"
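`check_heal` above decides that healing has converged by counting `xl.meta` entries under two node directories (ignoring `.minio.sys`) and comparing the totals. The standalone Go sketch below does the same walk with the standard library; the paths are placeholders for the script's `${WORK_DIR}/<node>/1/` layout.

```go
// Count xl.meta entries under two node directories and compare the totals,
// mirroring the check_heal comparison in the script above.
package main

import (
	"fmt"
	"io/fs"
	"path/filepath"
	"strings"
)

func countXLMeta(root string) (int, error) {
	count := 0
	err := filepath.WalkDir(root, func(path string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		if d.IsDir() && d.Name() == ".minio.sys" {
			return filepath.SkipDir // internal metadata is not part of the comparison
		}
		if !d.IsDir() && strings.HasSuffix(path, "xl.meta") {
			count++
		}
		return nil
	})
	return count, err
}

func main() {
	a, _ := countXLMeta("/tmp/.verify-12345/1/1") // placeholder paths
	b, _ := countXLMeta("/tmp/.verify-12345/2/1")
	fmt.Printf("node1=%d node2=%d healed=%v\n", a, b, a == b)
}
```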
@ -25,7 +25,7 @@ import (
|
|||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// Data types used for returning dummy access control
|
||||
|
|
|
@ -39,9 +39,8 @@ import (
|
|||
"github.com/minio/minio/internal/bucket/versioning"
|
||||
"github.com/minio/minio/internal/event"
|
||||
"github.com/minio/minio/internal/kms"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -99,7 +98,7 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
}

// Call site replication hook.
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta))
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta))

// Write success response.
writeSuccessResponseHeadersOnly(w)
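This file, like most of the handler changes below, replaces direct `logger.LogIf` calls with subsystem-specific helpers such as `adminLogIf` and `replLogIf`. The sketch below shows the general shape of such thin wrappers; the `logSubsystem` helper is purely illustrative and is not MinIO's internal API.

```go
// Illustrative only: thin wrappers that tag log entries with the subsystem
// that produced them, mirroring the adminLogIf/replLogIf renames in this diff.
package main

import (
	"context"
	"log"
)

func logSubsystem(ctx context.Context, subsystem string, err error) {
	if err == nil {
		return
	}
	log.Printf("[%s] %v", subsystem, err)
}

// adminLogIf-style helper for admin API handlers.
func adminLogIf(ctx context.Context, err error) { logSubsystem(ctx, "admin", err) }

// replLogIf-style helper for site-replication hooks.
func replLogIf(ctx context.Context, err error) { logSubsystem(ctx, "replication", err) }

func main() {
	ctx := context.Background()
	adminLogIf(ctx, nil)                     // no-op on nil errors
	replLogIf(ctx, context.DeadlineExceeded) // tagged example entry
}
```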
@ -431,7 +430,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
|
|||
case bucketNotificationConfig:
|
||||
config, err := globalBucketMetadataSys.GetNotificationConfig(bucket)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -447,7 +446,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
|
|||
if errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) {
|
||||
continue
|
||||
}
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -736,7 +735,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
|
|||
rpt.SetStatus(bucket, fileName, fmt.Errorf("An Object Lock configuration is present on this bucket, so the versioning state cannot be suspended."))
|
||||
continue
|
||||
}
|
||||
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
|
||||
if rcfg, _ := getReplicationConfig(ctx, bucket); rcfg != nil && v.Suspended() {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("A replication configuration is present on this bucket, so the versioning state cannot be suspended."))
|
||||
continue
|
||||
}
|
||||
|
@ -784,7 +783,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
|
|||
}
|
||||
switch fileName {
|
||||
case bucketNotificationConfig:
|
||||
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalEventNotifier.targetList)
|
||||
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region(), globalEventNotifier.targetList)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
|
||||
continue
|
||||
|
@ -838,9 +837,13 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
|
|||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
rcfg, err := globalBucketObjectLockSys.Get(bucket)
|
||||
if err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
// Validate the received bucket policy document
|
||||
if err = bucketLifecycle.Validate(); err != nil {
|
||||
if err = bucketLifecycle.Validate(rcfg); err != nil {
|
||||
rpt.SetStatus(bucket, fileName, err)
|
||||
continue
|
||||
}
|
||||
|
@ -875,8 +878,10 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
}
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: kmsKey,
AssociatedData: kms.Context{"MinIO admin API": "ServerInfoHandler"}, // Context for a test key operation
})
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
rpt.SetStatus(bucket, fileName, errKMSKeyNotFound)
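The hunk above moves `GenerateKey` from positional arguments to a request struct, which lets new options be added later without touching every caller. The self-contained sketch below illustrates that call-shape migration; `GenerateKeyRequest`, the `KMS` interface, and the key name are stand-ins, not MinIO's internal `kms` package.

```go
// Hypothetical sketch of a positional-to-request-struct API migration.
package main

import (
	"context"
	"fmt"
)

type Context map[string]string

type GenerateKeyRequest struct {
	Name           string
	AssociatedData Context
}

type KMS interface {
	GenerateKey(ctx context.Context, req *GenerateKeyRequest) error
}

type fakeKMS struct{}

func (fakeKMS) GenerateKey(_ context.Context, req *GenerateKeyRequest) error {
	fmt.Printf("generate key %q with %d context entries\n", req.Name, len(req.AssociatedData))
	return nil
}

func main() {
	var k KMS = fakeKMS{}
	// New fields can be added to the request struct without breaking callers.
	_ = k.GenerateKey(context.Background(), &GenerateKeyRequest{
		Name:           "my-bucket-key",
		AssociatedData: Context{"MinIO admin API": "ServerInfoHandler"},
	})
}
```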
|
|
@ -27,7 +27,7 @@ import (
|
|||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/auth"
|
||||
"github.com/minio/minio/internal/config"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// validateAdminReq will validate request against and return whether it is allowed.
|
||||
|
|
|
@ -37,7 +37,7 @@ import (
|
|||
"github.com/minio/minio/internal/config/subnet"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
|
||||
|
@ -58,7 +58,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
|
|||
password := cred.SecretKey
|
||||
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.ErrorKind)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -162,7 +162,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
|
|||
password := cred.SecretKey
|
||||
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.ErrorKind)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -443,7 +443,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
|
|||
password := cred.SecretKey
|
||||
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.ErrorKind)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -31,10 +31,9 @@ import (
|
|||
"github.com/minio/minio/internal/config"
|
||||
cfgldap "github.com/minio/minio/internal/config/identity/ldap"
|
||||
"github.com/minio/minio/internal/config/identity/openid"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/ldap"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/ldap"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, isUpdate bool) {
|
||||
|
@ -60,7 +59,7 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R
|
|||
password := cred.SecretKey
|
||||
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.ErrorKind)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
|
|
@ -20,16 +20,15 @@ package cmd
|
|||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/auth"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
xldap "github.com/minio/pkg/v3/ldap"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// ListLDAPPolicyMappingEntities lists users/groups mapped to given/all policies.
|
||||
|
@ -105,6 +104,12 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
|
|||
return
|
||||
}
|
||||
|
||||
// fail if ldap is not enabled
|
||||
if !globalIAMSys.LDAPConfig.Enabled() {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminLDAPNotEnabled), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
|
||||
// More than maxConfigSize bytes were available
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
|
||||
|
@ -132,7 +137,7 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
|
|||
password := cred.SecretKey
|
||||
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.ErrorKind)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -192,7 +197,8 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
|
|||
|
||||
// fail if ldap is not enabled
|
||||
if !globalIAMSys.LDAPConfig.Enabled() {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errors.New("LDAP not enabled")), r.URL)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminLDAPNotEnabled), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Find the user for the request sender (as it may be sent via a service
|
||||
|
@ -217,9 +223,8 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
|
|||
err error
|
||||
)
|
||||
|
||||
// If we are creating svc account for request sender, ensure
|
||||
// that targetUser is a real user (i.e. not derived
|
||||
// credentials).
|
||||
// If we are creating svc account for request sender, ensure that targetUser
|
||||
// is a real user (i.e. not derived credentials).
|
||||
if isSvcAccForRequestor {
|
||||
if requestorIsDerivedCredential {
|
||||
if requestorParentUser == "" {
|
||||
|
@ -232,12 +237,12 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
|
|||
targetGroups = requestorGroups
|
||||
|
||||
// Deny if the target user is not LDAP
|
||||
isLDAP, err := globalIAMSys.LDAPConfig.DoesUsernameExist(targetUser)
|
||||
foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
if isLDAP == "" {
|
||||
if foundResult == nil {
|
||||
err := errors.New("Specified user does not exist on LDAP server")
|
||||
APIErr := errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err)
|
||||
writeErrorResponseJSON(ctx, w, APIErr, r.URL)
|
||||
|
@ -253,20 +258,36 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
opts.claims[k] = v
}
} else {
isDN := globalIAMSys.LDAPConfig.IsLDAPUserDN(targetUser)
// We still need to ensure that the target user is a valid LDAP user.
//
// The target user may be supplied as a (short) username or a DN.
// However, for now, we only support using the short username.

isDN := globalIAMSys.LDAPConfig.ParsesAsDN(targetUser)
opts.claims[ldapUserN] = targetUser // simple username
targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
var lookupResult *xldap.DNSearchResult
lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
if err != nil {
// if not found, check if DN
if strings.Contains(err.Error(), "not found") && isDN {
// warn user that DNs are not allowed
err = fmt.Errorf("Must use short username to add service account. %w", err)
if strings.Contains(err.Error(), "User DN not found for:") {
if isDN {
// warn user that DNs are not allowed
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminLDAPExpectedLoginName, err), r.URL)
} else {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err), r.URL)
}
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
targetUser = lookupResult.NormDN
opts.claims[ldapUser] = targetUser // DN
opts.claims[ldapActualUser] = lookupResult.ActualDN

// Add LDAP attributes that were looked up into the claims.
for attribKey, attribValue := range lookupResult.Attributes {
opts.claims[ldapAttribPrefix+attribKey] = attribValue
}
}

newCred, updatedAt, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts)
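The rewritten lookup above keeps the full DN search result (normalized DN, the DN as stored in the directory, and any looked-up attributes) and folds it into the service-account claims. A minimal sketch of that claims-building step, with an illustrative `DNSearchResult` type and claim keys standing in for the real `xldap` types:

```go
// Sketch of folding a DN lookup result into token claims. The type and the
// claim keys are illustrative stand-ins, not MinIO's internals.
package main

import "fmt"

type DNSearchResult struct {
	NormDN     string
	ActualDN   string
	Attributes map[string][]string
}

func claimsFromLookup(shortName string, res DNSearchResult) map[string]interface{} {
	claims := map[string]interface{}{
		"ldapUsername":   shortName,    // the short login name supplied by the caller
		"ldapUser":       res.NormDN,   // normalized DN used as the canonical identity
		"ldapActualUser": res.ActualDN, // DN exactly as stored in the directory
	}
	for k, v := range res.Attributes {
		claims["ldapAttrib/"+k] = v // prefix keeps attribute claims grouped
	}
	return claims
}

func main() {
	res := DNSearchResult{
		NormDN:     "uid=alice,ou=people,dc=example,dc=org",
		ActualDN:   "uid=Alice,ou=People,dc=example,dc=org",
		Attributes: map[string][]string{"sshPublicKey": {"ssh-ed25519 AAAA..."}},
	}
	fmt.Println(claimsFromLookup("alice", res))
}
```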
@ -300,7 +321,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
|
|||
// Call hook for cluster-replication if the service account is not for a
|
||||
// root user.
|
||||
if newCred.ParentUser != globalActiveCred.AccessKey {
|
||||
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemSvcAcc,
|
||||
SvcAccChange: &madmin.SRSvcAccChange{
|
||||
Create: &madmin.SRSvcAccCreate{
|
||||
|
@ -373,14 +394,16 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ
|
|||
}
|
||||
}
|
||||
|
||||
targetAccount, err := globalIAMSys.LDAPConfig.DoesUsernameExist(userDN)
|
||||
dnResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
} else if userDN == "" {
|
||||
}
|
||||
if dnResult == nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errNoSuchUser), r.URL)
|
||||
return
|
||||
}
|
||||
targetAccount := dnResult.NormDN
|
||||
|
||||
listType := r.Form.Get("listType")
|
||||
if listType != "sts-only" && listType != "svcacc-only" && listType != "" {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
// Copyright (c) 2015-2024 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
|
@ -18,6 +18,7 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
@ -25,9 +26,9 @@ import (
|
|||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/env"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -106,21 +107,12 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
|
|||
poolIndices = append(poolIndices, idx)
|
||||
}
|
||||
|
||||
if len(poolIndices) > 0 && !globalEndpoints[poolIndices[0]].Endpoints[0].IsLocal {
|
||||
ep := globalEndpoints[poolIndices[0]].Endpoints[0]
|
||||
for nodeIdx, proxyEp := range globalProxyEndpoints {
|
||||
if proxyEp.Endpoint.Host == ep.Host {
|
||||
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
|
||||
return
|
||||
}
|
||||
}
|
||||
if len(poolIndices) == 0 || !proxyDecommissionRequest(ctx, globalEndpoints[poolIndices[0]].Endpoints[0], w, r) {
|
||||
if err := z.Decommission(r.Context(), poolIndices...); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := z.Decommission(r.Context(), poolIndices...); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Request) {
|
||||
|
@ -162,20 +154,12 @@ func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Requ
|
|||
return
|
||||
}
|
||||
|
||||
if ep := globalEndpoints[idx].Endpoints[0]; !ep.IsLocal {
|
||||
for nodeIdx, proxyEp := range globalProxyEndpoints {
|
||||
if proxyEp.Endpoint.Host == ep.Host {
|
||||
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
|
||||
return
|
||||
}
|
||||
}
|
||||
if !proxyDecommissionRequest(ctx, globalEndpoints[idx].Endpoints[0], w, r) {
|
||||
if err := pools.DecommissionCancel(ctx, idx); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if err := pools.DecommissionCancel(ctx, idx); err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
|
||||
|
@ -225,7 +209,7 @@ func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(&status))
|
||||
adminLogIf(r.Context(), json.NewEncoder(w).Encode(&status))
|
||||
}
|
||||
|
||||
func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
|
||||
|
@ -258,7 +242,7 @@ func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
|
|||
poolsStatus[idx] = status
|
||||
}
|
||||
|
||||
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
|
||||
adminLogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
|
||||
}
|
||||
|
||||
func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) {
|
||||
|
@ -365,11 +349,11 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request
|
|||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceNotStarted), r.URL)
|
||||
return
|
||||
}
|
||||
logger.LogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
|
||||
adminLogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(rs))
|
||||
adminLogIf(r.Context(), json.NewEncoder(w).Encode(rs))
|
||||
}
|
||||
|
||||
func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) {
|
||||
|
@ -389,5 +373,20 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request)
// Cancel any ongoing rebalance operation
globalNotificationSys.StopRebalance(r.Context())
writeSuccessResponseHeadersOnly(w)
logger.LogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
adminLogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
}

func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {
host := env.Get("_MINIO_DECOM_ENDPOINT_HOST", defaultEndPoint.Host)
if host == "" {
return
}
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == host && !proxyEp.IsLocal {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
return true
}
}
}
return
}
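`proxyDecommissionRequest` centralizes the inline loops it replaces: pick the host that should serve the decommission call (overridable via `_MINIO_DECOM_ENDPOINT_HOST`), then forward to the matching non-local peer. The standalone sketch below captures that selection logic with an illustrative `ProxyEndpoint` type.

```go
// Sketch of the proxy-target selection: prefer an operator-supplied host from
// the environment, otherwise use the pool's first endpoint, then forward to a
// matching non-local peer. The peer list and type are illustrative.
package main

import (
	"fmt"
	"os"
)

type ProxyEndpoint struct {
	Host    string
	IsLocal bool
}

func pickProxyTarget(defaultHost string, peers []ProxyEndpoint) (int, bool) {
	host := os.Getenv("_MINIO_DECOM_ENDPOINT_HOST")
	if host == "" {
		host = defaultHost
	}
	if host == "" {
		return 0, false // nothing to proxy to; handle locally
	}
	for i, p := range peers {
		if p.Host == host && !p.IsLocal {
			return i, true
		}
	}
	return 0, false
}

func main() {
	peers := []ProxyEndpoint{
		{Host: "node1:9000", IsLocal: true},
		{Host: "node2:9000", IsLocal: false},
	}
	if idx, ok := pickProxyTarget("node2:9000", peers); ok {
		fmt.Println("forward request to peer", idx)
	} else {
		fmt.Println("serve request locally")
	}
}
```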
|
|
@ -32,9 +32,8 @@ import (
|
|||
"github.com/dustin/go-humanize"
|
||||
"github.com/minio/madmin-go/v3"
|
||||
xioutil "github.com/minio/minio/internal/ioutil"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// SiteReplicationAdd - PUT /minio/admin/v3/site-replication/add
|
||||
|
@ -55,7 +54,7 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
|
|||
opts := getSRAddOptions(r)
|
||||
status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites, opts)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -93,7 +92,7 @@ func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
if err := globalSiteReplicationSys.PeerJoinReq(ctx, joinArg); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -140,7 +139,7 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request
|
|||
globalSiteReplicationSys.purgeDeletedBucket(ctx, objectAPI, bucket)
|
||||
}
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -192,7 +191,7 @@ func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.
|
|||
err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo, item.UpdatedAt)
|
||||
}
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -263,7 +262,7 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
|
|||
err = globalSiteReplicationSys.PeerBucketLCConfigHandler(ctx, item.Bucket, item.ExpiryLCConfig, item.UpdatedAt)
|
||||
}
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -316,7 +315,6 @@ func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptio
|
|||
if encryptionKey != "" {
|
||||
data, err = madmin.DecryptData(encryptionKey, bytes.NewReader(data))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return SRError{
|
||||
Cause: err,
|
||||
Code: ErrSiteReplicationInvalidRequest,
|
||||
|
@ -349,6 +347,18 @@ func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.R
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Report the ILMExpiryStats only if at least one site has replication of ILM expiry enabled
var replicateILMExpiry bool
for _, site := range info.Sites {
if site.ReplicateILMExpiry {
replicateILMExpiry = true
break
}
}
if !replicateILMExpiry {
// explicitly send nil for ILMExpiryStats
info.ILMExpiryStats = nil
}

if err = json.NewEncoder(w).Encode(info); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
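The block above suppresses `ILMExpiryStats` unless at least one peer site replicates ILM expiry. With Go 1.21+, the same "any site opted in" check can be written with `slices.ContainsFunc`, as in this sketch (the `Site` struct is a stand-in for the site-replication info type):

```go
// Equivalent "any site opted in" check using slices.ContainsFunc (Go 1.21+).
package main

import (
	"fmt"
	"slices"
)

type Site struct {
	Name               string
	ReplicateILMExpiry bool
}

func main() {
	sites := []Site{{Name: "a"}, {Name: "b", ReplicateILMExpiry: true}}
	replicateILMExpiry := slices.ContainsFunc(sites, func(s Site) bool {
		return s.ReplicateILMExpiry
	})
	fmt.Println("include ILM expiry stats:", replicateILMExpiry) // true
}
```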
@ -396,7 +406,7 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
|
|||
opts := getSREditOptions(r)
|
||||
status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site, opts)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -433,7 +443,7 @@ func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
if err := globalSiteReplicationSys.PeerEditReq(ctx, pi); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -456,7 +466,7 @@ func (a adminAPIHandlers) SRStateEdit(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
if err := globalSiteReplicationSys.PeerStateEditReq(ctx, state); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -493,7 +503,7 @@ func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.R
|
|||
}
|
||||
status, err := globalSiteReplicationSys.RemovePeerCluster(ctx, objectAPI, rreq)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -524,7 +534,7 @@ func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
|
||||
if err := globalSiteReplicationSys.InternalRemoveReq(ctx, objectAPI, req); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -586,7 +596,7 @@ func (a adminAPIHandlers) SiteReplicationDevNull(w http.ResponseWriter, r *http.
|
|||
// If there is a disconnection before globalNetPerfMinDuration (we give a margin of error of 1 sec)
|
||||
// would mean the network is not stable. Logging here will help in debugging network issues.
|
||||
if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
|
@ -609,5 +619,5 @@ func (a adminAPIHandlers) SiteReplicationNetPerf(w http.ResponseWriter, r *http.
|
|||
duration = globalNetPerfMinDuration
|
||||
}
|
||||
result := siteNetperf(r.Context(), duration)
|
||||
logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result))
|
||||
adminLogIf(r.Context(), gob.NewEncoder(w).Encode(result))
|
||||
}
|
||||
|
|
|
@ -32,7 +32,7 @@ import (
|
|||
|
||||
"github.com/minio/madmin-go/v3"
|
||||
minio "github.com/minio/minio-go/v7"
|
||||
"github.com/minio/pkg/v2/sync/errgroup"
|
||||
"github.com/minio/pkg/v3/sync/errgroup"
|
||||
)
|
||||
|
||||
func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {
|
||||
|
|
|
@ -29,15 +29,17 @@ import (
|
|||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/klauspost/compress/zip"
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/auth"
|
||||
"github.com/minio/minio/internal/cachevalue"
|
||||
"github.com/minio/minio/internal/config/dns"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/mux"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
xldap "github.com/minio/pkg/v3/ldap"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
"github.com/puzpuzpuz/xsync/v3"
|
||||
)
|
||||
|
||||
// RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
|
||||
|
@ -74,7 +76,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemIAMUser,
|
||||
IAMUser: &madmin.SRIAMUser{
|
||||
AccessKey: accessKey,
|
||||
|
@ -271,14 +273,21 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
|
|||
return
|
||||
}
|
||||
}
|
||||
updatedAt, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)
|
||||
|
||||
if globalIAMSys.LDAPConfig.Enabled() {
|
||||
// We don't allow internal group manipulation in this API when LDAP
|
||||
// is enabled for now.
|
||||
err = errIAMActionNotAllowed
|
||||
} else {
|
||||
updatedAt, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemGroupInfo,
|
||||
GroupInfo: &madmin.SRGroupInfo{
|
||||
UpdateReq: updReq,
|
||||
|
@ -368,7 +377,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
|
|||
return
|
||||
}
|
||||
|
||||
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemGroupInfo,
|
||||
GroupInfo: &madmin.SRGroupInfo{
|
||||
UpdateReq: madmin.GroupAddRemove{
|
||||
|
@ -406,7 +415,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
|
|||
return
|
||||
}
|
||||
|
||||
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemIAMUser,
|
||||
IAMUser: &madmin.SRIAMUser{
|
||||
AccessKey: accessKey,
|
||||
|
@ -466,6 +475,11 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
if !utf8.ValidString(accessKey) {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserValidUTF), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
checkDenyOnly := false
|
||||
if accessKey == cred.AccessKey {
|
||||
// Check that there is no explicit deny - otherwise it's allowed
|
||||
|
@ -495,25 +509,31 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
|
|||
password := cred.SecretKey
|
||||
configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
var ureq madmin.AddOrUpdateUserReq
|
||||
if err = json.Unmarshal(configBytes, &ureq); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// We don't allow internal user creation with LDAP enabled for now.
|
||||
if globalIAMSys.LDAPConfig.Enabled() {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
updatedAt, err := globalIAMSys.CreateUser(ctx, accessKey, ureq)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemIAMUser,
|
||||
IAMUser: &madmin.SRIAMUser{
|
||||
AccessKey: accessKey,
|
||||
|
@ -687,12 +707,20 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
|
|||
// In case of LDAP we need to resolve the targetUser to a DN and
|
||||
// query their groups:
|
||||
opts.claims[ldapUserN] = targetUser // simple username
|
||||
targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
|
||||
var lookupResult *xldap.DNSearchResult
|
||||
lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
targetUser = lookupResult.NormDN
|
||||
opts.claims[ldapUser] = targetUser // username DN
|
||||
opts.claims[ldapActualUser] = lookupResult.ActualDN
|
||||
|
||||
// Add LDAP attributes that were looked up into the claims.
|
||||
for attribKey, attribValue := range lookupResult.Attributes {
|
||||
opts.claims[ldapAttribPrefix+attribKey] = attribValue
|
||||
}
|
||||
|
||||
// NOTE: if not using LDAP, then internal IDP or open ID is
|
||||
// being used - in the former, group info is enforced when
|
||||
|
@ -731,7 +759,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
|
|||
// Call hook for cluster-replication if the service account is not for a
|
||||
// root user.
|
||||
if newCred.ParentUser != globalActiveCred.AccessKey {
|
||||
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemSvcAcc,
|
||||
SvcAccChange: &madmin.SRSvcAccChange{
|
||||
Create: &madmin.SRSvcAccCreate{
|
||||
|
@ -802,7 +830,11 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
|
|||
}
|
||||
|
||||
condValues := getConditionValues(r, "", cred)
|
||||
addExpirationToCondValues(updateReq.NewExpiration, condValues)
|
||||
err = addExpirationToCondValues(updateReq.NewExpiration, condValues)
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
// Permission checks:
|
||||
//
|
||||
|
@ -853,7 +885,7 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
|
|||
|
||||
// Call site replication hook - non-root user accounts are replicated.
|
||||
if svcAccount.ParentUser != globalActiveCred.AccessKey {
|
||||
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemSvcAcc,
|
||||
SvcAccChange: &madmin.SRSvcAccChange{
|
||||
Update: &madmin.SRSvcAccUpdate{
|
||||
|
@ -1026,8 +1058,13 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
|
|||
for _, svc := range serviceAccounts {
|
||||
expiryTime := svc.Expiration
|
||||
serviceAccountList = append(serviceAccountList, madmin.ServiceAccountInfo{
|
||||
AccessKey: svc.AccessKey,
|
||||
Expiration: &expiryTime,
|
||||
Description: svc.Description,
|
||||
ParentUser: svc.ParentUser,
|
||||
Name: svc.Name,
|
||||
AccountStatus: svc.Status,
|
||||
AccessKey: svc.AccessKey,
|
||||
ImpliedPolicy: svc.IsImpliedPolicy(),
|
||||
Expiration: &expiryTime,
|
||||
})
|
||||
}
|
||||
|
||||
|
@ -1115,7 +1152,7 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
|
|||
|
||||
// Call site replication hook - non-root user accounts are replicated.
|
||||
if svcAccount.ParentUser != "" && svcAccount.ParentUser != globalActiveCred.AccessKey {
|
||||
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemSvcAcc,
|
||||
SvcAccChange: &madmin.SRSvcAccChange{
|
||||
Delete: &madmin.SRSvcAccDelete{
|
||||
|
@ -1199,9 +1236,9 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
}

bucketStorageCache.InitOnce(10*time.Second,
cachevalue.Opts{ReturnLastGood: true, NoWait: true},
func() (DataUsageInfo, error) {
ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
cachevalue.Opts{ReturnLastGood: true},
func(ctx context.Context) (DataUsageInfo, error) {
ctx, done := context.WithTimeout(ctx, 2*time.Second)
defer done()

return loadDataUsageFromBackend(ctx, objectAPI)
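The cache loader now receives the caller's context instead of starting from `context.Background()`, so request deadlines and cancellation propagate into the backend lookup. A generic, self-contained sketch of that pattern (the real `cachevalue` package is internal to MinIO and not reproduced here):

```go
// Minimal single-value cache whose loader receives the caller's context,
// mirroring the InitOnce signature change above.
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type cached[T any] struct {
	mu       sync.Mutex
	ttl      time.Duration
	loadedAt time.Time
	value    T
	load     func(ctx context.Context) (T, error)
}

func (c *cached[T]) Get(ctx context.Context) (T, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if time.Since(c.loadedAt) < c.ttl {
		return c.value, nil
	}
	v, err := c.load(ctx) // cancellation and deadlines flow from the caller
	if err != nil {
		return c.value, err // fall back to the last good value
	}
	c.value, c.loadedAt = v, time.Now()
	return v, nil
}

func main() {
	usage := &cached[int]{
		ttl: 10 * time.Second,
		load: func(ctx context.Context) (int, error) {
			ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
			defer cancel()
			_ = ctx // a real loader would query the backend here
			return 42, nil
		},
	}
	v, _ := usage.Get(context.Background())
	fmt.Println(v)
}
```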
@ -1273,7 +1310,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
|
|||
default:
|
||||
policies, err := globalIAMSys.PolicyDBGet(accountName, cred.Groups...)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -1425,7 +1462,7 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
|
|||
for name, p := range policies {
|
||||
_, err = json.Marshal(p)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
newPolicies[name] = p
|
||||
|
@ -1455,7 +1492,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
|
|||
for name, p := range policies {
|
||||
_, err = json.Marshal(p)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
newPolicies[name] = p
|
||||
|
@ -1485,7 +1522,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
|
|||
|
||||
// Call cluster-replication policy creation hook to replicate policy deletion to
|
||||
// other minio clusters.
|
||||
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemPolicy,
|
||||
Name: policyName,
|
||||
UpdatedAt: UTCNow(),
|
||||
|
@ -1548,7 +1585,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
|
|||
|
||||
// Call cluster-replication policy creation hook to replicate policy to
|
||||
// other minio clusters.
|
||||
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemPolicy,
|
||||
Name: policyName,
|
||||
Policy: iamPolicyBytes,
|
||||
|
@ -1556,7 +1593,12 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
|
|||
}))
|
||||
}
|
||||
|
||||
// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
|
||||
// SetPolicyForUserOrGroup - sets a policy on a user or a group.
|
||||
//
|
||||
// PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
|
||||
//
|
||||
// Deprecated: This API is replaced by attach/detach policy APIs for specific
|
||||
// type of users (builtin or LDAP).
|
||||
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
||||
|
@ -1608,6 +1650,32 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
|
|||
userType := regUser
|
||||
if globalIAMSys.GetUsersSysType() == LDAPUsersSysType {
|
||||
userType = stsUser
|
||||
|
||||
// Validate that the user or group exists in LDAP and use the normalized
|
||||
// form of the entityName (which will be an LDAP DN).
|
||||
var err error
|
||||
if isGroup {
|
||||
var foundGroupDN *xldap.DNSearchResult
|
||||
var underBaseDN bool
|
||||
if foundGroupDN, underBaseDN, err = globalIAMSys.LDAPConfig.GetValidatedGroupDN(nil, entityName); err != nil {
|
||||
iamLogIf(ctx, err)
|
||||
} else if foundGroupDN == nil || !underBaseDN {
|
||||
err = errNoSuchGroup
|
||||
}
|
||||
entityName = foundGroupDN.NormDN
|
||||
} else {
|
||||
var foundUserDN *xldap.DNSearchResult
|
||||
if foundUserDN, err = globalIAMSys.LDAPConfig.GetValidatedDNForUsername(entityName); err != nil {
|
||||
iamLogIf(ctx, err)
|
||||
} else if foundUserDN == nil {
|
||||
err = errNoSuchUser
|
||||
}
|
||||
entityName = foundUserDN.NormDN
|
||||
}
|
||||
if err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
updatedAt, err := globalIAMSys.PolicyDBSet(ctx, entityName, policyName, userType, isGroup)
|
||||
|
@ -1616,7 +1684,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
|
|||
return
|
||||
}
|
||||
|
||||
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
|
||||
Type: madmin.SRIAMItemPolicyMapping,
|
||||
PolicyMapping: &madmin.SRPolicyMapping{
|
||||
UserOrGroup: entityName,
|
||||
|
@ -1756,17 +1824,27 @@ func (a adminAPIHandlers) AttachDetachPolicyBuiltin(w http.ResponseWriter, r *ht
|
|||
}
|
||||
|
||||
const (
|
||||
allPoliciesFile = "policies.json"
|
||||
allUsersFile = "users.json"
|
||||
allGroupsFile = "groups.json"
|
||||
allSvcAcctsFile = "svcaccts.json"
|
||||
userPolicyMappingsFile = "user_mappings.json"
|
||||
groupPolicyMappingsFile = "group_mappings.json"
|
||||
stsUserPolicyMappingsFile = "stsuser_mappings.json"
|
||||
stsGroupPolicyMappingsFile = "stsgroup_mappings.json"
|
||||
iamAssetsDir = "iam-assets"
|
||||
allPoliciesFile = "policies.json"
|
||||
allUsersFile = "users.json"
|
||||
allGroupsFile = "groups.json"
|
||||
allSvcAcctsFile = "svcaccts.json"
|
||||
userPolicyMappingsFile = "user_mappings.json"
|
||||
groupPolicyMappingsFile = "group_mappings.json"
|
||||
stsUserPolicyMappingsFile = "stsuser_mappings.json"
|
||||
|
||||
iamAssetsDir = "iam-assets"
|
||||
)
|
||||
|
||||
var iamExportFiles = []string{
|
||||
allPoliciesFile,
|
||||
allUsersFile,
|
||||
allGroupsFile,
|
||||
allSvcAcctsFile,
|
||||
userPolicyMappingsFile,
|
||||
groupPolicyMappingsFile,
|
||||
stsUserPolicyMappingsFile,
|
||||
}
|
||||
|
||||
// ExportIAMHandler - exports all iam info as a zipped file
|
||||
func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
|
@ -1790,38 +1868,28 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
|
|||
sys: nil,
|
||||
})
|
||||
if zerr != nil {
|
||||
logger.LogIf(ctx, zerr)
|
||||
adminLogIf(ctx, zerr)
|
||||
return nil
|
||||
}
|
||||
header.Method = zip.Deflate
|
||||
zwriter, zerr := zipWriter.CreateHeader(header)
|
||||
if zerr != nil {
|
||||
logger.LogIf(ctx, zerr)
|
||||
adminLogIf(ctx, zerr)
|
||||
return nil
|
||||
}
|
||||
if _, err := io.Copy(zwriter, r); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
iamFiles := []string{
|
||||
allPoliciesFile,
|
||||
allUsersFile,
|
||||
allGroupsFile,
|
||||
allSvcAcctsFile,
|
||||
userPolicyMappingsFile,
|
||||
groupPolicyMappingsFile,
|
||||
stsUserPolicyMappingsFile,
|
||||
stsGroupPolicyMappingsFile,
|
||||
}
|
||||
for _, f := range iamFiles {
|
||||
for _, f := range iamExportFiles {
|
||||
iamFile := pathJoin(iamAssetsDir, f)
|
||||
switch f {
|
||||
case allPoliciesFile:
|
||||
allPolicies, err := globalIAMSys.ListPolicies(ctx, "")
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -1900,7 +1968,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
|
|||
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
|
||||
return
|
||||
}
|
||||
_, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.Credentials.AccessKey)
|
||||
sa, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.Credentials.AccessKey)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
|
||||
return
|
||||
|
@ -1922,6 +1990,9 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
|
|||
Claims: claims,
|
||||
SessionPolicy: json.RawMessage(policyJSON),
|
||||
Status: acc.Credentials.Status,
|
||||
Name: sa.Name,
|
||||
Description: sa.Description,
|
||||
Expiration: &sa.Expiration,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1936,13 +2007,13 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
return
}
case userPolicyMappingsFile:
userPolicyMap := make(map[string]MappedPolicy)
userPolicyMap := xsync.NewMapOf[string, MappedPolicy]()
err := globalIAMSys.store.loadMappedPolicies(ctx, regUser, false, userPolicyMap)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
userPolData, err := json.Marshal(userPolicyMap)
userPolData, err := json.Marshal(mappedPoliciesToMap(userPolicyMap))
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
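The export path now loads policy mappings into a concurrent `xsync.MapOf` and converts it to a plain map (`mappedPoliciesToMap`) before JSON marshaling, since `encoding/json` cannot serialize the concurrent map directly. A generic version of that conversion, assuming the `Range`/`Size` API of `github.com/puzpuzpuz/xsync/v3` and an illustrative `MappedPolicy` type:

```go
// Sketch: convert a concurrent xsync.MapOf into a plain map for encoding/json,
// in the spirit of mappedPoliciesToMap above.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/puzpuzpuz/xsync/v3"
)

type MappedPolicy struct {
	Policies string `json:"policy"`
}

func toPlainMap[K comparable, V any](m *xsync.MapOf[K, V]) map[K]V {
	out := make(map[K]V, m.Size())
	m.Range(func(k K, v V) bool {
		out[k] = v
		return true // keep iterating
	})
	return out
}

func main() {
	m := xsync.NewMapOf[string, MappedPolicy]()
	m.Store("alice", MappedPolicy{Policies: "readwrite"})
	data, _ := json.Marshal(toPlainMap(m))
	fmt.Println(string(data))
}
```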
@ -1953,13 +2024,13 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
case groupPolicyMappingsFile:
|
||||
groupPolicyMap := make(map[string]MappedPolicy)
|
||||
groupPolicyMap := xsync.NewMapOf[string, MappedPolicy]()
|
||||
err := globalIAMSys.store.loadMappedPolicies(ctx, regUser, true, groupPolicyMap)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
|
||||
return
|
||||
}
|
||||
grpPolData, err := json.Marshal(groupPolicyMap)
|
||||
grpPolData, err := json.Marshal(mappedPoliciesToMap(groupPolicyMap))
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
|
||||
return
|
||||
|
@ -1970,13 +2041,13 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
case stsUserPolicyMappingsFile:
|
||||
userPolicyMap := make(map[string]MappedPolicy)
|
||||
userPolicyMap := xsync.NewMapOf[string, MappedPolicy]()
|
||||
err := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, false, userPolicyMap)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
|
||||
return
|
||||
}
|
||||
userPolData, err := json.Marshal(userPolicyMap)
|
||||
userPolData, err := json.Marshal(mappedPoliciesToMap(userPolicyMap))
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
|
||||
return
|
||||
|
@ -1985,22 +2056,6 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
|
|||
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
|
||||
return
|
||||
}
|
||||
case stsGroupPolicyMappingsFile:
|
||||
groupPolicyMap := make(map[string]MappedPolicy)
|
||||
err := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, true, groupPolicyMap)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
|
||||
return
|
||||
}
|
||||
grpPolData, err := json.Marshal(groupPolicyMap)
|
||||
if err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
|
||||
return
|
||||
}
|
||||
if err = rawDataFn(bytes.NewReader(grpPolData), iamFile, len(grpPolData)); err != nil {
|
||||
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -2170,12 +2225,12 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
|
|||
// If group does not exist, then check if the group has beginning and end space characters
|
||||
// we will reject such group names.
|
||||
if errors.Is(gerr, errNoSuchGroup) && hasSpaceBE(group) {
|
||||
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, err, allGroupsFile, group), r.URL)
|
||||
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, gerr, allGroupsFile, group), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
if _, gerr := globalIAMSys.AddUsersToGroup(ctx, group, grpInfo.Members); gerr != nil {
|
||||
writeErrorResponseJSON(ctx, w, importError(ctx, err, allGroupsFile, group), r.URL)
|
||||
writeErrorResponseJSON(ctx, w, importError(ctx, gerr, allGroupsFile, group), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
@@ -2202,6 +2257,16 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, allSvcAcctsFile, ""), r.URL)
return
}

// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
err := globalIAMSys.NormalizeLDAPAccessKeypairs(ctx, serviceAcctReqs)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, ""), r.URL)
return
}
}

for user, svcAcctReq := range serviceAcctReqs {
var sp *policy.Policy
var err error

@@ -2212,7 +2277,8 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
return
}
}
// service account access key cannot have space characters beginning and end of the string.
// service account access key cannot have space characters
// beginning and end of the string.
if hasSpaceBE(svcAcctReq.AccessKey) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
return

@@ -2238,20 +2304,14 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
updateReq = false
}
if updateReq {
opts := updateServiceAccountOpts{
secretKey: svcAcctReq.SecretKey,
status: svcAcctReq.Status,
name: svcAcctReq.Name,
description: svcAcctReq.Description,
expiration: svcAcctReq.Expiration,
sessionPolicy: sp,
}
_, err = globalIAMSys.UpdateServiceAccount(ctx, svcAcctReq.AccessKey, opts)
// If the service account exists, we remove it to ensure a
// clean import.
err := globalIAMSys.DeleteServiceAccount(ctx, svcAcctReq.AccessKey, true)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
delErr := fmt.Errorf("failed to delete existing service account(%s) before importing it: %w", svcAcctReq.AccessKey, err)
writeErrorResponseJSON(ctx, w, importError(ctx, delErr, allSvcAcctsFile, user), r.URL)
return
}
continue
}
opts := newServiceAccountOpts{
accessKey: user,
@@ -2264,18 +2324,6 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
allowSiteReplicatorAccount: false,
}

// In case of LDAP we need to resolve the targetUser to a DN and
// query their groups:
if globalIAMSys.LDAPConfig.Enabled() {
opts.claims[ldapUserN] = svcAcctReq.AccessKey // simple username
targetUser, _, err := globalIAMSys.LDAPConfig.LookupUserDN(svcAcctReq.AccessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
return
}
opts.claims[ldapUser] = targetUser // username DN
}

if _, _, err = globalIAMSys.NewServiceAccount(ctx, svcAcctReq.Parent, svcAcctReq.Groups, opts); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
return

@@ -2344,6 +2392,17 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, groupPolicyMappingsFile, ""), r.URL)
return
}

// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
isGroup := true
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, isGroup, grpPolicyMap)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, ""), r.URL)
return
}
}

for g, pm := range grpPolicyMap {
if _, err := globalIAMSys.PolicyDBSet(ctx, g, pm.Policies, unknownIAMUserType, true); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, g), r.URL)
@@ -2373,6 +2432,16 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, stsUserPolicyMappingsFile, ""), r.URL)
return
}

// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
isGroup := true
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, !isGroup, userPolicyMap)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, ""), r.URL)
return
}
}
for u, pm := range userPolicyMap {
// disallow setting policy mapping if user is a temporary user
ok, _, err := globalIAMSys.IsTempUser(u)

@@ -2384,6 +2453,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importError(ctx, errIAMActionNotAllowed, stsUserPolicyMappingsFile, u), r.URL)
return
}

if _, err := globalIAMSys.PolicyDBSet(ctx, u, pm.Policies, stsUser, false); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, u), r.URL)
return
@@ -2391,42 +2461,18 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
}
}
}

// import sts group policy mappings
{
f, err := zr.Open(pathJoin(iamAssetsDir, stsGroupPolicyMappingsFile))
switch {
case errors.Is(err, os.ErrNotExist):
case err != nil:
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, stsGroupPolicyMappingsFile, ""), r.URL)
return
default:
defer f.Close()
var grpPolicyMap map[string]MappedPolicy
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, stsGroupPolicyMappingsFile, ""), r.URL)
return
}
if err = json.Unmarshal(data, &grpPolicyMap); err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, stsGroupPolicyMappingsFile, ""), r.URL)
return
}
for g, pm := range grpPolicyMap {
if _, err := globalIAMSys.PolicyDBSet(ctx, g, pm.Policies, unknownIAMUserType, true); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsGroupPolicyMappingsFile, g), r.URL)
return
}
}
}
}
}

func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) {
if exp == nil {
return
func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) error {
if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) {
return nil
}
condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(exp.Sub(time.Now()).Seconds()), 10)}
dur := exp.Sub(time.Now())
if dur <= 0 {
return errors.New("unsupported expiration time")
}
condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(dur.Seconds()), 10)}
return nil
}

func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials, newServiceAccountOpts, madmin.AddServiceAccountReq, string, APIError) {
@@ -2454,6 +2500,12 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err)
}

if createReq.Expiration != nil && !createReq.Expiration.IsZero() {
// truncate expiration at the second.
truncateTime := createReq.Expiration.Truncate(time.Second)
createReq.Expiration = &truncateTime
}

// service account access key cannot have space characters beginning and end of the string.
if hasSpaceBE(createReq.AccessKey) {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument)

@@ -2485,7 +2537,10 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
}

condValues := getConditionValues(r, "", cred)
addExpirationToCondValues(createReq.Expiration, condValues)
err = addExpirationToCondValues(createReq.Expiration, condValues)
if err != nil {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", toAdminAPIErr(ctx, err)
}

// Check if action is allowed if creating access key for another user
// Check if action is explicitly denied if for self

@@ -39,7 +39,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/signer"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)

const (

@@ -36,6 +36,7 @@ import (
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"

@@ -49,6 +50,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/madmin-go/v3/estream"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/dsync"
"github.com/minio/minio/internal/grid"
"github.com/minio/minio/internal/handlers"

@@ -57,9 +59,9 @@ import (
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/logger/message/log"
xnet "github.com/minio/pkg/v2/net"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/logger/message/log"
xnet "github.com/minio/pkg/v3/net"
"github.com/minio/pkg/v3/policy"
"github.com/secure-io/sio-go"
"github.com/zeebo/xxh3"
)
|
@ -130,7 +132,7 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
|
|||
// Download Binary Once
|
||||
binC, bin, err := downloadBinary(u, mode)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -354,7 +356,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
|
|||
// Download Binary Once
|
||||
binC, bin, err := downloadBinary(u, mode)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -368,7 +370,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
|
|||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -376,7 +378,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
|
|||
|
||||
err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -389,7 +391,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
|
|||
StatusCode: http.StatusInternalServerError,
|
||||
}
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -397,7 +399,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
|
|||
|
||||
err = commitBinary()
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -420,7 +422,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
|
|||
for _, nerr := range globalNotificationSys.SignalService(serviceRestart) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
adminLogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -451,7 +453,7 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
|
|||
case madmin.ServiceActionUnfreeze:
|
||||
serviceSig = serviceUnFreeze
|
||||
default:
|
||||
logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
|
||||
adminLogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -473,7 +475,7 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
|
|||
for _, nerr := range globalNotificationSys.SignalService(serviceSig) {
|
||||
if nerr.Err != nil {
|
||||
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
adminLogIf(ctx, nerr.Err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -534,7 +536,7 @@ func (a adminAPIHandlers) ServiceV2Handler(w http.ResponseWriter, r *http.Reques
|
|||
case madmin.ServiceActionUnfreeze:
|
||||
serviceSig = serviceUnFreeze
|
||||
default:
|
||||
logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
|
||||
adminLogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -1239,7 +1241,7 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
|
|||
if hip.clientToken == "" {
|
||||
jerr := json.NewDecoder(r).Decode(&hip.hs)
|
||||
if jerr != nil {
|
||||
logger.LogIf(GlobalContext, jerr, logger.ErrorKind)
|
||||
adminLogIf(GlobalContext, jerr, logger.ErrorKind)
|
||||
err = ErrRequestBodyParse
|
||||
return
|
||||
}
|
||||
|
@ -1429,11 +1431,11 @@ func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmi
|
|||
|
||||
if globalIsDistErasure {
|
||||
// Get heal status from other peers
|
||||
peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus()
|
||||
peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus(ctx)
|
||||
var errCount int
|
||||
for _, nerr := range nerrs {
|
||||
if nerr.Err != nil {
|
||||
logger.LogIf(ctx, nerr.Err)
|
||||
adminLogIf(ctx, nerr.Err)
|
||||
errCount++
|
||||
}
|
||||
}
|
||||
|
@ -1561,7 +1563,7 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request)
|
|||
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
|
||||
// would mean the network is not stable. Logging here will help in debugging network issues.
|
||||
if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
}
|
||||
}
|
||||
totalRx += n
|
||||
|
@ -1575,7 +1577,8 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request)
|
|||
|
||||
// NetperfHandler - perform mesh style network throughput test
|
||||
func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
ctx, cancel := context.WithCancel(r.Context())
|
||||
defer cancel()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
|
||||
if objectAPI == nil {
|
||||
|
@ -1596,6 +1599,15 @@ func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request)
|
|||
ctx = lkctx.Context()
|
||||
defer nsLock.Unlock(lkctx)
|
||||
|
||||
// Freeze all incoming S3 API calls before running speedtest.
|
||||
globalNotificationSys.ServiceFreeze(ctx, true)
|
||||
|
||||
// Unfreeze as soon as request context is canceled or when the function returns.
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
globalNotificationSys.ServiceFreeze(ctx, false)
|
||||
}()
|
||||
|
||||
durationStr := r.Form.Get(peerRESTDuration)
|
||||
duration, err := time.ParseDuration(durationStr)
|
||||
if err != nil {
|
||||
|
@ -1616,18 +1628,73 @@ func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request)
|
|||
}
|
||||
}
|
||||
|
||||
func isAllowedRWAccess(r *http.Request, cred auth.Credentials, bucketName string) (rd, wr bool) {
|
||||
owner := cred.AccessKey == globalActiveCred.AccessKey
|
||||
|
||||
// Set prefix value for "s3:prefix" policy conditionals.
|
||||
r.Header.Set("prefix", "")
|
||||
|
||||
// Set delimiter value for "s3:delimiter" policy conditionals.
|
||||
r.Header.Set("delimiter", SlashSeparator)
|
||||
|
||||
isAllowedAccess := func(bucketName string) (rd, wr bool) {
|
||||
if globalIAMSys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: policy.GetObjectAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", cred),
|
||||
IsOwner: owner,
|
||||
ObjectName: "",
|
||||
Claims: cred.Claims,
|
||||
}) {
|
||||
rd = true
|
||||
}
|
||||
|
||||
if globalIAMSys.IsAllowed(policy.Args{
|
||||
AccountName: cred.AccessKey,
|
||||
Groups: cred.Groups,
|
||||
Action: policy.PutObjectAction,
|
||||
BucketName: bucketName,
|
||||
ConditionValues: getConditionValues(r, "", cred),
|
||||
IsOwner: owner,
|
||||
ObjectName: "",
|
||||
Claims: cred.Claims,
|
||||
}) {
|
||||
wr = true
|
||||
}
|
||||
|
||||
return rd, wr
|
||||
}
|
||||
return isAllowedAccess(bucketName)
|
||||
}
|
||||
|
||||
// ObjectSpeedTestHandler - reports maximum speed of a cluster by performing PUT and
|
||||
// GET operations on the server, supports auto tuning by default by automatically
|
||||
// increasing concurrency and stopping when we have reached the limits on the
|
||||
// system.
|
||||
func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
ctx, cancel := context.WithCancel(r.Context())
|
||||
defer cancel()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
|
||||
objectAPI, creds := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !globalAPIConfig.permitRootAccess() {
|
||||
rd, wr := isAllowedRWAccess(r, creds, globalObjectPerfBucket)
|
||||
if !rd || !wr {
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, AdminError{
|
||||
Code: "XMinioSpeedtestInsufficientPermissions",
|
||||
Message: fmt.Sprintf("%s does not have read and write access to '%s' bucket", creds.AccessKey,
|
||||
globalObjectPerfBucket),
|
||||
StatusCode: http.StatusForbidden,
|
||||
}), r.URL)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
sizeStr := r.Form.Get(peerRESTSize)
|
||||
durationStr := r.Form.Get(peerRESTDuration)
|
||||
concurrentStr := r.Form.Get(peerRESTConcurrent)
|
||||
|
@ -1636,6 +1703,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
|
|||
autotune := r.Form.Get("autotune") == "true"
|
||||
noClear := r.Form.Get("noclear") == "true"
|
||||
enableSha256 := r.Form.Get("enableSha256") == "true"
|
||||
enableMultipart := r.Form.Get("enableMultipart") == "true"
|
||||
|
||||
size, err := strconv.Atoi(sizeStr)
|
||||
if err != nil {
|
||||
|
@ -1691,8 +1759,11 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
|
|||
// Freeze all incoming S3 API calls before running speedtest.
|
||||
globalNotificationSys.ServiceFreeze(ctx, true)
|
||||
|
||||
// unfreeze all incoming S3 API calls after speedtest.
|
||||
defer globalNotificationSys.ServiceFreeze(ctx, false)
|
||||
// Unfreeze as soon as request context is canceled or when the function returns.
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
globalNotificationSys.ServiceFreeze(ctx, false)
|
||||
}()
|
||||
|
||||
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
|
||||
defer keepAliveTicker.Stop()
|
||||
|
@ -1706,6 +1777,8 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
|
|||
storageClass: storageClass,
|
||||
bucketName: customBucket,
|
||||
enableSha256: enableSha256,
|
||||
enableMultipart: enableMultipart,
|
||||
creds: creds,
|
||||
})
|
||||
var prevResult madmin.SpeedTestResult
|
||||
for {
|
||||
|
@ -1792,7 +1865,8 @@ func validateObjPerfOptions(ctx context.Context, storageInfo madmin.StorageInfo,
|
|||
|
||||
// DriveSpeedtestHandler - reports throughput of drives available in the cluster
|
||||
func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := r.Context()
|
||||
ctx, cancel := context.WithCancel(r.Context())
|
||||
defer cancel()
|
||||
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
|
||||
if objectAPI == nil {
|
||||
|
@ -1802,8 +1876,11 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R
|
|||
// Freeze all incoming S3 API calls before running speedtest.
|
||||
globalNotificationSys.ServiceFreeze(ctx, true)
|
||||
|
||||
// unfreeze all incoming S3 API calls after speedtest.
|
||||
defer globalNotificationSys.ServiceFreeze(ctx, false)
|
||||
// Unfreeze as soon as request context is canceled or when the function returns.
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
globalNotificationSys.ServiceFreeze(ctx, false)
|
||||
}()
|
||||
|
||||
serial := r.Form.Get("serial") == "true"
|
||||
blockSizeStr := r.Form.Get("blocksize")
|
||||
|
@@ -2096,7 +2173,9 @@ func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Req
return
}

if err := GlobalKMS.CreateKey(ctx, r.Form.Get("key-id")); err != nil {
if err := GlobalKMS.CreateKey(ctx, &kms.CreateKeyRequest{
Name: r.Form.Get("key-id"),
}); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

@@ -2117,22 +2196,12 @@ func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Reques
return
}

stat, err := GlobalKMS.Stat(ctx)
stat, err := GlobalKMS.Status(ctx)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}

status := madmin.KMSStatus{
Name: stat.Name,
DefaultKeyID: stat.DefaultKey,
Endpoints: make(map[string]madmin.ItemState, len(stat.Endpoints)),
}
for _, endpoint := range stat.Endpoints {
status.Endpoints[endpoint] = madmin.ItemOnline // TODO(aead): Implement an online check for mTLS
}

resp, err := json.Marshal(status)
resp, err := json.Marshal(stat)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
@@ -2154,15 +2223,9 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
return
}

stat, err := GlobalKMS.Stat(ctx)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}

keyID := r.Form.Get("key-id")
if keyID == "" {
keyID = stat.DefaultKey
keyID = GlobalKMS.DefaultKey
}
response := madmin.KMSKeyStatus{
KeyID: keyID,

@@ -2170,7 +2233,10 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req

kmsContext := kms.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsContext)
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: keyID,
AssociatedData: kmsContext,
})
if err != nil {
response.EncryptionErr = err.Error()
resp, err := json.Marshal(response)

@@ -2183,7 +2249,11 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
}

// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
decryptedKey, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{
Name: key.KeyID,
Ciphertext: key.Ciphertext,
AssociatedData: kmsContext,
})
if err != nil {
response.DecryptionErr = err.Error()
resp, err := json.Marshal(response)
@ -2277,7 +2347,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
|
|||
notifyTarget := fetchLambdaInfo()
|
||||
|
||||
local := getLocalServerProperty(globalEndpoints, r, metrics)
|
||||
servers := globalNotificationSys.ServerInfo(metrics)
|
||||
servers := globalNotificationSys.ServerInfo(ctx, metrics)
|
||||
servers = append(servers, local)
|
||||
|
||||
var poolsInfo map[int]map[int]madmin.ErasureSetInfo
|
||||
|
@ -2336,8 +2406,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
|
|||
|
||||
domain := globalDomainNames
|
||||
services := madmin.Services{
|
||||
KMS: fetchKMSStatus(),
|
||||
KMSStatus: fetchKMSStatusV2(ctx),
|
||||
KMSStatus: fetchKMSStatus(ctx),
|
||||
LDAP: ldap,
|
||||
Logger: log,
|
||||
Audit: audit,
|
||||
|
@ -2347,7 +2416,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
|
|||
return madmin.InfoMessage{
|
||||
Mode: string(mode),
|
||||
Domain: domain,
|
||||
Region: globalSite.Region,
|
||||
Region: globalSite.Region(),
|
||||
SQSARN: globalEventNotifier.GetARNList(false),
|
||||
DeploymentID: globalDeploymentID(),
|
||||
Buckets: buckets,
|
||||
|
@ -2375,7 +2444,7 @@ func getKubernetesInfo(dctx context.Context) madmin.KubernetesInfo {
|
|||
}
|
||||
|
||||
client := &http.Client{
|
||||
Transport: globalHealthChkTransport,
|
||||
Transport: globalRemoteTargetTransport,
|
||||
Timeout: 10 * time.Second,
|
||||
}
|
||||
|
||||
|
@ -2800,7 +2869,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
|
|||
w.Header().Get(xhttp.AmzRequestID), w.Header().Get(xhttp.AmzRequestHostID))
|
||||
encodedErrorResponse := encodeResponse(errorResponse)
|
||||
healthInfo.Error = string(encodedErrorResponse)
|
||||
logger.LogIf(ctx, enc.Encode(healthInfo))
|
||||
adminLogIf(ctx, enc.Encode(healthInfo))
|
||||
}
|
||||
|
||||
deadline := 10 * time.Second // Default deadline is 10secs for health diagnostics.
|
||||
|
@@ -2947,66 +3016,25 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
return notify
}

// fetchKMSStatus fetches KMS-related status information.
func fetchKMSStatus() madmin.KMS {
kmsStat := madmin.KMS{}
if GlobalKMS == nil {
kmsStat.Status = "disabled"
return kmsStat
}

stat, err := GlobalKMS.Stat(context.Background())
if err != nil {
kmsStat.Status = string(madmin.ItemOffline)
return kmsStat
}
if len(stat.Endpoints) == 0 {
kmsStat.Status = stat.Name
return kmsStat
}
kmsStat.Status = string(madmin.ItemOnline)

kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey(context.Background(), "", kmsContext)
if err != nil {
kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
} else {
kmsStat.Encrypt = "success"
}

// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
switch {
case err != nil:
kmsStat.Decrypt = fmt.Sprintf("Decryption failed: %v", err)
case subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1:
kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key"
default:
kmsStat.Decrypt = "success"
}
return kmsStat
}

// fetchKMSStatusV2 fetches KMS-related status information for all instances
func fetchKMSStatusV2(ctx context.Context) []madmin.KMS {
// fetchKMSStatus fetches KMS-related status information for all instances
func fetchKMSStatus(ctx context.Context) []madmin.KMS {
if GlobalKMS == nil {
return []madmin.KMS{}
}

results := GlobalKMS.Verify(ctx)

stats := []madmin.KMS{}
for _, result := range results {
stats = append(stats, madmin.KMS{
Status: result.Status,
Endpoint: result.Endpoint,
Encrypt: result.Encrypt,
Decrypt: result.Decrypt,
Version: result.Version,
})
stat, err := GlobalKMS.Status(ctx)
if err != nil {
kmsLogIf(ctx, err, "failed to fetch KMS status information")
return []madmin.KMS{}
}

stats := make([]madmin.KMS, 0, len(stat.Endpoints))
for endpoint, state := range stat.Endpoints {
stats = append(stats, madmin.KMS{
Status: string(state),
Endpoint: endpoint,
})
}
return stats
}
||||
|
@ -3113,7 +3141,7 @@ func getClusterMetaInfo(ctx context.Context) []byte {
|
|||
case ci := <-resultCh:
|
||||
out, err := json.MarshalIndent(ci, "", " ")
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
bugLogIf(ctx, err)
|
||||
return nil
|
||||
}
|
||||
return out
|
||||
|
@ -3172,11 +3200,11 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
|
|||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
|
||||
return
|
||||
}
|
||||
file = strings.ReplaceAll(file, string(os.PathSeparator), "/")
|
||||
|
||||
file = filepath.ToSlash(file)
|
||||
// Reject attempts to traverse parent or absolute paths.
|
||||
if strings.Contains(file, "..") || strings.Contains(volume, "..") {
|
||||
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
|
||||
if hasBadPathComponent(volume) || hasBadPathComponent(file) {
|
||||
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -3195,6 +3223,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
|
|||
return
|
||||
}
|
||||
}
|
||||
addErr := func(msg string) {}
|
||||
|
||||
// Write a version for making *incompatible* changes.
|
||||
// The AdminClient will reject any version it does not know.
|
||||
|
@ -3206,18 +3235,18 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
|
|||
|
||||
clusterKey, err := bytesToPublicKey(getSubnetAdminPublicKey())
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, stream.AddError(err.Error()))
|
||||
bugLogIf(ctx, stream.AddError(err.Error()))
|
||||
return
|
||||
}
|
||||
err = stream.AddKeyEncrypted(clusterKey)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, stream.AddError(err.Error()))
|
||||
bugLogIf(ctx, stream.AddError(err.Error()))
|
||||
return
|
||||
}
|
||||
if b := getClusterMetaInfo(ctx); len(b) > 0 {
|
||||
w, err := stream.AddEncryptedStream("cluster.info", nil)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
bugLogIf(ctx, err)
|
||||
return
|
||||
}
|
||||
w.Write(b)
|
||||
|
@ -3226,14 +3255,19 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
|
|||
|
||||
// Add new key for inspect data.
|
||||
if err := stream.AddKeyEncrypted(publicKey); err != nil {
|
||||
logger.LogIf(ctx, stream.AddError(err.Error()))
|
||||
bugLogIf(ctx, stream.AddError(err.Error()))
|
||||
return
|
||||
}
|
||||
encStream, err := stream.AddEncryptedStream("inspect.zip", nil)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, stream.AddError(err.Error()))
|
||||
bugLogIf(ctx, stream.AddError(err.Error()))
|
||||
return
|
||||
}
|
||||
addErr = func(msg string) {
|
||||
inspectZipW.Close()
|
||||
encStream.Close()
|
||||
stream.AddError(msg)
|
||||
}
|
||||
defer encStream.Close()
|
||||
|
||||
inspectZipW = zip.NewWriter(encStream)
|
||||
|
@ -3244,7 +3278,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
|
|||
// MUST use crypto/rand
|
||||
n, err := crand.Read(key[:])
|
||||
if err != nil || n != len(key) {
|
||||
logger.LogIf(ctx, err)
|
||||
bugLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -3258,7 +3292,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
|
|||
|
||||
stream, err := sio.AES_256_GCM.Stream(key[:])
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
bugLogIf(ctx, err)
|
||||
return
|
||||
}
|
||||
// Zero nonce, we only use each key once, and 32 bytes is plenty.
|
||||
|
@ -3272,7 +3306,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
|
|||
defer inspectZipW.Close()
|
||||
|
||||
if b := getClusterMetaInfo(ctx); len(b) > 0 {
|
||||
logger.LogIf(ctx, embedFileInZip(inspectZipW, "cluster.info", b, 0o600))
|
||||
adminLogIf(ctx, embedFileInZip(inspectZipW, "cluster.info", b, 0o600))
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -3300,32 +3334,20 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
|
|||
sys: nil,
|
||||
})
|
||||
if zerr != nil {
|
||||
logger.LogIf(ctx, zerr)
|
||||
bugLogIf(ctx, zerr)
|
||||
return nil
|
||||
}
|
||||
header.Method = zip.Deflate
|
||||
zwriter, zerr := inspectZipW.CreateHeader(header)
|
||||
if zerr != nil {
|
||||
logger.LogIf(ctx, zerr)
|
||||
bugLogIf(ctx, zerr)
|
||||
return nil
|
||||
}
|
||||
if _, err := io.Copy(zwriter, r); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
adminLogIf(ctx, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
err := o.GetRawData(ctx, volume, file, rawDataFn)
|
||||
if !errors.Is(err, errFileNotFound) {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
|
||||
// save the format.json as part of inspect by default
|
||||
if !(volume == minioMetaBucket && file == formatConfigFile) {
|
||||
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
|
||||
}
|
||||
if !errors.Is(err, errFileNotFound) {
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
|
||||
// save args passed to inspect command
|
||||
var sb bytes.Buffer
|
||||
|
@ -3336,7 +3358,25 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
|
|||
sb.WriteString(pool.CmdLine)
|
||||
}
|
||||
sb.WriteString("\n")
|
||||
logger.LogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600))
|
||||
adminLogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600))
|
||||
|
||||
err := o.GetRawData(ctx, volume, file, rawDataFn)
|
||||
if err != nil {
|
||||
if errors.Is(err, errFileNotFound) {
|
||||
addErr("GetRawData: No files matched the given pattern")
|
||||
return
|
||||
}
|
||||
embedFileInZip(inspectZipW, "GetRawData-err.txt", []byte(err.Error()), 0o600)
|
||||
adminLogIf(ctx, err)
|
||||
}
|
||||
|
||||
// save the format.json as part of inspect by default
|
||||
if !(volume == minioMetaBucket && file == formatConfigFile) {
|
||||
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
|
||||
}
|
||||
if !errors.Is(err, errFileNotFound) {
|
||||
adminLogIf(ctx, err)
|
||||
}
|
||||
|
||||
scheme := "https"
|
||||
if !globalIsTLS {
|
||||
|
@ -3370,7 +3410,7 @@ function main() {
|
|||
}
|
||||
|
||||
main "$@"`, scheme)
|
||||
logger.LogIf(ctx, embedFileInZip(inspectZipW, "start-minio.sh", scrb.Bytes(), 0o755))
|
||||
adminLogIf(ctx, embedFileInZip(inspectZipW, "start-minio.sh", scrb.Bytes(), 0o755))
|
||||
}
|
||||
|
||||
func getSubnetAdminPublicKey() []byte {
|
||||
|
|
|
@@ -63,8 +63,8 @@ const (
)

var (
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
errHealStopSignalled = fmt.Errorf("heal stop signaled")
errHealIdleTimeout = errors.New("healing results were not consumed for too long")
errHealStopSignalled = errors.New("heal stop signaled")

errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAdminAPIErr(ctx, err)

@@ -329,21 +329,25 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
// Add heal state and start sequence
ahs.healSeqMap[hpath] = h

// Launch top-level background heal go-routine
go h.healSequenceStart(objAPI)

clientToken := h.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s:%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
}

if h.clientToken == bgHealingUUID {
// For background heal do nothing, do not spawn an unnecessary goroutine.
} else {
// Launch top-level background heal go-routine
go h.healSequenceStart(objAPI)
}

b, err := json.Marshal(madmin.HealStartSuccess{
ClientToken: clientToken,
ClientAddress: h.clientAddress,
StartTime: h.startTime,
})
if err != nil {
logger.LogIf(h.ctx, err)
bugLogIf(h.ctx, err)
return nil, toAdminAPIErr(h.ctx, err), ""
}
return b, noError, ""
|
@ -390,7 +394,7 @@ func (ahs *allHealState) PopHealStatusJSON(hpath string,
|
|||
if err != nil {
|
||||
h.currentStatus.Items = nil
|
||||
|
||||
logger.LogIf(h.ctx, err)
|
||||
bugLogIf(h.ctx, err)
|
||||
return nil, ErrInternalError
|
||||
}
|
||||
|
||||
|
@ -451,8 +455,8 @@ type healSequence struct {
|
|||
// Number of total items healed against item type
|
||||
healedItemsMap map[madmin.HealItemType]int64
|
||||
|
||||
// Number of total items where healing failed against endpoint and drive state
|
||||
healFailedItemsMap map[string]int64
|
||||
// Number of total items where healing failed against item type
|
||||
healFailedItemsMap map[madmin.HealItemType]int64
|
||||
|
||||
// The time of the last scan/heal activity
|
||||
lastHealActivity time.Time
|
||||
|
@ -493,7 +497,7 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
|
|||
ctx: ctx,
|
||||
scannedItemsMap: make(map[madmin.HealItemType]int64),
|
||||
healedItemsMap: make(map[madmin.HealItemType]int64),
|
||||
healFailedItemsMap: make(map[string]int64),
|
||||
healFailedItemsMap: make(map[madmin.HealItemType]int64),
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -537,14 +541,14 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
return retMap
}

// gethealFailedItemsMap - returns map of all items where heal failed against
// getHealFailedItemsMap - returns map of all items where heal failed against
// drive endpoint and status
func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()

// Make a copy before returning the value
retMap := make(map[string]int64, len(h.healFailedItemsMap))
retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}
@ -552,6 +556,30 @@ func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
|
|||
return retMap
|
||||
}
|
||||
|
||||
func (h *healSequence) countFailed(healType madmin.HealItemType) {
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
|
||||
h.healFailedItemsMap[healType]++
|
||||
h.lastHealActivity = UTCNow()
|
||||
}
|
||||
|
||||
func (h *healSequence) countScanned(healType madmin.HealItemType) {
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
|
||||
h.scannedItemsMap[healType]++
|
||||
h.lastHealActivity = UTCNow()
|
||||
}
|
||||
|
||||
func (h *healSequence) countHealed(healType madmin.HealItemType) {
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
|
||||
h.healedItemsMap[healType]++
|
||||
h.lastHealActivity = UTCNow()
|
||||
}
|
||||
|
||||
// isQuitting - determines if the heal sequence is quitting (due to an
|
||||
// external signal)
|
||||
func (h *healSequence) isQuitting() bool {
|
||||
|
@ -704,10 +732,7 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
|
|||
task.opts.ScanMode = madmin.HealNormalScan
|
||||
}
|
||||
|
||||
h.mutex.Lock()
|
||||
h.scannedItemsMap[healType]++
|
||||
h.lastHealActivity = UTCNow()
|
||||
h.mutex.Unlock()
|
||||
h.countScanned(healType)
|
||||
|
||||
if source.noWait {
|
||||
select {
|
||||
|
@ -739,37 +764,21 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
|
|||
// task queued, now wait for the response.
|
||||
select {
|
||||
case res := <-task.respCh:
|
||||
if res.err == nil {
|
||||
h.countHealed(healType)
|
||||
} else {
|
||||
h.countFailed(healType)
|
||||
}
|
||||
if !h.reportProgress {
|
||||
if errors.Is(res.err, errSkipFile) { // this is only sent usually by nopHeal
|
||||
return nil
|
||||
}
|
||||
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
|
||||
// Progress is not reported in case of background heal processing.
|
||||
// Instead we increment relevant counter based on the heal result
|
||||
// for prometheus reporting.
|
||||
if res.err != nil {
|
||||
for _, d := range res.result.After.Drives {
|
||||
// For failed items we report the endpoint and drive state
|
||||
// This will help users take corrective actions for drives
|
||||
h.healFailedItemsMap[d.Endpoint+","+d.State]++
|
||||
}
|
||||
} else {
|
||||
// Only object type reported for successful healing
|
||||
h.healedItemsMap[res.result.Type]++
|
||||
}
|
||||
|
||||
// Report caller of any failure
|
||||
return res.err
|
||||
}
|
||||
res.result.Type = healType
|
||||
if res.err != nil {
|
||||
// Only report object error
|
||||
if healType != madmin.HealItemObject {
|
||||
return res.err
|
||||
}
|
||||
res.result.Detail = res.err.Error()
|
||||
}
|
||||
return h.pushHealResultItem(res.result)
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
// Copyright (c) 2015-2024 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
|
@ -18,7 +18,6 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math"
|
||||
"net/http"
|
||||
"os"
|
||||
|
@ -31,7 +30,7 @@ import (
|
|||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/config"
|
||||
"github.com/minio/minio/internal/kms"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
xnet "github.com/minio/pkg/v3/net"
|
||||
)
|
||||
|
||||
// getLocalServerProperty - returns madmin.ServerProperties for only the
|
||||
|
@ -65,9 +64,11 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
|
|||
if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
|
||||
network[nodeName] = string(madmin.ItemOnline)
|
||||
} else {
|
||||
network[nodeName] = string(madmin.ItemOffline)
|
||||
// log once the error
|
||||
logger.LogOnceIf(context.Background(), err, nodeName)
|
||||
if xnet.IsNetworkOrHostDown(err, false) {
|
||||
network[nodeName] = string(madmin.ItemOffline)
|
||||
} else if xnet.IsNetworkOrHostDown(err, true) {
|
||||
network[nodeName] = "connection attempt timedout"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -67,7 +67,7 @@ type ObjectToDelete struct {
|
|||
ReplicateDecisionStr string `xml:"-"`
|
||||
}
|
||||
|
||||
// createBucketConfiguration container for bucket configuration request from client.
|
||||
// createBucketLocationConfiguration container for bucket configuration request from client.
|
||||
// Used for parsing the location from the request body for Makebucket.
|
||||
type createBucketLocationConfiguration struct {
|
||||
XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"`
|
||||
|
|
|
@ -48,7 +48,7 @@ import (
|
|||
levent "github.com/minio/minio/internal/config/lambda/event"
|
||||
"github.com/minio/minio/internal/event"
|
||||
"github.com/minio/minio/internal/hash"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// APIError structure
|
||||
|
@ -56,19 +56,23 @@ type APIError struct {
|
|||
Code string
|
||||
Description string
|
||||
HTTPStatusCode int
|
||||
ObjectSize string
|
||||
RangeRequested string
|
||||
}
|
||||
|
||||
// APIErrorResponse - error response format
|
||||
type APIErrorResponse struct {
|
||||
XMLName xml.Name `xml:"Error" json:"-"`
|
||||
Code string
|
||||
Message string
|
||||
Key string `xml:"Key,omitempty" json:"Key,omitempty"`
|
||||
BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
|
||||
Resource string
|
||||
Region string `xml:"Region,omitempty" json:"Region,omitempty"`
|
||||
RequestID string `xml:"RequestId" json:"RequestId"`
|
||||
HostID string `xml:"HostId" json:"HostId"`
|
||||
XMLName xml.Name `xml:"Error" json:"-"`
|
||||
Code string
|
||||
Message string
|
||||
Key string `xml:"Key,omitempty" json:"Key,omitempty"`
|
||||
BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
|
||||
Resource string
|
||||
Region string `xml:"Region,omitempty" json:"Region,omitempty"`
|
||||
RequestID string `xml:"RequestId" json:"RequestId"`
|
||||
HostID string `xml:"HostId" json:"HostId"`
|
||||
ActualObjectSize string `xml:"ActualObjectSize,omitempty" json:"ActualObjectSize,omitempty"`
|
||||
RangeRequested string `xml:"RangeRequested,omitempty" json:"RangeRequested,omitempty"`
|
||||
}
|
||||
|
||||
// APIErrorCode type of error status.
|
||||
|
@ -263,6 +267,7 @@ const (
|
|||
ErrInvalidResourceName
|
||||
ErrInvalidLifecycleQueryParameter
|
||||
ErrServerNotInitialized
|
||||
ErrBucketMetadataNotInitialized
|
||||
ErrRequestTimedout
|
||||
ErrClientDisconnected
|
||||
ErrTooManyRequests
|
||||
|
@ -278,9 +283,11 @@ const (
|
|||
ErrMalformedJSON
|
||||
ErrAdminNoSuchUser
|
||||
ErrAdminNoSuchUserLDAPWarn
|
||||
ErrAdminLDAPExpectedLoginName
|
||||
ErrAdminNoSuchGroup
|
||||
ErrAdminGroupNotEmpty
|
||||
ErrAdminGroupDisabled
|
||||
ErrAdminInvalidGroupName
|
||||
ErrAdminNoSuchJob
|
||||
ErrAdminNoSuchPolicy
|
||||
ErrAdminPolicyChangeAlreadyApplied
|
||||
|
@ -300,6 +307,7 @@ const (
|
|||
ErrAdminConfigIDPCfgNameDoesNotExist
|
||||
ErrInsecureClientRequest
|
||||
ErrObjectTampered
|
||||
ErrAdminLDAPNotEnabled
|
||||
|
||||
// Site-Replication errors
|
||||
ErrSiteReplicationInvalidRequest
|
||||
|
@ -418,6 +426,7 @@ const (
|
|||
ErrAdminProfilerNotEnabled
|
||||
ErrInvalidDecompressedSize
|
||||
ErrAddUserInvalidArgument
|
||||
ErrAddUserValidUTF
|
||||
ErrAdminResourceInvalidArgument
|
||||
ErrAdminAccountNotEligible
|
||||
ErrAccountNotEligible
|
||||
|
@ -449,9 +458,9 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError
|
|||
if err != nil {
|
||||
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
|
||||
}
|
||||
if globalSite.Region != "" {
|
||||
if region := globalSite.Region(); region != "" {
|
||||
if errCode == ErrAuthorizationHeaderMalformed {
|
||||
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
|
||||
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", region)
|
||||
return apiErr
|
||||
}
|
||||
}
|
||||
|
@ -1293,7 +1302,12 @@ var errorCodes = errorCodeMap{
|
|||
},
|
||||
ErrServerNotInitialized: {
|
||||
Code: "XMinioServerNotInitialized",
|
||||
Description: "Server not initialized, please try again.",
|
||||
Description: "Server not initialized yet, please try again.",
|
||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
||||
},
|
||||
ErrBucketMetadataNotInitialized: {
|
||||
Code: "XMinioBucketMetadataNotInitialized",
|
||||
Description: "Bucket metadata not initialized yet, please try again.",
|
||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
||||
},
|
||||
ErrMalformedJSON: {
|
||||
|
@ -1466,7 +1480,7 @@ var errorCodes = errorCodeMap{
|
|||
ErrTooManyRequests: {
|
||||
Code: "TooManyRequests",
|
||||
Description: "Deadline exceeded while waiting in incoming queue, please reduce your request rate",
|
||||
HTTPStatusCode: http.StatusServiceUnavailable,
|
||||
HTTPStatusCode: http.StatusTooManyRequests,
|
||||
},
|
||||
ErrUnsupportedMetadata: {
|
||||
Code: "InvalidArgument",
|
||||
|
@ -2079,7 +2093,26 @@ var errorCodes = errorCodeMap{
|
|||
Description: "Invalid attribute name specified.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
// Add your error structure here.
|
||||
ErrAdminLDAPNotEnabled: {
|
||||
Code: "XMinioLDAPNotEnabled",
|
||||
Description: "LDAP is not enabled. LDAP must be enabled to make LDAP requests.",
|
||||
HTTPStatusCode: http.StatusNotImplemented,
|
||||
},
|
||||
ErrAdminLDAPExpectedLoginName: {
|
||||
Code: "XMinioLDAPExpectedLoginName",
|
||||
Description: "Expected LDAP short username but was given full DN.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAdminInvalidGroupName: {
|
||||
Code: "XMinioInvalidGroupName",
|
||||
Description: "The group name is invalid.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
ErrAddUserValidUTF: {
|
||||
Code: "XMinioInvalidUTF",
|
||||
Description: "Invalid UTF-8 character detected.",
|
||||
HTTPStatusCode: http.StatusBadRequest,
|
||||
},
|
||||
}
|
||||
|
||||
// toAPIErrorCode - Converts embedded errors. Convenience
|
||||
|
@ -2119,6 +2152,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
|||
apiErr = ErrAdminNoSuchGroup
|
||||
case errGroupNotEmpty:
|
||||
apiErr = ErrAdminGroupNotEmpty
|
||||
case errGroupNameContainsReservedChars:
|
||||
apiErr = ErrAdminInvalidGroupName
|
||||
case errNoSuchJob:
|
||||
apiErr = ErrAdminNoSuchJob
|
||||
case errNoPolicyToAttachOrDetach:
|
||||
|
@ -2133,6 +2168,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
|||
apiErr = ErrEntityTooSmall
|
||||
case errAuthentication:
|
||||
apiErr = ErrAccessDenied
|
||||
case auth.ErrContainsReservedChars:
|
||||
apiErr = ErrAdminInvalidAccessKey
|
||||
case auth.ErrInvalidAccessKeyLength:
|
||||
apiErr = ErrAdminInvalidAccessKey
|
||||
case auth.ErrInvalidSecretKeyLength:
|
||||
|
@ -2200,6 +2237,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
|
|||
apiErr = ErrInvalidMaxParts
|
||||
case ioutil.ErrOverread:
|
||||
apiErr = ErrExcessData
|
||||
case errServerNotInitialized:
|
||||
apiErr = ErrServerNotInitialized
|
||||
case errBucketMetadataNotInitialized:
|
||||
apiErr = ErrBucketMetadataNotInitialized
|
||||
}
|
||||
|
||||
// Compression errors
|
||||
|
@ -2391,10 +2432,9 @@ func toAPIError(ctx context.Context, err error) APIError {
|
|||
apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
|
||||
switch apiErr.Code {
|
||||
case "NotImplemented":
|
||||
desc := fmt.Sprintf("%s (%v)", apiErr.Description, err)
|
||||
apiErr = APIError{
|
||||
Code: apiErr.Code,
|
||||
Description: desc,
|
||||
Description: fmt.Sprintf("%s (%v)", apiErr.Description, err),
|
||||
HTTPStatusCode: apiErr.HTTPStatusCode,
|
||||
}
|
||||
case "XMinioBackendDown":
|
||||
|
@ -2406,12 +2446,24 @@ func toAPIError(ctx context.Context, err error) APIError {
|
|||
switch e := err.(type) {
|
||||
case kms.Error:
|
||||
apiErr = APIError{
|
||||
Description: e.Err.Error(),
|
||||
Code: e.APICode,
|
||||
HTTPStatusCode: e.HTTPStatusCode,
|
||||
Description: e.Err,
|
||||
HTTPStatusCode: e.Code,
|
||||
}
|
||||
case batchReplicationJobError:
|
||||
apiErr = APIError(e)
|
||||
apiErr = APIError{
|
||||
Description: e.Description,
|
||||
Code: e.Code,
|
||||
HTTPStatusCode: e.HTTPStatusCode,
|
||||
}
|
||||
case InvalidRange:
|
||||
apiErr = APIError{
|
||||
Code: "InvalidRange",
|
||||
Description: e.Error(),
|
||||
HTTPStatusCode: errorCodes[ErrInvalidRange].HTTPStatusCode,
|
||||
ObjectSize: strconv.FormatInt(e.ResourceSize, 10),
|
||||
RangeRequested: fmt.Sprintf("%d-%d", e.OffsetBegin, e.OffsetEnd),
|
||||
}
|
||||
case InvalidArgument:
|
||||
apiErr = APIError{
|
||||
Code: "InvalidArgument",
|
||||
|
@ -2519,7 +2571,7 @@ func toAPIError(ctx context.Context, err error) APIError {
|
|||
// Make sure to log the errors which we cannot translate
|
||||
// to a meaningful S3 API errors. This is added to aid in
|
||||
// debugging unexpected/unhandled errors.
|
||||
logger.LogIf(ctx, err)
|
||||
internalLogIf(ctx, err)
|
||||
}
|
||||
|
||||
return apiErr
|
||||
|
@ -2538,13 +2590,15 @@ func getAPIError(code APIErrorCode) APIError {
|
|||
func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID, hostID string) APIErrorResponse {
|
||||
reqInfo := logger.GetReqInfo(ctx)
|
||||
return APIErrorResponse{
|
||||
Code: err.Code,
|
||||
Message: err.Description,
|
||||
BucketName: reqInfo.BucketName,
|
||||
Key: reqInfo.ObjectName,
|
||||
Resource: resource,
|
||||
Region: globalSite.Region,
|
||||
RequestID: requestID,
|
||||
HostID: hostID,
|
||||
Code: err.Code,
|
||||
Message: err.Description,
|
||||
BucketName: reqInfo.BucketName,
|
||||
Key: reqInfo.ObjectName,
|
||||
Resource: resource,
|
||||
Region: globalSite.Region(),
|
||||
RequestID: requestID,
|
||||
HostID: hostID,
|
||||
ActualObjectSize: err.ObjectSize,
|
||||
RangeRequested: err.RangeRequested,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,6 +19,7 @@ package cmd
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
|
@ -30,7 +31,6 @@ import (
|
|||
"github.com/minio/minio-go/v7/pkg/tags"
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
xxml "github.com/minio/xxml"
|
||||
)
|
||||
|
||||
|
@ -54,7 +54,7 @@ func setCommonHeaders(w http.ResponseWriter) {
|
|||
|
||||
// Set `x-amz-bucket-region` only if region is set on the server
|
||||
// by default minio uses an empty region.
|
||||
if region := globalSite.Region; region != "" {
|
||||
if region := globalSite.Region(); region != "" {
|
||||
w.Header().Set(xhttp.AmzBucketRegion, region)
|
||||
}
|
||||
w.Header().Set(xhttp.AcceptRanges, "bytes")
|
||||
|
@ -68,7 +68,7 @@ func encodeResponse(response interface{}) []byte {
|
|||
var buf bytes.Buffer
|
||||
buf.WriteString(xml.Header)
|
||||
if err := xml.NewEncoder(&buf).Encode(response); err != nil {
|
||||
logger.LogIf(GlobalContext, err)
|
||||
bugLogIf(GlobalContext, err)
|
||||
return nil
|
||||
}
|
||||
return buf.Bytes()
|
||||
|
@ -86,7 +86,7 @@ func encodeResponseList(response interface{}) []byte {
|
|||
var buf bytes.Buffer
|
||||
buf.WriteString(xxml.Header)
|
||||
if err := xxml.NewEncoder(&buf).Encode(response); err != nil {
|
||||
logger.LogIf(GlobalContext, err)
|
||||
bugLogIf(GlobalContext, err)
|
||||
return nil
|
||||
}
|
||||
return buf.Bytes()
|
||||
|
@ -108,7 +108,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
|
|||
}
|
||||
|
||||
// Write object header
|
||||
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
|
||||
func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
|
||||
// set common headers
|
||||
setCommonHeaders(w)
|
||||
|
||||
|
@ -136,7 +136,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
|||
// Set tag count if object has tags
|
||||
if len(objInfo.UserTags) > 0 {
|
||||
tags, _ := tags.ParseObjectTags(objInfo.UserTags)
|
||||
if tags.Count() > 0 {
|
||||
if tags != nil && tags.Count() > 0 {
|
||||
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tags.Count())}
|
||||
if opts.Tagging {
|
||||
// This is MinIO only extension to return back tags along with the count.
|
||||
|
@ -213,7 +213,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
|
|||
if objInfo.IsRemote() {
|
||||
// Check if object is being restored. For more information on x-amz-restore header see
|
||||
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseSyntax
|
||||
w.Header()[xhttp.AmzStorageClass] = []string{objInfo.TransitionedObject.Tier}
|
||||
w.Header()[xhttp.AmzStorageClass] = []string{filterStorageClass(ctx, objInfo.TransitionedObject.Tier)}
|
||||
}
|
||||
|
||||
if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {
|
||||
|
|
|
@ -35,7 +35,7 @@ import (
|
|||
"github.com/minio/minio/internal/hash"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
xxml "github.com/minio/xxml"
|
||||
)
|
||||
|
||||
|
@ -544,7 +544,7 @@ func cleanReservedKeys(metadata map[string]string) map[string]string {
|
|||
}
|
||||
|
||||
// generates an ListBucketVersions response for the said bucket with other enumerated options.
|
||||
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
|
||||
func generateListVersionsResponse(ctx context.Context, bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
|
||||
versions := make([]ObjectVersion, 0, len(resp.Objects))
|
||||
|
||||
owner := &Owner{
|
||||
|
@ -573,7 +573,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
|
|||
}
|
||||
content.Size = object.Size
|
||||
if object.StorageClass != "" {
|
||||
content.StorageClass = object.StorageClass
|
||||
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
|
||||
} else {
|
||||
content.StorageClass = globalMinioDefaultStorageClass
|
||||
}
|
||||
|
@ -634,7 +634,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
|
|||
}
|
||||
|
||||
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
|
||||
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
|
||||
func generateListObjectsV1Response(ctx context.Context, bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
|
||||
contents := make([]Object, 0, len(resp.Objects))
|
||||
owner := &Owner{
|
||||
ID: globalMinioDefaultOwnerID,
|
||||
|
@ -654,7 +654,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
|
|||
}
|
||||
content.Size = object.Size
|
||||
if object.StorageClass != "" {
|
||||
content.StorageClass = object.StorageClass
|
||||
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
|
||||
} else {
|
||||
content.StorageClass = globalMinioDefaultStorageClass
|
||||
}
|
||||
|
@ -683,7 +683,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
|
|||
}
|
||||
|
||||
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
|
||||
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
|
||||
func generateListObjectsV2Response(ctx context.Context, bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
|
||||
contents := make([]Object, 0, len(objects))
|
||||
var owner *Owner
|
||||
if fetchOwner {
|
||||
|
@ -707,7 +707,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
|
|||
}
|
||||
content.Size = object.Size
|
||||
if object.StorageClass != "" {
|
||||
content.StorageClass = object.StorageClass
|
||||
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
|
||||
} else {
|
||||
content.StorageClass = globalMinioDefaultStorageClass
|
||||
}
|
||||
|
@ -891,7 +891,7 @@ func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType
|
|||
}
|
||||
// Similar check to http.checkWriteHeaderCode
|
||||
if statusCode < 100 || statusCode > 999 {
|
||||
logger.LogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode))
|
||||
bugLogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode))
|
||||
statusCode = http.StatusInternalServerError
|
||||
}
|
||||
setCommonHeaders(w)
|
||||
|
@ -954,14 +954,14 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
|
|||
|
||||
switch err.Code {
|
||||
case "InvalidRegion":
|
||||
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region)
|
||||
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region())
|
||||
case "AuthorizationHeaderMalformed":
|
||||
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
|
||||
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region())
|
||||
}
|
||||
|
||||
// Similar check to http.checkWriteHeaderCode
|
||||
if err.HTTPStatusCode < 100 || err.HTTPStatusCode > 999 {
|
||||
logger.LogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
|
||||
bugLogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
|
||||
err.HTTPStatusCode = http.StatusInternalServerError
|
||||
}
|
||||
|
||||
|
|
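Each list-response hunk above now routes the object's storage class through filterStorageClass(ctx, ...) before it is written into the XML entry, with the existing fallback to the default class when the stored value is empty. The diff does not show filterStorageClass itself; the sketch below only illustrates the call shape, and the mapping of unknown tier names back to STANDARD is an assumption:

package main

import (
	"context"
	"fmt"
)

const defaultStorageClass = "STANDARD"

// knownClasses is an assumed allow-list; the real filter may instead consult
// site configuration or request options.
var knownClasses = map[string]bool{
	"STANDARD":           true,
	"REDUCED_REDUNDANCY": true,
}

// filterStorageClass mirrors the call shape in the diff: it takes the request
// context and the stored class, and returns what should be advertised.
func filterStorageClass(_ context.Context, sc string) string {
	if knownClasses[sc] {
		return sc
	}
	// Hide internal or transition tier names behind the default class (assumption).
	return defaultStorageClass
}

func storageClassForListing(ctx context.Context, stored string) string {
	if stored != "" {
		return filterStorageClass(ctx, stored)
	}
	return defaultStorageClass
}

func main() {
	ctx := context.Background()
	fmt.Println(storageClassForListing(ctx, "REDUCED_REDUNDANCY")) // REDUCED_REDUNDANCY
	fmt.Println(storageClassForListing(ctx, "WARM-TIER"))          // STANDARD (assumed mapping)
	fmt.Println(storageClassForListing(ctx, ""))                   // STANDARD
}
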
|
@@ -24,7 +24,7 @@ import (
consoleapi "github.com/minio/console/api"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/mux"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v3/wildcard"
"github.com/rs/cors"
)

@@ -64,7 +64,7 @@ func setObjectLayer(o ObjectLayer) {
globalObjLayerMutex.Unlock()
}

// objectAPIHandler implements and provides http handlers for S3 API.
// objectAPIHandlers implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
}
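The api-router hunk only fixes the doc comment on objectAPIHandlers, whose single field is an ObjectAPI getter. A small self-contained sketch of why handlers commonly hold a func() ObjectLayer rather than the layer itself; the late-binding rationale here is an inference, not something stated in the diff:

package main

import "fmt"

// ObjectLayer is a stand-in for the storage backend interface.
type ObjectLayer interface{ Name() string }

type erasureLayer struct{}

func (erasureLayer) Name() string { return "erasure" }

// handlers keeps a getter rather than a concrete layer, so the backend can be
// swapped or become available later during startup without rebuilding handlers.
type handlers struct {
	ObjectAPI func() ObjectLayer
}

func main() {
	var current ObjectLayer // nil until initialization completes

	h := handlers{ObjectAPI: func() ObjectLayer { return current }}

	if h.ObjectAPI() == nil {
		fmt.Println("layer not ready yet")
	}
	current = erasureLayer{}
	fmt.Println("layer:", h.ObjectAPI().Name())
}
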
|
File diff suppressed because one or more lines are too long
|
@ -41,7 +41,7 @@ import (
|
|||
xjwt "github.com/minio/minio/internal/jwt"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/minio/internal/mcontext"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
// Verify if request has JWT.
|
||||
|
@ -126,7 +126,7 @@ func getRequestAuthType(r *http.Request) (at authType) {
|
|||
var err error
|
||||
r.Form, err = url.ParseQuery(r.URL.RawQuery)
|
||||
if err != nil {
|
||||
logger.LogIf(r.Context(), err)
|
||||
authNLogIf(r.Context(), err)
|
||||
return authTypeUnknown
|
||||
}
|
||||
}
|
||||
|
@ -178,7 +178,7 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
|
|||
|
||||
logger.GetReqInfo(ctx).Cred = cred
|
||||
logger.GetReqInfo(ctx).Owner = owner
|
||||
logger.GetReqInfo(ctx).Region = globalSite.Region
|
||||
logger.GetReqInfo(ctx).Region = globalSite.Region()
|
||||
|
||||
return cred, owner, ErrNone
|
||||
}
|
||||
|
@ -257,7 +257,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},
|
|||
if err != nil {
|
||||
// Base64 decoding fails, we should log to indicate
|
||||
// something is malforming the request sent by client.
|
||||
logger.LogIf(GlobalContext, err, logger.ErrorKind)
|
||||
authNLogIf(GlobalContext, err, logger.ErrorKind)
|
||||
return nil, errAuthentication
|
||||
}
|
||||
claims.MapClaims[sessionPolicyNameExtracted] = string(spBytes)
|
||||
|
@ -353,7 +353,7 @@ func checkRequestAuthTypeWithVID(ctx context.Context, r *http.Request, action po
|
|||
|
||||
func authenticateRequest(ctx context.Context, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
|
||||
if logger.GetReqInfo(ctx) == nil {
|
||||
logger.LogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind)
|
||||
bugLogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind)
|
||||
return ErrAccessDenied
|
||||
}
|
||||
|
||||
|
@ -368,7 +368,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
|
|||
}
|
||||
cred, owner, s3Err = getReqAccessKeyV2(r)
|
||||
case authTypeSigned, authTypePresigned:
|
||||
region := globalSite.Region
|
||||
region := globalSite.Region()
|
||||
switch action {
|
||||
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
|
||||
region = ""
|
||||
|
@ -384,7 +384,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
|
|||
|
||||
logger.GetReqInfo(ctx).Cred = cred
|
||||
logger.GetReqInfo(ctx).Owner = owner
|
||||
logger.GetReqInfo(ctx).Region = globalSite.Region
|
||||
logger.GetReqInfo(ctx).Region = globalSite.Region()
|
||||
|
||||
// region is valid only for CreateBucketAction.
|
||||
var region string
|
||||
|
@ -392,7 +392,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
|
|||
// To extract region from XML in request body, get copy of request body.
|
||||
payload, err := io.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err, logger.ErrorKind)
|
||||
authZLogIf(ctx, err, logger.ErrorKind)
|
||||
return ErrMalformedXML
|
||||
}
|
||||
|
||||
|
@ -684,7 +684,7 @@ func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool,
|
|||
}
|
||||
cred, owner, s3Err = getReqAccessKeyV2(r)
|
||||
case authTypePresigned, authTypeSigned:
|
||||
region := globalSite.Region
|
||||
region := globalSite.Region()
|
||||
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
|
||||
return cred, owner, s3Err
|
||||
}
|
||||
|
@ -745,7 +745,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
|
|||
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
|
||||
var cred auth.Credentials
|
||||
var owner bool
|
||||
region := globalSite.Region
|
||||
region := globalSite.Region()
|
||||
switch atype {
|
||||
case authTypeUnknown:
|
||||
return ErrSignatureVersionNotSupported
|
||||
|
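Throughout the auth-handler hunks above, direct reads of globalSite.Region become calls to globalSite.Region(). The accessor's implementation is not shown; a plausible sketch is a config value that can be reloaded at runtime and therefore needs a lock-protected getter, which is the assumption behind the example below:

package main

import (
	"fmt"
	"sync"
)

// Site sketches a runtime-reloadable config value; the RWMutex-backed
// accessor is an assumption about why the field became a method.
type Site struct {
	mu     sync.RWMutex
	region string
}

func (s *Site) Update(region string) {
	s.mu.Lock()
	defer s.mu.Unlock()
	s.region = region
}

// Region replaces direct field access so concurrent readers never observe a
// partially updated configuration.
func (s *Site) Region() string {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.region
}

func main() {
	site := &Site{}
	site.Update("us-east-1")

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_ = site.Region() // safe to call while Update runs
		}()
	}
	site.Update("eu-west-1")
	wg.Wait()
	fmt.Println(site.Region())
}
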
|
|
@ -28,7 +28,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/minio/minio/internal/auth"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
)
|
||||
|
||||
type nullReader struct{}
|
||||
|
@ -403,7 +403,7 @@ func TestIsReqAuthenticated(t *testing.T) {
|
|||
|
||||
// Validates all testcases.
|
||||
for i, testCase := range testCases {
|
||||
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
|
||||
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region(), serviceS3)
|
||||
if s3Error != testCase.s3Error {
|
||||
if _, err := io.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
|
||||
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
|
||||
|
@ -443,7 +443,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
|
|||
{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
|
||||
}
|
||||
for i, testCase := range testCases {
|
||||
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
|
||||
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region()); s3Error != testCase.ErrCode {
|
||||
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,8 +25,7 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
"github.com/minio/pkg/v3/env"
|
||||
)
|
||||
|
||||
// healTask represents what to heal along with options
|
||||
|
@ -101,16 +100,17 @@ func waitForLowHTTPReq() {
|
|||
}
|
||||
|
||||
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
|
||||
bgSeq := newBgHealSequence()
|
||||
// Run the background healer
|
||||
for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
|
||||
go globalBackgroundHealRoutine.AddWorker(ctx, objAPI)
|
||||
go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq)
|
||||
}
|
||||
|
||||
globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
|
||||
globalBackgroundHealState.LaunchNewHealSequence(bgSeq, objAPI)
|
||||
}
|
||||
|
||||
// Wait for heal requests and process them
|
||||
func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {
|
||||
func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer, bgSeq *healSequence) {
|
||||
for {
|
||||
select {
|
||||
case task, ok := <-h.tasks:
|
||||
|
@ -135,8 +135,18 @@ func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {
|
|||
|
||||
if task.respCh != nil {
|
||||
task.respCh <- healResult{result: res, err: err}
|
||||
continue
|
||||
}
|
||||
|
||||
// when respCh is not set caller is not waiting but we
|
||||
// update the relevant metrics for them
|
||||
if bgSeq != nil {
|
||||
if err == nil {
|
||||
bgSeq.countHealed(res.Type)
|
||||
} else {
|
||||
bgSeq.countFailed(res.Type)
|
||||
}
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
@ -148,7 +158,7 @@ func newHealRoutine() *healRoutine {
|
|||
|
||||
if envHealWorkers := env.Get("_MINIO_HEAL_WORKERS", ""); envHealWorkers != "" {
|
||||
if numHealers, err := strconv.Atoi(envHealWorkers); err != nil {
|
||||
logger.LogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err))
|
||||
bugLogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err))
|
||||
} else {
|
||||
workers = numHealers
|
||||
}
|
||||
|
|
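The background-heal hunks above hand the shared background heal sequence (bgSeq) to every worker so that fire-and-forget tasks, those submitted without a response channel, still update the healed/failed counters. A reduced, runnable sketch of that worker loop; the types are stand-ins for MinIO's healTask and healSequence:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
)

// healSequence stands in for the background sequence whose counters the
// workers update when no caller is waiting on a response channel.
type healSequence struct {
	mu             sync.Mutex
	healed, failed int
}

func (s *healSequence) countHealed() { s.mu.Lock(); s.healed++; s.mu.Unlock() }
func (s *healSequence) countFailed() { s.mu.Lock(); s.failed++; s.mu.Unlock() }

type healTask struct {
	path   string
	respCh chan error // nil when the submitter is fire-and-forget
}

func addWorker(ctx context.Context, tasks <-chan healTask, bgSeq *healSequence, heal func(string) error) {
	for {
		select {
		case task, ok := <-tasks:
			if !ok {
				return
			}
			err := heal(task.path)
			if task.respCh != nil {
				task.respCh <- err // caller is waiting, let it do the accounting
				continue
			}
			// Fire-and-forget: record the outcome on the shared sequence instead.
			if bgSeq != nil {
				if err == nil {
					bgSeq.countHealed()
				} else {
					bgSeq.countFailed()
				}
			}
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	tasks := make(chan healTask, 2)
	seq := &healSequence{}
	tasks <- healTask{path: "bucket/ok"}
	tasks <- healTask{path: "bucket/bad"}
	close(tasks)

	addWorker(context.Background(), tasks, seq, func(p string) error {
		if p == "bucket/bad" {
			return errors.New("heal failed")
		}
		return nil
	})
	fmt.Println(seq.healed, seq.failed) // 1 1
}
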
|
@ -33,8 +33,7 @@ import (
|
|||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio-go/v7/pkg/set"
|
||||
"github.com/minio/minio/internal/config"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
"github.com/minio/pkg/v3/env"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -409,11 +408,11 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
|
|||
if errors.Is(err, errFileNotFound) {
|
||||
return nil
|
||||
}
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err))
|
||||
healingLogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err))
|
||||
tracker = initHealingTracker(disk, mustGetUUID())
|
||||
}
|
||||
|
||||
logger.Event(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint)
|
||||
healingLogEvent(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint)
|
||||
|
||||
buckets, _ := z.ListBuckets(ctx, BucketOptions{})
|
||||
// Buckets data are dispersed in multiple pools/sets, make
|
||||
|
@ -452,11 +451,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
|
|||
return err
|
||||
}
|
||||
|
||||
logger.Event(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
|
||||
|
||||
if len(tracker.QueuedBuckets) > 0 {
|
||||
return fmt.Errorf("not all buckets were healed: %v", tracker.QueuedBuckets)
|
||||
}
|
||||
healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
|
||||
|
||||
if serverDebugLog {
|
||||
tracker.printTo(os.Stdout)
|
||||
|
@ -464,7 +459,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
|
|||
}
|
||||
|
||||
if tracker.HealID == "" { // HealID was empty only before Feb 2023
|
||||
logger.LogIf(ctx, tracker.delete(ctx))
|
||||
bugLogIf(ctx, tracker.delete(ctx))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -482,7 +477,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
|
|||
t, err := loadHealingTracker(ctx, disk)
|
||||
if err != nil {
|
||||
if !errors.Is(err, errFileNotFound) {
|
||||
logger.LogIf(ctx, err)
|
||||
healingLogIf(ctx, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
@ -517,7 +512,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
|
|||
// Reformat disks immediately
|
||||
_, err := z.HealFormat(context.Background(), false)
|
||||
if err != nil && !errors.Is(err, errNoHealRequired) {
|
||||
logger.LogIf(ctx, err)
|
||||
healingLogIf(ctx, err)
|
||||
// Reset for next interval.
|
||||
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
|
||||
continue
|
||||
|
|
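A pattern running through these hunks is the replacement of generic logger.LogIf calls with subsystem-scoped helpers (healingLogIf, healingLogEvent, dnsLogIf, batchLogIf, bugLogIf, and so on). Their implementations are not part of this diff; the sketch below only illustrates the thin-wrapper shape such helpers usually take, with the tag-based formatting being an assumption:

package main

import (
	"context"
	"errors"
	"log"
)

// logIf is a stand-in for the underlying logger.LogIf.
func logIf(_ context.Context, subsystem string, err error) {
	if err == nil {
		return
	}
	log.Printf("[%s] %v", subsystem, err)
}

// Per-subsystem helpers comparable to the healingLogIf/batchLogIf/dnsLogIf
// calls in the diff; the implementation here is assumed, not MinIO's.
func healingLogIf(ctx context.Context, err error) { logIf(ctx, "healing", err) }
func batchLogIf(ctx context.Context, err error)   { logIf(ctx, "batch", err) }
func dnsLogIf(ctx context.Context, err error)     { logIf(ctx, "dns", err) }

func main() {
	ctx := context.Background()
	healingLogIf(ctx, errors.New("unable to load healing tracker"))
	batchLogIf(ctx, nil) // no-op on nil errors, like logger.LogIf
	dnsLogIf(ctx, errors.New("failed to remove DNS entry"))
}
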
|
@ -33,10 +33,9 @@ import (
|
|||
"github.com/minio/minio/internal/bucket/versioning"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
xioutil "github.com/minio/minio/internal/ioutil"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
"github.com/minio/pkg/v2/wildcard"
|
||||
"github.com/minio/pkg/v2/workers"
|
||||
"github.com/minio/pkg/v3/env"
|
||||
"github.com/minio/pkg/v3/wildcard"
|
||||
"github.com/minio/pkg/v3/workers"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
|
@ -156,7 +155,7 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
|
|||
}
|
||||
default:
|
||||
// we should never come here, Validate should have caught this.
|
||||
logger.LogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type)
|
||||
batchLogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type)
|
||||
return false
|
||||
}
|
||||
|
||||
|
@ -321,7 +320,7 @@ func (r BatchJobExpire) Notify(ctx context.Context, body io.Reader) error {
|
|||
req.Header.Set("Authorization", r.NotificationCfg.Token)
|
||||
}
|
||||
|
||||
clnt := http.Client{Transport: getRemoteInstanceTransport}
|
||||
clnt := http.Client{Transport: getRemoteInstanceTransport()}
|
||||
resp, err := clnt.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -428,11 +427,12 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
|
|||
}
|
||||
stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
|
||||
_, err := api.DeleteObject(ctx, exp.Bucket, encodeDirObject(exp.Name), ObjectOptions{
|
||||
DeletePrefix: true,
|
||||
DeletePrefix: true,
|
||||
DeletePrefixObject: true, // use prefix delete on exact object (this is an optimization to avoid fan-out calls)
|
||||
})
|
||||
if err != nil {
|
||||
stopFn(exp, err)
|
||||
logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts))
|
||||
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts))
|
||||
} else {
|
||||
stopFn(exp, err)
|
||||
success = true
|
||||
|
@ -470,19 +470,19 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
|
|||
for i, err := range errs {
|
||||
if err != nil {
|
||||
stopFn(toDelCopy[i], err)
|
||||
logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts))
|
||||
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts))
|
||||
failed++
|
||||
if attempts == retryAttempts { // all retry attempts failed, record failure
|
||||
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
|
||||
ri.trackCurrentBucketObject(r.Bucket, *oi, false)
|
||||
}
|
||||
} else {
|
||||
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
|
||||
ri.trackCurrentBucketObject(r.Bucket, *oi, false, attempts)
|
||||
}
|
||||
if attempts != retryAttempts {
|
||||
// retry
|
||||
toDel = append(toDel, toDelCopy[i])
|
||||
}
|
||||
} else {
|
||||
stopFn(toDelCopy[i], nil)
|
||||
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
|
||||
ri.trackCurrentBucketObject(r.Bucket, *oi, true)
|
||||
ri.trackCurrentBucketObject(r.Bucket, *oi, true, attempts)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -537,7 +537,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
|
|||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
results := make(chan ObjectInfo, workerSize)
|
||||
results := make(chan itemOrErr[ObjectInfo], workerSize)
|
||||
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
|
||||
Marker: lastObject,
|
||||
LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions
|
||||
|
@ -556,16 +556,16 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
|
|||
select {
|
||||
case <-saveTicker.C:
|
||||
// persist in-memory state to disk after every 10secs.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
|
||||
case <-ctx.Done():
|
||||
// persist in-memory state immediately before exiting due to context cancellation.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
return
|
||||
|
||||
case <-saverQuitCh:
|
||||
// persist in-memory state immediately to disk.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
@ -584,11 +584,18 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
|
|||
versionsCount int
|
||||
toDel []expireObjInfo
|
||||
)
|
||||
failed := true
|
||||
for result := range results {
|
||||
if result.Err != nil {
|
||||
failed = true
|
||||
batchLogIf(ctx, result.Err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Apply filter to find the matching rule to apply expiry
|
||||
// actions accordingly.
|
||||
// nolint:gocritic
|
||||
if result.IsLatest {
|
||||
if result.Item.IsLatest {
|
||||
// send down filtered entries to be deleted using
|
||||
// DeleteObjects method
|
||||
if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
|
||||
|
@ -609,7 +616,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
|
|||
var match BatchJobExpireFilter
|
||||
var found bool
|
||||
for _, rule := range r.Rules {
|
||||
if rule.Matches(result, now) {
|
||||
if rule.Matches(result.Item, now) {
|
||||
match = rule
|
||||
found = true
|
||||
break
|
||||
|
@ -619,18 +626,18 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
|
|||
continue
|
||||
}
|
||||
|
||||
prevObj = result
|
||||
prevObj = result.Item
|
||||
matchedFilter = match
|
||||
versionsCount = 1
|
||||
// Include the latest version
|
||||
if matchedFilter.Purge.RetainVersions == 0 {
|
||||
toDel = append(toDel, expireObjInfo{
|
||||
ObjectInfo: result,
|
||||
ObjectInfo: result.Item,
|
||||
ExpireAll: true,
|
||||
})
|
||||
continue
|
||||
}
|
||||
} else if prevObj.Name == result.Name {
|
||||
} else if prevObj.Name == result.Item.Name {
|
||||
if matchedFilter.Purge.RetainVersions == 0 {
|
||||
continue // including latest version in toDel suffices, skipping other versions
|
||||
}
|
||||
|
@ -643,7 +650,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
|
|||
continue // retain versions
|
||||
}
|
||||
toDel = append(toDel, expireObjInfo{
|
||||
ObjectInfo: result,
|
||||
ObjectInfo: result.Item,
|
||||
})
|
||||
}
|
||||
// Send any remaining objects downstream
|
||||
|
@ -658,8 +665,8 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
|
|||
<-expireDoneCh // waits for the expire goroutine to complete
|
||||
wk.Wait() // waits for all expire workers to retire
|
||||
|
||||
ri.Complete = ri.ObjectsFailed == 0
|
||||
ri.Failed = ri.ObjectsFailed > 0
|
||||
ri.Complete = !failed && ri.ObjectsFailed == 0
|
||||
ri.Failed = failed || ri.ObjectsFailed > 0
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
|
||||
// Close the saverQuitCh - this also triggers saving in-memory state
|
||||
|
@ -669,7 +676,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
|
|||
// Notify expire jobs final status to the configured endpoint
|
||||
buf, _ := json.Marshal(ri)
|
||||
if err := r.Notify(context.Background(), bytes.NewReader(buf)); err != nil {
|
||||
logger.LogIf(context.Background(), fmt.Errorf("unable to notify %v", err))
|
||||
batchLogIf(context.Background(), fmt.Errorf("unable to notify %v", err))
|
||||
}
|
||||
|
||||
return nil
|
||||
|
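The batch-expire hunks above change the Walk result channels from plain ObjectInfo to itemOrErr[ObjectInfo], so listing errors travel down the same channel as items and mark the run failed instead of being silently dropped. A compact, runnable sketch of that wrapper and a consumer loop in the same style:

package main

import "fmt"

// itemOrErr mirrors the wrapper the Walk channels now carry: either a listed
// item or the error that interrupted the listing.
type itemOrErr[T any] struct {
	Item T
	Err  error
}

type objectInfo struct{ Name string }

// consume drains the results channel the way the expire job does above:
// errors mark the run failed but do not stop processing of later entries.
func consume(results <-chan itemOrErr[objectInfo]) (processed int, failed bool) {
	for res := range results {
		if res.Err != nil {
			failed = true
			fmt.Println("walk error:", res.Err)
			continue
		}
		processed++
		fmt.Println("expiring:", res.Item.Name)
	}
	return processed, failed
}

func main() {
	results := make(chan itemOrErr[objectInfo], 3)
	results <- itemOrErr[objectInfo]{Item: objectInfo{Name: "bucket/a"}}
	results <- itemOrErr[objectInfo]{Err: fmt.Errorf("drive offline")}
	results <- itemOrErr[objectInfo]{Item: objectInfo{Name: "bucket/b"}}
	close(results)

	processed, failed := consume(results)
	fmt.Println(processed, failed) // 2 true
}
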
|
|
@ -48,11 +48,10 @@ import (
|
|||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/ioutil"
|
||||
xioutil "github.com/minio/minio/internal/ioutil"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/console"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
"github.com/minio/pkg/v2/policy"
|
||||
"github.com/minio/pkg/v2/workers"
|
||||
"github.com/minio/pkg/v3/console"
|
||||
"github.com/minio/pkg/v3/env"
|
||||
"github.com/minio/pkg/v3/policy"
|
||||
"github.com/minio/pkg/v3/workers"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
|
@ -92,7 +91,7 @@ func notifyEndpoint(ctx context.Context, ri *batchJobInfo, endpoint, token strin
|
|||
}
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
clnt := http.Client{Transport: getRemoteInstanceTransport}
|
||||
clnt := http.Client{Transport: getRemoteInstanceTransport()}
|
||||
resp, err := clnt.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -206,7 +205,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
|
|||
if aerr == nil {
|
||||
return
|
||||
}
|
||||
logger.LogIf(ctx,
|
||||
batchLogIf(ctx,
|
||||
fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster",
|
||||
humanize.Ordinal(attempts), res.UploadID, tgtBucket, tgtObject, aerr))
|
||||
attempts++
|
||||
|
@ -351,7 +350,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
|
|||
c, err := miniogo.New(u.Host, &miniogo.Options{
|
||||
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
|
||||
Secure: u.Scheme == "https",
|
||||
Transport: getRemoteInstanceTransport,
|
||||
Transport: getRemoteInstanceTransport(),
|
||||
BucketLookup: lookupStyle(r.Source.Path),
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -402,7 +401,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
|
|||
} else {
|
||||
if !isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) &&
|
||||
!isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) {
|
||||
logger.LogIf(ctx, err)
|
||||
batchLogIf(ctx, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
@ -414,7 +413,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
|
|||
} else {
|
||||
if !isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) &&
|
||||
!isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) {
|
||||
logger.LogIf(ctx, err)
|
||||
batchLogIf(ctx, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
@ -443,15 +442,15 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
|
|||
return
|
||||
}
|
||||
stopFn(oi, err)
|
||||
logger.LogIf(ctx, err)
|
||||
batchLogIf(ctx, err)
|
||||
success = false
|
||||
} else {
|
||||
stopFn(oi, nil)
|
||||
}
|
||||
ri.trackCurrentBucketObject(r.Target.Bucket, oi, success)
|
||||
ri.trackCurrentBucketObject(r.Target.Bucket, oi, success, attempts)
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
// persist in-memory state to disk after every 10secs.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
|
||||
if wait := globalBatchConfig.ReplicationWait(); wait > 0 {
|
||||
time.Sleep(wait)
|
||||
|
@ -466,10 +465,10 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
|
|||
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
// persist in-memory state to disk.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
|
||||
if err := r.Notify(ctx, ri); err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
|
||||
batchLogIf(ctx, fmt.Errorf("unable to notify %v", err))
|
||||
}
|
||||
|
||||
cancel()
|
||||
|
@ -553,7 +552,7 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa
|
|||
VersionID: entry.VersionID,
|
||||
})
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
batchLogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -572,7 +571,7 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa
|
|||
|
||||
opts, err := batchReplicationOpts(ctx, "", gr.ObjInfo)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
batchLogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -691,6 +690,7 @@ type batchJobInfo struct {
|
|||
StartTime time.Time `json:"startTime" msg:"st"`
|
||||
LastUpdate time.Time `json:"lastUpdate" msg:"lu"`
|
||||
RetryAttempts int `json:"retryAttempts" msg:"ra"`
|
||||
Attempts int `json:"attempts" msg:"at"`
|
||||
|
||||
Complete bool `json:"complete" msg:"cmp"`
|
||||
Failed bool `json:"failed" msg:"fld"`
|
||||
|
@ -834,13 +834,15 @@ func (ri *batchJobInfo) clone() *batchJobInfo {
|
|||
ObjectsFailed: ri.ObjectsFailed,
|
||||
BytesTransferred: ri.BytesTransferred,
|
||||
BytesFailed: ri.BytesFailed,
|
||||
Attempts: ri.Attempts,
|
||||
}
|
||||
}
|
||||
|
||||
func (ri *batchJobInfo) countItem(size int64, dmarker, success bool) {
|
||||
func (ri *batchJobInfo) countItem(size int64, dmarker, success bool, attempt int) {
|
||||
if ri == nil {
|
||||
return
|
||||
}
|
||||
ri.Attempts++
|
||||
if success {
|
||||
if dmarker {
|
||||
ri.DeleteMarkers++
|
||||
|
@ -848,7 +850,19 @@ func (ri *batchJobInfo) countItem(size int64, dmarker, success bool) {
|
|||
ri.Objects++
|
||||
ri.BytesTransferred += size
|
||||
}
|
||||
if attempt > 1 {
|
||||
if dmarker {
|
||||
ri.DeleteMarkersFailed--
|
||||
} else {
|
||||
ri.ObjectsFailed--
|
||||
ri.BytesFailed += size
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if attempt > 1 {
|
||||
// Only count first attempt
|
||||
return
|
||||
}
|
||||
if dmarker {
|
||||
ri.DeleteMarkersFailed++
|
||||
} else {
|
||||
|
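The countItem hunk above threads the attempt number into the batch accounting so retries do not double-count: a successful retry withdraws the failure recorded on the first attempt, and repeated failures are only counted once. A deliberately simplified sketch of that idea (the real struct also tracks bytes and delete markers):

package main

import "fmt"

// jobInfo is a reduced version of the batch accounting struct, with just
// enough fields to show how the attempt parameter prevents double-counting.
type jobInfo struct {
	Objects       int
	ObjectsFailed int
	Attempts      int
}

func (ri *jobInfo) countItem(success bool, attempt int) {
	ri.Attempts++
	if success {
		ri.Objects++
		if attempt > 1 {
			// A retry succeeded: withdraw the failure recorded on attempt 1.
			ri.ObjectsFailed--
		}
		return
	}
	if attempt > 1 {
		// Only the first failed attempt is counted.
		return
	}
	ri.ObjectsFailed++
}

func main() {
	var ri jobInfo
	ri.countItem(false, 1)  // first attempt fails
	ri.countItem(true, 2)   // retry succeeds
	fmt.Printf("%+v\n", ri) // {Objects:1 ObjectsFailed:0 Attempts:2}
}
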
@ -922,7 +936,7 @@ func (ri *batchJobInfo) trackMultipleObjectVersions(bucket string, info ObjectIn
|
|||
}
|
||||
}
|
||||
|
||||
func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, success bool) {
|
||||
func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, success bool, attempt int) {
|
||||
if ri == nil {
|
||||
return
|
||||
}
|
||||
|
@ -932,7 +946,7 @@ func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo,
|
|||
|
||||
ri.Bucket = bucket
|
||||
ri.Object = info.Name
|
||||
ri.countItem(info.Size, info.DeleteMarker, success)
|
||||
ri.countItem(info.Size, info.DeleteMarker, success, attempt)
|
||||
}
|
||||
|
||||
func (ri *batchJobInfo) trackCurrentBucketBatch(bucket string, batch []ObjectInfo) {
|
||||
|
@ -946,7 +960,7 @@ func (ri *batchJobInfo) trackCurrentBucketBatch(bucket string, batch []ObjectInf
|
|||
ri.Bucket = bucket
|
||||
for i := range batch {
|
||||
ri.Object = batch[i].Name
|
||||
ri.countItem(batch[i].Size, batch[i].DeleteMarker, true)
|
||||
ri.countItem(batch[i].Size, batch[i].DeleteMarker, true, 1)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1048,7 +1062,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
|||
c, err := miniogo.NewCore(u.Host, &miniogo.Options{
|
||||
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
|
||||
Secure: u.Scheme == "https",
|
||||
Transport: getRemoteInstanceTransport,
|
||||
Transport: getRemoteInstanceTransport(),
|
||||
BucketLookup: lookupStyle(r.Target.Path),
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -1058,8 +1072,8 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
|||
c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)
|
||||
|
||||
var (
|
||||
walkCh = make(chan ObjectInfo, 100)
|
||||
slowCh = make(chan ObjectInfo, 100)
|
||||
walkCh = make(chan itemOrErr[ObjectInfo], 100)
|
||||
slowCh = make(chan itemOrErr[ObjectInfo], 100)
|
||||
)
|
||||
|
||||
if !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() {
|
||||
|
@ -1068,11 +1082,11 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
|||
cl, err := miniogo.New(u.Host, &miniogo.Options{
|
||||
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
|
||||
Secure: u.Scheme == "https",
|
||||
Transport: getRemoteInstanceTransport,
|
||||
Transport: getRemoteInstanceTransport(),
|
||||
BucketLookup: lookupStyle(r.Target.Path),
|
||||
})
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
batchLogIf(ctx, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -1083,25 +1097,25 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
|||
writeFn := func(batch []ObjectInfo) {
|
||||
if len(batch) > 0 {
|
||||
if err := r.writeAsArchive(ctx, api, cl, batch); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
batchLogIf(ctx, err)
|
||||
for _, b := range batch {
|
||||
slowCh <- b
|
||||
slowCh <- itemOrErr[ObjectInfo]{Item: b}
|
||||
}
|
||||
} else {
|
||||
ri.trackCurrentBucketBatch(r.Source.Bucket, batch)
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
// persist in-memory state to disk after every 10secs.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
}
|
||||
}
|
||||
}
|
||||
for obj := range walkCh {
|
||||
if obj.DeleteMarker || !obj.VersionPurgeStatus.Empty() || obj.Size >= int64(smallerThan) {
|
||||
if obj.Item.DeleteMarker || !obj.Item.VersionPurgeStatus.Empty() || obj.Item.Size >= int64(smallerThan) {
|
||||
slowCh <- obj
|
||||
continue
|
||||
}
|
||||
|
||||
batch = append(batch, obj)
|
||||
batch = append(batch, obj.Item)
|
||||
|
||||
if len(batch) < *r.Source.Snowball.Batch {
|
||||
continue
|
||||
|
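The snowball hunks above split walk results by size: small objects are batched and shipped as one archive, while delete markers, purged versions, and anything at or above the size threshold fall through to the per-object slow path (and failed archive batches are re-queued on that same path). A self-contained sketch of the size-based routing; the threshold and batch size are illustrative, not MinIO's defaults:

package main

import "fmt"

type object struct {
	Name         string
	Size         int64
	DeleteMarker bool
}

// routeForSnowball mirrors the split above: objects below the threshold are
// batched into one archive upload, everything else takes the per-object path.
func routeForSnowball(objs []object, smallerThan int64, batchSize int,
	writeBatch func([]object), slow chan<- object) {
	var batch []object
	flush := func() {
		if len(batch) > 0 {
			writeBatch(batch)
			batch = nil
		}
	}
	for _, o := range objs {
		if o.DeleteMarker || o.Size >= smallerThan {
			slow <- o
			continue
		}
		batch = append(batch, o)
		if len(batch) >= batchSize {
			flush()
		}
	}
	flush() // send any remaining small objects downstream
}

func main() {
	slow := make(chan object, 8)
	objs := []object{
		{Name: "tiny-1", Size: 10}, {Name: "tiny-2", Size: 20},
		{Name: "huge", Size: 10 << 20}, {Name: "marker", DeleteMarker: true},
	}
	routeForSnowball(objs, 1<<20, 2, func(b []object) {
		fmt.Println("archive batch of", len(b))
	}, slow)
	close(slow)
	for o := range slow {
		fmt.Println("per-object replicate:", o.Name)
	}
}
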
@ -1154,8 +1168,13 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
|||
prevObj := ""
|
||||
|
||||
skipReplicate := false
|
||||
for result := range slowCh {
|
||||
result := result
|
||||
for res := range slowCh {
|
||||
if res.Err != nil {
|
||||
ri.Failed = true
|
||||
batchLogIf(ctx, res.Err)
|
||||
continue
|
||||
}
|
||||
result := res.Item
|
||||
if result.Name != prevObj {
|
||||
prevObj = result.Name
|
||||
skipReplicate = result.DeleteMarker && s3Type
|
||||
|
@ -1179,15 +1198,15 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
|||
return
|
||||
}
|
||||
stopFn(result, err)
|
||||
logger.LogIf(ctx, err)
|
||||
batchLogIf(ctx, err)
|
||||
success = false
|
||||
} else {
|
||||
stopFn(result, nil)
|
||||
}
|
||||
ri.trackCurrentBucketObject(r.Source.Bucket, result, success)
|
||||
ri.trackCurrentBucketObject(r.Source.Bucket, result, success, attempts)
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
// persist in-memory state to disk after every 10secs.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
|
||||
if wait := globalBatchConfig.ReplicationWait(); wait > 0 {
|
||||
time.Sleep(wait)
|
||||
|
@ -1202,10 +1221,10 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
|||
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
// persist in-memory state to disk.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
|
||||
if err := r.Notify(ctx, ri); err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
|
||||
batchLogIf(ctx, fmt.Errorf("unable to notify %v", err))
|
||||
}
|
||||
|
||||
cancel()
|
||||
|
@ -1232,6 +1251,7 @@ type batchReplicationJobError struct {
|
|||
Code string
|
||||
Description string
|
||||
HTTPStatusCode int
|
||||
ObjectSize int64
|
||||
}
|
||||
|
||||
func (e batchReplicationJobError) Error() string {
|
||||
|
@ -1248,9 +1268,18 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
|
|||
return errInvalidArgument
|
||||
}
|
||||
|
||||
if r.Source.Bucket == "" {
|
||||
if r.Source.Endpoint != "" && r.Target.Endpoint != "" {
|
||||
return errInvalidArgument
|
||||
}
|
||||
|
||||
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
|
||||
return errInvalidArgument
|
||||
}
|
||||
|
||||
if r.Source.Bucket == "" || r.Target.Bucket == "" {
|
||||
return errInvalidArgument
|
||||
}
|
||||
|
||||
var isRemoteToLocal bool
|
||||
localBkt := r.Source.Bucket
|
||||
if r.Source.Endpoint != "" {
|
||||
|
@ -1275,9 +1304,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
|
|||
if err := r.Source.Snowball.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
|
||||
return errInvalidArgument
|
||||
}
|
||||
|
||||
if !r.Source.Creds.Empty() {
|
||||
if err := r.Source.Creds.Validate(); err != nil {
|
||||
|
@ -1299,9 +1325,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
|
|||
if r.Target.Endpoint != "" && !r.Target.Type.isMinio() && !r.Target.ValidPath() {
|
||||
return errInvalidArgument
|
||||
}
|
||||
if r.Target.Bucket == "" {
|
||||
return errInvalidArgument
|
||||
}
|
||||
|
||||
if !r.Target.Creds.Empty() {
|
||||
if err := r.Target.Creds.Validate(); err != nil {
|
||||
|
@ -1309,10 +1332,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
|
|||
}
|
||||
}
|
||||
|
||||
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
|
||||
return errInvalidArgument
|
||||
}
|
||||
|
||||
if err := r.Target.Type.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -1354,7 +1373,7 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
|
|||
c, err := miniogo.NewCore(u.Host, &miniogo.Options{
|
||||
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
|
||||
Secure: u.Scheme == "https",
|
||||
Transport: getRemoteInstanceTransport,
|
||||
Transport: getRemoteInstanceTransport(),
|
||||
BucketLookup: lookupStyle(pathStyle),
|
||||
})
|
||||
if err != nil {
|
||||
|
@ -1485,7 +1504,7 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
|
|||
jobType = string(madmin.BatchJobReplicate)
|
||||
}
|
||||
|
||||
resultCh := make(chan ObjectInfo)
|
||||
resultCh := make(chan itemOrErr[ObjectInfo])
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
@ -1497,10 +1516,14 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
|
|||
|
||||
listResult := madmin.ListBatchJobsResult{}
|
||||
for result := range resultCh {
|
||||
if result.Err != nil {
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, result.Err), r.URL)
|
||||
return
|
||||
}
|
||||
req := &BatchJobRequest{}
|
||||
if err := req.load(ctx, objectAPI, result.Name); err != nil {
|
||||
if err := req.load(ctx, objectAPI, result.Item.Name); err != nil {
|
||||
if !errors.Is(err, errNoSuchJob) {
|
||||
logger.LogIf(ctx, err)
|
||||
batchLogIf(ctx, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
@ -1516,7 +1539,7 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
|
|||
}
|
||||
}
|
||||
|
||||
logger.LogIf(ctx, json.NewEncoder(w).Encode(&listResult))
|
||||
batchLogIf(ctx, json.NewEncoder(w).Encode(&listResult))
|
||||
}
|
||||
|
||||
var errNoSuchJob = errors.New("no such job")
|
||||
|
@ -1539,7 +1562,7 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques
|
|||
req := &BatchJobRequest{}
|
||||
if err := req.load(ctx, objectAPI, pathJoin(batchJobPrefix, jobID)); err != nil {
|
||||
if !errors.Is(err, errNoSuchJob) {
|
||||
logger.LogIf(ctx, err)
|
||||
batchLogIf(ctx, err)
|
||||
}
|
||||
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
|
@ -1548,7 +1571,7 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques
|
|||
|
||||
buf, err := yaml.Marshal(req)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
batchLogIf(ctx, err)
|
||||
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
|
||||
return
|
||||
}
|
||||
|
@ -1703,21 +1726,25 @@ func newBatchJobPool(ctx context.Context, o ObjectLayer, workers int) *BatchJobP
|
|||
}
|
||||
|
||||
func (j *BatchJobPool) resume() {
|
||||
results := make(chan ObjectInfo, 100)
|
||||
results := make(chan itemOrErr[ObjectInfo], 100)
|
||||
ctx, cancel := context.WithCancel(j.ctx)
|
||||
defer cancel()
|
||||
if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobPrefix, results, WalkOptions{}); err != nil {
|
||||
logger.LogIf(j.ctx, err)
|
||||
batchLogIf(j.ctx, err)
|
||||
return
|
||||
}
|
||||
for result := range results {
|
||||
if result.Err != nil {
|
||||
batchLogIf(j.ctx, result.Err)
|
||||
continue
|
||||
}
|
||||
// ignore batch-replicate.bin and batch-rotate.bin entries
|
||||
if strings.HasSuffix(result.Name, slashSeparator) {
|
||||
if strings.HasSuffix(result.Item.Name, slashSeparator) {
|
||||
continue
|
||||
}
|
||||
req := &BatchJobRequest{}
|
||||
if err := req.load(ctx, j.objLayer, result.Name); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
if err := req.load(ctx, j.objLayer, result.Item.Name); err != nil {
|
||||
batchLogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
_, nodeIdx := parseRequestToken(req.ID)
|
||||
|
@ -1726,7 +1753,7 @@ func (j *BatchJobPool) resume() {
|
|||
continue
|
||||
}
|
||||
if err := j.queueJob(req); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
batchLogIf(ctx, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
@ -1750,7 +1777,7 @@ func (j *BatchJobPool) AddWorker() {
|
|||
if job.Replicate.RemoteToLocal() {
|
||||
if err := job.Replicate.StartFromSource(job.ctx, j.objLayer, *job); err != nil {
|
||||
if !isErrBucketNotFound(err) {
|
||||
logger.LogIf(j.ctx, err)
|
||||
batchLogIf(j.ctx, err)
|
||||
j.canceler(job.ID, false)
|
||||
continue
|
||||
}
|
||||
|
@ -1759,7 +1786,7 @@ func (j *BatchJobPool) AddWorker() {
|
|||
} else {
|
||||
if err := job.Replicate.Start(job.ctx, j.objLayer, *job); err != nil {
|
||||
if !isErrBucketNotFound(err) {
|
||||
logger.LogIf(j.ctx, err)
|
||||
batchLogIf(j.ctx, err)
|
||||
j.canceler(job.ID, false)
|
||||
continue
|
||||
}
|
||||
|
@ -1769,14 +1796,14 @@ func (j *BatchJobPool) AddWorker() {
|
|||
case job.KeyRotate != nil:
|
||||
if err := job.KeyRotate.Start(job.ctx, j.objLayer, *job); err != nil {
|
||||
if !isErrBucketNotFound(err) {
|
||||
logger.LogIf(j.ctx, err)
|
||||
batchLogIf(j.ctx, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
case job.Expire != nil:
|
||||
if err := job.Expire.Start(job.ctx, j.objLayer, *job); err != nil {
|
||||
if !isErrBucketNotFound(err) {
|
||||
logger.LogIf(j.ctx, err)
|
||||
batchLogIf(j.ctx, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
|
|
@ -419,6 +419,12 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {
|
|||
err = msgp.WrapError(err, "RetryAttempts")
|
||||
return
|
||||
}
|
||||
case "at":
|
||||
z.Attempts, err = dc.ReadInt()
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Attempts")
|
||||
return
|
||||
}
|
||||
case "cmp":
|
||||
z.Complete, err = dc.ReadBool()
|
||||
if err != nil {
|
||||
|
@ -492,9 +498,9 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {
|
|||
|
||||
// EncodeMsg implements msgp.Encodable
|
||||
func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
|
||||
// map header, size 16
|
||||
// map header, size 17
|
||||
// write "v"
|
||||
err = en.Append(0xde, 0x0, 0x10, 0xa1, 0x76)
|
||||
err = en.Append(0xde, 0x0, 0x11, 0xa1, 0x76)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
@ -553,6 +559,16 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
|
|||
err = msgp.WrapError(err, "RetryAttempts")
|
||||
return
|
||||
}
|
||||
// write "at"
|
||||
err = en.Append(0xa2, 0x61, 0x74)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
err = en.WriteInt(z.Attempts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Attempts")
|
||||
return
|
||||
}
|
||||
// write "cmp"
|
||||
err = en.Append(0xa3, 0x63, 0x6d, 0x70)
|
||||
if err != nil {
|
||||
|
@ -659,9 +675,9 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
|
|||
// MarshalMsg implements msgp.Marshaler
|
||||
func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
|
||||
o = msgp.Require(b, z.Msgsize())
|
||||
// map header, size 16
|
||||
// map header, size 17
|
||||
// string "v"
|
||||
o = append(o, 0xde, 0x0, 0x10, 0xa1, 0x76)
|
||||
o = append(o, 0xde, 0x0, 0x11, 0xa1, 0x76)
|
||||
o = msgp.AppendInt(o, z.Version)
|
||||
// string "jid"
|
||||
o = append(o, 0xa3, 0x6a, 0x69, 0x64)
|
||||
|
@ -678,6 +694,9 @@ func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
|
|||
// string "ra"
|
||||
o = append(o, 0xa2, 0x72, 0x61)
|
||||
o = msgp.AppendInt(o, z.RetryAttempts)
|
||||
// string "at"
|
||||
o = append(o, 0xa2, 0x61, 0x74)
|
||||
o = msgp.AppendInt(o, z.Attempts)
|
||||
// string "cmp"
|
||||
o = append(o, 0xa3, 0x63, 0x6d, 0x70)
|
||||
o = msgp.AppendBool(o, z.Complete)
|
||||
|
@ -765,6 +784,12 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
|||
err = msgp.WrapError(err, "RetryAttempts")
|
||||
return
|
||||
}
|
||||
case "at":
|
||||
z.Attempts, bts, err = msgp.ReadIntBytes(bts)
|
||||
if err != nil {
|
||||
err = msgp.WrapError(err, "Attempts")
|
||||
return
|
||||
}
|
||||
case "cmp":
|
||||
z.Complete, bts, err = msgp.ReadBoolBytes(bts)
|
||||
if err != nil {
|
||||
|
@ -839,6 +864,6 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
|
|||
|
||||
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
|
||||
func (z *batchJobInfo) Msgsize() (s int) {
|
||||
s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
|
||||
s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
|
||||
return
|
||||
}
|
||||
|
|
|
@@ -23,7 +23,7 @@ import (
"time"

"github.com/dustin/go-humanize"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v3/wildcard"
"gopkg.in/yaml.v3"
)
|
||||
|
|
|
@ -33,9 +33,8 @@ import (
|
|||
"github.com/minio/minio/internal/crypto"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/kms"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
"github.com/minio/pkg/v2/workers"
|
||||
"github.com/minio/pkg/v3/env"
|
||||
"github.com/minio/pkg/v3/workers"
|
||||
)
|
||||
|
||||
// keyrotate:
|
||||
|
@ -96,6 +95,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
|
|||
if e.Type == ssekms && spaces {
|
||||
return crypto.ErrInvalidEncryptionKeyID
|
||||
}
|
||||
|
||||
if e.Type == ssekms && GlobalKMS != nil {
|
||||
ctx := kms.Context{}
|
||||
if e.Context != "" {
|
||||
|
@ -114,7 +114,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
|
|||
e.kmsContext[k] = v
|
||||
}
|
||||
ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
|
||||
if _, err := GlobalKMS.GenerateKey(GlobalContext, e.Key, ctx); err != nil {
|
||||
if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
@ -357,7 +357,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
|||
retryAttempts := ri.RetryAttempts
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
results := make(chan ObjectInfo, 100)
|
||||
results := make(chan itemOrErr[ObjectInfo], 100)
|
||||
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
|
||||
Marker: lastObject,
|
||||
Filter: selectObj,
|
||||
|
@ -366,9 +366,14 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
|||
// Do not need to retry if we can't list objects on source.
|
||||
return err
|
||||
}
|
||||
|
||||
for result := range results {
|
||||
result := result
|
||||
failed := false
|
||||
for res := range results {
|
||||
if res.Err != nil {
|
||||
failed = true
|
||||
batchLogIf(ctx, res.Err)
|
||||
break
|
||||
}
|
||||
result := res.Item
|
||||
sseKMS := crypto.S3KMS.IsEncrypted(result.UserDefined)
|
||||
sseS3 := crypto.S3.IsEncrypted(result.UserDefined)
|
||||
if !sseKMS && !sseS3 { // neither sse-s3 nor sse-kms disallowed
|
||||
|
@ -378,21 +383,19 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
|||
go func() {
|
||||
defer wk.Give()
|
||||
for attempts := 1; attempts <= retryAttempts; attempts++ {
|
||||
attempts := attempts
|
||||
stopFn := globalBatchJobsMetrics.trace(batchJobMetricKeyRotation, job.ID, attempts)
|
||||
success := true
|
||||
if err := r.KeyRotate(ctx, api, result); err != nil {
|
||||
stopFn(result, err)
|
||||
logger.LogIf(ctx, err)
|
||||
batchLogIf(ctx, err)
|
||||
success = false
|
||||
} else {
|
||||
stopFn(result, nil)
|
||||
}
|
||||
ri.trackCurrentBucketObject(r.Bucket, result, success)
|
||||
ri.RetryAttempts = attempts
|
||||
ri.trackCurrentBucketObject(r.Bucket, result, success, attempts)
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
// persist in-memory state to disk after every 10secs.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
|
||||
if success {
|
||||
break
|
||||
}
|
||||
|
@ -408,14 +411,14 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
|
|||
}
|
||||
wk.Wait()
|
||||
|
||||
ri.Complete = ri.ObjectsFailed == 0
|
||||
ri.Failed = ri.ObjectsFailed > 0
|
||||
ri.Complete = !failed && ri.ObjectsFailed == 0
|
||||
ri.Failed = failed || ri.ObjectsFailed > 0
|
||||
globalBatchJobsMetrics.save(job.ID, ri)
|
||||
// persist in-memory state to disk.
|
||||
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
|
||||
|
||||
if err := r.Notify(ctx, ri); err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
|
||||
batchLogIf(ctx, fmt.Errorf("unable to notify %v", err))
|
||||
}
|
||||
|
||||
cancel()
|
||||
|
@ -476,8 +479,5 @@ func (r *BatchJobKeyRotateV1) Validate(ctx context.Context, job BatchJobRequest,
|
|||
}
|
||||
}
|
||||
|
||||
if err := r.Flags.Retry.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
return r.Flags.Retry.Validate()
|
||||
}
|
||||
|
|
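The key-rotation hunks above run each object through a bounded retry loop: every attempt is traced, the outcome is recorded together with the attempt number, and the loop stops on the first success. A minimal, runnable sketch of that loop shape; the delay and callback signatures are illustrative:

package main

import (
	"errors"
	"fmt"
	"time"
)

// rotateWithRetries mirrors the loop above: each attempt is traced via the
// track callback, and the loop exits as soon as one attempt succeeds.
func rotateWithRetries(retryAttempts int, rotate func() error,
	track func(success bool, attempt int)) {
	for attempt := 1; attempt <= retryAttempts; attempt++ {
		err := rotate()
		track(err == nil, attempt)
		if err == nil {
			break
		}
		time.Sleep(10 * time.Millisecond) // back off before the next attempt
	}
}

func main() {
	calls := 0
	rotateWithRetries(3, func() error {
		calls++
		if calls < 2 {
			return errors.New("transient KMS error")
		}
		return nil
	}, func(success bool, attempt int) {
		fmt.Printf("attempt %d success=%v\n", attempt, success)
	})
}
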
|
@ -26,15 +26,17 @@ import (
|
|||
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/ioutil"
|
||||
"github.com/minio/minio/internal/ringbuffer"
|
||||
)
|
||||
|
||||
// Calculates bitrot in chunks and writes the hash into the stream.
|
||||
type streamingBitrotWriter struct {
|
||||
iow io.WriteCloser
|
||||
closeWithErr func(err error) error
|
||||
closeWithErr func(err error)
|
||||
h hash.Hash
|
||||
shardSize int64
|
||||
canClose *sync.WaitGroup
|
||||
byteBuf []byte
|
||||
}
|
||||
|
||||
func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
|
||||
|
@ -62,7 +64,10 @@ func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
|
|||
}
|
||||
|
||||
func (b *streamingBitrotWriter) Close() error {
|
||||
// Close the underlying writer.
|
||||
// This will also flush the ring buffer if used.
|
||||
err := b.iow.Close()
|
||||
|
||||
// Wait for all data to be written before returning else it causes race conditions.
|
||||
// Race condition is because of io.PipeWriter implementation. i.e consider the following
|
||||
// sequent of operations:
|
||||
|
@ -73,29 +78,34 @@ func (b *streamingBitrotWriter) Close() error {
|
|||
if b.canClose != nil {
|
||||
b.canClose.Wait()
|
||||
}
|
||||
|
||||
// Recycle the buffer.
|
||||
if b.byteBuf != nil {
|
||||
globalBytePoolCap.Load().Put(b.byteBuf)
|
||||
b.byteBuf = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// newStreamingBitrotWriterBuffer returns streaming bitrot writer implementation.
|
||||
// The output is written to the supplied writer w.
|
||||
func newStreamingBitrotWriterBuffer(w io.Writer, algo BitrotAlgorithm, shardSize int64) io.Writer {
|
||||
return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) error {
|
||||
// Similar to CloseWithError on pipes we always return nil.
|
||||
return nil
|
||||
}}
|
||||
return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) {}}
|
||||
}
|
||||
|
||||
// Returns streaming bitrot writer implementation.
|
||||
func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
|
||||
r, w := io.Pipe()
|
||||
h := algo.New()
|
||||
buf := globalBytePoolCap.Load().Get()
|
||||
rb := ringbuffer.NewBuffer(buf[:cap(buf)]).SetBlocking(true)
|
||||
|
||||
bw := &streamingBitrotWriter{
|
||||
iow: ioutil.NewDeadlineWriter(w, globalDriveConfig.GetMaxTimeout()),
|
||||
closeWithErr: w.CloseWithError,
|
||||
iow: ioutil.NewDeadlineWriter(rb.WriteCloser(), globalDriveConfig.GetMaxTimeout()),
|
||||
closeWithErr: rb.CloseWithError,
|
||||
h: h,
|
||||
shardSize: shardSize,
|
||||
canClose: &sync.WaitGroup{},
|
||||
byteBuf: buf,
|
||||
}
|
||||
bw.canClose.Add(1)
|
||||
go func() {
|
||||
|
@ -106,7 +116,7 @@ func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath stri
|
|||
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
|
||||
totalFileSize = bitrotSumsTotalSize + length
|
||||
}
|
||||
r.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, r))
|
||||
rb.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, rb))
|
||||
}()
|
||||
return bw
|
||||
}
|
||||
|
|
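The bitrot-streaming hunks above swap the io.Pipe for a blocking ring buffer and recycle the shard buffer only after Close has waited for the background CreateFile goroutine (the canClose WaitGroup). The sketch below keeps io.Pipe as a stand-in transport and only demonstrates the close-then-wait-then-recycle ordering, so it is a simplification of the actual change:

package main

import (
	"fmt"
	"io"
	"sync"
)

// waitingWriter shows the canClose pattern above: Close flushes the underlying
// writer, then blocks until the background consumer has drained everything,
// and only then releases the reusable buffer.
type waitingWriter struct {
	iow      io.WriteCloser
	canClose *sync.WaitGroup
	buf      []byte
}

func (w *waitingWriter) Write(p []byte) (int, error) { return w.iow.Write(p) }

func (w *waitingWriter) Close() error {
	err := w.iow.Close()
	// Wait for the consumer goroutine; returning earlier would let the caller
	// recycle buffers the consumer may still be reading.
	w.canClose.Wait()
	w.buf = nil // recycle point: a real pool would take the buffer back here
	return err
}

func newWaitingWriter(consume func(io.Reader)) *waitingWriter {
	r, pw := io.Pipe()
	w := &waitingWriter{iow: pw, canClose: &sync.WaitGroup{}, buf: make([]byte, 32)}
	w.canClose.Add(1)
	go func() {
		defer w.canClose.Done()
		consume(r)
	}()
	return w
}

func main() {
	w := newWaitingWriter(func(r io.Reader) {
		n, _ := io.Copy(io.Discard, r)
		fmt.Println("consumed", n, "bytes")
	})
	w.Write([]byte("shard data"))
	w.Close() // the consumed line prints before Close returns, because Close waits
	fmt.Println("closed")
}
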
|
@@ -30,7 +30,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/grid"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)

// To abstract a node over network.

@@ -198,7 +198,7 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
if err != nil {
bootstrapTraceMsg(fmt.Sprintf("clnt.Verify: %v, endpoint: %s", err, clnt))
if !isNetworkError(err) {
logger.LogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String())
bootLogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String())
incorrectConfigs = append(incorrectConfigs, fmt.Errorf("%s has incorrect configuration: %w", clnt, err))
} else {
offlineEndpoints = append(offlineEndpoints, fmt.Errorf("%s is unreachable: %w", clnt, err))
|
|
|
@@ -30,7 +30,7 @@ import (
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
- "github.com/minio/pkg/v2/policy"
+ "github.com/minio/pkg/v3/policy"
)

const (
@@ -85,7 +85,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
- _, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
+ _, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{Name: kmsKey, AssociatedData: kmsContext})
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
writeErrorResponse(ctx, w, toAPIError(ctx, errKMSKeyNotFound), r.URL)
@@ -114,7 +114,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
- logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: &cfgStr,
@@ -203,7 +203,7 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
}

// Call site replication hook.
- logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: nil,

@@ -61,8 +61,8 @@ import (
"github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
- "github.com/minio/pkg/v2/policy"
- "github.com/minio/pkg/v2/sync/errgroup"
+ "github.com/minio/pkg/v3/policy"
+ "github.com/minio/pkg/v3/sync/errgroup"
)

const (
@@ -72,6 +72,8 @@ const (
xMinIOErrCodeHeader = "x-minio-error-code"
xMinIOErrDescHeader = "x-minio-error-desc"

+ postPolicyBucketTagging = "tagging"
)

// Check if there are buckets on server without corresponding entry in etcd backend and
@@ -98,7 +100,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// Get buckets in the DNS
dnsBuckets, err := globalDNSConfig.List()
if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrNotImplemented, dns.ErrDomainMissing) {
- logger.LogIf(GlobalContext, err)
+ dnsLogIf(GlobalContext, err)
return
}

@@ -160,13 +162,13 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
ctx := GlobalContext
for _, err := range g.Wait() {
if err != nil {
- logger.LogIf(ctx, err)
+ dnsLogIf(ctx, err)
return
}
}

for _, bucket := range bucketsInConflict.ToSlice() {
- logger.LogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
+ dnsLogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
}

var wg sync.WaitGroup
@@ -187,7 +189,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// We go to here, so we know the bucket no longer exists,
// but is registered in DNS to this server
if err := globalDNSConfig.Delete(bucket); err != nil {
- logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
+ dnsLogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
bucket, err))
}
}(bucket)
@@ -227,7 +229,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
// Generate response.
encodedSuccessResponse := encodeResponse(LocationResponse{})
// Get current region.
- region := globalSite.Region
+ region := globalSite.Region()
if region != globalMinioDefaultRegion {
encodedSuccessResponse = encodeResponse(LocationResponse{
Location: region,
@@ -790,7 +792,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req

// check if client is attempting to create more buckets, complain about it.
if currBuckets := globalBucketMetadataSys.Count(); currBuckets+1 > maxBuckets {
- logger.LogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets))
+ internalLogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets), logger.WarningKind)
}

opts := MakeBucketOptions{
@@ -871,7 +873,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)

// Call site replication hook
- logger.LogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts))
+ replLogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts))

// Make sure to add Location information here only for bucket
w.Header().Set(xhttp.Location, pathJoin(SlashSeparator, bucket))
@@ -1218,11 +1220,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}

- if crypto.SSEC.IsRequested(r.Header) && isReplicationEnabled(ctx, bucket) {
- writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParametersSSEC), r.URL)
- return
- }

var (
reader io.Reader
keyID string
@@ -1420,6 +1417,19 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}

+ if formValues.Get(postPolicyBucketTagging) != "" {
+ tags, err := tags.ParseObjectXML(strings.NewReader(formValues.Get(postPolicyBucketTagging)))
+ if err != nil {
+ writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
+ return
+ }
+ tagsStr := tags.String()
+ opts.UserDefined[xhttp.AmzObjectTagging] = tagsStr
+ } else {
+ // avoid user set an invalid tag using `X-Amz-Tagging`
+ delete(opts.UserDefined, xhttp.AmzObjectTagging)
+ }

objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@@ -1666,7 +1676,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
- case rcfg.HasActiveRules("", true):
+ case rcfg != nil && rcfg.HasActiveRules("", true):
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
@@ -1698,7 +1708,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.

if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil {
- logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err))
+ dnsLogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -1708,7 +1718,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
globalReplicationPool.deleteResyncMetadata(ctx, bucket)

// Call site replication hook.
- logger.LogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete))
+ replLogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete))

// Write success response.
writeSuccessNoContent(w)
@@ -1781,7 +1791,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
- logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeObjectLockConfig,
Bucket: bucket,
ObjectLockConfig: &cfgStr,
@@ -1885,7 +1895,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
- logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
Tags: &cfgStr,
@@ -1961,7 +1971,7 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
return
}

- logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
UpdatedAt: updatedAt,

@@ -28,7 +28,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
- "github.com/minio/pkg/v2/policy"
+ "github.com/minio/pkg/v3/policy"
)

const (
@@ -64,7 +64,8 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
}

// Check if bucket exists.
- if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
+ rcfg, err := globalBucketObjectLockSys.Get(bucket)
+ if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@@ -76,7 +77,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
}

// Validate the received bucket policy document
- if err = bucketLifecycle.Validate(); err != nil {
+ if err = bucketLifecycle.Validate(rcfg); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

@@ -40,7 +40,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/s3select"
- xnet "github.com/minio/pkg/v2/net"
+ xnet "github.com/minio/pkg/v3/net"
"github.com/zeebo/xxh3"
)

@@ -277,6 +277,10 @@ func (es *expiryState) getWorkerCh(h uint64) chan<- expiryOp {
}

func (es *expiryState) ResizeWorkers(n int) {
+ if n == 0 {
+ n = 100
+ }
+
// Lock to avoid multiple resizes to happen at the same time.
es.mu.Lock()
defer es.mu.Unlock()
@@ -336,7 +340,7 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
case newerNoncurrentTask:
deleteObjectVersions(es.ctx, es.objAPI, v.bucket, v.versions, v.event)
case jentry:
- logger.LogIf(es.ctx, deleteObjectFromRemoteTier(es.ctx, v.ObjName, v.VersionID, v.TierName))
+ transitionLogIf(es.ctx, deleteObjectFromRemoteTier(es.ctx, v.ObjName, v.VersionID, v.TierName))
case freeVersionTask:
oi := v.ObjectInfo
traceFn := globalLifecycleSys.trace(oi)
@@ -355,7 +359,7 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
// Remove the remote object
err := deleteObjectFromRemoteTier(es.ctx, oi.TransitionedObject.Name, oi.TransitionedObject.VersionID, oi.TransitionedObject.Tier)
if ignoreNotFoundErr(err) != nil {
- logger.LogIf(es.ctx, err)
+ transitionLogIf(es.ctx, err)
return
}

@@ -368,10 +372,10 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
auditLogLifecycle(es.ctx, oi, ILMFreeVersionDelete, nil, traceFn)
}
if ignoreNotFoundErr(err) != nil {
- logger.LogIf(es.ctx, err)
+ transitionLogIf(es.ctx, err)
}
default:
- logger.LogIf(es.ctx, fmt.Errorf("Invalid work type - %v", v))
+ bugLogIf(es.ctx, fmt.Errorf("Invalid work type - %v", v))
}
}
}
@@ -486,7 +490,7 @@ func (t *transitionState) worker(objectAPI ObjectLayer) {
if err := transitionObject(t.ctx, objectAPI, task.objInfo, newLifecycleAuditEvent(task.src, task.event)); err != nil {
if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !xnet.IsNetworkOrHostDown(err, false) {
if !strings.Contains(err.Error(), "use of closed network connection") {
- logger.LogIf(t.ctx, fmt.Errorf("Transition to %s failed for %s/%s version:%s with %w",
+ transitionLogIf(t.ctx, fmt.Errorf("Transition to %s failed for %s/%s version:%s with %w",
task.event.StorageClass, task.objInfo.Bucket, task.objInfo.Name, task.objInfo.VersionID, err))
}
}
@@ -538,6 +542,10 @@ func (t *transitionState) UpdateWorkers(n int) {
}

func (t *transitionState) updateWorkers(n int) {
+ if n == 0 {
+ n = 100
+ }
+
for t.numWorkers < n {
go t.worker(t.objAPI)
t.numWorkers++
@@ -614,7 +622,7 @@ func expireTransitionedObject(ctx context.Context, objectAPI ObjectLayer, oi *Ob
// remote object
opts.SkipFreeVersion = true
} else {
- logger.LogIf(ctx, err)
+ transitionLogIf(ctx, err)
}

// Now, delete object from hot-tier namespace
@@ -663,11 +671,12 @@ func genTransitionObjName(bucket string) (string, error) {
// is moved to the transition tier. Note that in the case of encrypted objects, entire encrypted stream is moved
// to the transition tier without decrypting or re-encrypting.
func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo, lae lcAuditEvent) (err error) {
+ timeILM := globalScannerMetrics.timeILM(lae.Action)
defer func() {
+ if err != nil {
+ return
+ }
- globalScannerMetrics.timeILM(lae.Action)(1)
+ timeILM(1)
}()

opts := ObjectOptions{
@@ -721,9 +730,9 @@ func auditTierActions(ctx context.Context, tier string, bytes int64) func(err er

// getTransitionedObjectReader returns a reader from the transitioned tier.
func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, oi ObjectInfo, opts ObjectOptions) (gr *GetObjectReader, err error) {
- tgtClient, err := globalTierConfigMgr.getDriver(oi.TransitionedObject.Tier)
+ tgtClient, err := globalTierConfigMgr.getDriver(ctx, oi.TransitionedObject.Tier)
if err != nil {
- return nil, fmt.Errorf("transition storage class not configured")
+ return nil, fmt.Errorf("transition storage class not configured: %w", err)
}

fn, off, length, err := NewGetObjectReader(rs, oi, opts)
@@ -879,7 +888,7 @@ func postRestoreOpts(ctx context.Context, r *http.Request, bucket, object string
if vid != "" && vid != nullVersionID {
_, err := uuid.Parse(vid)
if err != nil {
- logger.LogIf(ctx, err)
+ s3LogIf(ctx, err)
return opts, InvalidVersionID{
Bucket: bucket,
Object: object,

@@ -26,7 +26,7 @@ import (
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"

- "github.com/minio/pkg/v2/policy"
+ "github.com/minio/pkg/v3/policy"
)

// Validate all the ListObjects query arguments, returns an APIErrorCode
@@ -124,7 +124,7 @@ func (api objectAPIHandlers) listObjectVersionsHandler(w http.ResponseWriter, r
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
- response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo, checkObjMeta)
+ response := generateListVersionsResponse(ctx, bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo, checkObjMeta)

// Write success response.
writeSuccessResponseXML(w, encodeResponseList(response))
@@ -219,7 +219,7 @@ func (api objectAPIHandlers) listObjectsV2Handler(ctx context.Context, w http.Re
return
}

- response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
+ response := generateListObjectsV2Response(ctx, bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, checkObjMeta)

@@ -318,7 +318,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
return
}

- response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
+ response := generateListObjectsV1Response(ctx, bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)

// Write success response.
writeSuccessResponseXML(w, encodeResponseList(response))

@@ -37,8 +37,8 @@ import (
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
- "github.com/minio/pkg/v2/policy"
- "github.com/minio/pkg/v2/sync/errgroup"
+ "github.com/minio/pkg/v3/policy"
+ "github.com/minio/pkg/v3/sync/errgroup"
)

// BucketMetadataSys captures all bucket metadata for a given cluster.
@@ -46,6 +46,7 @@ type BucketMetadataSys struct {
objAPI ObjectLayer

sync.RWMutex
+ initialized bool
metadataMap map[string]BucketMetadata
}

@@ -433,6 +434,8 @@ func (sys *BucketMetadataSys) GetConfigFromDisk(ctx context.Context, bucket stri
return loadBucketMetadata(ctx, objAPI, bucket)
}

+ var errBucketMetadataNotInitialized = errors.New("bucket metadata not initialized yet")
+
// GetConfig returns a specific configuration from the bucket metadata.
// The returned object may not be modified.
// reloaded will be true if metadata refreshed from disk
@@ -454,6 +457,10 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
}
meta, err = loadBucketMetadata(ctx, objAPI, bucket)
if err != nil {
+ if !sys.Initialized() {
+ // bucket metadata not yet initialized
+ return newBucketMetadata(bucket), reloaded, errBucketMetadataNotInitialized
+ }
return meta, reloaded, err
}
sys.Lock()
@@ -498,9 +505,10 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
}

errs := g.Wait()
- for _, err := range errs {
+ for index, err := range errs {
if err != nil {
- logger.LogIf(ctx, err)
+ internalLogOnceIf(ctx, fmt.Errorf("Unable to load bucket metadata, will be retried: %w", err),
+ "load-bucket-metadata-"+buckets[index].Name, logger.WarningKind)
}
}

@@ -542,7 +550,7 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
case <-t.C:
buckets, err := sys.objAPI.ListBuckets(ctx, BucketOptions{})
if err != nil {
- logger.LogIf(ctx, err)
+ internalLogIf(ctx, err, logger.WarningKind)
break
}

@@ -560,7 +568,7 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa

meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[i].Name)
if err != nil {
- logger.LogIf(ctx, err)
+ internalLogIf(ctx, err, logger.WarningKind)
wait() // wait to proceed to next entry.
continue
}
@@ -583,6 +591,14 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
}
}

+ // Initialized indicates if bucket metadata sys is initialized atleast once.
+ func (sys *BucketMetadataSys) Initialized() bool {
+ sys.RLock()
+ defer sys.RUnlock()
+
+ return sys.initialized
+ }
+
// Loads bucket metadata for all buckets into BucketMetadataSys.
func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
count := 100 // load 100 bucket metadata at a time.
@@ -596,6 +612,10 @@ func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
buckets = buckets[count:]
}

+ sys.Lock()
+ sys.initialized = true
+ sys.Unlock()
+
if globalIsDistErasure {
go sys.refreshBucketsMetadataLoop(ctx, failedBuckets)
}

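The BucketMetadataSys hunks above add an initialized flag that init() sets under the write lock and Initialized() reads under an RLock, so GetConfig can distinguish "metadata not loaded yet" from a genuine load failure. The guarded-flag pattern in isolation looks roughly like this (a simplified sketch; the field and method names follow the diff, everything else is illustrative):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

var errNotInitialized = errors.New("bucket metadata not initialized yet")

type metadataSys struct {
	sync.RWMutex
	initialized bool
	configs     map[string]string
}

// Initialized reports whether the initial load has completed at least once.
func (s *metadataSys) Initialized() bool {
	s.RLock()
	defer s.RUnlock()
	return s.initialized
}

// init loads everything once, then flips the flag under the write lock.
func (s *metadataSys) init(all map[string]string) {
	s.Lock()
	s.configs = all
	s.initialized = true
	s.Unlock()
}

// GetConfig mirrors the new error handling: a miss before the first load is
// reported as "not initialized" rather than as a missing-config error.
func (s *metadataSys) GetConfig(bucket string) (string, error) {
	s.RLock()
	cfg, ok := s.configs[bucket]
	s.RUnlock()
	if !ok {
		if !s.Initialized() {
			return "", errNotInitialized
		}
		return "", fmt.Errorf("no metadata for %q", bucket)
	}
	return cfg, nil
}

func main() {
	s := &metadataSys{}
	if _, err := s.GetConfig("photos"); err != nil {
		fmt.Println(err) // bucket metadata not initialized yet
	}
	s.init(map[string]string{"photos": "versioning=on"})
	cfg, _ := s.GetConfig("photos")
	fmt.Println(cfg)
}
```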
@@ -41,7 +41,7 @@ import (
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
- "github.com/minio/pkg/v2/policy"
+ "github.com/minio/pkg/v3/policy"
"github.com/minio/sio"
)

@@ -145,7 +145,7 @@ func (b *BucketMetadata) SetCreatedAt(createdAt time.Time) {
// If an error is returned the returned metadata will be default initialized.
func readBucketMetadata(ctx context.Context, api ObjectLayer, name string) (BucketMetadata, error) {
if name == "" {
- logger.LogIf(ctx, errors.New("bucket name cannot be empty"))
+ internalLogIf(ctx, errors.New("bucket name cannot be empty"), logger.WarningKind)
return BucketMetadata{}, errInvalidArgument
}
b := newBucketMetadata(name)
@@ -400,7 +400,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
for legacyFile := range configs {
configFile := path.Join(bucketMetaPrefix, b.Name, legacyFile)
if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) {
- logger.LogIf(ctx, err)
+ internalLogIf(ctx, err, logger.WarningKind)
}
}

@@ -490,7 +490,7 @@ func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kms
}

metadata := make(map[string]string)
- key, err := GlobalKMS.GenerateKey(ctx, "", kmsContext)
+ key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{AssociatedData: kmsContext})
if err != nil {
return
}
@@ -519,7 +519,11 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string,
if err != nil {
return nil, err
}
- extKey, err := GlobalKMS.DecryptKey(keyID, kmsKey, kmsContext)
+ extKey, err := GlobalKMS.Decrypt(context.TODO(), &kms.DecryptRequest{
+ Name: keyID,
+ Ciphertext: kmsKey,
+ AssociatedData: kmsContext,
+ })
if err != nil {
return nil, err
}

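Several hunks in this file and in the encryption handlers earlier move the KMS calls from positional arguments (GenerateKey(ctx, keyID, kmsContext), DecryptKey(keyID, ...)) to request structs (kms.GenerateKeyRequest, kms.DecryptRequest). The request-struct style, reduced to a toy interface (the types and method set below are illustrative stand-ins, not MinIO's internal kms package):

```go
package main

import (
	"context"
	"fmt"
)

// GenerateKeyRequest mirrors the request-struct style of the new calls:
// optional fields (Name, AssociatedData) are named at the call site, so the
// signature does not have to change when parameters are added later.
type GenerateKeyRequest struct {
	Name           string
	AssociatedData map[string]string
}

type DecryptRequest struct {
	Name           string
	Ciphertext     []byte
	AssociatedData map[string]string
}

type KMS interface {
	GenerateKey(ctx context.Context, req *GenerateKeyRequest) ([]byte, error)
	Decrypt(ctx context.Context, req *DecryptRequest) ([]byte, error)
}

// fakeKMS is a trivial in-memory stand-in used only to make the example run.
type fakeKMS struct{}

func (fakeKMS) GenerateKey(_ context.Context, req *GenerateKeyRequest) ([]byte, error) {
	return []byte("plaintext-key-for-" + req.Name), nil
}

func (fakeKMS) Decrypt(_ context.Context, req *DecryptRequest) ([]byte, error) {
	return req.Ciphertext, nil
}

func main() {
	var k KMS = fakeKMS{}
	key, _ := k.GenerateKey(context.TODO(), &GenerateKeyRequest{
		Name:           "bucket-key",
		AssociatedData: map[string]string{"bucket": "photos"},
	})
	fmt.Printf("%s\n", key)
}
```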
@@ -26,7 +26,7 @@ import (
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
- "github.com/minio/pkg/v2/policy"
+ "github.com/minio/pkg/v3/policy"
)

const (
@@ -66,8 +66,9 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
- config.SetRegion(globalSite.Region)
- if err = config.Validate(globalSite.Region, globalEventNotifier.targetList); err != nil {
+ region := globalSite.Region()
+ config.SetRegion(region)
+ if err = config.Validate(region, globalEventNotifier.targetList); err != nil {
arnErr, ok := err.(*event.ErrARNNotFound)
if ok {
for i, queue := range config.QueueList {
@@ -134,7 +135,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
return
}

- config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region, globalEventNotifier.targetList)
+ config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region(), globalEventNotifier.targetList)
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
if event.IsEventError(err) {

@@ -28,7 +28,7 @@ import (
"github.com/minio/minio/internal/bucket/replication"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
- "github.com/minio/pkg/v2/policy"
+ "github.com/minio/pkg/v3/policy"
)

// BucketObjectLockSys - map of bucket and retention configuration.
@@ -44,7 +44,6 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention,
if errors.Is(err, errInvalidArgument) {
return r, err
}
- logger.CriticalIf(context.Background(), err)
return r, err
}
return config.ToRetention(), nil
@@ -66,7 +65,7 @@ func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locke
if ret.Mode.Valid() && (ret.Mode == objectlock.RetCompliance || ret.Mode == objectlock.RetGovernance) {
t, err := objectlock.UTCNowNTP()
if err != nil {
- logger.LogIf(ctx, err)
+ internalLogIf(ctx, err, logger.WarningKind)
return true
}
if ret.RetainUntilDate.After(t) {
@@ -114,7 +113,7 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke
// duration of the retention period.
t, err := objectlock.UTCNowNTP()
if err != nil {
- logger.LogIf(ctx, err)
+ internalLogIf(ctx, err, logger.WarningKind)
return ObjectLocked{}
}

@@ -140,7 +139,7 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke
if !byPassSet {
t, err := objectlock.UTCNowNTP()
if err != nil {
- logger.LogIf(ctx, err)
+ internalLogIf(ctx, err, logger.WarningKind)
return ObjectLocked{}
}

@@ -170,7 +169,7 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, oi Objec

t, err := objectlock.UTCNowNTP()
if err != nil {
- logger.LogIf(ctx, err)
+ internalLogIf(ctx, err, logger.WarningKind)
return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
}

@@ -277,7 +276,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob
r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
t, err := objectlock.UTCNowNTP()
if err != nil {
- logger.LogIf(ctx, err)
+ internalLogIf(ctx, err, logger.WarningKind)
return mode, retainDate, legalHold, ErrObjectLocked
}
if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) {
@@ -324,7 +323,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob

t, err := objectlock.UTCNowNTP()
if err != nil {
- logger.LogIf(ctx, err)
+ internalLogIf(ctx, err, logger.WarningKind)
return mode, retainDate, legalHold, ErrObjectLocked
}

@@ -27,7 +27,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
- "github.com/minio/pkg/v2/policy"
+ "github.com/minio/pkg/v3/policy"
)

const (
@@ -113,7 +113,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
}

// Call site replication hook.
- logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypePolicy,
Bucket: bucket,
Policy: bucketPolicyBytes,
@@ -157,7 +157,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
}

// Call site replication hook.
- logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
+ replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypePolicy,
Bucket: bucket,
UpdatedAt: updatedAt,

@@ -29,8 +29,8 @@ import (
"testing"

"github.com/minio/minio/internal/auth"
- "github.com/minio/pkg/v2/policy"
- "github.com/minio/pkg/v2/policy/condition"
+ "github.com/minio/pkg/v3/policy"
+ "github.com/minio/pkg/v3/policy/condition"
)

func getAnonReadOnlyBucketPolicy(bucketName string) *policy.BucketPolicy {

@@ -32,7 +32,7 @@ import (
"github.com/minio/minio/internal/handlers"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
- "github.com/minio/pkg/v2/policy"
+ "github.com/minio/pkg/v3/policy"
)

// PolicySys - policy subsystem.
@@ -53,7 +53,7 @@ func (sys *PolicySys) IsAllowed(args policy.BucketPolicyArgs) bool {

// Log unhandled errors.
if _, ok := err.(BucketPolicyNotFound); !ok {
- logger.LogIf(GlobalContext, err)
+ internalLogIf(GlobalContext, err, logger.WarningKind)
}

// As policy is not available for given bucket name, returns IsOwner i.e.

@@ -49,8 +49,8 @@ var bucketStorageCache = cachevalue.New[DataUsageInfo]()
func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
bucketStorageCache.InitOnce(10*time.Second,
cachevalue.Opts{ReturnLastGood: true, NoWait: true},
- func() (DataUsageInfo, error) {
- ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
+ func(ctx context.Context) (DataUsageInfo, error) {
+ ctx, done := context.WithTimeout(ctx, 2*time.Second)
defer done()

return loadDataUsageFromBackend(ctx, objAPI)
@@ -59,14 +59,14 @@ func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
}

// GetBucketUsageInfo return bucket usage info for a given bucket
- func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, error) {
- dui, err := bucketStorageCache.Get()
+ func (sys *BucketQuotaSys) GetBucketUsageInfo(ctx context.Context, bucket string) (BucketUsageInfo, error) {
+ dui, err := bucketStorageCache.GetWithCtx(ctx)
timedout := OperationTimedOut{}
if err != nil && !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &timedout) {
if len(dui.BucketsUsage) > 0 {
- logger.LogOnceIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err), "bucket-usage-cache-"+bucket)
+ internalLogOnceIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err), "bucket-usage-cache-"+bucket)
} else {
- logger.LogOnceIf(GlobalContext, errors.New("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced"), "bucket-usage-empty-"+bucket)
+ internalLogOnceIf(GlobalContext, errors.New("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced"), "bucket-usage-empty-"+bucket)
}
}

@@ -87,7 +87,7 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
}
if !quotaCfg.IsValid() {
if quotaCfg.Type == "fifo" {
- logger.LogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"))
+ internalLogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"), logger.WarningKind)
return quotaCfg, fmt.Errorf("invalid quota type 'fifo'")
}
return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)
@@ -118,7 +118,7 @@ func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string,
return BucketQuotaExceeded{Bucket: bucket}
}

- bui, err := sys.GetBucketUsageInfo(bucket)
+ bui, err := sys.GetBucketUsageInfo(ctx, bucket)
if err != nil {
return err
}

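The bucket-quota hunks thread the caller's context through the usage cache: the loader now receives a ctx, and GetBucketUsageInfo calls GetWithCtx(ctx), so a slow backend read is bounded by the request's deadline instead of a background context. A stripped-down cache with the same contract, as a hedged illustration only (this is a simplified stand-in, not the internal cachevalue package):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// cached keeps one value fresh for ttl and reloads it with the caller's ctx,
// so the caller's deadline bounds the reload (as GetWithCtx does above).
type cached[T any] struct {
	mu      sync.Mutex
	ttl     time.Duration
	load    func(ctx context.Context) (T, error)
	value   T
	fetched time.Time
}

func newCached[T any](ttl time.Duration, load func(ctx context.Context) (T, error)) *cached[T] {
	return &cached[T]{ttl: ttl, load: load}
}

func (c *cached[T]) GetWithCtx(ctx context.Context) (T, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if time.Since(c.fetched) < c.ttl {
		return c.value, nil
	}
	v, err := c.load(ctx)
	if err != nil {
		// Return the last good value alongside the error, mirroring the
		// ReturnLastGood behaviour the quota check relies on.
		return c.value, err
	}
	c.value, c.fetched = v, time.Now()
	return v, nil
}

func main() {
	usage := newCached(10*time.Second, func(ctx context.Context) (int64, error) {
		ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
		defer cancel()
		_ = ctx // a real loader would query the backend with this ctx
		return 42, nil
	})
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	v, err := usage.GetWithCtx(ctx)
	fmt.Println(v, err)
}
```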
@@ -34,7 +34,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
- "github.com/minio/pkg/v2/policy"
+ "github.com/minio/pkg/v3/policy"
)

// PutBucketReplicationConfigHandler - PUT Bucket replication configuration.

@ -51,6 +51,8 @@ import (
|
|||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/tinylib/msgp/msgp"
|
||||
"github.com/zeebo/xxh3"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -76,21 +78,13 @@ const (
|
|||
ReplicationWorkerMultiplier = 1.5
|
||||
)
|
||||
|
||||
func isReplicationEnabled(ctx context.Context, bucketName string) bool {
|
||||
rc, _ := getReplicationConfig(ctx, bucketName)
|
||||
return rc != nil
|
||||
}
|
||||
|
||||
// gets replication config associated to a given bucket name.
|
||||
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) {
|
||||
rCfg, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName)
|
||||
if err != nil {
|
||||
if errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucketName}) || errors.Is(err, errInvalidArgument) {
|
||||
return rCfg, err
|
||||
}
|
||||
logger.CriticalIf(ctx, err)
|
||||
if err != nil && !errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucketName}) {
|
||||
return rCfg, err
|
||||
}
|
||||
return rCfg, err
|
||||
return rCfg, nil
|
||||
}
|
||||
|
||||
// validateReplicationDestination returns error if replication destination bucket missing or not configured
|
||||
|
@ -264,10 +258,16 @@ func mustReplicate(ctx context.Context, bucket, object string, mopts mustReplica
|
|||
if mopts.replicationRequest { // incoming replication request on target cluster
|
||||
return
|
||||
}
|
||||
|
||||
cfg, err := getReplicationConfig(ctx, bucket)
|
||||
if err != nil {
|
||||
replLogOnceIf(ctx, err, bucket)
|
||||
return
|
||||
}
|
||||
if cfg == nil {
|
||||
return
|
||||
}
|
||||
|
||||
opts := replication.ObjectOpts{
|
||||
Name: object,
|
||||
SSEC: crypto.SSEC.IsEncrypted(mopts.meta),
|
||||
|
@ -315,6 +315,7 @@ var standardHeaders = []string{
|
|||
func hasReplicationRules(ctx context.Context, bucket string, objects []ObjectToDelete) bool {
|
||||
c, err := getReplicationConfig(ctx, bucket)
|
||||
if err != nil || c == nil {
|
||||
replLogOnceIf(ctx, err, bucket)
|
||||
return false
|
||||
}
|
||||
for _, obj := range objects {
|
||||
|
@ -334,6 +335,7 @@ func isStandardHeader(matchHeaderKey string) bool {
|
|||
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, delOpts ObjectOptions, gerr error) (dsc ReplicateDecision) {
|
||||
rcfg, err := getReplicationConfig(ctx, bucket)
|
||||
if err != nil || rcfg == nil {
|
||||
replLogOnceIf(ctx, err, bucket)
|
||||
return
|
||||
}
|
||||
// If incoming request is a replication request, it does not need to be re-replicated.
|
||||
|
@ -426,7 +428,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
|
|||
|
||||
rcfg, err := getReplicationConfig(ctx, bucket)
|
||||
if err != nil || rcfg == nil {
|
||||
logger.LogOnceIf(ctx, fmt.Errorf("unable to obtain replication config for bucket: %s: err: %s", bucket, err), bucket)
|
||||
replLogOnceIf(ctx, fmt.Errorf("unable to obtain replication config for bucket: %s: err: %s", bucket, err), bucket)
|
||||
sendEvent(eventArgs{
|
||||
BucketName: bucket,
|
||||
Object: ObjectInfo{
|
||||
|
@ -443,7 +445,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
|
|||
}
|
||||
dsc, err := parseReplicateDecision(ctx, bucket, dobj.ReplicationState.ReplicateDecisionStr)
|
||||
if err != nil {
|
||||
logger.LogOnceIf(ctx, fmt.Errorf("unable to parse replication decision parameters for bucket: %s, err: %s, decision: %s",
|
||||
replLogOnceIf(ctx, fmt.Errorf("unable to parse replication decision parameters for bucket: %s, err: %s, decision: %s",
|
||||
bucket, err, dobj.ReplicationState.ReplicateDecisionStr), dobj.ReplicationState.ReplicateDecisionStr)
|
||||
sendEvent(eventArgs{
|
||||
BucketName: bucket,
|
||||
|
@ -497,7 +499,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
|
|||
tgtClnt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtEntry.Arn)
|
||||
if tgtClnt == nil {
|
||||
// Skip stale targets if any and log them to be missing at least once.
|
||||
logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtEntry.Arn), tgtEntry.Arn)
|
||||
replLogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtEntry.Arn), tgtEntry.Arn)
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectReplicationNotTracked,
|
||||
BucketName: bucket,
|
||||
|
@ -609,7 +611,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
|
|||
return
|
||||
}
|
||||
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
|
||||
logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN)
|
||||
replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN)
|
||||
sendEvent(eventArgs{
|
||||
BucketName: dobj.Bucket,
|
||||
Object: ObjectInfo{
|
||||
|
@ -684,7 +686,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
|
|||
} else {
|
||||
rinfo.VersionPurgeStatus = Failed
|
||||
}
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to replicate delete marker to %s: %s/%s(%s): %w", tgt.EndpointURL(), tgt.Bucket, dobj.ObjectName, versionID, rmErr))
|
||||
replLogIf(ctx, fmt.Errorf("unable to replicate delete marker to %s: %s/%s(%s): %w", tgt.EndpointURL(), tgt.Bucket, dobj.ObjectName, versionID, rmErr))
|
||||
if rmErr != nil && minio.IsNetworkOrHostDown(rmErr, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
|
||||
globalBucketTargetSys.markOffline(tgt.EndpointURL())
|
||||
}
|
||||
|
@ -764,13 +766,20 @@ func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
|
|||
func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, err error) {
|
||||
meta := make(map[string]string)
|
||||
for k, v := range objInfo.UserDefined {
|
||||
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
|
||||
continue
|
||||
// In case of SSE-C objects copy the allowed internal headers as well
|
||||
if !crypto.SSEC.IsEncrypted(objInfo.UserDefined) || !slices.Contains(maps.Keys(validSSEReplicationHeaders), k) {
|
||||
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
|
||||
continue
|
||||
}
|
||||
if isStandardHeader(k) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
if isStandardHeader(k) {
|
||||
continue
|
||||
if slices.Contains(maps.Keys(validSSEReplicationHeaders), k) {
|
||||
meta[validSSEReplicationHeaders[k]] = v
|
||||
} else {
|
||||
meta[k] = v
|
||||
}
|
||||
meta[k] = v
|
||||
}
|
||||
|
||||
if sc == "" && (objInfo.StorageClass == storageclass.STANDARD || objInfo.StorageClass == storageclass.RRS) {
|
||||
|
@ -989,8 +998,8 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
|
|||
object := ri.Name
|
||||
|
||||
cfg, err := getReplicationConfig(ctx, bucket)
|
||||
if err != nil {
|
||||
logger.LogOnceIf(ctx, err, "get-replication-config-"+bucket)
|
||||
if err != nil || cfg == nil {
|
||||
replLogOnceIf(ctx, err, "get-replication-config-"+bucket)
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectReplicationNotTracked,
|
||||
BucketName: bucket,
|
||||
|
@ -1029,7 +1038,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
|
|||
for _, tgtArn := range tgtArns {
|
||||
tgt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtArn)
|
||||
if tgt == nil {
|
||||
logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtArn), tgtArn)
|
||||
replLogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtArn), tgtArn)
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectReplicationNotTracked,
|
||||
BucketName: bucket,
|
||||
|
@ -1151,7 +1160,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
|
|||
}
|
||||
|
||||
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
|
||||
logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline"+tgt.ARN)
|
||||
replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline"+tgt.ARN)
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectReplicationNotTracked,
|
||||
BucketName: bucket,
|
||||
|
@ -1166,9 +1175,10 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
|
|||
versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)
|
||||
|
||||
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, ObjectOptions{
|
||||
VersionID: ri.VersionID,
|
||||
Versioned: versioned,
|
||||
VersionSuspended: versionSuspended,
|
||||
VersionID: ri.VersionID,
|
||||
Versioned: versioned,
|
||||
VersionSuspended: versionSuspended,
|
||||
ReplicationRequest: true,
|
||||
})
|
||||
if err != nil {
|
||||
if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
|
||||
|
@ -1180,7 +1190,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
|
|||
UserAgent: "Internal: [Replication]",
|
||||
Host: globalLocalNodeName,
|
||||
})
|
||||
logger.LogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID)
|
||||
replLogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -1193,7 +1203,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
|
|||
|
||||
size, err := objInfo.GetActualSize()
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
replLogIf(ctx, err)
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectReplicationNotTracked,
|
||||
BucketName: bucket,
|
||||
|
@ -1205,7 +1215,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
|
|||
}
|
||||
|
||||
if tgt.Bucket == "" {
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to replicate object %s(%s), bucket is empty for target %s", objInfo.Name, objInfo.VersionID, tgt.EndpointURL()))
|
||||
replLogIf(ctx, fmt.Errorf("unable to replicate object %s(%s), bucket is empty for target %s", objInfo.Name, objInfo.VersionID, tgt.EndpointURL()))
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectReplicationNotTracked,
|
||||
BucketName: bucket,
|
||||
|
@ -1231,7 +1241,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
|
|||
|
||||
putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err))
|
||||
replLogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err))
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectReplicationNotTracked,
|
||||
BucketName: bucket,
|
||||
|
@ -1262,23 +1272,19 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
|
|||
}
|
||||
r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
|
||||
if objInfo.isMultipart() {
|
||||
if rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object,
|
||||
r, objInfo, putOpts); rinfo.Err != nil {
|
||||
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
|
||||
rinfo.ReplicationStatus = replication.Failed
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL()))
|
||||
}
|
||||
}
|
||||
rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts)
|
||||
} else {
|
||||
if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil {
|
||||
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
|
||||
rinfo.ReplicationStatus = replication.Failed
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL()))
|
||||
}
|
||||
}
|
||||
_, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts)
|
||||
}
|
||||
if rinfo.Err != nil && minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
|
||||
globalBucketTargetSys.markOffline(tgt.EndpointURL())
|
||||
if rinfo.Err != nil {
|
||||
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
|
||||
rinfo.ReplicationStatus = replication.Failed
|
||||
replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): to (target: %s): %w",
|
||||
bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
|
||||
}
|
||||
if minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
|
||||
globalBucketTargetSys.markOffline(tgt.EndpointURL())
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
@ -1308,7 +1314,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
|
|||
}
|
||||
|
||||
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
|
||||
logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline-heal"+tgt.ARN)
|
||||
replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline-heal"+tgt.ARN)
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectReplicationNotTracked,
|
||||
BucketName: bucket,
|
||||
|
@ -1322,11 +1328,13 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
|
|||
versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
|
||||
versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)
|
||||
|
||||
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, ObjectOptions{
|
||||
VersionID: ri.VersionID,
|
||||
Versioned: versioned,
|
||||
VersionSuspended: versionSuspended,
|
||||
})
|
||||
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{},
|
||||
ObjectOptions{
|
||||
VersionID: ri.VersionID,
|
||||
Versioned: versioned,
|
||||
VersionSuspended: versionSuspended,
|
||||
ReplicationRequest: true,
|
||||
})
|
||||
if err != nil {
|
||||
if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
|
||||
objInfo := ri.ToObjectInfo()
|
||||
|
@ -1337,13 +1345,14 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
|
|||
UserAgent: "Internal: [Replication]",
|
||||
Host: globalLocalNodeName,
|
||||
})
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err))
|
||||
replLogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err))
|
||||
}
|
||||
return
|
||||
}
|
||||
defer gr.Close()
|
||||
|
||||
objInfo := gr.ObjInfo
|
||||
|
||||
// make sure we have the latest metadata for metrics calculation
|
||||
rinfo.PrevReplicationStatus = objInfo.TargetReplicationStatus(tgt.ARN)
|
||||
|
||||
|
@ -1356,7 +1365,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
|
|||
|
||||
size, err := objInfo.GetActualSize()
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
replLogIf(ctx, err)
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectReplicationNotTracked,
|
||||
BucketName: bucket,
|
||||
|
@ -1367,8 +1376,13 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
|
|||
return
|
||||
}
|
||||
|
||||
// Set the encrypted size for SSE-C objects
|
||||
if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
|
||||
size = objInfo.Size
|
||||
}
|
||||
|
||||
if tgt.Bucket == "" {
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to replicate object %s(%s) to %s, target bucket is missing", objInfo.Name, objInfo.VersionID, tgt.EndpointURL()))
|
||||
replLogIf(ctx, fmt.Errorf("unable to replicate object %s(%s) to %s, target bucket is missing", objInfo.Name, objInfo.VersionID, tgt.EndpointURL()))
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectReplicationNotTracked,
|
||||
BucketName: bucket,
|
||||
|
@ -1398,7 +1412,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
|
|||
if rAction == replicateNone {
|
||||
if ri.OpType == replication.ExistingObjectReplicationType &&
|
||||
objInfo.ModTime.Unix() > oi.LastModified.Unix() && objInfo.VersionID == nullVersionID {
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (null). Newer version exists on target %s", bucket, object, tgt.EndpointURL()))
|
||||
replLogIf(ctx, fmt.Errorf("unable to replicate %s/%s (null). Newer version exists on target %s", bucket, object, tgt.EndpointURL()))
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectReplicationNotTracked,
|
||||
BucketName: bucket,
|
||||
|
@ -1438,7 +1452,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
|
|||
rAction = replicateAll
|
||||
default:
|
||||
rinfo.Err = cerr
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (%s). Target (%s) returned %s error on HEAD",
|
||||
replLogIf(ctx, fmt.Errorf("unable to replicate %s/%s (%s). Target (%s) returned %s error on HEAD",
|
||||
bucket, object, objInfo.VersionID, tgt.EndpointURL(), cerr))
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectReplicationNotTracked,
|
||||
|
@ -1488,13 +1502,13 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
|
|||
}
|
||||
if _, rinfo.Err = c.CopyObject(ctx, tgt.Bucket, object, tgt.Bucket, object, getCopyObjMetadata(objInfo, tgt.StorageClass), srcOpts, dstOpts); rinfo.Err != nil {
|
||||
rinfo.ReplicationStatus = replication.Failed
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to replicate metadata for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
|
||||
replLogIf(ctx, fmt.Errorf("unable to replicate metadata for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
|
||||
}
|
||||
} else {
|
||||
var putOpts minio.PutObjectOptions
|
||||
putOpts, err = putReplicationOpts(ctx, tgt.StorageClass, objInfo)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err))
|
||||
replLogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err))
|
||||
sendEvent(eventArgs{
|
||||
EventName: event.ObjectReplicationNotTracked,
|
||||
BucketName: bucket,
|
||||
|
@ -1524,27 +1538,19 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
|
|||
}
|
||||
r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
|
||||
if objInfo.isMultipart() {
|
||||
if rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object,
|
||||
r, objInfo, putOpts); rinfo.Err != nil {
|
||||
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
|
||||
rinfo.ReplicationStatus = replication.Failed
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
|
||||
} else {
|
||||
rinfo.ReplicationStatus = replication.Completed
|
||||
}
|
||||
}
|
||||
rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts)
|
||||
} else {
|
||||
if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil {
|
||||
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
|
||||
rinfo.ReplicationStatus = replication.Failed
|
||||
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
|
||||
} else {
|
||||
rinfo.ReplicationStatus = replication.Completed
|
||||
}
|
||||
}
|
||||
_, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts)
|
||||
}
|
||||
if rinfo.Err != nil && minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
|
||||
globalBucketTargetSys.markOffline(tgt.EndpointURL())
|
||||
if rinfo.Err != nil {
|
||||
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
|
||||
rinfo.ReplicationStatus = replication.Failed
|
||||
replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w",
|
||||
bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
|
||||
}
|
||||
if minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
|
||||
globalBucketTargetSys.markOffline(tgt.EndpointURL())
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
|
@ -1564,7 +1570,7 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
|
|||
break
|
||||
}
|
||||
if minio.ToErrorResponse(err).Code == "PreconditionFailed" {
|
||||
return err
|
||||
return nil
|
||||
}
|
||||
attempts++
|
||||
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
|
||||
|
@ -1580,14 +1586,10 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
|
|||
for attempts <= 3 {
|
||||
actx, acancel := context.WithTimeout(ctx, time.Minute)
|
||||
aerr := c.AbortMultipartUpload(actx, bucket, object, uploadID)
|
||||
acancel()
|
||||
if aerr == nil {
|
||||
acancel()
|
||||
return
|
||||
}
|
||||
acancel()
|
||||
logger.LogIf(actx,
|
||||
fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster",
|
||||
humanize.Ordinal(attempts), uploadID, bucket, object, aerr))
|
||||
attempts++
|
||||
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
|
||||
}
|
||||
|
@ -1599,21 +1601,35 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
pInfo minio.ObjectPart
)

var objectSize int64
for _, partInfo := range objInfo.Parts {
hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.ActualSize), partInfo.ActualSize, "", "", partInfo.ActualSize)
if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.Size), partInfo.Size, "", "", partInfo.ActualSize)
} else {
hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.ActualSize), partInfo.ActualSize, "", "", partInfo.ActualSize)
}
if err != nil {
return err
}

cHeader := http.Header{}
cHeader.Add(xhttp.MinIOSourceReplicationRequest, "true")
popts := minio.PutObjectPartOptions{
SSE: opts.ServerSideEncryption,
SSE: opts.ServerSideEncryption,
CustomHeader: cHeader,
}

pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.ActualSize, popts)
if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
objectSize += partInfo.Size
pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.Size, popts)
} else {
objectSize += partInfo.ActualSize
pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.ActualSize, popts)
}
if err != nil {
return err
}
if pInfo.Size != partInfo.ActualSize {
if !crypto.SSEC.IsEncrypted(objInfo.UserDefined) && pInfo.Size != partInfo.ActualSize {
return fmt.Errorf("Part size mismatch: got %d, want %d", pInfo.Size, partInfo.ActualSize)
}
uploadedParts = append(uploadedParts, minio.CompletePart{
@ -1621,11 +1637,15 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
ETag: pInfo.ETag,
})
}

// really big value but its okay on heavily loaded systems. This is just tail end timeout.
cctx, ccancel := context.WithTimeout(ctx, 10*time.Minute)
defer ccancel()
_, err = c.CompleteMultipartUpload(cctx, bucket, object, uploadID, uploadedParts, minio.PutObjectOptions{
UserMetadata: map[string]string{validSSEReplicationHeaders[ReservedMetadataPrefix+"Actual-Object-Size"]: objInfo.UserDefined[ReservedMetadataPrefix+"actual-size"]},
Internal: minio.AdvancedPutOptions{
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
// always set this to distinguish between `mc mirror` replication and serverside
ReplicationRequest: true,
},
@ -1838,7 +1858,7 @@ func (p *ReplicationPool) AddMRFWorker() {
globalReplicationStats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)

default:
logger.LogOnceIf(p.ctx, fmt.Errorf("unknown mrf replication type: %T", oi), "unknown-mrf-replicate-type")
bugLogIf(p.ctx, fmt.Errorf("unknown mrf replication type: %T", oi), "unknown-mrf-replicate-type")
}
case <-p.mrfWorkerKillCh:
return

@ -1882,7 +1902,7 @@ func (p *ReplicationPool) AddWorker(input <-chan ReplicationWorkerOperation, opT
atomic.AddInt32(opTracker, -1)
}
default:
logger.LogOnceIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type")
bugLogIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type")
}
}
}

@ -1921,7 +1941,7 @@ func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation
case DeletedObjectReplicationInfo:
replicateDelete(p.ctx, v, p.objLayer)
default:
logger.LogOnceIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type")
bugLogIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type")
}
}
}
@ -2068,9 +2088,9 @@ func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) {
p.mu.RUnlock()
switch prio {
case "fast":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming traffic"), string(replicationSubsystem))
replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming traffic"), string(replicationSubsystem), logger.WarningKind)
case "slow":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming traffic - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem))
replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming traffic - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem), logger.WarningKind)
default:
maxWorkers = min(maxWorkers, WorkerMaxLimit)
if p.ActiveWorkers() < maxWorkers {

@ -2125,9 +2145,9 @@ func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectReplicationInf
p.mu.RUnlock()
switch prio {
case "fast":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming deletes"), string(replicationSubsystem))
replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming deletes"), string(replicationSubsystem), logger.WarningKind)
case "slow":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming deletes - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem))
replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming deletes - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem), logger.WarningKind)
default:
maxWorkers = min(maxWorkers, WorkerMaxLimit)
if p.ActiveWorkers() < maxWorkers {
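The two hunks above warn on overflow when the replication priority is fixed, and otherwise grow the worker pool up to a hard limit. A minimal sketch of that back-pressure pattern; the types, limits, and the drop behaviour here are illustrative only, not MinIO's actual queueing logic:

```go
package main

import "fmt"

const workerMaxLimit = 16

type pool struct {
	tasks   chan int
	workers int
}

// queue tries a non-blocking send; on overflow it either warns (fixed
// priority) or grows the worker count up to workerMaxLimit and retries.
func (p *pool) queue(task int, prio string, maxWorkers int) {
	select {
	case p.tasks <- task:
		return
	default: // queue full
	}
	switch prio {
	case "fast", "slow":
		fmt.Println("warning: unable to keep up with incoming traffic")
	default: // "auto"
		if maxWorkers > workerMaxLimit {
			maxWorkers = workerMaxLimit
		}
		if p.workers < maxWorkers {
			p.workers++
		}
		select {
		case p.tasks <- task:
		default:
			fmt.Println("dropping task: queue still full")
		}
	}
}

func main() {
	p := &pool{tasks: make(chan int, 1), workers: 1}
	p.queue(1, "auto", 8)
	p.queue(2, "auto", 8)
	p.queue(3, "fast", 8)
	fmt.Println("workers:", p.workers)
}
```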
@ -2219,6 +2239,8 @@ func getProxyTargets(ctx context.Context, bucket, object string, opts ObjectOpti
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil || cfg == nil {
replLogOnceIf(ctx, err, bucket)

return &madmin.BucketTargets{}
}
topts := replication.ObjectOpts{Name: object}

@ -2260,7 +2282,7 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, rs *HTTPRa
if rs != nil {
h, err := rs.ToHeader()
if err != nil {
logger.LogIf(ctx, fmt.Errorf("invalid range header for %s/%s(%s) - %w", bucket, object, opts.VersionID, err))
replLogIf(ctx, fmt.Errorf("invalid range header for %s/%s(%s) - %w", bucket, object, opts.VersionID, err))
continue
}
gopts.Set(xhttp.Range, h)

@ -2628,7 +2650,7 @@ func (s *replicationResyncer) PersistToDisk(ctx context.Context, objectAPI Objec
}
if updt {
if err := saveResyncStatus(ctx, bucket, brs, objectAPI); err != nil {
logger.LogIf(ctx, fmt.Errorf("could not save resync metadata to drive for %s - %w", bucket, err))
replLogIf(ctx, fmt.Errorf("could not save resync metadata to drive for %s - %w", bucket, err))
} else {
lastResyncStatusSave[bucket] = brs.LastUpdate
}
@ -2713,15 +2735,15 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
|
|||
s.workerCh <- struct{}{}
|
||||
}()
|
||||
// Allocate new results channel to receive ObjectInfo.
|
||||
objInfoCh := make(chan ObjectInfo)
|
||||
objInfoCh := make(chan itemOrErr[ObjectInfo])
|
||||
cfg, err := getReplicationConfig(ctx, opts.bucket)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed with %w", opts.bucket, opts.arn, err))
|
||||
replLogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed with %w", opts.bucket, opts.arn, err))
|
||||
return
|
||||
}
|
||||
tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, opts.bucket)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed %w", opts.bucket, opts.arn, err))
|
||||
replLogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed %w", opts.bucket, opts.arn, err))
|
||||
return
|
||||
}
|
||||
rcfg := replicationConfig{
|
||||
|
@ -2734,12 +2756,12 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
|
|||
TargetArn: opts.arn,
|
||||
})
|
||||
if len(tgtArns) != 1 {
|
||||
logger.LogIf(ctx, fmt.Errorf("replication resync failed for %s - arn specified %s is missing in the replication config", opts.bucket, opts.arn))
|
||||
replLogIf(ctx, fmt.Errorf("replication resync failed for %s - arn specified %s is missing in the replication config", opts.bucket, opts.arn))
|
||||
return
|
||||
}
|
||||
tgt := globalBucketTargetSys.GetRemoteTargetClient(opts.bucket, opts.arn)
|
||||
if tgt == nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("replication resync failed for %s - target could not be created for arn %s", opts.bucket, opts.arn))
|
||||
replLogIf(ctx, fmt.Errorf("replication resync failed for %s - target could not be created for arn %s", opts.bucket, opts.arn))
|
||||
return
|
||||
}
|
||||
// mark resync status as resync started
|
||||
|
@ -2750,7 +2772,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
|
|||
// Walk through all object versions - Walk() is always in ascending order needed to ensure
|
||||
// delete marker replicated to target after object version is first created.
|
||||
if err := objectAPI.Walk(ctx, opts.bucket, "", objInfoCh, WalkOptions{}); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
replLogIf(ctx, err)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -2848,7 +2870,12 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
|
|||
}
|
||||
}(ctx, i)
|
||||
}
|
||||
for obj := range objInfoCh {
|
||||
for res := range objInfoCh {
|
||||
if res.Err != nil {
|
||||
resyncStatus = ResyncFailed
|
||||
replLogIf(ctx, res.Err)
|
||||
return
|
||||
}
|
||||
select {
|
||||
case <-s.resyncCancelCh:
|
||||
resyncStatus = ResyncCanceled
|
||||
|
@ -2857,11 +2884,11 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
|
|||
return
|
||||
default:
|
||||
}
|
||||
if heal && lastCheckpoint != "" && lastCheckpoint != obj.Name {
|
||||
if heal && lastCheckpoint != "" && lastCheckpoint != res.Item.Name {
|
||||
continue
|
||||
}
|
||||
lastCheckpoint = ""
|
||||
roi := getHealReplicateObjectInfo(obj, rcfg)
|
||||
roi := getHealReplicateObjectInfo(res.Item, rcfg)
|
||||
if !roi.ExistingObjResync.mustResync() {
|
||||
continue
|
||||
}
|
||||
|
@ -3025,7 +3052,7 @@ func (p *ReplicationPool) loadResync(ctx context.Context, buckets []BucketInfo,
|
|||
meta, err := loadBucketResyncMetadata(ctx, bucket, objAPI)
|
||||
if err != nil {
|
||||
if !errors.Is(err, errVolumeNotFound) {
|
||||
logger.LogIf(ctx, err)
|
||||
replLogIf(ctx, err)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
@ -3112,18 +3139,18 @@ func saveResyncStatus(ctx context.Context, bucket string, brs BucketReplicationR
func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string, opts madmin.ReplDiffOpts) (chan madmin.DiffInfo, error) {
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
replLogOnceIf(ctx, err, bucket)
return nil, err
}
tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
replLogIf(ctx, err)
return nil, err
}

objInfoCh := make(chan ObjectInfo, 10)
objInfoCh := make(chan itemOrErr[ObjectInfo], 10)
if err := objAPI.Walk(ctx, bucket, opts.Prefix, objInfoCh, WalkOptions{}); err != nil {
logger.LogIf(ctx, err)
replLogIf(ctx, err)
return nil, err
}
rcfg := replicationConfig{

@ -3133,11 +3160,17 @@ func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string,
diffCh := make(chan madmin.DiffInfo, 4000)
go func() {
defer xioutil.SafeClose(diffCh)
for obj := range objInfoCh {
for res := range objInfoCh {
if res.Err != nil {
diffCh <- madmin.DiffInfo{Err: res.Err}
return
}
if contextCanceled(ctx) {
// Just consume input...
continue
}
obj := res.Item

// Ignore object prefixes which are excluded
// from versioning via the MinIO bucket versioning extension.
if globalBucketVersioningSys.PrefixSuspended(bucket, obj.Name) {
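The resync and diff walkers above now receive itemOrErr[ObjectInfo] values, so listing errors travel in-band on the same channel as the objects. A minimal sketch of that pattern under the assumption of a stand-alone itemOrErr type; MinIO's internal generic may differ in detail:

```go
package main

import (
	"errors"
	"fmt"
)

// itemOrErr carries either a value or a terminal error on one channel.
type itemOrErr[T any] struct {
	Item T
	Err  error
}

func produce(out chan<- itemOrErr[int]) {
	defer close(out)
	for i := 0; i < 3; i++ {
		out <- itemOrErr[int]{Item: i}
	}
	// Deliver a terminal error in-band instead of silently closing.
	out <- itemOrErr[int]{Err: errors.New("listing interrupted")}
}

func main() {
	ch := make(chan itemOrErr[int], 4)
	go produce(ch)
	for res := range ch {
		if res.Err != nil {
			fmt.Println("stop:", res.Err)
			return
		}
		fmt.Println("item:", res.Item)
	}
}
```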
@ -3205,7 +3238,11 @@ func QueueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, ret
|
|||
if oi.ModTime.IsZero() {
|
||||
return
|
||||
}
|
||||
rcfg, _ := getReplicationConfig(ctx, bucket)
|
||||
rcfg, err := getReplicationConfig(ctx, bucket)
|
||||
if err != nil {
|
||||
replLogOnceIf(ctx, err, bucket)
|
||||
return
|
||||
}
|
||||
tgts, _ := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
|
||||
queueReplicationHeal(ctx, bucket, oi, replicationConfig{
|
||||
Config: rcfg,
|
||||
|
@ -3507,7 +3544,7 @@ func (p *ReplicationPool) processMRF() {
|
|||
continue
|
||||
}
|
||||
if err := p.queueMRFHeal(); err != nil && !osIsNotExist(err) {
|
||||
logger.LogIf(p.ctx, err)
|
||||
replLogIf(p.ctx, err)
|
||||
}
|
||||
pTimer.Reset(mrfQueueInterval)
|
||||
case <-p.ctx.Done():
|
||||
|
|
|
@ -20,7 +20,6 @@ package cmd
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"sync"
|
||||
"time"
|
||||
|
@ -32,7 +31,6 @@ import (
|
|||
"github.com/minio/minio/internal/bucket/replication"
|
||||
"github.com/minio/minio/internal/crypto"
|
||||
"github.com/minio/minio/internal/kms"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -131,7 +129,7 @@ func (sys *BucketTargetSys) initHC(ep *url.URL) {
|
|||
func newHCClient() *madmin.AnonymousClient {
|
||||
clnt, e := madmin.NewAnonymousClientNoEndpoint()
|
||||
if e != nil {
|
||||
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to initialize health check client"), string(replicationSubsystem))
|
||||
bugLogIf(GlobalContext, errors.New("Unable to initialize health check client"))
|
||||
return nil
|
||||
}
|
||||
clnt.SetCustomTransport(globalRemoteTargetTransport)
|
||||
|
@ -430,7 +428,7 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
|
|||
if arn.Type == madmin.ReplicationService {
|
||||
// reject removal of remote target if replication configuration is present
|
||||
rcfg, err := getReplicationConfig(ctx, bucket)
|
||||
if err == nil {
|
||||
if err == nil && rcfg != nil {
|
||||
for _, tgtArn := range rcfg.FilterTargetArns(replication.ObjectOpts{OpType: replication.AllReplicationType}) {
|
||||
if err == nil && (tgtArn == arnStr || rcfg.RoleArn == arnStr) {
|
||||
sys.RLock()
|
||||
|
@ -624,7 +622,7 @@ func (sys *BucketTargetSys) set(bucket BucketInfo, meta BucketMetadata) {
|
|||
for _, tgt := range cfg.Targets {
|
||||
tgtClient, err := sys.getRemoteTargetClient(&tgt)
|
||||
if err != nil {
|
||||
logger.LogIf(GlobalContext, err)
|
||||
replLogIf(GlobalContext, err)
|
||||
continue
|
||||
}
|
||||
sys.arnRemotesMap[tgt.Arn] = arnTarget{Client: tgtClient}
|
||||
|
|
|
@ -28,7 +28,7 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

const (

@ -82,7 +82,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
}, r.URL)
return
}
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
if rc, _ := getReplicationConfig(ctx, bucket); rc != nil && v.Suspended() {
writeErrorResponse(ctx, w, APIError{
Code: "InvalidBucketState",
Description: "A replication configuration is present on this bucket, bucket wide versioning cannot be suspended.",

@ -108,7 +108,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeVersionConfig,
Bucket: bucket,
Versioning: &cfgStr,
|
|
@ -65,5 +65,5 @@ var (
MinioBannerName = "MinIO Object Storage Server"

// MinioLicense - MinIO server license.
MinioLicense = "GNU AGPLv3 <https://www.gnu.org/licenses/agpl-3.0.html>"
MinioLicense = "GNU AGPLv3 - https://www.gnu.org/licenses/agpl-3.0.html"
)
@ -29,7 +29,6 @@ import (
"time"

"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
)

var callhomeLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)

@ -81,6 +80,9 @@ func runCallhome(ctx context.Context, objAPI ObjectLayer) bool {
ctx = lkctx.Context()
defer locker.Unlock(lkctx)

// Perform callhome once and then keep running it at regular intervals.
performCallhome(ctx)

callhomeTimer := time.NewTimer(globalCallhomeConfig.FrequencyDur())
defer callhomeTimer.Stop()
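runCallhome above now performs one callhome immediately and then re-arms a timer for subsequent cycles. A minimal sketch of that run-once-then-periodically pattern, with an illustrative task standing in for performCallhome:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runPeriodically runs task once immediately, then on every timer tick
// until the context is cancelled.
func runPeriodically(ctx context.Context, interval time.Duration, task func(context.Context)) {
	task(ctx) // run once up front
	t := time.NewTimer(interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			task(ctx)
			t.Reset(interval) // re-arm for the next cycle
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 350*time.Millisecond)
	defer cancel()
	runPeriodically(ctx, 100*time.Millisecond, func(context.Context) { fmt.Println("tick") })
}
```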
@ -112,7 +114,7 @@ func performCallhome(ctx context.Context) {
deadline := 10 * time.Second // Default deadline is 10secs for callhome
objectAPI := newObjectLayerFn()
if objectAPI == nil {
logger.LogIf(ctx, errors.New("Callhome: object layer not ready"))
internalLogIf(ctx, errors.New("Callhome: object layer not ready"))
return
}

@ -142,11 +144,14 @@ func performCallhome(ctx context.Context) {
select {
case hi, hasMore := <-healthInfoCh:
if !hasMore {
auditOptions := AuditLogOptions{Event: "callhome:diagnostics"}
// Received all data. Send to SUBNET and return
err := sendHealthInfo(ctx, healthInfo)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err))
internalLogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err))
auditOptions.Error = err.Error()
}
auditLogInternal(ctx, auditOptions)
return
}
healthInfo = hi
@ -180,12 +185,12 @@ func createHealthJSONGzip(ctx context.Context, healthInfo madmin.HealthInfo) []b

enc := json.NewEncoder(gzWriter)
if e := enc.Encode(header); e != nil {
logger.LogIf(ctx, fmt.Errorf("Could not encode health info header: %w", e))
internalLogIf(ctx, fmt.Errorf("Could not encode health info header: %w", e))
return nil
}

if e := enc.Encode(healthInfo); e != nil {
logger.LogIf(ctx, fmt.Errorf("Could not encode health info: %w", e))
internalLogIf(ctx, fmt.Errorf("Could not encode health info: %w", e))
return nil
}
@ -21,10 +21,8 @@ import (
|
|||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/gob"
|
||||
"encoding/pem"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
|
@ -49,7 +47,6 @@ import (
|
|||
"github.com/minio/console/api/operations"
|
||||
consoleoauth2 "github.com/minio/console/pkg/auth/idp/oauth2"
|
||||
consoleCerts "github.com/minio/console/pkg/certs"
|
||||
"github.com/minio/kms-go/kes"
|
||||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio-go/v7"
|
||||
"github.com/minio/minio-go/v7/pkg/set"
|
||||
|
@ -58,19 +55,28 @@ import (
|
|||
"github.com/minio/minio/internal/config"
|
||||
"github.com/minio/minio/internal/kms"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/certs"
|
||||
"github.com/minio/pkg/v2/console"
|
||||
"github.com/minio/pkg/v2/ellipses"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
xnet "github.com/minio/pkg/v2/net"
|
||||
"github.com/minio/pkg/v3/certs"
|
||||
"github.com/minio/pkg/v3/console"
|
||||
"github.com/minio/pkg/v3/env"
|
||||
xnet "github.com/minio/pkg/v3/net"
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
||||
// serverDebugLog will enable debug printing
|
||||
var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
|
||||
|
||||
var currentReleaseTime time.Time
|
||||
var (
|
||||
serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
|
||||
currentReleaseTime time.Time
|
||||
orchestrated = IsKubernetes() || IsDocker()
|
||||
)
|
||||
|
||||
func init() {
|
||||
if !term.IsTerminal(int(os.Stdout.Fd())) || !term.IsTerminal(int(os.Stderr.Fd())) {
|
||||
color.TurnOff()
|
||||
}
|
||||
if env.Get("NO_COLOR", "") != "" || env.Get("TERM", "") == "dumb" {
|
||||
color.TurnOff()
|
||||
}
|
||||
|
||||
if runtime.GOOS == "windows" {
|
||||
if mousetrap.StartedByExplorer() {
|
||||
fmt.Printf("Don't double-click %s\n", os.Args[0])
|
||||
|
@ -129,6 +135,9 @@ func minioConfigToConsoleFeatures() {
|
|||
os.Setenv("CONSOLE_LOG_QUERY_AUTH_TOKEN", value)
|
||||
}
|
||||
}
|
||||
if value := env.Get(config.EnvBrowserRedirectURL, ""); value != "" {
|
||||
os.Setenv("CONSOLE_BROWSER_REDIRECT_URL", value)
|
||||
}
|
||||
// pass the console subpath configuration
|
||||
if globalBrowserRedirectURL != nil {
|
||||
subPath := path.Clean(pathJoin(strings.TrimSpace(globalBrowserRedirectURL.Path), SlashSeparator))
|
||||
|
@ -167,7 +176,10 @@ func minioConfigToConsoleFeatures() {
|
|||
os.Setenv("CONSOLE_STS_DURATION", valueSession)
|
||||
}
|
||||
|
||||
os.Setenv("CONSOLE_MINIO_REGION", globalSite.Region)
|
||||
os.Setenv("CONSOLE_MINIO_SITE_NAME", globalSite.Name())
|
||||
os.Setenv("CONSOLE_MINIO_SITE_REGION", globalSite.Region())
|
||||
os.Setenv("CONSOLE_MINIO_REGION", globalSite.Region())
|
||||
|
||||
os.Setenv("CONSOLE_CERT_PASSWD", env.Get("MINIO_CERT_PASSWD", ""))
|
||||
|
||||
// This section sets Browser (console) stored config
|
||||
|
@ -390,20 +402,38 @@ func buildServerCtxt(ctx *cli.Context, ctxt *serverCtxt) (err error) {
ctxt.certsDirSet = true
}

memAvailable := availableMemory()
if ctx.IsSet("memlimit") || ctx.GlobalIsSet("memlimit") {
memlimit := ctx.String("memlimit")
if memlimit == "" {
memlimit = ctx.GlobalString("memlimit")
}
mlimit, err := humanize.ParseBytes(memlimit)
if err != nil {
return err
}
if mlimit > memAvailable {
logger.Info("WARNING: maximum memory available (%s) smaller than specified --memlimit=%s, ignoring --memlimit value",
humanize.IBytes(memAvailable), memlimit)
}
ctxt.MemLimit = mlimit
} else {
ctxt.MemLimit = memAvailable
}

if memAvailable < ctxt.MemLimit {
ctxt.MemLimit = memAvailable
}

ctxt.FTP = ctx.StringSlice("ftp")
ctxt.SFTP = ctx.StringSlice("sftp")

ctxt.Interface = ctx.String("interface")
ctxt.UserTimeout = ctx.Duration("conn-user-timeout")
ctxt.ConnReadDeadline = ctx.Duration("conn-read-deadline")
ctxt.ConnWriteDeadline = ctx.Duration("conn-write-deadline")
ctxt.ConnClientReadDeadline = ctx.Duration("conn-client-read-deadline")
ctxt.ConnClientWriteDeadline = ctx.Duration("conn-client-write-deadline")

ctxt.ShutdownTimeout = ctx.Duration("shutdown-timeout")
ctxt.SendBufSize = ctx.Int("send-buf-size")
ctxt.RecvBufSize = ctx.Int("recv-buf-size")
ctxt.IdleTimeout = ctx.Duration("idle-timeout")
ctxt.ReadHeaderTimeout = ctx.Duration("read-header-timeout")
ctxt.MaxIdleConnsPerHost = ctx.Int("max-idle-conns-per-host")
ctxt.UserTimeout = ctx.Duration("conn-user-timeout")
ctxt.ShutdownTimeout = ctx.Duration("shutdown-timeout")

if conf := ctx.String("config"); len(conf) > 0 {
err = mergeServerCtxtFromConfigFile(conf, ctxt)
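The buildServerCtxt hunk above parses the new --memlimit flag with humanize.ParseBytes and clamps it to the memory actually available. A minimal sketch of that parse-and-clamp logic, with the platform-specific availableMemory probe replaced by a plain parameter:

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

// resolveMemLimit parses a human-readable size and clamps it to the
// memory the host can actually provide.
func resolveMemLimit(flag string, available uint64) (uint64, error) {
	if flag == "" {
		return available, nil
	}
	limit, err := humanize.ParseBytes(flag)
	if err != nil {
		return 0, err
	}
	if limit > available {
		// Warn and fall back to what is actually available.
		fmt.Printf("requested %s exceeds available %s, clamping\n",
			flag, humanize.IBytes(available))
		limit = available
	}
	return limit, nil
}

func main() {
	got, err := resolveMemLimit("4GiB", 2<<30)
	fmt.Println(got, err)
}
```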
@ -494,7 +524,11 @@ func runDNSCache(ctx *cli.Context) {
dnsTTL := ctx.Duration("dns-cache-ttl")
// Check if we have configured a custom DNS cache TTL.
if dnsTTL <= 0 {
dnsTTL = 10 * time.Minute
if orchestrated {
dnsTTL = 30 * time.Second
} else {
dnsTTL = 10 * time.Minute
}
}

// Call to refresh will refresh names in cache.
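The hunk above shortens the default DNS cache TTL to 30 seconds when MinIO runs orchestrated (Kubernetes or Docker), where endpoint IPs churn quickly. A minimal sketch of that TTL selection, using an environment-variable check as a stand-in for the real orchestration detection:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// dnsCacheTTL returns the configured TTL, or a default that is shorter
// in orchestrated environments where addresses change often.
func dnsCacheTTL(configured time.Duration) time.Duration {
	if configured > 0 {
		return configured
	}
	// Illustrative check only; real detection inspects the runtime
	// environment (Kubernetes/Docker) more thoroughly.
	if os.Getenv("KUBERNETES_SERVICE_HOST") != "" {
		return 30 * time.Second
	}
	return 10 * time.Minute
}

func main() {
	fmt.Println(dnsCacheTTL(0))
}
```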
|
@ -757,12 +791,7 @@ func serverHandleEnvVars() {
|
|||
for _, endpoint := range minioEndpoints {
|
||||
if net.ParseIP(endpoint) == nil {
|
||||
// Checking if the IP is a DNS entry.
|
||||
lookupHost := globalDNSCache.LookupHost
|
||||
if IsKubernetes() || IsDocker() {
|
||||
lookupHost = net.DefaultResolver.LookupHost
|
||||
}
|
||||
|
||||
addrs, err := lookupHost(GlobalContext, endpoint)
|
||||
addrs, err := globalDNSCache.LookupHost(GlobalContext, endpoint)
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to initialize MinIO server with [%s] invalid entry found in MINIO_PUBLIC_IPS", endpoint)
|
||||
}
|
||||
|
@ -851,125 +880,28 @@ func loadRootCredentials() {
|
|||
// Initialize KMS global variable after valiadating and loading the configuration.
|
||||
// It depends on KMS env variables and global cli flags.
|
||||
func handleKMSConfig() {
|
||||
if env.IsSet(kms.EnvKMSSecretKey) && env.IsSet(kms.EnvKESEndpoint) {
|
||||
logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKMSSecretKey, kms.EnvKESEndpoint))
|
||||
present, err := kms.IsPresent()
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Invalid KMS configuration specified")
|
||||
}
|
||||
if !present {
|
||||
return
|
||||
}
|
||||
|
||||
if env.IsSet(kms.EnvKMSSecretKey) {
|
||||
KMS, err := kms.Parse(env.Get(kms.EnvKMSSecretKey, ""))
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Unable to parse the KMS secret key inherited from the shell environment")
|
||||
}
|
||||
GlobalKMS = KMS
|
||||
KMS, err := kms.Connect(GlobalContext, &kms.ConnectionOptions{
|
||||
CADir: globalCertsCADir.Get(),
|
||||
})
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Failed to connect to KMS")
|
||||
}
|
||||
if env.IsSet(kms.EnvKESEndpoint) {
|
||||
if env.IsSet(kms.EnvKESAPIKey) {
|
||||
if env.IsSet(kms.EnvKESClientKey) {
|
||||
logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientKey))
|
||||
}
|
||||
if env.IsSet(kms.EnvKESClientCert) {
|
||||
logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientCert))
|
||||
}
|
||||
}
|
||||
if !env.IsSet(kms.EnvKESKeyName) {
|
||||
logger.Fatal(errors.New("Invalid KES configuration"), fmt.Sprintf("The mandatory environment variable %q not set", kms.EnvKESKeyName))
|
||||
}
|
||||
|
||||
var endpoints []string
|
||||
for _, endpoint := range strings.Split(env.Get(kms.EnvKESEndpoint, ""), ",") {
|
||||
if strings.TrimSpace(endpoint) == "" {
|
||||
continue
|
||||
}
|
||||
if !ellipses.HasEllipses(endpoint) {
|
||||
endpoints = append(endpoints, endpoint)
|
||||
continue
|
||||
}
|
||||
patterns, err := ellipses.FindEllipsesPatterns(endpoint)
|
||||
if err != nil {
|
||||
logger.Fatal(err, fmt.Sprintf("Invalid KES endpoint %q", endpoint))
|
||||
}
|
||||
for _, lbls := range patterns.Expand() {
|
||||
endpoints = append(endpoints, strings.Join(lbls, ""))
|
||||
}
|
||||
}
|
||||
rootCAs, err := certs.GetRootCAs(env.Get(kms.EnvKESServerCA, globalCertsCADir.Get()))
|
||||
if err != nil {
|
||||
logger.Fatal(err, fmt.Sprintf("Unable to load X.509 root CAs for KES from %q", env.Get(kms.EnvKESServerCA, globalCertsCADir.Get())))
|
||||
}
|
||||
|
||||
var kmsConf kms.Config
|
||||
if env.IsSet(kms.EnvKESAPIKey) {
|
||||
key, err := kes.ParseAPIKey(env.Get(kms.EnvKESAPIKey, ""))
|
||||
if err != nil {
|
||||
logger.Fatal(err, fmt.Sprintf("Failed to parse KES API key from %q", env.Get(kms.EnvKESAPIKey, "")))
|
||||
}
|
||||
kmsConf = kms.Config{
|
||||
Endpoints: endpoints,
|
||||
DefaultKeyID: env.Get(kms.EnvKESKeyName, ""),
|
||||
APIKey: key,
|
||||
RootCAs: rootCAs,
|
||||
}
|
||||
} else {
|
||||
loadX509KeyPair := func(certFile, keyFile string) (tls.Certificate, error) {
|
||||
// Manually load the certificate and private key into memory.
|
||||
// We need to check whether the private key is encrypted, and
|
||||
// if so, decrypt it using the user-provided password.
|
||||
certBytes, err := os.ReadFile(certFile)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("Unable to load KES client certificate as specified by the shell environment: %v", err)
|
||||
}
|
||||
keyBytes, err := os.ReadFile(keyFile)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("Unable to load KES client private key as specified by the shell environment: %v", err)
|
||||
}
|
||||
privateKeyPEM, rest := pem.Decode(bytes.TrimSpace(keyBytes))
|
||||
if len(rest) != 0 {
|
||||
return tls.Certificate{}, errors.New("Unable to load KES client private key as specified by the shell environment: private key contains additional data")
|
||||
}
|
||||
if x509.IsEncryptedPEMBlock(privateKeyPEM) {
|
||||
keyBytes, err = x509.DecryptPEMBlock(privateKeyPEM, []byte(env.Get(kms.EnvKESClientPassword, "")))
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("Unable to decrypt KES client private key as specified by the shell environment: %v", err)
|
||||
}
|
||||
keyBytes = pem.EncodeToMemory(&pem.Block{Type: privateKeyPEM.Type, Bytes: keyBytes})
|
||||
}
|
||||
certificate, err := tls.X509KeyPair(certBytes, keyBytes)
|
||||
if err != nil {
|
||||
return tls.Certificate{}, fmt.Errorf("Unable to load KES client certificate as specified by the shell environment: %v", err)
|
||||
}
|
||||
return certificate, nil
|
||||
}
|
||||
|
||||
reloadCertEvents := make(chan tls.Certificate, 1)
|
||||
certificate, err := certs.NewCertificate(env.Get(kms.EnvKESClientCert, ""), env.Get(kms.EnvKESClientKey, ""), loadX509KeyPair)
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Failed to load KES client certificate")
|
||||
}
|
||||
certificate.Watch(context.Background(), 15*time.Minute, syscall.SIGHUP)
|
||||
certificate.Notify(reloadCertEvents)
|
||||
|
||||
kmsConf = kms.Config{
|
||||
Endpoints: endpoints,
|
||||
DefaultKeyID: env.Get(kms.EnvKESKeyName, ""),
|
||||
Certificate: certificate,
|
||||
ReloadCertEvents: reloadCertEvents,
|
||||
RootCAs: rootCAs,
|
||||
}
|
||||
}
|
||||
|
||||
KMS, err := kms.NewWithConfig(kmsConf)
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Unable to initialize a connection to KES as specified by the shell environment")
|
||||
}
|
||||
// We check that the default key ID exists or try to create it otherwise.
|
||||
// This implicitly checks that we can communicate to KES. We don't treat
|
||||
// a policy error as failure condition since MinIO may not have the permission
|
||||
// to create keys - just to generate/decrypt data encryption keys.
|
||||
if err = KMS.CreateKey(context.Background(), env.Get(kms.EnvKESKeyName, "")); err != nil && !errors.Is(err, kes.ErrKeyExists) && !errors.Is(err, kes.ErrNotAllowed) {
|
||||
logger.Fatal(err, "Unable to initialize a connection to KES as specified by the shell environment")
|
||||
}
|
||||
GlobalKMS = KMS
|
||||
if _, err = KMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{}); errors.Is(err, kms.ErrKeyNotFound) {
|
||||
err = KMS.CreateKey(GlobalContext, &kms.CreateKeyRequest{Name: KMS.DefaultKey})
|
||||
}
|
||||
if err != nil && !errors.Is(err, kms.ErrKeyExists) && !errors.Is(err, kms.ErrPermission) {
|
||||
logger.Fatal(err, "Failed to connect to KMS")
|
||||
}
|
||||
GlobalKMS = KMS
|
||||
}
|
||||
|
||||
func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
|
||||
|
@ -1043,7 +975,7 @@ func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secu
|
|||
}
|
||||
if err = manager.AddCertificate(certFile, keyFile); err != nil {
|
||||
err = fmt.Errorf("Unable to load TLS certificate '%s,%s': %w", certFile, keyFile, err)
|
||||
logger.LogIf(GlobalContext, err, logger.ErrorKind)
|
||||
bootLogIf(GlobalContext, err, logger.ErrorKind)
|
||||
}
|
||||
}
|
||||
secureConn = true
|
||||
|
|
|
@ -56,7 +56,7 @@ import (
|
|||
"github.com/minio/minio/internal/crypto"
|
||||
xhttp "github.com/minio/minio/internal/http"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/env"
|
||||
"github.com/minio/pkg/v3/env"
|
||||
)
|
||||
|
||||
func initHelp() {
|
||||
|
@ -240,6 +240,11 @@ func initHelp() {
|
|||
Description: "manage Browser HTTP specific features, such as Security headers, etc.",
|
||||
Optional: true,
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.ILMSubSys,
|
||||
Description: "manage ILM settings for expiration and transition workers",
|
||||
Optional: true,
|
||||
},
|
||||
}
|
||||
|
||||
if globalIsErasure {
|
||||
|
@ -288,6 +293,7 @@ func initHelp() {
|
|||
config.DriveSubSys: drive.HelpDrive,
|
||||
config.CacheSubSys: cache.Help,
|
||||
config.BrowserSubSys: browser.Help,
|
||||
config.ILMSubSys: ilm.Help,
|
||||
}
|
||||
|
||||
config.RegisterHelpSubSys(helpMap)
|
||||
|
@ -362,7 +368,7 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
|
|||
}
|
||||
case config.IdentityOpenIDSubSys:
|
||||
if _, err := openid.LookupConfig(s,
|
||||
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil {
|
||||
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region()); err != nil {
|
||||
return err
|
||||
}
|
||||
case config.IdentityLDAPSubSys:
|
||||
|
@ -383,7 +389,7 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
|
|||
}
|
||||
case config.IdentityPluginSubSys:
|
||||
if _, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default],
|
||||
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil {
|
||||
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region()); err != nil {
|
||||
return err
|
||||
}
|
||||
case config.SubnetSubSys:
|
||||
|
@ -479,7 +485,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
|
|||
|
||||
dnsURL, dnsUser, dnsPass, err := env.LookupEnv(config.EnvDNSWebhook)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
|
||||
}
|
||||
if err == nil && dnsURL != "" {
|
||||
bootstrapTraceMsg("initialize remote bucket DNS store")
|
||||
|
@ -487,27 +493,27 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
|
|||
dns.Authentication(dnsUser, dnsPass),
|
||||
dns.RootCAs(globalRootCAs))
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
|
||||
}
|
||||
}
|
||||
|
||||
etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
|
||||
}
|
||||
|
||||
if etcdCfg.Enabled {
|
||||
bootstrapTraceMsg("initialize etcd store")
|
||||
globalEtcdClient, err = etcd.New(etcdCfg)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
|
||||
}
|
||||
|
||||
if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
|
||||
if globalDNSConfig != nil {
|
||||
// if global DNS is already configured, indicate with a warning, in case
|
||||
// users are confused.
|
||||
logger.LogIf(ctx, fmt.Errorf("DNS store is already configured with %s, etcd is not used for DNS store", globalDNSConfig))
|
||||
configLogIf(ctx, fmt.Errorf("DNS store is already configured with %s, etcd is not used for DNS store", globalDNSConfig))
|
||||
} else {
|
||||
globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config,
|
||||
dns.DomainNames(globalDomainNames),
|
||||
|
@ -516,7 +522,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
|
|||
dns.CoreDNSPath(etcdCfg.CoreDNSPath),
|
||||
)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w",
|
||||
configLogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w",
|
||||
globalDomainNames, err))
|
||||
}
|
||||
}
|
||||
|
@ -530,10 +536,11 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
|
|||
// but not federation.
|
||||
globalBucketFederation = etcdCfg.PathPrefix == "" && etcdCfg.Enabled
|
||||
|
||||
globalSite, err = config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default])
|
||||
siteCfg, err := config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default])
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err))
|
||||
}
|
||||
globalSite.Update(siteCfg)
|
||||
|
||||
globalAutoEncryption = crypto.LookupAutoEncryption() // Enable auto-encryption if enabled
|
||||
if globalAutoEncryption && GlobalKMS == nil {
|
||||
|
@ -545,19 +552,19 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
|
|||
bootstrapTraceMsg("initialize the event notification targets")
|
||||
globalNotifyTargetList, err = notify.FetchEnabledTargets(GlobalContext, s, transport)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
|
||||
}
|
||||
|
||||
bootstrapTraceMsg("initialize the lambda targets")
|
||||
globalLambdaTargetList, err = lambda.FetchEnabledTargets(GlobalContext, s, transport)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to initialize lambda target(s): %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to initialize lambda target(s): %w", err))
|
||||
}
|
||||
|
||||
bootstrapTraceMsg("applying the dynamic configuration")
|
||||
// Apply dynamic config values
|
||||
if err := applyDynamicConfig(ctx, objAPI, s); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
configLogIf(ctx, err)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -566,21 +573,18 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
|
|||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
var errs []error
|
||||
setDriveCounts := objAPI.SetDriveCounts()
|
||||
switch subSys {
|
||||
case config.APISubSys:
|
||||
apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
|
||||
}
|
||||
|
||||
globalAPIConfig.init(apiConfig, setDriveCounts)
|
||||
globalAPIConfig.init(apiConfig, setDriveCounts, objAPI.Legacy())
|
||||
autoGenerateRootCredentials() // Generate the KMS root credentials here since we don't know whether API root access is disabled until now.
|
||||
|
||||
// Initialize remote instance transport once.
|
||||
getRemoteInstanceTransportOnce.Do(func() {
|
||||
getRemoteInstanceTransport = NewHTTPTransportWithTimeout(apiConfig.RemoteTransportDeadline)
|
||||
})
|
||||
setRemoteInstanceTransport(NewHTTPTransportWithTimeout(apiConfig.RemoteTransportDeadline))
|
||||
case config.CompressionSubSys:
|
||||
cmpCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
|
||||
if err != nil {
|
||||
|
@ -592,95 +596,96 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
|
|||
case config.HealSubSys:
|
||||
healCfg, err := heal.LookupConfig(s[config.HealSubSys][config.Default])
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to apply heal config: %w", err)
|
||||
errs = append(errs, fmt.Errorf("Unable to apply heal config: %w", err))
|
||||
} else {
|
||||
globalHealConfig.Update(healCfg)
|
||||
}
|
||||
globalHealConfig.Update(healCfg)
|
||||
case config.BatchSubSys:
|
||||
batchCfg, err := batch.LookupConfig(s[config.BatchSubSys][config.Default])
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to apply batch config: %w", err)
|
||||
errs = append(errs, fmt.Errorf("Unable to apply batch config: %w", err))
|
||||
} else {
|
||||
globalBatchConfig.Update(batchCfg)
|
||||
}
|
||||
globalBatchConfig.Update(batchCfg)
|
||||
case config.ScannerSubSys:
|
||||
scannerCfg, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default])
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to apply scanner config: %w", err)
|
||||
errs = append(errs, fmt.Errorf("Unable to apply scanner config: %w", err))
|
||||
} else {
|
||||
// update dynamic scanner values.
|
||||
scannerIdleMode.Store(scannerCfg.IdleMode)
|
||||
scannerCycle.Store(scannerCfg.Cycle)
|
||||
scannerExcessObjectVersions.Store(scannerCfg.ExcessVersions)
|
||||
scannerExcessFolders.Store(scannerCfg.ExcessFolders)
|
||||
configLogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
|
||||
}
|
||||
// update dynamic scanner values.
|
||||
scannerIdleMode.Store(scannerCfg.IdleMode)
|
||||
scannerCycle.Store(scannerCfg.Cycle)
|
||||
scannerExcessObjectVersions.Store(scannerCfg.ExcessVersions)
|
||||
scannerExcessFolders.Store(scannerCfg.ExcessFolders)
|
||||
logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
|
||||
case config.LoggerWebhookSubSys:
|
||||
loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.LoggerWebhookSubSys)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to load logger webhook config: %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to load logger webhook config: %w", err))
|
||||
}
|
||||
userAgent := getUserAgent(getMinioMode())
|
||||
for n, l := range loggerCfg.HTTP {
|
||||
if l.Enabled {
|
||||
l.LogOnce = logger.LogOnceConsoleIf
|
||||
l.LogOnceIf = configLogOnceConsoleIf
|
||||
l.UserAgent = userAgent
|
||||
l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
|
||||
}
|
||||
loggerCfg.HTTP[n] = l
|
||||
}
|
||||
if errs := logger.UpdateSystemTargets(ctx, loggerCfg); len(errs) > 0 {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to update logger webhook config: %v", errs))
|
||||
if errs := logger.UpdateHTTPWebhooks(ctx, loggerCfg.HTTP); len(errs) > 0 {
|
||||
configLogIf(ctx, fmt.Errorf("Unable to update logger webhook config: %v", errs))
|
||||
}
|
||||
case config.AuditWebhookSubSys:
|
||||
loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.AuditWebhookSubSys)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to load audit webhook config: %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to load audit webhook config: %w", err))
|
||||
}
|
||||
userAgent := getUserAgent(getMinioMode())
|
||||
for n, l := range loggerCfg.AuditWebhook {
|
||||
if l.Enabled {
|
||||
l.LogOnce = logger.LogOnceConsoleIf
|
||||
l.LogOnceIf = configLogOnceConsoleIf
|
||||
l.UserAgent = userAgent
|
||||
l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
|
||||
}
|
||||
loggerCfg.AuditWebhook[n] = l
|
||||
}
|
||||
|
||||
if errs := logger.UpdateAuditWebhookTargets(ctx, loggerCfg); len(errs) > 0 {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to update audit webhook targets: %v", errs))
|
||||
if errs := logger.UpdateAuditWebhooks(ctx, loggerCfg.AuditWebhook); len(errs) > 0 {
|
||||
configLogIf(ctx, fmt.Errorf("Unable to update audit webhook targets: %v", errs))
|
||||
}
|
||||
case config.AuditKafkaSubSys:
|
||||
loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.AuditKafkaSubSys)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to load audit kafka config: %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to load audit kafka config: %w", err))
|
||||
}
|
||||
for n, l := range loggerCfg.AuditKafka {
|
||||
if l.Enabled {
|
||||
if l.TLS.Enable {
|
||||
l.TLS.RootCAs = globalRootCAs
|
||||
}
|
||||
l.LogOnce = logger.LogOnceIf
|
||||
l.LogOnce = configLogOnceIf
|
||||
loggerCfg.AuditKafka[n] = l
|
||||
}
|
||||
}
|
||||
if errs := logger.UpdateAuditKafkaTargets(ctx, loggerCfg); len(errs) > 0 {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to update audit kafka targets: %v", errs))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to update audit kafka targets: %v", errs))
|
||||
}
|
||||
case config.StorageClassSubSys:
|
||||
for i, setDriveCount := range setDriveCounts {
|
||||
sc, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
|
||||
break
|
||||
}
|
||||
// if we validated all setDriveCounts and it was successful
|
||||
// proceed to store the correct storage class globally.
|
||||
if i == len(setDriveCounts)-1 {
|
||||
if i == 0 {
|
||||
globalStorageClass.Update(sc)
|
||||
}
|
||||
}
|
||||
case config.SubnetSubSys:
|
||||
subnetConfig, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default], globalProxyTransport)
|
||||
subnetConfig, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default], globalRemoteTargetTransport)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err))
|
||||
} else {
|
||||
globalSubnetConfig.Update(subnetConfig, globalIsCICD)
|
||||
globalSubnetConfig.ApplyEnv() // update environment settings for Console UI
|
||||
|
@ -688,7 +693,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
|
|||
case config.CallhomeSubSys:
|
||||
callhomeCfg, err := callhome.LookupConfig(s[config.CallhomeSubSys][config.Default])
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to load callhome config: %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to load callhome config: %w", err))
|
||||
} else {
|
||||
enable := callhomeCfg.Enable && !globalCallhomeConfig.Enabled()
|
||||
globalCallhomeConfig.Update(callhomeCfg)
|
||||
|
@ -697,45 +702,50 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
|
|||
}
|
||||
}
|
||||
case config.DriveSubSys:
|
||||
if driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default]); err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err))
|
||||
driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default])
|
||||
if err != nil {
|
||||
configLogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err))
|
||||
} else {
|
||||
err := globalDriveConfig.Update(driveConfig)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err))
|
||||
if err = globalDriveConfig.Update(driveConfig); err != nil {
|
||||
configLogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err))
|
||||
}
|
||||
}
|
||||
case config.CacheSubSys:
|
||||
cacheCfg, err := cache.LookupConfig(s[config.CacheSubSys][config.Default], globalRemoteTargetTransport)
|
||||
if err != nil {
|
||||
logger.LogIf(ctx, fmt.Errorf("Unable to load cache config: %w", err))
|
||||
configLogIf(ctx, fmt.Errorf("Unable to load cache config: %w", err))
|
||||
} else {
|
||||
globalCacheConfig.Update(cacheCfg)
|
||||
}
|
||||
case config.BrowserSubSys:
|
||||
browserCfg, err := browser.LookupConfig(s[config.BrowserSubSys][config.Default])
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to apply browser config: %w", err)
|
||||
errs = append(errs, fmt.Errorf("Unable to apply browser config: %w", err))
|
||||
} else {
|
||||
globalBrowserConfig.Update(browserCfg)
|
||||
}
|
||||
globalBrowserConfig.Update(browserCfg)
|
||||
case config.ILMSubSys:
|
||||
ilmCfg, err := ilm.LookupConfig(s[config.ILMSubSys][config.Default])
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to apply ilm config: %w", err)
|
||||
errs = append(errs, fmt.Errorf("Unable to apply ilm config: %w", err))
|
||||
} else {
|
||||
if globalTransitionState != nil {
|
||||
globalTransitionState.UpdateWorkers(ilmCfg.TransitionWorkers)
|
||||
}
|
||||
if globalExpiryState != nil {
|
||||
globalExpiryState.ResizeWorkers(ilmCfg.ExpirationWorkers)
|
||||
}
|
||||
globalILMConfig.update(ilmCfg)
|
||||
}
|
||||
if globalTransitionState != nil {
|
||||
globalTransitionState.UpdateWorkers(ilmCfg.TransitionWorkers)
|
||||
}
|
||||
if globalExpiryState != nil {
|
||||
globalExpiryState.ResizeWorkers(ilmCfg.ExpirationWorkers)
|
||||
}
|
||||
globalILMConfig.update(ilmCfg)
|
||||
}
|
||||
globalServerConfigMu.Lock()
|
||||
defer globalServerConfigMu.Unlock()
|
||||
if globalServerConfig != nil {
|
||||
globalServerConfig[subSys] = s[subSys]
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
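applyDynamicConfigForSubSys above now accumulates per-subsystem failures instead of returning on the first one, joining them with errors.Join at the end. A minimal sketch of that aggregation pattern; the step functions are illustrative:

```go
package main

import (
	"errors"
	"fmt"
)

// applyAll runs every step, collects the failures, and reports them all
// at once instead of stopping at the first error.
func applyAll(steps []func() error) error {
	var errs []error
	for _, step := range steps {
		if err := step(); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return errors.Join(errs...)
	}
	return nil
}

func main() {
	err := applyAll([]func() error{
		func() error { return nil },
		func() error { return errors.New("heal config invalid") },
		func() error { return errors.New("ilm config invalid") },
	})
	fmt.Println(err)
}
```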
@ -750,41 +760,33 @@ func autoGenerateRootCredentials() {
|
|||
return
|
||||
}
|
||||
|
||||
if manager, ok := GlobalKMS.(kms.KeyManager); ok {
|
||||
stat, err := GlobalKMS.Stat(GlobalContext)
|
||||
if err != nil {
|
||||
logger.LogIf(GlobalContext, err, "Unable to generate root credentials using KMS")
|
||||
return
|
||||
}
|
||||
aKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root access key")})
|
||||
if errors.Is(err, kes.ErrNotAllowed) || errors.Is(err, errors.ErrUnsupported) {
|
||||
return // If we don't have permission to compute the HMAC, don't change the cred.
|
||||
}
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Unable to generate root access key using KMS")
|
||||
}
|
||||
|
||||
aKey, err := manager.HMAC(GlobalContext, stat.DefaultKey, []byte("root access key"))
|
||||
if errors.Is(err, kes.ErrNotAllowed) {
|
||||
return // If we don't have permission to compute the HMAC, don't change the cred.
|
||||
}
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Unable to generate root access key using KMS")
|
||||
}
|
||||
sKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root secret key")})
|
||||
if err != nil {
|
||||
// Here, we must have permission. Otherwise, we would have failed earlier.
|
||||
logger.Fatal(err, "Unable to generate root secret key using KMS")
|
||||
}
|
||||
|
||||
sKey, err := manager.HMAC(GlobalContext, stat.DefaultKey, []byte("root secret key"))
|
||||
if err != nil {
|
||||
// Here, we must have permission. Otherwise, we would have failed earlier.
|
||||
logger.Fatal(err, "Unable to generate root secret key using KMS")
|
||||
}
|
||||
accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey))
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Unable to generate root access key")
|
||||
}
|
||||
secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey))
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Unable to generate root secret key")
|
||||
}
|
||||
|
||||
accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey))
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Unable to generate root access key")
|
||||
}
|
||||
secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey))
|
||||
if err != nil {
|
||||
logger.Fatal(err, "Unable to generate root secret key")
|
||||
}
|
||||
|
||||
logger.Info("Automatically generated root access key and secret key with the KMS")
|
||||
globalActiveCred = auth.Credentials{
|
||||
AccessKey: accessKey,
|
||||
SecretKey: secretKey,
|
||||
}
|
||||
logger.Info("Automatically generated root access key and secret key with the KMS")
|
||||
globalActiveCred = auth.Credentials{
|
||||
AccessKey: accessKey,
|
||||
SecretKey: secretKey,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
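autoGenerateRootCredentials above derives both root keys deterministically from KMS MACs over fixed labels. A minimal sketch of the same idea, with crypto/hmac standing in for the KMS MAC call and simple encoding in place of auth.GenerateAccessKey and auth.GenerateSecretKey; all names and key material here are illustrative:

```go
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// derive computes an HMAC over a fixed label and encodes it into a
// printable credential of length n.
func derive(secret []byte, label string, n int) string {
	mac := hmac.New(sha256.New, secret)
	mac.Write([]byte(label))
	return base64.RawURLEncoding.EncodeToString(mac.Sum(nil))[:n]
}

func main() {
	kmsKey := []byte("kms-default-key-material") // stand-in for the KMS default key
	fmt.Println("access key:", derive(kmsKey, "root access key", 20))
	fmt.Println("secret key:", derive(kmsKey, "root secret key", 32))
}
```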
|
@ -39,8 +39,8 @@ func TestServerConfig(t *testing.T) {
|
|||
t.Fatalf("Init Test config failed")
|
||||
}
|
||||
|
||||
if globalSite.Region != globalMinioDefaultRegion {
|
||||
t.Errorf("Expecting region `us-east-1` found %s", globalSite.Region)
|
||||
if globalSite.Region() != globalMinioDefaultRegion {
|
||||
t.Errorf("Expecting region `us-east-1` found %s", globalSite.Region())
|
||||
}
|
||||
|
||||
// Set new region and verify.
|
||||
|
@ -52,8 +52,8 @@ func TestServerConfig(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if site.Region != "us-west-1" {
|
||||
t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region)
|
||||
if site.Region() != "us-west-1" {
|
||||
t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region())
|
||||
}
|
||||
|
||||
if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {
|
||||
|
|
|
@ -33,8 +33,8 @@ import (
|
|||
"github.com/minio/minio/internal/config/storageclass"
|
||||
"github.com/minio/minio/internal/event/target"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
xnet "github.com/minio/pkg/v2/net"
|
||||
"github.com/minio/pkg/v2/quick"
|
||||
xnet "github.com/minio/pkg/v3/net"
|
||||
"github.com/minio/pkg/v3/quick"
|
||||
)
|
||||
|
||||
// Save config file to corresponding backend
|
||||
|
|
|
@ -27,7 +27,7 @@ import (
|
|||
"github.com/minio/minio/internal/config/policy/opa"
|
||||
"github.com/minio/minio/internal/config/storageclass"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/quick"
|
||||
"github.com/minio/pkg/v3/quick"
|
||||
)
|
||||
|
||||
// FileLogger is introduced to workaround the dependency about logrus
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (c) 2015-2021 MinIO, Inc.
|
||||
// Copyright (c) 2015-2024 MinIO, Inc.
|
||||
//
|
||||
// This file is part of MinIO Object Storage stack
|
||||
//
|
||||
|
@ -20,6 +20,7 @@ package cmd
|
|||
import (
|
||||
"container/ring"
|
||||
"context"
|
||||
"io"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
|
@ -28,8 +29,8 @@ import (
|
|||
"github.com/minio/minio/internal/logger/target/console"
|
||||
"github.com/minio/minio/internal/logger/target/types"
|
||||
"github.com/minio/minio/internal/pubsub"
|
||||
"github.com/minio/pkg/v2/logger/message/log"
|
||||
xnet "github.com/minio/pkg/v2/net"
|
||||
"github.com/minio/pkg/v3/logger/message/log"
|
||||
xnet "github.com/minio/pkg/v3/net"
|
||||
)
|
||||
|
||||
// number of log messages to buffer
|
||||
|
@ -49,10 +50,10 @@ type HTTPConsoleLoggerSys struct {
|
|||
|
||||
// NewConsoleLogger - creates new HTTPConsoleLoggerSys with all nodes subscribed to
|
||||
// the console logging pub sub system
|
||||
func NewConsoleLogger(ctx context.Context) *HTTPConsoleLoggerSys {
|
||||
func NewConsoleLogger(ctx context.Context, w io.Writer) *HTTPConsoleLoggerSys {
|
||||
return &HTTPConsoleLoggerSys{
|
||||
pubsub: pubsub.New[log.Info, madmin.LogMask](8),
|
||||
console: console.New(),
|
||||
console: console.New(w),
|
||||
logBuf: ring.New(defaultLogBufferCount),
|
||||
}
|
||||
}
|
||||
|
|
|
@ -36,13 +36,14 @@ import (
|
|||
"github.com/minio/madmin-go/v3"
|
||||
"github.com/minio/minio/internal/bucket/lifecycle"
|
||||
"github.com/minio/minio/internal/bucket/object/lock"
|
||||
objectlock "github.com/minio/minio/internal/bucket/object/lock"
|
||||
"github.com/minio/minio/internal/bucket/replication"
|
||||
"github.com/minio/minio/internal/bucket/versioning"
|
||||
"github.com/minio/minio/internal/color"
|
||||
"github.com/minio/minio/internal/config/heal"
|
||||
"github.com/minio/minio/internal/event"
|
||||
xioutil "github.com/minio/minio/internal/ioutil"
|
||||
"github.com/minio/minio/internal/logger"
|
||||
"github.com/minio/pkg/v2/console"
|
||||
"github.com/minio/pkg/v3/console"
|
||||
uatomic "go.uber.org/atomic"
|
||||
)
|
||||
|
||||
|
@ -52,7 +53,7 @@ const (
dataScannerCompactLeastObject = 500 // Compact when there is less than this many objects in a branch.
dataScannerCompactAtChildren = 10000 // Compact when there are this many children in a branch.
dataScannerCompactAtFolders = dataScannerCompactAtChildren / 4 // Compact when this many subfolders in a single folder.
dataScannerForceCompactAtFolders = 1_000_000 // Compact when this many subfolders in a single folder (even top level).
dataScannerForceCompactAtFolders = 250_000 // Compact when this many subfolders in a single folder (even top level).
dataScannerStartDelay = 1 * time.Minute // Time to wait on startup and between cycles.

healDeleteDangling = true
@ -63,11 +64,12 @@ var (
|
|||
globalHealConfig heal.Config
|
||||
|
||||
// Sleeper values are updated when config is loaded.
|
||||
scannerSleeper = newDynamicSleeper(2, time.Second, true) // Keep defaults same as config defaults
|
||||
scannerCycle = uatomic.NewDuration(dataScannerStartDelay)
|
||||
scannerIdleMode = uatomic.NewInt32(0) // default is throttled when idle
|
||||
scannerExcessObjectVersions = uatomic.NewInt64(100)
|
||||
scannerExcessFolders = uatomic.NewInt64(50000)
|
||||
scannerSleeper = newDynamicSleeper(2, time.Second, true) // Keep defaults same as config defaults
|
||||
scannerCycle = uatomic.NewDuration(dataScannerStartDelay)
|
||||
scannerIdleMode = uatomic.NewInt32(0) // default is throttled when idle
|
||||
scannerExcessObjectVersions = uatomic.NewInt64(100)
|
||||
scannerExcessObjectVersionsTotalSize = uatomic.NewInt64(1024 * 1024 * 1024 * 1024) // 1 TB
|
||||
scannerExcessFolders = uatomic.NewInt64(50000)
|
||||
)
|
||||
|
||||
// initDataScanner will start the scanner in the background.
|
||||
|
@@ -122,13 +124,13 @@ func readBackgroundHealInfo(ctx context.Context, objAPI ObjectLayer) backgroundH
buf, err := readConfig(ctx, objAPI, backgroundHealInfoPath)
if err != nil {
if !errors.Is(err, errConfigNotFound) {
logger.LogOnceIf(ctx, err, backgroundHealInfoPath)
internalLogOnceIf(ctx, err, backgroundHealInfoPath)
}
return backgroundHealInfo{}
}
var info backgroundHealInfo
if err = json.Unmarshal(buf, &info); err != nil {
logger.LogOnceIf(ctx, err, backgroundHealInfoPath)
bugLogIf(ctx, err, backgroundHealInfoPath)
}
return info
}

@@ -140,13 +142,13 @@ func saveBackgroundHealInfo(ctx context.Context, objAPI ObjectLayer, info backgr
b, err := json.Marshal(info)
if err != nil {
logger.LogIf(ctx, err)
bugLogIf(ctx, err)
return
}
// Get last healing information
err = saveConfig(ctx, objAPI, backgroundHealInfoPath, b)
if err != nil {
logger.LogIf(ctx, err)
internalLogIf(ctx, err)
}
}

@@ -167,7 +169,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
cycleInfo.next = binary.LittleEndian.Uint64(buf[:8])
buf = buf[8:]
_, err := cycleInfo.UnmarshalMsg(buf)
logger.LogIf(ctx, err)
bugLogIf(ctx, err)
}

scannerTimer := time.NewTimer(scannerCycle.Load())

@@ -204,7 +206,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
results := make(chan DataUsageInfo, 1)
go storeDataUsageInBackend(ctx, objAPI, results)
err := objAPI.NSScanner(ctx, results, uint32(cycleInfo.current), scanMode)
logger.LogOnceIf(ctx, err, "ns-scanner")
scannerLogIf(ctx, err)
res := map[string]string{"cycle": strconv.FormatUint(cycleInfo.current, 10)}
if err != nil {
res["error"] = err.Error()

@@ -224,7 +226,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
binary.LittleEndian.PutUint64(tmp, cycleInfo.next)
tmp, _ = cycleInfo.MarshalMsg(tmp)
err = saveConfig(ctx, objAPI, dataUsageBloomNamePath, tmp)
logger.LogOnceIf(ctx, err, dataUsageBloomNamePath)
scannerLogIf(ctx, err, dataUsageBloomNamePath)
}
}
}

@@ -347,6 +349,7 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, basePath string, ca
// No useful information...
return cache, err
}
s.newCache.forceCompact(dataScannerCompactAtChildren)
s.newCache.Info.LastUpdate = UTCNow()
s.newCache.Info.NextCycle = cache.Info.NextCycle
return s.newCache, nil

@@ -752,7 +755,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
versionID: "",
}, madmin.HealItemObject)
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogOnceIf(ctx, err, entry.name)
scannerLogIf(ctx, err)
}
foundObjs = foundObjs || err == nil
return

@@ -769,7 +772,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
}, madmin.HealItemObject)
stopFn(int(ver.Size))
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogOnceIf(ctx, err, fiv.Name)
scannerLogIf(ctx, err, fiv.Name)
}
if err == nil {
successVersions++
@@ -945,17 +948,39 @@ func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi Object
func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi ObjectInfo) (action lifecycle.Action, size int64) {
size, err := oi.GetActualSize()
if i.debug {
logger.LogIf(ctx, err)
scannerLogIf(ctx, err)
}
if i.lifeCycle == nil {
return action, size
}

versionID := oi.VersionID
vcfg, _ := globalBucketVersioningSys.Get(i.bucket)
rCfg, _ := globalBucketObjectLockSys.Get(i.bucket)
replcfg, _ := getReplicationConfig(ctx, i.bucket)
lcEvt := evalActionFromLifecycle(ctx, *i.lifeCycle, rCfg, replcfg, oi)

var vc *versioning.Versioning
var lr objectlock.Retention
var rcfg *replication.Config
if !isMinioMetaBucketName(i.bucket) {
vc, err = globalBucketVersioningSys.Get(i.bucket)
if err != nil {
scannerLogOnceIf(ctx, err, i.bucket)
return
}

// Check if bucket is object locked.
lr, err = globalBucketObjectLockSys.Get(i.bucket)
if err != nil {
scannerLogOnceIf(ctx, err, i.bucket)
return
}

rcfg, err = getReplicationConfig(ctx, i.bucket)
if err != nil {
scannerLogOnceIf(ctx, err, i.bucket)
return
}
}

lcEvt := evalActionFromLifecycle(ctx, *i.lifeCycle, lr, rcfg, oi)
if i.debug {
if versionID != "" {
console.Debugf(applyActionsLogPrefix+" lifecycle: %q (version-id=%s), Initial scan: %v\n", i.objectPath(), versionID, lcEvt.Action)

@@ -969,11 +994,11 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi Obje
// This can happen when,
// - ExpireObjectAllVersions flag is enabled
// - NoncurrentVersionExpiration is applicable
case lifecycle.DeleteVersionAction, lifecycle.DeleteAllVersionsAction:
case lifecycle.DeleteVersionAction, lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
size = 0
case lifecycle.DeleteAction:
// On a non-versioned bucket, DeleteObject removes the only version permanently.
if !vcfg.PrefixEnabled(oi.Name) {
if !vc.PrefixEnabled(oi.Name) {
size = 0
}
}

@@ -1066,7 +1091,7 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi
}

// Check if we have many versions after applyNewerNoncurrentVersionLimit.
if len(objInfos) > int(scannerExcessObjectVersions.Load()) {
if len(objInfos) >= int(scannerExcessObjectVersions.Load()) {
// Notify object accessed via a GET request.
sendEvent(eventArgs{
EventName: event.ObjectManyVersions,

@@ -1090,6 +1115,39 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi
})
}

cumulativeSize := int64(0)
for _, objInfo := range objInfos {
cumulativeSize += objInfo.Size
}
// Check if the cumulative size of all versions of this object is high.
if cumulativeSize >= scannerExcessObjectVersionsTotalSize.Load() {
// Notify object accessed via a GET request.
sendEvent(eventArgs{
EventName: event.ObjectLargeVersions,
BucketName: i.bucket,
Object: ObjectInfo{
Name: i.objectPath(),
},
UserAgent: "Scanner",
Host: globalLocalNodeName,
RespElements: map[string]string{
"x-minio-versions-count": strconv.Itoa(len(objInfos)),
"x-minio-versions-size": strconv.FormatInt(cumulativeSize, 10),
},
})

auditLogInternal(context.Background(), AuditLogOptions{
Event: "scanner:largeversions",
APIName: "Scanner",
Bucket: i.bucket,
Object: i.objectPath(),
Tags: map[string]interface{}{
"x-minio-versions-count": strconv.Itoa(len(objInfos)),
"x-minio-versions-size": strconv.FormatInt(cumulativeSize, 10),
},
})
}

return objInfos, nil
}
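The new block above adds a second excess-versions check: it sums the sizes of every version of an object and, once the total reaches scannerExcessObjectVersionsTotalSize (1 TB by default, i.e. 1024^4 bytes), emits an ObjectLargeVersions event plus an internal audit entry. A standalone sketch of the same cumulative-size threshold check, using hypothetical helper names:

```go
package main

import "fmt"

// versionInfo is a stand-in for the per-version ObjectInfo entries the scanner walks.
type versionInfo struct {
	VersionID string
	Size      int64
}

// excessVersionsTotalSize mirrors the 1 TB default from the diff
// (1024 * 1024 * 1024 * 1024 bytes == 1 << 40).
const excessVersionsTotalSize = int64(1) << 40

// checkExcessVersionSize returns the cumulative size and whether it crosses the
// limit, which is the point at which the scanner would notify and audit.
func checkExcessVersionSize(versions []versionInfo, limit int64) (int64, bool) {
	var cumulative int64
	for _, v := range versions {
		cumulative += v.Size
	}
	return cumulative, cumulative >= limit
}

func main() {
	versions := []versionInfo{
		{VersionID: "v1", Size: 600 << 30}, // 600 GiB
		{VersionID: "v2", Size: 500 << 30}, // 500 GiB
	}
	total, tooBig := checkExcessVersionSize(versions, excessVersionsTotalSize)
	fmt.Printf("total=%d bytes, exceeds threshold: %v\n", total, tooBig)
}
```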
@@ -1105,7 +1163,7 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi Object
// Note: objDeleted is true if and only if action ==
// lifecycle.DeleteAllVersionsAction
if action == lifecycle.DeleteAllVersionsAction {
if action.DeleteAll() {
return true, 0
}

@@ -1123,7 +1181,7 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi Object
err := o.CheckAbandonedParts(ctx, i.bucket, i.objectPath(), madmin.HealOpts{Remove: healDeleteDangling})
done()
if err != nil {
logger.LogOnceIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", i.bucket, i.objectPath(), err), i.objectPath())
healingLogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", i.bucket, i.objectPath(), err), i.objectPath())
}
}
}

@@ -1142,17 +1200,15 @@ func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr loc
console.Debugf(applyActionsLogPrefix+" lifecycle: Secondary scan: %v\n", event.Action)
}

if event.Action == lifecycle.NoneAction {
return event
}

if obj.IsLatest && event.Action == lifecycle.DeleteAllVersionsAction {
if lr.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
switch event.Action {
case lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
// Skip if bucket has object locking enabled; To prevent the
// possibility of violating an object retention on one of the
// noncurrent versions of this object.
if lr.LockEnabled {
return lifecycle.Event{Action: lifecycle.NoneAction}
}
}

switch event.Action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteRestoredVersionAction:
// Defensive code, should never happen
if obj.VersionID == "" {
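The rewritten evalActionFromLifecycle above folds DeleteAllVersionsAction and DelMarkerDeleteAllVersionsAction into one switch and downgrades either of them to NoneAction whenever the bucket has object locking enabled, since expiring every version could violate retention on a noncurrent version. A hedged, self-contained sketch of that guard (the action type below is a stand-in, not the lifecycle package):

```go
package main

import "fmt"

// action is a stand-in for lifecycle.Action in the diff.
type action int

const (
	noneAction action = iota
	deleteAllVersionsAction
	delMarkerDeleteAllVersionsAction
	deleteVersionAction
)

// guardLockedBuckets mirrors the guard in the hunk above: any "delete all
// versions" style action is suppressed when object locking is enabled.
func guardLockedBuckets(a action, lockEnabled bool) action {
	switch a {
	case deleteAllVersionsAction, delMarkerDeleteAllVersionsAction:
		if lockEnabled {
			return noneAction
		}
	}
	return a
}

func main() {
	fmt.Println(guardLockedBuckets(deleteAllVersionsAction, true))  // 0 -> suppressed
	fmt.Println(guardLockedBuckets(deleteAllVersionsAction, false)) // 1 -> allowed
}
```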
@@ -1184,29 +1240,22 @@ func applyTransitionRule(event lifecycle.Event, src lcEventSrc, obj ObjectInfo)
return true
}

func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) bool {
var err error
defer func() {
if err != nil {
return
}
// Note: DeleteAllVersions action is not supported for
// transitioned objects
globalScannerMetrics.timeILM(lcEvent.Action)(1)
}()

if err = expireTransitionedObject(ctx, objLayer, &obj, lcEvent, src); err != nil {
func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) (ok bool) {
timeILM := globalScannerMetrics.timeILM(lcEvent.Action)
if err := expireTransitionedObject(ctx, objLayer, &obj, lcEvent, src); err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return false
}
logger.LogOnceIf(ctx, err, obj.Name)
ilmLogIf(ctx, err)
return false
}
timeILM(1)

// Notification already sent in *expireTransitionedObject*, just return 'true' here.
return true
}

func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) bool {
func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) (ok bool) {
traceFn := globalLifecycleSys.trace(obj)
opts := ObjectOptions{
Expiration: ExpirationOptions{Expire: true},
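applyExpiryOnTransitionedObject above is restructured so the ILM timing metric is captured once via globalScannerMetrics.timeILM and recorded only after expiration succeeds, replacing a deferred closure that had to inspect a shared error variable. A small sketch of that capture-then-record-on-success shape, with made-up metric and expiry helpers:

```go
package main

import (
	"errors"
	"fmt"
)

// timeAction returns a callback that records how many items an action handled.
// It stands in for globalScannerMetrics.timeILM(action) from the diff.
func timeAction(action string) func(n uint64) {
	return func(n uint64) {
		fmt.Printf("metric: action=%s items=%d\n", action, n)
	}
}

// expire is a placeholder for expireTransitionedObject; it fails for one object.
func expire(name string) error {
	if name == "missing" {
		return errors.New("object not found")
	}
	return nil
}

// applyExpiry records the metric only when the expiration succeeded.
func applyExpiry(name string) bool {
	record := timeAction("expire-transitioned") // capture once
	if err := expire(name); err != nil {
		return false // no metric on failure
	}
	record(1)
	return true
}

func main() {
	fmt.Println(applyExpiry("ok-object")) // metric emitted, prints true
	fmt.Println(applyExpiry("missing"))   // no metric, prints false
}
```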
@@ -1221,22 +1270,26 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
if lcEvent.Action.DeleteAll() {
opts.DeletePrefix = true
// use prefix delete on exact object (this is an optimization to avoid fan-out calls)
opts.DeletePrefixObject = true
}
var (
dobj ObjectInfo
err error
)

timeILM := globalScannerMetrics.timeILM(lcEvent.Action)
defer func() {
if err != nil {
if !ok {
return
}

if lcEvent.Action != lifecycle.NoneAction {
numVersions := uint64(1)
if lcEvent.Action == lifecycle.DeleteAllVersionsAction {
if lcEvent.Action.DeleteAll() {
numVersions = uint64(obj.NumVersions)
}
globalScannerMetrics.timeILM(lcEvent.Action)(numVersions)
timeILM(numVersions)
}
}()

@@ -1246,7 +1299,7 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
return false
}
// Assume it is still there.
logger.LogOnceIf(ctx, err, "non-transition-expiry")
ilmLogOnceIf(ctx, err, "non-transition-expiry")
return false
}
if dobj.Name == "" {

@@ -1261,8 +1314,11 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
if obj.DeleteMarker {
eventName = event.ObjectRemovedDeleteMarkerCreated
}
if lcEvent.Action.DeleteAll() {
switch lcEvent.Action {
case lifecycle.DeleteAllVersionsAction:
eventName = event.ObjectRemovedDeleteAllVersions
case lifecycle.DelMarkerDeleteAllVersionsAction:
eventName = event.ILMDelMarkerExpirationDelete
}
// Notify object deleted event.
sendEvent(eventArgs{

@@ -1287,7 +1343,7 @@ func applyLifecycleAction(event lifecycle.Event, src lcEventSrc, obj ObjectInfo)
switch action := event.Action; action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction,
lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction,
lifecycle.DeleteAllVersionsAction:
lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
success = applyExpiryRule(event, src, obj)
case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
success = applyTransitionRule(event, src, obj)

@@ -1387,48 +1443,7 @@ func (d *dynamicSleeper) Timer(ctx context.Context) func() {
t := time.Now()
return func() {
doneAt := time.Now()
for {
// Grab current values
d.mu.RLock()
minWait, maxWait := d.minSleep, d.maxSleep
factor := d.factor
cycle := d.cycle
d.mu.RUnlock()
elapsed := doneAt.Sub(t)
// Don't sleep for really small amount of time
wantSleep := time.Duration(float64(elapsed) * factor)
if wantSleep <= minWait {
return
}
if maxWait > 0 && wantSleep > maxWait {
wantSleep = maxWait
}
timer := time.NewTimer(wantSleep)
select {
case <-ctx.Done():
if !timer.Stop() {
<-timer.C
}
if d.isScanner {
globalScannerMetrics.incTime(scannerMetricYield, wantSleep)
}
return
case <-timer.C:
if d.isScanner {
globalScannerMetrics.incTime(scannerMetricYield, wantSleep)
}
return
case <-cycle:
if !timer.Stop() {
// We expired.
<-timer.C
if d.isScanner {
globalScannerMetrics.incTime(scannerMetricYield, wantSleep)
}
return
}
}
}
d.Sleep(ctx, doneAt.Sub(t))
}
}
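The last hunk above removes dynamicSleeper.Timer's inlined copy of the sleep loop; the returned closure now just measures elapsed time and delegates to Sleep. A rough standalone sketch of the resulting shape (the real Sleep also respects a maximum bound, a reconfiguration channel and scanner metrics):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// dynamicSleeper throttles work by sleeping proportionally to how long the
// previous operation took (factor * elapsed), similar to the scanner's sleeper.
type dynamicSleeper struct {
	factor   float64
	minSleep time.Duration
}

// Sleep waits factor*elapsed, unless that is below minSleep or the context ends.
func (d *dynamicSleeper) Sleep(ctx context.Context, elapsed time.Duration) {
	want := time.Duration(float64(elapsed) * d.factor)
	if want <= d.minSleep {
		return
	}
	select {
	case <-ctx.Done():
	case <-time.After(want):
	}
}

// Timer mirrors the simplified version in the diff: record a start time and
// return a function that hands the measured duration to Sleep.
func (d *dynamicSleeper) Timer(ctx context.Context) func() {
	t := time.Now()
	return func() {
		d.Sleep(ctx, time.Since(t))
	}
}

func main() {
	d := &dynamicSleeper{factor: 2, minSleep: time.Millisecond}
	done := d.Timer(context.Background())
	time.Sleep(10 * time.Millisecond) // simulated work
	start := time.Now()
	done() // sleeps roughly 2x the measured work
	fmt.Println("throttled for ~", time.Since(start).Round(time.Millisecond))
}
```

Keeping the wait logic in one place (Sleep) means the yield accounting and the max/min bounds only have to be maintained once.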
@@ -20,12 +20,15 @@ package cmd
import (
"context"
"encoding/xml"
"fmt"
"strings"
"sync"
"testing"
"time"

"github.com/google/uuid"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/versioning"
)

@@ -141,3 +144,96 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
}
}
}

func TestEvalActionFromLifecycle(t *testing.T) {
// Tests cover only ExpiredObjectDeleteAllVersions and DelMarkerExpiration actions
obj := ObjectInfo{
Name: "foo",
ModTime: time.Now().Add(-31 * 24 * time.Hour),
Size: 100 << 20,
VersionID: uuid.New().String(),
IsLatest: true,
NumVersions: 4,
}
delMarker := ObjectInfo{
Name: "foo-deleted",
ModTime: time.Now().Add(-61 * 24 * time.Hour),
Size: 0,
VersionID: uuid.New().String(),
IsLatest: true,
DeleteMarker: true,
NumVersions: 4,
}
deleteAllILM := `<LifecycleConfiguration>
<Rule>
<Expiration>
<Days>30</Days>
<ExpiredObjectAllVersions>true</ExpiredObjectAllVersions>
</Expiration>
<Filter></Filter>
<Status>Enabled</Status>
<ID>DeleteAllVersions</ID>
</Rule>
</LifecycleConfiguration>`
delMarkerILM := `<LifecycleConfiguration>
<Rule>
<ID>DelMarkerExpiration</ID>
<Filter></Filter>
<Status>Enabled</Status>
<DelMarkerExpiration>
<Days>60</Days>
</DelMarkerExpiration>
</Rule>
</LifecycleConfiguration>`
deleteAllLc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(deleteAllILM))
if err != nil {
t.Fatalf("Failed to parse deleteAllILM test ILM policy %v", err)
}
delMarkerLc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(delMarkerILM))
if err != nil {
t.Fatalf("Failed to parse delMarkerILM test ILM policy %v", err)
}
tests := []struct {
ilm lifecycle.Lifecycle
retention lock.Retention
obj ObjectInfo
want lifecycle.Action
}{
{
// with object locking
ilm: *deleteAllLc,
retention: lock.Retention{LockEnabled: true},
obj: obj,
want: lifecycle.NoneAction,
},
{
// without object locking
ilm: *deleteAllLc,
retention: lock.Retention{},
obj: obj,
want: lifecycle.DeleteAllVersionsAction,
},
{
// with object locking
ilm: *delMarkerLc,
retention: lock.Retention{LockEnabled: true},
obj: delMarker,
want: lifecycle.NoneAction,
},
{
// without object locking
ilm: *delMarkerLc,
retention: lock.Retention{},
obj: delMarker,
want: lifecycle.DelMarkerDeleteAllVersionsAction,
},
}

for i, test := range tests {
t.Run(fmt.Sprintf("TestEvalAction-%d", i), func(t *testing.T) {
if got := evalActionFromLifecycle(context.TODO(), test.ilm, test.retention, nil, test.obj); got.Action != test.want {
t.Fatalf("Expected %v but got %v", test.want, got)
}
})
}
}
@@ -18,7 +18,6 @@
package cmd

import (
"bytes"
"context"
"errors"
"fmt"

@@ -36,8 +35,6 @@ import (
"github.com/klauspost/compress/zstd"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/hash"
"github.com/minio/minio/internal/logger"
"github.com/tinylib/msgp/msgp"
"github.com/valyala/bytebufferpool"
)

@@ -191,6 +188,22 @@ type replicationAllStatsV1 struct {
ReplicaCount uint64 `msg:"ReplicaCount,omitempty"`
}

// empty returns true if the replicationAllStats is empty (contains no entries).
func (r *replicationAllStats) empty() bool {
if r == nil {
return true
}
if r.ReplicaSize != 0 || r.ReplicaCount != 0 {
return false
}
for _, v := range r.Targets {
if !v.Empty() {
return false
}
}
return true
}

// clone creates a deep-copy clone.
func (r *replicationAllStats) clone() *replicationAllStats {
if r == nil {

@@ -523,20 +536,18 @@ func (d *dataUsageCache) searchParent(h dataUsageHash) *dataUsageHash {
want := h.Key()
if idx := strings.LastIndexByte(want, '/'); idx >= 0 {
if v := d.find(want[:idx]); v != nil {
for child := range v.Children {
if child == want {
found := hashPath(want[:idx])
return &found
}
_, ok := v.Children[want]
if ok {
found := hashPath(want[:idx])
return &found
}
}
}
for k, v := range d.Cache {
for child := range v.Children {
if child == want {
found := dataUsageHash(k)
return &found
}
_, ok := v.Children[want]
if ok {
found := dataUsageHash(k)
return &found
}
}
return nil

@@ -621,7 +632,7 @@ func (d *dataUsageCache) copyWithChildren(src *dataUsageCache, hash dataUsageHas
d.Cache[hash.Key()] = e
for ch := range e.Children {
if ch == hash.Key() {
logger.LogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference"))
scannerLogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference"))
return
}
d.copyWithChildren(src, dataUsageHash(ch), &hash)

@@ -718,6 +729,53 @@ func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compact
}
}

// forceCompact will force compact the cache of the top entry.
// If the number of children is more than limit*100, it will compact self.
// When above the limit a cleanup will also be performed to remove any possible abandoned entries.
func (d *dataUsageCache) forceCompact(limit int) {
if d == nil || len(d.Cache) <= limit {
return
}
top := hashPath(d.Info.Name).Key()
topE := d.find(top)
if topE == nil {
scannerLogIf(GlobalContext, errors.New("forceCompact: root not found"))
return
}
// If off by 2 orders of magnitude, compact self and log error.
if len(topE.Children) > dataScannerForceCompactAtFolders {
// If we still have too many children, compact self.
scannerLogOnceIf(GlobalContext, fmt.Errorf("forceCompact: %q has %d children. Force compacting. Expect reduced scanner performance", d.Info.Name, len(topE.Children)), d.Info.Name)
d.reduceChildrenOf(hashPath(d.Info.Name), limit, true)
}
if len(d.Cache) <= limit {
return
}

// Check for abandoned entries.
found := make(map[string]struct{}, len(d.Cache))

// Mark all children recursively
var mark func(entry dataUsageEntry)
mark = func(entry dataUsageEntry) {
for k := range entry.Children {
found[k] = struct{}{}
if ch, ok := d.Cache[k]; ok {
mark(ch)
}
}
}
found[top] = struct{}{}
mark(*topE)

// Delete all entries not found.
for k := range d.Cache {
if _, ok := found[k]; !ok {
delete(d.Cache, k)
}
}
}

// StringAll returns a detailed string representation of all entries in the cache.
func (d *dataUsageCache) StringAll() string {
// Remove bloom filter from print.
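forceCompact above does two things when the cache grows past its limit: it compacts the root entry if it has an extreme number of children, then marks every entry reachable from the root and deletes the rest as abandoned. A minimal, generic sketch of that mark-and-sweep cleanup over a parent-to-children map (names are illustrative, not the dataUsageCache types):

```go
package main

import "fmt"

// node is a stripped-down stand-in for dataUsageEntry: it only knows its children.
type node struct {
	children []string
}

// sweepAbandoned keeps only entries reachable from root and deletes the rest,
// mirroring the mark/delete phase of forceCompact in the diff.
func sweepAbandoned(cache map[string]node, root string) {
	found := map[string]struct{}{root: {}}
	var mark func(n node)
	mark = func(n node) {
		for _, k := range n.children {
			found[k] = struct{}{}
			if ch, ok := cache[k]; ok {
				mark(ch)
			}
		}
	}
	if r, ok := cache[root]; ok {
		mark(r)
	}
	for k := range cache {
		if _, ok := found[k]; !ok {
			delete(cache, k)
		}
	}
}

func main() {
	cache := map[string]node{
		"root":      {children: []string{"a", "b"}},
		"a":         {},
		"b":         {children: []string{"c"}},
		"c":         {},
		"abandoned": {}, // unreachable from root
	}
	sweepAbandoned(cache, "root")
	fmt.Println(len(cache)) // 4: "abandoned" has been removed
}
```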
@@ -901,6 +959,9 @@ func (d *dataUsageCache) sizeRecursive(path string) *dataUsageEntry {
return root
}
flat := d.flatten(*root)
if flat.ReplicationStats.empty() {
flat.ReplicationStats = nil
}
return &flat
}

@@ -989,11 +1050,23 @@ func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string)
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()

r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, ObjectOptions{NoLock: true})
r, err := store.GetObjectNInfo(ctx, minioMetaBucket, pathJoin(bucketMetaPrefix, name), nil, http.Header{}, ObjectOptions{NoLock: true})
if err != nil {
switch err.(type) {
case ObjectNotFound, BucketNotFound:
return false, nil
r, err = store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, ObjectOptions{NoLock: true})
if err != nil {
switch err.(type) {
case ObjectNotFound, BucketNotFound:
return false, nil
case InsufficientReadQuorum, StorageErr:
return true, nil
}
return false, err
}
err = d.deserialize(r)
r.Close()
return err != nil, nil
case InsufficientReadQuorum, StorageErr:
return true, nil
}
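dataUsageCache.load above now reads the cache object from its new location first (minioMetaBucket under the bucket-metadata prefix) and only falls back to the legacy dataUsageBucket path when it is missing, treating quorum and storage errors as retryable in either place. A hedged sketch of the same try-new-then-fall-back read, with hypothetical fetch helpers:

```go
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// fetch is a stand-in for reading one object from a single location.
type fetch func(name string) ([]byte, error)

// loadWithFallback prefers the new location and silently falls back to the
// legacy one when the object has not been migrated yet.
func loadWithFallback(newLoc, oldLoc fetch, name string) ([]byte, error) {
	if b, err := newLoc(name); err == nil {
		return b, nil
	} else if !errors.Is(err, errNotFound) {
		return nil, err // real error: surface it (the caller may retry)
	}
	b, err := oldLoc(name)
	if errors.Is(err, errNotFound) {
		return nil, nil // nothing stored anywhere yet
	}
	return b, err
}

func main() {
	newLoc := func(string) ([]byte, error) { return nil, errNotFound }
	oldLoc := func(string) ([]byte, error) { return []byte("legacy cache"), nil }
	b, err := loadWithFallback(newLoc, oldLoc, ".usage-cache.bin")
	fmt.Println(string(b), err) // "legacy cache" <nil>
}
```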
@@ -1024,7 +1097,7 @@ func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string)
}

if retries == 5 {
logger.LogOnceIf(ctx, fmt.Errorf("maximum retry reached to load the data usage cache `%s`", name), "retry-loading-data-usage-cache")
scannerLogOnceIf(ctx, fmt.Errorf("maximum retry reached to load the data usage cache `%s`", name), "retry-loading-data-usage-cache")
}

return nil

@@ -1054,24 +1127,11 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
}

save := func(name string, timeout time.Duration) error {
hr, err := hash.NewReader(ctx, bytes.NewReader(buf.Bytes()), int64(buf.Len()), "", "", int64(buf.Len()))
if err != nil {
return err
}

// Abandon if more than a minute, so we don't hold up scanner.
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()

_, err = store.PutObject(ctx,
dataUsageBucket,
name,
NewPutObjReader(hr),
ObjectOptions{NoLock: true})
if isErrBucketNotFound(err) {
return nil
}
return err
return saveConfig(ctx, store, pathJoin(bucketMetaPrefix, name), buf.Bytes())
}
defer save(name+".bkp", 5*time.Second) // Keep a backup as well

@@ -25,7 +25,6 @@ import (
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/internal/cachevalue"
"github.com/minio/minio/internal/logger"
)

const (

@@ -49,7 +48,7 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
json := jsoniter.ConfigCompatibleWithStandardLibrary
dataUsageJSON, err := json.Marshal(dataUsageInfo)
if err != nil {
logger.LogIf(ctx, err)
scannerLogIf(ctx, err)
continue
}
if attempts > 10 {

@@ -57,7 +56,7 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
attempts = 1
}
if err = saveConfig(ctx, objAPI, dataUsageObjNamePath, dataUsageJSON); err != nil {
logger.LogOnceIf(ctx, err, dataUsageObjNamePath)
scannerLogOnceIf(ctx, err, dataUsageObjNamePath)
}
attempts++
}

@@ -80,12 +79,12 @@ func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket
prefixUsageCache.InitOnce(30*time.Second,
// No need to fail upon Update() error, fallback to old value.
cachevalue.Opts{ReturnLastGood: true, NoWait: true},
func() (map[string]uint64, error) {
func(ctx context.Context) (map[string]uint64, error) {
m := make(map[string]uint64)
for _, pool := range z.serverPools {
for _, er := range pool.sets {
// Load bucket usage prefixes
ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
ctx, done := context.WithTimeout(ctx, 2*time.Second)
ok := cache.load(ctx, er, bucket+slashSeparator+dataUsageCacheName) == nil
done()
if ok {

@@ -108,7 +107,7 @@ func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket
},
)

return prefixUsageCache.Get()
return prefixUsageCache.GetWithCtx(ctx)
}

func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) {
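The data-usage hunks above switch the prefix-usage cache to a context-aware update function and GetWithCtx(ctx), so the caller's cancellation reaches the refresh instead of a detached context.Background(). The sketch below shows the same context-threading idea with a tiny home-grown cached value; it is not the minio/internal/cachevalue API:

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// cachedValue refreshes its value at most once per TTL, passing the caller's
// context into the update function so slow refreshes can be cancelled.
type cachedValue[T any] struct {
	mu      sync.Mutex
	ttl     time.Duration
	update  func(ctx context.Context) (T, error)
	value   T
	fetched time.Time
}

func newCachedValue[T any](ttl time.Duration, update func(ctx context.Context) (T, error)) *cachedValue[T] {
	return &cachedValue[T]{ttl: ttl, update: update}
}

func (c *cachedValue[T]) GetWithCtx(ctx context.Context) (T, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if time.Since(c.fetched) < c.ttl {
		return c.value, nil
	}
	v, err := c.update(ctx)
	if err != nil {
		return c.value, err // fall back to the last good value
	}
	c.value, c.fetched = v, time.Now()
	return v, nil
}

func main() {
	cv := newCachedValue(30*time.Second, func(ctx context.Context) (map[string]uint64, error) {
		// A real implementation would scan storage here, honouring ctx.
		return map[string]uint64{"prefix/": 42}, ctx.Err()
	})
	v, err := cv.GetWithCtx(context.Background())
	fmt.Println(v, err)
}
```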
@@ -364,6 +364,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
}
if e == nil {
t.Fatal("got nil result")
return
}
if e.Size != int64(w.size) {
t.Error("got size", e.Size, "want", w.size)

@@ -22,7 +22,7 @@ import (
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)

// Data types used for returning dummy tagging XML.
@@ -110,7 +110,7 @@ func kmsKeyIDFromMetadata(metadata map[string]string) string {
//
// DecryptETags uses a KMS bulk decryption API, if available, which
// is more efficient than decrypting ETags sequentually.
func DecryptETags(ctx context.Context, k kms.KMS, objects []ObjectInfo) error {
func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
const BatchSize = 250 // We process the objects in batches - 250 is a reasonable default.
var (
metadata = make([]map[string]string, 0, BatchSize)

@@ -267,7 +267,11 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
if err != nil {
return err
}
oldKey, err := GlobalKMS.DecryptKey(keyID, kmsKey, kms.Context{bucket: path.Join(bucket, object)})
oldKey, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{
Name: keyID,
Ciphertext: kmsKey,
AssociatedData: kms.Context{bucket: path.Join(bucket, object)},
})
if err != nil {
return err
}

@@ -276,7 +280,10 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
return err
}

newKey, err := GlobalKMS.GenerateKey(ctx, "", kms.Context{bucket: path.Join(bucket, object)})
newKey, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: GlobalKMS.DefaultKey,
AssociatedData: kms.Context{bucket: path.Join(bucket, object)},
})
if err != nil {
return err
}

@@ -312,7 +319,10 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
if _, ok := kmsCtx[bucket]; !ok {
kmsCtx[bucket] = path.Join(bucket, object)
}
newKey, err := GlobalKMS.GenerateKey(ctx, newKeyID, kmsCtx)
newKey, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: newKeyID,
AssociatedData: kmsCtx,
})
if err != nil {
return err
}
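The encryption hunks above and below replace positional GlobalKMS calls with request structs: kms.DecryptRequest{Name, Ciphertext, AssociatedData} and kms.GenerateKeyRequest{Name, AssociatedData}. The toy client below only illustrates the options-struct calling style; it is not MinIO's kms package, though the field names follow the diff:

```go
package main

import "fmt"

// generateKeyRequest mirrors the request-struct style seen in the diff:
// optional fields are named, so call sites stay readable as options grow.
type generateKeyRequest struct {
	Name           string            // key name; empty means "use the default key"
	AssociatedData map[string]string // bound to the key, must match on decrypt
}

type kmsClient struct {
	defaultKey string
}

// GenerateKey picks the default key when no name is given and returns a
// fake identifier, standing in for a real data-key generation call.
func (c *kmsClient) GenerateKey(req *generateKeyRequest) (keyID string, err error) {
	name := req.Name
	if name == "" {
		name = c.defaultKey
	}
	return fmt.Sprintf("%s#%d-fields", name, len(req.AssociatedData)), nil
}

func main() {
	kms := &kmsClient{defaultKey: "minio-default-key"}
	id, _ := kms.GenerateKey(&generateKeyRequest{
		AssociatedData: map[string]string{"bucket": "photos/object.png"},
	})
	fmt.Println(id)
}
```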
@@ -352,7 +362,9 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
if GlobalKMS == nil {
return crypto.ObjectKey{}, errKMSNotConfigured
}
key, err := GlobalKMS.GenerateKey(ctx, "", kms.Context{bucket: path.Join(bucket, object)})
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
AssociatedData: kms.Context{bucket: path.Join(bucket, object)},
})
if err != nil {
return crypto.ObjectKey{}, err
}

@@ -379,7 +391,10 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
if _, ok := kmsCtx[bucket]; !ok {
kmsCtx[bucket] = path.Join(bucket, object)
}
key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsCtx)
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: keyID,
AssociatedData: kmsCtx,
})
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
return crypto.ObjectKey{}, errKMSKeyNotFound

@@ -475,11 +490,10 @@ func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, m
func decryptObjectMeta(key []byte, bucket, object string, metadata map[string]string) ([]byte, error) {
switch kind, _ := crypto.IsEncrypted(metadata); kind {
case crypto.S3:
KMS := GlobalKMS
if KMS == nil {
if GlobalKMS == nil {
return nil, errKMSNotConfigured
}
objectKey, err := crypto.S3.UnsealObjectKey(KMS, metadata, bucket, object)
objectKey, err := crypto.S3.UnsealObjectKey(GlobalKMS, metadata, bucket, object)
if err != nil {
return nil, err
}

@@ -1089,7 +1103,7 @@ func (o *ObjectInfo) decryptPartsChecksums() {
if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
decrypted, err := o.metadataDecrypter()("object-checksum", data)
if err != nil {
logger.LogIf(GlobalContext, err)
encLogIf(GlobalContext, err)
return
}
data = decrypted

@@ -1151,7 +1165,7 @@ func (o *ObjectInfo) decryptChecksums(part int) map[string]string {
if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
decrypted, err := o.metadataDecrypter()("object-checksum", data)
if err != nil {
logger.LogIf(GlobalContext, err)
encLogIf(GlobalContext, err)
return nil
}
data = decrypted
Some files were not shown because too many files have changed in this diff.