Compare commits

...

285 Commits

Author SHA1 Message Date
Harshavardhana ba54b39c02
fix: crash when audit webhook queue_dir is not writable (#19854)
This is a regression introduced in the #19275 refactor.
2024-06-01 20:03:39 -07:00
Anis Eleuch 2a75225569
kafka: _MINIO_KAFKA_DEBUG to enable sarama debug messages (#19849) 2024-06-01 08:02:59 -07:00
Klaus Post e72429c79c
Add sizes to traces (#19851)
Added to storage and grid traces; this can provide more context for traces that aren't HTTP. Other trace types may follow.
2024-05-31 22:17:37 -07:00
Klaus Post c5b3f5553f
Add per connection RPC metrics (#19852)
Provides individual and aggregate stats for each RPC connection.

Example:

```
  "rpc": {
   "collectedAt": "2024-05-31T14:33:29.1373103+02:00",
   "connected": 30,
   "disconnected": 0,
   "outgoingStreams": 69,
   "incomingStreams": 0,
   "outgoingBytes": 174822796,
   "incomingBytes": 175821566,
   "outgoingMessages": 768595,
   "incomingMessages": 768589,
   "outQueue": 0,
   "lastPongTime": "2024-05-31T12:33:28Z",
   "byDestination": {
    "http://127.0.0.1:9001": {
     "collectedAt": "2024-05-31T14:33:29.1373103+02:00",
     "connected": 5,
     "disconnected": 0,
     "outgoingStreams": 2,
     "incomingStreams": 0,
     "outgoingBytes": 38432543,
     "incomingBytes": 66604052,
     "outgoingMessages": 229496,
     "incomingMessages": 229575,
     "outQueue": 0,
     "lastPongTime": "2024-05-31T12:33:27Z"
    },
    "http://127.0.0.1:9002": {
     "collectedAt": "2024-05-31T14:33:29.1373103+02:00",
     "connected": 5,
     "disconnected": 0,
     "outgoingStreams": 6,
     "incomingStreams": 0,
     "outgoingBytes": 38215680,
     "incomingBytes": 66121283,
     "outgoingMessages": 228525,
     "incomingMessages": 228510,
     "outQueue": 0,
     "lastPongTime": "2024-05-31T12:33:27Z"
    },
...
```
2024-05-31 22:16:24 -07:00
Klaus Post d3ae0aaad3
Add max buffering to SFTP (#19848)
Prevent OOM from adversarial use of SFTP uploads by setting a 100MB cap on the upload buffer.
2024-05-31 14:28:07 -07:00
Klaus Post d67bccf861
Add xl-meta partial shard reconstruction (#19841)
Add partial shard reconstruction

* Add partial shard reconstruction
* Fix padding causing the last shard to be rejected
* Add md5 checks on single parts
* Move md5 verified to `verified/filename.ext`
* Move complete (without md5) to `complete/filename.ext.partno`

It's not pretty, but at least now the md5 gives some confidence it works correctly.
2024-05-31 07:49:23 -07:00
Anis Eleuch 1277ad69a6
heal: Remove .healing.bin when all ES drives are healing (#19846)
In the very rare case when all drives in an erasure set need to be healed,
remove .healing.bin from all drives, otherwise healing will be stuck in a
loop.

Also, fix a unit test that sometimes fails due to an incorrect test.
2024-05-31 07:48:50 -07:00
Harshavardhana 8f93e81afb
change service account embedded policy size limit (#19840)
Bonus: trim off all unnecessary spaces to allow
for a real 2048 characters in policies for STS handlers,
and re-use the code in all STS handlers.
2024-05-30 11:10:41 -07:00
Harshavardhana 4af31e654b
avoid pre-populating buffers for deployments < 32GiB memory (#19839) 2024-05-30 04:58:12 -07:00
Harshavardhana aad50579ba
fix: wire up ILM sub-system properly for help (#19836) 2024-05-30 01:14:58 -07:00
Harshavardhana 38d059b0ae
fix: single node multi-drive must register local drives properly (#19832)
Since #19688 a regression was introduced in drive
lookups for single node multi-drive setups; drive replacement
would not work correctly without this PR.
2024-05-29 13:12:44 -07:00
Klaus Post bd4eeb4522
Fix flipped EcM, EcN in metadata header (#19831)
Since this is a tuple-encoded field, we can simply swap the struct members.
2024-05-29 12:14:09 -07:00
jiuker 03e3493288
fix: correctly parse the tagging error for PostPolicyBucketHandler (#19825) 2024-05-29 11:50:46 -07:00
Harshavardhana 64baedf5a4
fix: hide prefixes for Hadoop properly (#19821) 2024-05-28 15:53:15 -07:00
Minio Trusted 2f64d5f77e Update yaml files to latest version RELEASE.2024-05-28T17-19-04Z 2024-05-28 19:23:04 +00:00
Anis Eleuch f79a4ef4d0
policy: More defensive code validating svc:DurationSeconds (#19820)
This does not fix any current issue, but merging https://github.com/minio/madmin-go/pull/282
could drop the validation of the service account expiration time.

Add more defensive code for now. In the future, we should avoid doing
validation in another library.
2024-05-28 10:19:04 -07:00
Taran Pelkey 2d53854b19
Restrict access keys for users and groups to not allow '=' or ',' (#19749)
* initial commit

* Add UTF check

---------

Co-authored-by: Harshavardhana <harsha@minio.io>
2024-05-28 10:14:16 -07:00
Harshavardhana e5c83535af
chore: upgrade deps (#19819)
Signed-off-by: Harshavardhana <harsha@minio.io>
2024-05-28 02:27:44 -07:00
jiuker c904ef966e
feat: support tags for PostPolicy upload (#19816) 2024-05-27 21:44:00 -07:00
Minio Trusted 8f266e0772 Update yaml files to latest version RELEASE.2024-05-27T19-17-46Z 2024-05-27 23:52:43 +00:00
Harshavardhana e0fe7cc391
fix: information disclosure bug in preconditions GET (#19810)
The precondition check was being honored before validating
whether anonymous access is allowed on the metadata of an
object, leading to disclosure of the following
headers.

```
Last-Modified
Etag
x-amz-version-id
Expires
Cache-Control
```

Although the information disclosed is minimal and opaque
in nature, it still reveals whether an object with a specific
name exists, even without sufficient permissions.
2024-05-27 12:17:46 -07:00
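A minimal sketch of the ordering behind this fix: authorization is evaluated before any precondition, so an unauthorized caller never receives a 304/412 that would leak ETag or Last-Modified. The `checkAccess` helper and handler names are placeholders, not MinIO's actual policy evaluation or handler code.

```go
package main

import "net/http"

// checkAccess is a stand-in for the real authorization check; in MinIO this
// is IAM/policy evaluation, which is not reproduced here.
func checkAccess(r *http.Request) bool { return r.Header.Get("Authorization") != "" }

// getHandler sketches the ordering: authorization is checked before any
// precondition (If-Match / If-None-Match / If-Modified-Since), so an
// anonymous caller never learns the ETag or Last-Modified of a private object.
func getHandler(etag string, w http.ResponseWriter, r *http.Request) {
	if !checkAccess(r) {
		http.Error(w, "Access Denied", http.StatusForbidden) // no metadata leaked
		return
	}
	// Only now is it safe to evaluate preconditions and emit metadata headers.
	if r.Header.Get("If-None-Match") == etag {
		w.WriteHeader(http.StatusNotModified)
		return
	}
	w.Header().Set("ETag", etag)
	w.Write([]byte("object data"))
}

func main() {
	http.HandleFunc("/bucket/object", func(w http.ResponseWriter, r *http.Request) {
		getHandler(`"abc123"`, w, r)
	})
	http.ListenAndServe(":8080", nil)
}
```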
Harshavardhana 9d20dec56a Revert "remove dataErrs from er.deleteIfDangling code"
This reverts commit 7d75b1e758.

This fails multipart tests; we need this code to handle
existing challenges, so wait for the comprehensive fix.
2024-05-26 11:13:29 -07:00
Harshavardhana 597a785253
fix: authenticate LDAP via actual DN instead of normalized DN (#19805)
fix: authenticate LDAP via actual DN instead of normalized DN

Normalized DN is only for internal representation, not for
external communication; any communication with LDAP must be
based on the actual user DN. LDAP servers do not understand
normalized DNs.

fixes #19757
2024-05-25 06:43:06 -07:00
Harshavardhana 7d75b1e758 remove dataErrs from er.deleteIfDangling code
Avoid this until a comprehensive change, such as
https://github.com/minio/minio/pull/19797, is merged.
2024-05-24 18:20:04 -07:00
Aditya Manthramurthy 5f78691fcf
ldap: Add user DN attributes list config param (#19758)
This change uses the updated ldap library in minio/pkg (bumped
up to v3). A new config parameter is added for LDAP configuration to
specify extra user attributes to load from the LDAP server and to store
them as additional claims for the user.

A test is added in sts_handlers.go that shows how to access the LDAP
attributes as a claim.

This is in preparation for adding SSH pubkey authentication to MinIO's SFTP
integration.
2024-05-24 16:05:23 -07:00
Shireesh Anjal a591e06ae5
Add cluster scanner metrics in metrics-v3 (#19517)
endpoint: /minio/metrics/v3/cluster/scanner
metrics:
 - bucket_scans_finished (counter)
 - bucket_scans_started (counter)
 - directories_scanned (counter)
 - last_activity_nano_seconds (gauge)
 - objects_scanned (counter)
 - versions_scanned (counter)
2024-05-24 12:29:25 -07:00
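For illustration, a small Go client that scrapes the scanner metrics endpoint named in this commit. The endpoint path is taken from the commit message; the host, port, and the assumption of unauthenticated access are placeholders for a local test deployment.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	// Endpoint path from the commit message above; host/port are placeholders.
	const url = "http://localhost:9000/minio/metrics/v3/cluster/scanner"

	client := &http.Client{Timeout: 10 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	// The response is Prometheus text exposition format, one sample per line.
	fmt.Println(string(body))
}
```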
Harshavardhana 443c93c634
compute time spent in ILM properly (#19806) 2024-05-24 12:28:51 -07:00
Shireesh Anjal 5659cddc84
Add cluster config metrics in metrics-v3 (#19507)
endpoint: /minio/metrics/v3/cluster/config
metrics:
- write_quorum
- rrs_parity
- standard_parity
2024-05-24 05:50:46 -07:00
Shireesh Anjal 2a03a34bde
Upgrade madmin-go to v3.0.52 (#19798)
This will ensure that the content of /proc/cmdline from each server is
captured in the health report.
2024-05-24 05:34:57 -07:00
Shubhendu 1654a9b7e6
Use point in time values for `gauge` metrics in graphs (#19690)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-05-24 04:11:51 -07:00
Shireesh Anjal 673a521711
Change endpoint of v3 notification metrics (#19804)
from /cluster/notification to /notification
2024-05-24 04:10:24 -07:00
Harshavardhana 2e23076688
move windows runners to in-house (#19800)
GitHub CI runners for Windows have gotten very slow;
move them to our own hosted runners.
2024-05-23 15:29:33 -07:00
Klaus Post b92ac55250
Add multipart combination to xl-meta (#19780)
Add combination of multiple parts.

Parts will be reconstructed and saved separately and can manually be combined into the complete object.

Parts will be named `(version_id)-(filename).(partnum).(in)complete`.
2024-05-23 09:37:31 -07:00
Shireesh Anjal 7981509cc8
Add cluster and bucket replication metrics in metrics-v3 (#19546)
endpoint: /minio/metrics/v3/cluster/replication
metrics:
- average_active_workers
- average_queued_bytes
- average_queued_count
- average_transfer_rate
- current_active_workers
- current_transfer_rate
- last_minute_queued_bytes
- last_minute_queued_count
- max_active_workers
- max_queued_bytes
- max_queued_count
- max_transfer_rate
- recent_backlog_count

endpoint: /minio/metrics/v3/api/bucket/replication
metrics:
- last_hour_failed_bytes
- last_hour_failed_count
- last_minute_failed_bytes
- last_minute_failed_count
- latency_ms
- proxied_delete_tagging_requests_total
- proxied_get_requests_failures
- proxied_get_requests_total
- proxied_get_tagging_requests_failures
- proxied_get_tagging_requests_total
- proxied_head_requests_failures
- proxied_head_requests_total
- proxied_put_tagging_requests_failures
- proxied_put_tagging_requests_total
- sent_bytes
- sent_count
- total_failed_bytes
- total_failed_count
- proxied_delete_tagging_requests_failures
2024-05-23 00:41:18 -07:00
Krishnan Parthasarathi 6d5bc045bc
Disallow ExpiredObjectAllVersions with object lock (#19792)
Relaxes restrictions on Expiration and NoncurrentVersionExpiration
placed by https://github.com/minio/minio/pull/19785.
ref: https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lock-managing.html#object-lock-managing-lifecycle

> Object lifecycle management configurations continue functioning
normally on protected objects, including placing delete markers.
However, a locked version of an object cannot be deleted by a S3
Lifecycle expiration policy. Object Lock is maintained regardless of
the object's storage class and throughout S3 Lifecycle
transitions between storage classes.
2024-05-22 18:12:48 -07:00
Harshavardhana d38e020b29
remove errant logs for disconnected remote (#19793)
Signed-off-by: Harshavardhana <harsha@minio.io>
2024-05-22 18:12:23 -07:00
Poorna 7d29030292
fix list results returned for spark max-keys=2 listing (#19791)
This PR continues the fix from #19725 for some unhandled cases.
2024-05-22 16:16:34 -07:00
Shubhendu 7c7650b7c3
Add sufficient deadlines and countermeasures to handle hung node scenario (#19688)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
Signed-off-by: Harshavardhana <harsha@minio.io>
2024-05-22 16:07:14 -07:00
Harshavardhana ca80eced24
usage of deadline conn at Accept() breaks websocket (#19789)
Fortunately this is not wired up for use; however, if anyone
enables deadlines for the connection, MinIO startups
sporadically fail.
2024-05-22 10:49:27 -07:00
Anis Eleuch d0e0b81d8e
Fix race in get/set of system/audit targets to avoid race errors (#19790) 2024-05-22 09:23:03 -07:00
jiuker 391baa1c9a
test: add reject ilm rule test case (#19788) 2024-05-22 04:26:59 -07:00
Harshavardhana ae14681c3e Revert "Fix two-way stream cancelation and pings (#19763)"
This reverts commit 4d698841f4.
2024-05-22 03:00:00 -07:00
Klaus Post 4d698841f4
Fix two-way stream cancelation and pings (#19763)
Do not log errors on oneway streams when sending ping fails. Instead, cancel the stream.

This also makes sure pings are sent when blocked on sending responses.
2024-05-22 01:25:25 -07:00
jiuker 9906b3ade9
fix: reject ilm rule when bucket LockEnabled (#19785) 2024-05-21 23:50:03 -07:00
Anis Eleuch bf1769d3e0
xl: Avoid marking a drive offline after one part read failure (#19779)
This commit will fix one rare case of a multipart object that
can be read in theory but for which the GetObject API returned an error.

It turned out that six-year-old code was marking a drive offline
when the bitrot streaming failed to read a part from a disk with any error.
This can affect reading a subsequent part: even with enough shards,
reconstruction fails because one drive was marked offline earlier.

This commit removes the code that marks the drive offline. It also
closes the bitrot streaming reader before setting it to nil.
2024-05-21 07:36:21 -07:00
Harshavardhana 63e1ad9f29 fix: the user-agent for Veeam 2024-05-20 11:54:52 -07:00
Klaus Post 2c7bcee53f
Add cross-version remapped merges to xl-meta (#19765)
Adds `-xver` which can be used with `-export` and `-combine` to attempt to combine files across versions if data is suspected to be the same. Overlapping data is compared.

Bonus: Make `inspect` accept wildcards.
2024-05-19 08:31:54 -07:00
Harshavardhana 1fd90c93ff
re-use StorageAPI while loading drive formats (#19770)
Bonus: safe settings for deployment ID to avoid races
2024-05-19 01:06:49 -07:00
Poorna e947a844c9
Fix test scripts to use mc ready (#19768) 2024-05-18 11:19:01 -07:00
Poorna 4e2d39293a
Fix build script to wait for server to come up (#19767) 2024-05-17 14:43:59 -07:00
Krishnan Parthasarathi 1228d6bf1a
Return NumVersions in quorum when available (#19766)
Similar to https://github.com/minio/minio/pull/17925
2024-05-17 13:57:37 -07:00
Shireesh Anjal fc4561c64c
Start callhome immediately after enabling (#19764)
Currently, on enabling callhome (or restarting the server), the callhome
job gets scheduled. This means that one has to wait for 24hrs (the
default frequency duration) to see it in action and to figure out if it
is working as expected.

It will be a better user experience to perform the first callhome
execution immediately after enabling it (or on server start if already
enabled).

Also, generate audit event on callhome execution, setting the error
field in case the execution has failed.
2024-05-17 09:53:34 -07:00
Klaus Post 3b7747b42b
Tweak multipart uploads (#19756)
* Store ModTime in the upload ID; return it when listing instead of the current time.
* Use this ModTime to expire and skip reading the file info.
* Consistent upload sorting in listing (since it now has the ModTime).
* Exclude healing disks to avoid returning an empty list.
2024-05-17 09:40:09 -07:00
Harshavardhana e432e79324
avoid calling 'admin info' for disk, cpu, net metrics collection (#19762)
resource metrics collection was incorrectly making fan-out
liveness peer calls where it's not needed.
2024-05-17 08:15:13 -07:00
Harshavardhana 08d74819b6
handle racy updates to globalSite config (#19750)
```
==================
WARNING: DATA RACE
Read at 0x0000082be990 by goroutine 205:
  github.com/minio/minio/cmd.setCommonHeaders()

Previous write at 0x0000082be990 by main goroutine:
  github.com/minio/minio/cmd.lookupConfigs()
```
2024-05-16 16:13:47 -07:00
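A common way to make this kind of shared-config update race-free in Go is to publish an immutable snapshot through an atomic pointer, so readers never observe a half-written value. This is an illustrative pattern only; the `siteConfig` type and fields are made up, and it is not claimed to be the actual MinIO fix.

```go
package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// siteConfig stands in for the globally shared configuration value; the
// field names are illustrative, not MinIO's actual config type.
type siteConfig struct {
	Name   string
	Region string
}

// current holds an immutable snapshot; readers Load it, writers Store a new one.
var current atomic.Pointer[siteConfig]

func lookupConfigs(region string) {
	// Build a fresh value and publish it atomically instead of mutating in place.
	current.Store(&siteConfig{Name: "site1", Region: region})
}

func setCommonHeaders() string {
	if cfg := current.Load(); cfg != nil {
		return cfg.Region
	}
	return ""
}

func main() {
	current.Store(&siteConfig{Name: "site1", Region: "us-east-1"})

	var wg sync.WaitGroup
	wg.Add(2)
	go func() { defer wg.Done(); lookupConfigs("eu-west-1") }()
	go func() { defer wg.Done(); fmt.Println("region:", setCommonHeaders()) }()
	wg.Wait()
}
```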
Poorna aa3fde1784
Add ListObjectsV2 unit test (#19753)
for PR: #19725
2024-05-15 20:40:51 -07:00
Harshavardhana 0b3eb7f218
add more deadlines and pass around context under most situations (#19752) 2024-05-15 15:19:00 -07:00
Anis Eleuch 69c9496c71
Upgrade github.com/minio/pkg/v2 and other deps (#19747) 2024-05-15 11:04:40 -07:00
Klaus Post b792b36495
Add Veeam storage class override (#19748)
Recent Veeam is very picky about storage class names. Add `_MINIO_VEEAM_FORCE_SC` env var.

It will override the storage class returned by the storage backend if it is non-standard
and we detect a Veeam client by checking the User Agent.

Applies to HeadObject/GetObject/ListObject*
2024-05-15 11:04:16 -07:00
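A rough sketch of the override behavior described above: if the environment variable is set and the request appears to come from a Veeam client, the returned storage class is replaced. The naive User-Agent substring check and helper name are assumptions, not MinIO's exact detection logic.

```go
package main

import (
	"fmt"
	"net/http"
	"os"
	"strings"
)

// overrideStorageClass replaces the storage class reported to the client when
// _MINIO_VEEAM_FORCE_SC is set and the request looks like it comes from Veeam
// (detected here by a naive User-Agent substring check).
func overrideStorageClass(r *http.Request, backendSC string) string {
	forced := os.Getenv("_MINIO_VEEAM_FORCE_SC")
	if forced == "" {
		return backendSC
	}
	if strings.Contains(r.Header.Get("User-Agent"), "Veeam") {
		return forced
	}
	return backendSC
}

func main() {
	os.Setenv("_MINIO_VEEAM_FORCE_SC", "STANDARD")
	r, _ := http.NewRequest(http.MethodHead, "/bucket/object", nil)
	r.Header.Set("User-Agent", "Veeam Backup and Replication")
	fmt.Println(overrideStorageClass(r, "MY_CUSTOM_SC")) // prints STANDARD
}
```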
Harshavardhana d3db7d31a3
fix: add deadlines for all synchronous REST callers (#19741)
add deadlines that can be dynamically changed via
the drive max timeout values.

Bonus: optimize "file not found" case and hung drives/network - circuit break the check and return right
away instead of waiting.
2024-05-15 09:52:29 -07:00
Shireesh Anjal c05ca63158
Fix crash on /minio/metrics/v3?list (#19745)
An unchecked map access was causing a panic.
2024-05-15 09:06:35 -07:00
Klaus Post 6d3e0c7db6
Tweak one way stream ping (#19743)
Do not log errors on oneway streams when sending ping fails. Instead cancel the stream.

This also makes sure pings are sent when blocked on sending responses.

I will do a separate PR that includes this and adds pings to two-way streams as well as tests for pings.
2024-05-15 08:39:21 -07:00
Shireesh Anjal 0e59e50b39
Capture ttfb api metrics only for GetObject (#19733)
as that is the only API where the TTFB metric is beneficial, and
capturing this for all APIs exponentially increases the response size in
large clusters.
2024-05-14 23:25:13 -07:00
Klaus Post d4b391de1b
Add PutObject Ring Buffer (#19605)
Replace the `io.Pipe` from streamingBitrotWriter -> CreateFile with a fixed size ring buffer.

This will add an output buffer for encoded shards to be written to disk - potentially via RPC.

This will remove blocking when `(*streamingBitrotWriter).Write` is called, and it writes hashes and data.

With current settings, the write looks like this:

```
Outbound
┌───────────────────┐             ┌────────────────┐               ┌───────────────┐                      ┌────────────────┐
│                   │   Parr.     │                │  (http body)  │               │                      │                │
│ Bitrot Hash       │     Write   │      Pipe      │      Read     │  HTTP buffer  │    Write (syscall)   │  TCP Buffer    │
│ Erasure Shard     │ ──────────► │  (unbuffered)  │ ────────────► │   (64K Max)   │ ───────────────────► │    (4MB)       │
│                   │             │                │               │  (io.Copy)    │                      │                │
└───────────────────┘             └────────────────┘               └───────────────┘                      └────────────────┘
```

We write a Hash (32 bytes). Since the pipe is unbuffered, it will block until the 32 bytes have 
been delivered to the TCP buffer, and the next Read hits the Pipe.

Then we write the shard data. This will typically be bigger than 64KB, so it will block until two blocks 
have been read from the pipe.

When we insert a ring buffer:

```
Outbound
┌───────────────────┐             ┌────────────────┐               ┌───────────────┐                      ┌────────────────┐
│                   │             │                │  (http body)  │               │                      │                │
│ Bitrot Hash       │     Write   │  Ring Buffer   │      Read     │  HTTP buffer  │    Write (syscall)   │  TCP Buffer    │
│ Erasure Shard     │ ──────────► │    (2MB)       │ ────────────► │   (64K Max)   │ ───────────────────► │    (4MB)       │
│                   │             │                │               │  (io.Copy)    │                      │                │
└───────────────────┘             └────────────────┘               └───────────────┘                      └────────────────┘
```

The hash+shard will fit within the ring buffer, so writes will not block - but will complete after a 
memcopy. Reads can fill the 64KB buffer if there is data for it.

If the network is congested, the ring buffer will become filled, and all syscalls will be on full buffers.
Only when the ring buffer is filled will erasure coding start blocking.

Since there is always "space" to write output data, we remove the parallel writing since we are 
always writing to memory now, and the goroutine synchronization overhead is probably not worth taking. 

If the output were blocked in the existing implementation, we would still wait for it to unblock in the parallel write, so it would 
make no difference there - except now the ring buffer smooths out the load.

There are some micro-optimizations we could look at later. The biggest is that, in most cases, 
we could encode directly to the ring buffer - if we are not at a boundary. Also, "force filling" the 
Read requests (i.e., blocking until a full read can be completed) could be investigated and maybe 
allow concurrent memory on read and write.
2024-05-14 17:11:04 -07:00
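For context, a minimal sketch of a fixed-size blocking ring buffer exposing io.Reader/io.Writer, in the spirit of the buffer in the diagrams above. The actual buffer used for CreateFile lives in its own package and differs in size, error handling, and features.

```go
package main

import (
	"fmt"
	"io"
	"sync"
)

// ringBuffer is a minimal fixed-size FIFO byte buffer that blocks writers
// when full and readers when empty. Illustrative only.
type ringBuffer struct {
	mu     sync.Mutex
	cond   *sync.Cond
	buf    []byte
	r, w   int // read and write offsets
	size   int // number of unread bytes
	closed bool
}

func newRingBuffer(capacity int) *ringBuffer {
	rb := &ringBuffer{buf: make([]byte, capacity)}
	rb.cond = sync.NewCond(&rb.mu)
	return rb
}

func (rb *ringBuffer) Write(p []byte) (int, error) {
	rb.mu.Lock()
	defer rb.mu.Unlock()
	var n int
	for len(p) > 0 {
		for rb.size == len(rb.buf) && !rb.closed {
			rb.cond.Wait() // buffer full: wait for the reader
		}
		if rb.closed {
			return n, io.ErrClosedPipe
		}
		// Copy as much as fits before wrapping around.
		chunk := min(len(p), len(rb.buf)-rb.size)
		chunk = min(chunk, len(rb.buf)-rb.w)
		copy(rb.buf[rb.w:], p[:chunk])
		rb.w = (rb.w + chunk) % len(rb.buf)
		rb.size += chunk
		p = p[chunk:]
		n += chunk
		rb.cond.Broadcast()
	}
	return n, nil
}

func (rb *ringBuffer) Read(p []byte) (int, error) {
	rb.mu.Lock()
	defer rb.mu.Unlock()
	for rb.size == 0 {
		if rb.closed {
			return 0, io.EOF
		}
		rb.cond.Wait() // buffer empty: wait for the writer
	}
	chunk := min(len(p), rb.size)
	chunk = min(chunk, len(rb.buf)-rb.r)
	copy(p, rb.buf[rb.r:rb.r+chunk])
	rb.r = (rb.r + chunk) % len(rb.buf)
	rb.size -= chunk
	rb.cond.Broadcast()
	return chunk, nil
}

func (rb *ringBuffer) Close() error {
	rb.mu.Lock()
	rb.closed = true
	rb.cond.Broadcast()
	rb.mu.Unlock()
	return nil
}

func main() {
	rb := newRingBuffer(1 << 10)
	go func() {
		rb.Write([]byte("hash+shard data"))
		rb.Close()
	}()
	out, _ := io.ReadAll(rb)
	fmt.Printf("read %d bytes\n", len(out))
}
```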
Shubhendu de4d3dac00
Added tests for IAM policies for bucket operations (#19734)
* Added tests for bucket access policies

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>

* move to correct category of tests

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>

---------

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-05-14 08:43:07 -07:00
Olli Janatuinen 534e7161df
SFTP: Correctly inform client about unsupported commands (#19735) 2024-05-14 03:29:30 -07:00
Harshavardhana 9b219cd646
fix: return quorum based error, temporary failures must be ignored (#19732) 2024-05-14 03:29:17 -07:00
Shireesh Anjal 3bab4822f3
Add logger webhook metrics in metrics-v3 (#19515)
endpoint: /minio/metrics/v3/cluster/webhook
metrics:
- failed_messages (counter)
- online (gauge)
- queue_length (gauge)
- total_messages (counter)
2024-05-14 00:27:33 -07:00
coderwander 3c5f2d8916
fix some typos in struct name comments (#19513)
Signed-off-by: coderwander <770732124@qq.com>
2024-05-14 00:26:50 -07:00
Shireesh Anjal 5808190398
Add more metrics to v3/cluster/erasure-set (#19714)
Metrics being added:

- read_tolerance: No of drive failures that can be tolerated without
  disrupting read operations
- write_tolerance: No of drive failures that can be tolerated without
  disrupting write operations
- read_health: Health of the erasure set in a pool for read operations
  (1=healthy, 0=unhealthy)
- write_health: Health of the erasure set in a pool for write operations
  (1=healthy, 0=unhealthy)
2024-05-14 00:25:56 -07:00
Shireesh Anjal b2a82248b1
Move /system/go to /debug/go (#19707) 2024-05-14 00:25:37 -07:00
dependabot[bot] 4e5fcca8b9 build(deps): bump golang.org/x/net (#23)
Bumps the go_modules group with 1 update in the /docs/debugging/s3-verify directory: [golang.org/x/net](https://github.com/golang/net).


Updates `golang.org/x/net` from 0.24.0 to 0.25.0
- [Commits](https://github.com/golang/net/compare/v0.24.0...v0.25.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
  dependency-group: go_modules
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-05-13 10:59:52 -07:00
Klaus Post c36eaedb93
Re-add "Fix incorrect merging of slash-suffixed objects (#19729)
Adds regression test for #19699

Failures are a bit luck based, since it requires objects to be placed on different sets.

However this generates a failure prior to #19699

* Revert "Revert "Fix incorrect merging of slash-suffixed objects (#19699)""

This reverts commit f30417d9a8.

* Don't override when suffix doesn't match. Instead rely on quorum for each.
2024-05-13 09:30:24 -07:00
Poorna 7752b03add
optimize max-keys=2 listing for spark workloads (#19725)
to return results appropriately for versioned buckets, especially
when underlying prefixes have been deleted
2024-05-13 07:57:42 -07:00
jiuker 01bfc78535
Optimization: reuse hashedSecret when LookupConfig (#19724) 2024-05-12 22:52:27 -07:00
Shireesh Anjal 074d70112d
Consolidate drive health related metrics into single metric (#19706)
Instead of having "online" and "healing" as two metrics, replace them with a
single metric "health" which can have the following values:

0 = offline
1 = healthy
2 = healing
2024-05-12 10:23:50 -07:00
Harshavardhana e8d14c0d90
verify preconditions during CompleteMultipart (#19713)
Bonus: hold the write lock properly to apply
optimistic concurrency during NewMultipartUpload()
2024-05-10 17:31:22 -07:00
Shireesh Anjal 60d7e8143a
Move /cluster/audit to /audit (#19708)
As the audit metrics are server level and not 
overall cluster level.
2024-05-10 07:50:39 -07:00
Klaus Post 9667a170de
Add usage cache cleanup and lower forced top compaction (#19719)
Lower forced compaction to 250K entries.

If there are more than 250K entries at the top level, force compact it and log an error.
2024-05-10 07:49:50 -07:00
Shubhendu abae30f9e1
Added decom test with KES using sse-s3 and sse-kms (#19695)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-05-10 01:24:14 -07:00
Minio Trusted f9311bc9d1 Update yaml files to latest version RELEASE.2024-05-10T01-41-38Z 2024-05-10 02:00:49 +00:00
Harshavardhana b598402738 fix: unexpected credentials missing while passing 2024-05-09 18:41:38 -07:00
Harshavardhana bd026b913f remove references for MINIO_SERVER_URL 2024-05-09 17:22:36 -07:00
Harshavardhana 72ff69d9bb
add log-prefix name for specifying custom log-name (#19712) 2024-05-09 14:29:37 -07:00
Harshavardhana f30417d9a8 Revert "Fix incorrect merging of slash-suffixed objects (#19699)"
This reverts commit 2f7a10ab31.
2024-05-09 12:32:05 -07:00
jiuker 47a4ad3cd7
fix: truncate Expiration to second when Add ServiceAccount (#19674)
Truncate Expiration to the second when adding a ServiceAccount.
2024-05-09 11:08:04 -07:00
Klaus Post 2f7a10ab31
Fix incorrect merging of slash-suffixed objects (#19699)
If two objects share everything but one object has a trailing slash, those would be merged in listings, 
with secondary properties used for a tiebreak.

Example: An object with the key `prefix/obj` would be merged with an object named `prefix/obj/`. 
While this violates the [no object can be a prefix of another](https://min.io/docs/minio/linux/operations/concepts/thresholds.html#conflicting-objects), let's resolve these.

If we have an object named 'name' and a directory named 'name/', discard the directory only - but allow objects 
of 'name' and 'name/' (xldir) to be uniquely returned.

Regression from #15772
2024-05-09 11:05:45 -07:00
Harshavardhana b534dc69ab
deprecate unexpected healing failed counters (#19705)
simplify this to avoid verbose metrics, and make
room for valid metrics to be reported for alerting
etc.
2024-05-09 11:04:41 -07:00
Harshavardhana 7b7d2ea7d4
pass around correct endpoint while registering remote storage (#19710) 2024-05-09 11:03:54 -07:00
Aditya Manthramurthy e00de1c302
ldap-import: Add additional logs (#19691)
These logs are being added to provide better debugging of LDAP
normalization on IAM import.
2024-05-09 10:52:53 -07:00
Harshavardhana 3549e583a6
results must be a single channel to avoid overwriting `healing.bin` (#19702) 2024-05-09 10:15:03 -07:00
Andi f5e3eedf34
chore: use errors.New to replace fmt.Errorf with no parameters (#19568)
Signed-off-by: ChengenH <hce19970702@gmail.com>
2024-05-09 01:44:07 -07:00
Harshavardhana 519dbfebf6 upgrade to go1.22.x 2024-05-09 01:36:00 -07:00
Harshavardhana 9a267f9270
allow caller context during reloads() to cancel (#19687)
Canceled callers might linger around longer and
can potentially overwhelm the system. Instead,
provide a caller context so canceled callers
don't hold on to them.

Bonus: we have no reason to cache errors; we should
never cache errors, otherwise we can potentially have
quorum errors creeping in unexpectedly. We should
let the cache, when invalidating, hit the actual resources
instead.
2024-05-08 17:51:34 -07:00
Anis Eleuch 67bd71b7a5
grid: Fix a window of a disconnected node not marked as offline (#19703)
LastPong is saved as nanoseconds after a connection or reconnection but
saved as seconds when receiving a pong message. The code deciding if
a pong is too old can be skewed since it assumes LastPong is only in
seconds.
2024-05-08 17:50:13 -07:00
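The bug above is a unit mismatch in a shared timestamp. A small sketch of keeping such a timestamp in one unit behind an atomic so age checks stay consistent; the names and the 10s staleness threshold are illustrative, not the grid package's actual code.

```go
package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// lastPong stores the time of the last pong in Unix nanoseconds - always the
// same unit, whether written at (re)connect time or when a pong arrives.
var lastPong atomic.Int64

func markPong() { lastPong.Store(time.Now().UnixNano()) }

func pongTooOld() bool {
	last := time.Unix(0, lastPong.Load())
	return time.Since(last) > 10*time.Second
}

func main() {
	markPong()
	fmt.Println("too old:", pongTooOld()) // false right after a pong
}
```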
Klaus Post ec49fff583
Accept multipart checksums with part count (#19680)
Accept multipart uploads where the combined checksum provides the expected part count.

It seems this was added by AWS to make the API more consistent, even if the 
data is entirely superfluous on multiple levels.

Improves AWS S3 compatibility.
2024-05-08 09:18:34 -07:00
Andreas Auernhammer 8b660e18f2
kms: add support for MinKMS and remove some unused/broken code (#19368)
This commit adds support for MinKMS. Now, there are three KMS
implementations in `internal/kms`: Builtin, MinIO KES and MinIO KMS.

Adding another KMS integration required some cleanup. In particular:
 - Various KMS APIs that haven't been and are not used have been
   removed. A lot of the code was broken anyway.
 - Metrics are now monitored by the `kms.KMS` itself. For basic
   metrics this is simpler than collecting metrics for external
   servers. In particular, each KES server returns its own metrics
   and no cluster-level view.
 - The builtin KMS now uses the same en/decryption implemented by
   MinKMS and KES. It still supports decryption of the previous
   ciphertext format. It's backwards compatible.
 - Data encryption keys now include a master key version since MinKMS
   supports multiple versions (~4 billion in total and 10000 concurrent)
   per key name.

Signed-off-by: Andreas Auernhammer <github@aead.dev>
2024-05-07 16:55:37 -07:00
Harshavardhana 981497799a
return appropriate error upon reaching maxClients() (#19669) 2024-05-07 13:41:56 -07:00
Minio Trusted b9bdc17465 Update yaml files to latest version RELEASE.2024-05-07T06-41-25Z 2024-05-07 16:59:52 +00:00
Olli Janatuinen b413ff9fdb
Support user certificate based authentication on SFTP (#19650) 2024-05-06 23:41:25 -07:00
Harshavardhana 6a15580817
fix: collect quorum errors for deletePrefix() (#19685)
Do not return an error for a single drive being offline.
2024-05-06 22:44:46 -07:00
Cesar N 39633a5581
Set Console Redirect URL env variable (#19683) 2024-05-06 19:47:59 -07:00
Alex 1e83f15e2f
Update Console version to v1.4.0 (#19684) 2024-05-06 19:47:37 -07:00
Harshavardhana 888d2bb1d8
support ETag value to be '*' (#19682)
This supports '*' to comply with AWS S3 behavior for

- 'If-Match: *'
- 'If-None-Match: *'
2024-05-06 17:08:42 -07:00
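As a sketch of the '*' semantics: 'If-Match: *' succeeds only if the object exists, while 'If-None-Match: *' succeeds only if it does not. The helper name and the non-'*' fallback are illustrative, not MinIO's handler code.

```go
package main

import "fmt"

// evalETagPrecondition illustrates the '*' semantics described above:
//   If-Match: *      -> proceed only if the object exists
//   If-None-Match: * -> proceed only if the object does not exist
// Otherwise '*' falls back to a plain ETag comparison.
func evalETagPrecondition(header, value, objectETag string, exists bool) bool {
	switch header {
	case "If-Match":
		if value == "*" {
			return exists
		}
		return exists && value == objectETag
	case "If-None-Match":
		if value == "*" {
			return !exists
		}
		return !exists || value != objectETag
	}
	return true
}

func main() {
	fmt.Println(evalETagPrecondition("If-Match", "*", `"abc"`, true))      // true
	fmt.Println(evalETagPrecondition("If-None-Match", "*", `"abc"`, true)) // false
	fmt.Println(evalETagPrecondition("If-None-Match", "*", "", false))     // true
}
```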
Klaus Post 847ee5ac45
Make WalkDir return errors (#19677)
If used, `opts.Marker` will cause many missed entries since results are returned 
unsorted, and pools are serialized.

Switch to fully concurrent listing and merging across pools to return sorted entries.
2024-05-06 13:27:52 -07:00
jiuker 9a9a49aa84
fix: Ignore AWSAccessKeyId check for SignV2 policy condition (#19673) 2024-05-06 03:52:41 -07:00
Harshavardhana a03ca80269
support 'mc support perf object' with root login disabled (#19672)
It is expected that whoever is using credentials with
the proper set of permissions must be able to run

`mc support perf object`

while root login is disabled.
2024-05-06 02:45:10 -07:00
Harshavardhana 523bd769f1
add support for specific error response for InvalidRange (#19668)
fixes #19648

AWS S3 returns the actual object size as part of the XML
response for the InvalidRange error; this is apparently used
by SDKs to retry the request without the range.
2024-05-05 09:56:21 -07:00
Harshavardhana 8ff70ea5a9
turn-off coloring if we have std{err,out} dumb terminals (#19667) 2024-05-03 17:17:57 -07:00
Harshavardhana da3e7747ca
avoid using 10MiB EC buffers in maxAPI calculations (#19665)
Max requests per node is more conservative in its value,
causing premature serialization of the calls; avoid it
for newer deployments.
2024-05-03 13:08:20 -07:00
Klaus Post 4afb59e63f
fix: walk missing entries with opts.Marker set (#19661)
`opts.Marker` causes many missed entries if used, since results are returned unsorted and pools are serialized.

Switch to fully concurrent listing and merging across pools to return sorted entries.

Returning errors on listings is impossible with the current API, so document that.

Return an error at once if no drives are found instead of just returning an empty listing and no error.
2024-05-03 10:26:51 -07:00
Harshavardhana 1526e7ece3
extend server config.yaml to support per pool set drive count (#19663)
This is to support deployments migrating from a multi-pooled
wider stripe to a lower stripe. MINIO_STORAGE_CLASS_STANDARD
is still expected to be the same for all pools, so you can
add pools with a custom drive count by adjusting the storage
class value.

```
version: v2
address: ':9000'
rootUser: 'minioadmin'
rootPassword: 'minioadmin'
console-address: ':9001'
pools: # Specify the nodes and drives with pools
  -
    args:
        - 'node{11...14}.example.net/data{1...4}'
  -
    args:
        - 'node{15...18}.example.net/data{1...4}'
  -
    args:
        - 'node{19...22}.example.net/data{1...4}'
  -
    args:
        - 'node{23...34}.example.net/data{1...10}'
    set-drive-count: 6
```
2024-05-03 08:54:03 -07:00
Krishnan Parthasarathi 6c07bfee8a
With retention, skip actions expiring all versions (#19657)
ILM actions due to ExpiredObjectDeleteAllVersions and
DelMarkerExpiration are ignored when object locking is enabled on a
bucket.
Note: This applies to object versions which may not have retention
configured on them. This applies to all object versions in this bucket,
including those created before the retention config was applied.
2024-05-03 04:18:58 -07:00
Poorna 446c760820
replication: Avoid proxying if requested object is a deletemarker (#19656)
Fixes: #19654
2024-05-02 13:15:54 -07:00
Shireesh Anjal 04f92f1291
Change endpoint format for per-bucket metrics (#19655)
Per-bucket metrics endpoints always start with /bucket and the bucket
name is appended to the path. e.g. if the collector path is /bucket/api,
the endpoint for the bucket "mybucket" would be
/minio/metrics/v3/bucket/api/mybucket

Change the existing bucket api endpoint accordingly from /api/bucket to
/bucket/api
2024-05-02 10:37:57 -07:00
Klaus Post 4a60a7794d
Use better gzip for log rotate (#19651)
Should be 2x faster with same usage.
2024-05-02 04:38:40 -07:00
Bala FA e5b16adb1c
Add cluster IAM metrics in metrics-v3 (#19595)
Signed-off-by: Bala.FA <bala@minio.io>
2024-05-02 01:20:42 -07:00
Harshavardhana 402a3ac719
support compression after rotation of logs (#19647) 2024-05-01 15:38:07 -07:00
Aditya Manthramurthy f3d61c51fc
fix: Filter out cust. AssumeRole `Token` for audit (#19646)
The `Token` parameter is a sensitive value that should not be output in the Audit log for STS AssumeRoleWithCustomToken API.

Bonus: Add a simple tool that echoes audit logs to the console.
2024-05-01 14:31:13 -07:00
Klaus Post 0cde17ae5d
Return listing when exceeding min disk errors (#19644)
When listing, with drives returning `errFileNotFound`, `errVolumeNotFound`, or `errUnformattedDisk`, 
we could end up with fewer than `minDisks` drives being left.

This would result in a quorum never being reachable for any object. Therefore, the listing 
would continue, but no results would ever be produced.

Include `fnf` in the minDisks check since it is incremented on these errors. This will stop 
listing when only minDisks drives are left.

Allow `opts.minDisks` to not return errVolumeNotFound or errFileNotFound and return that. 
That will allow for good results even if disks return something else.

We switch `errUnformattedDisk` to a regular error. If we have enough of those, we should just fail.
2024-05-01 10:59:08 -07:00
Harshavardhana 8c1bba681b
add logrotate support for MinIO logs (#19641) 2024-05-01 10:57:52 -07:00
Klaus Post dbfb5e797b
Wait one minute after startup to restart decommissioning (#19645)
Typically not all drives are connected, so we delay 3 minutes before resuming.
This greatly reduces risk of starting to list unconnected drives, or drives we risk being disconnected soon.

This delay is not applied when starting with an admin call.
2024-05-01 08:18:21 -07:00
Harshavardhana 08ff702434
enhance ListSVCs() API to return more info to avoid InfoSvc() (#19642)
Console-UI-like applications rely on a combination of

ListServiceAccounts() and InfoServiceAccount() to populate
UI elements; however, individually these calls can be slow,
causing the entire UI to load sluggishly.
2024-05-01 05:41:13 -07:00
Klaus Post 0e2148264a
Fix --sftp "mac-algos=..." overwrites cipher algorithms (#19643)
Setting MAC algorithms overwrites cipher algorithms.

Followup to #19636
2024-05-01 04:07:40 -07:00
Minio Trusted a75f42344b Update yaml files to latest version RELEASE.2024-05-01T01-11-10Z 2024-05-01 02:45:52 +00:00
Krishnan Parthasarathi 7926401cbd
ilm: Handle DeleteAllVersions action differently for DEL markers (#19481)
i.e., this rule element doesn't apply to DEL markers.

This is a breaking change to how ExpiredObjectDeleteAllVersions
functions today. This is necessary to avoid the following highly probable
footgun scenario in the future.

Scenario:
The user uses tag-based filtering to select an object's time to live (TTL). 
The application sometimes deletes objects, too, making the latest
version a DEL marker. The previous implementation skipped tag-based filters
if the newest version was a DEL marker, voiding the tag-based TTL. The user is
surprised to find objects that have expired sooner than expected.

* Add DelMarkerExpiration action

This ILM action removes all versions of an object if its
latest version is a DEL marker.

```xml
<DelMarkerObjectExpiration>
    <Days> 10 </Days>
</DelMarkerObjectExpiration>
```

1. Applies only to objects whose:
  • latest version is a DEL marker
  • age satisfies the number of days criteria
2. Deletes all versions of this object
3. Associated rule can't have tag-based filtering

Includes,
- New bucket event type for deletion due to DelMarkerExpiration
2024-04-30 18:11:10 -07:00
Harshavardhana 8161411c5d
fix: a crash in RemoveReplication target (#19640)
Calling a remote target remove with a perfectly
well-constructed ARN can lead to a crash for a bucket
with no replication configured.

This PR fixes that, and adds a crash check for ImportMetadata
as well.
2024-04-30 18:09:56 -07:00
Klaus Post f64dea2aac
Allow custom SFTP algorithm selection (#19636)
Algorithms are comma-separated.
Note that the valid values do not in all cases represent default values.

`--sftp=pub-key-algos=...` specifies the supported client public key
authentication algorithms. Note that this doesn't include certificate types
since those use the underlying algorithm. This list is sent to the client if
it supports the server-sig-algs extension. Order is irrelevant.

Valid values
```
ssh-ed25519
sk-ssh-ed25519@openssh.com
sk-ecdsa-sha2-nistp256@openssh.com
ecdsa-sha2-nistp256
ecdsa-sha2-nistp384
ecdsa-sha2-nistp521
rsa-sha2-256
rsa-sha2-512
ssh-rsa
ssh-dss
```

`--sftp=kex-algos=...` specifies the supported key-exchange algorithms in preference order.

Valid values:

```
curve25519-sha256
curve25519-sha256@libssh.org
ecdh-sha2-nistp256
ecdh-sha2-nistp384
ecdh-sha2-nistp521
diffie-hellman-group14-sha256
diffie-hellman-group16-sha512
diffie-hellman-group14-sha1
diffie-hellman-group1-sha1
```

`--sftp=cipher-algos=...` specifies the allowed cipher algorithms.
If unspecified then a sensible default is used.

Valid values:
```
aes128-ctr
aes192-ctr
aes256-ctr
aes128-gcm@openssh.com
aes256-gcm@openssh.com
chacha20-poly1305@openssh.com
arcfour256
arcfour128
arcfour
aes128-cbc
3des-cbc
```

`--sftp=mac-algos=...` specifies a default set of MAC algorithms in preference order.
This is based on RFC 4253, section 6.4, but with hmac-md5 variants removed because they have
reached the end of their useful life.

Valid values:

```
hmac-sha2-256-etm@openssh.com
hmac-sha2-512-etm@openssh.com
hmac-sha2-256
hmac-sha2-512
hmac-sha1
hmac-sha1-96
```
2024-04-30 08:15:45 -07:00
Shubhendu 6579304d8c
Suppress metrics with zero values (#19638)
This reduces the size of the data in the metrics listing
response. While graphing, we can default these metrics to
a zero value if not found.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-04-30 08:05:22 -07:00
jiuker 6bb10a81a6
avoid data race for testing (#19635) 2024-04-30 08:03:35 -07:00
Klaus Post 3cf8a7c888
Always unfreeze when connection dies (#19634)
Unfreeze as soon as the incoming connection is terminated and don't wait for everything to complete.

We don't want to keep the services frozen if something becomes stuck.
2024-04-29 10:39:04 -07:00
Minio Trusted 2e38bb5175 Update yaml files to latest version RELEASE.2024-04-28T17-53-50Z 2024-04-29 17:09:28 +00:00
Harshavardhana a372c6a377
a bunch of fixes for error handling (#19627)
- handle errFileCorrupt properly
- micro-optimization of sending done() response quicker
  to close the goroutine.
- fix logger.Event() usage in a couple of places
- handle the rest of the client to return a different error other than
  lastErr() when the client is closed.
2024-04-28 10:53:50 -07:00
Harshavardhana 93b2f8a0c5 helm release v5.2.0
Signed-off-by: Harshavardhana <harsha@minio.io>
2024-04-28 03:14:37 -07:00
opencmit2 1a6568a25d
helm support loadBalancerSourceRanges and externalTrafficPolicy (#19245)
Signed-off-by: JinXinWang <opencmit2@126.com>
2024-04-28 03:05:53 -07:00
Poorna 9e95703efc
iam reload policy mapping of STS users properly (#19626) 2024-04-27 03:04:10 -07:00
Anis Eleuch d8e05aca81
heal/list: Fix rare incomplete listing with flaky internode connections (#19625)
listPathRaw() counts errDiskNotFound as a valid error to indicate a
listing stream end. However, storage.WalkDir() is allowed to return
errDiskNotFound anytime since grid.ErrDisconnected is converted to
errDiskNotFound.

This affects fresh disk healing and should affect S3 listing as well.
2024-04-26 12:52:52 -07:00
Praveen raj Mani 410a1ac040
Handle failures in pool rebalancing (#19623) 2024-04-26 12:29:28 -07:00
Shireesh Anjal 4caa3422bd
Add process metrics in `metrics-v3` (#19612)
endpoint: /minio/metrics/v3/system/process
metrics:
- locks_read_total
- locks_write_total
- cpu_total_seconds
- go_routine_total
- io_rchar_bytes
- io_read_bytes
- io_wchar_bytes
- io_write_bytes
- start_time_seconds
- uptime_seconds
- file_descriptor_limit_total
- file_descriptor_open_total
- syscall_read_total
- syscall_write_total
- resident_memory_bytes
- virtual_memory_bytes
- virtual_memory_max_bytes

Since the standard process collector implements only a subset of these
metrics, remove it and implement our own custom process collector that
captures all the process metrics we need.
2024-04-26 09:07:23 -07:00
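A minimal example of the custom-collector pattern this commit describes, using the standard Prometheus client-library Describe/Collect interface. The single metric name and its value source (/proc/self/fd, Linux-only) are placeholders, not MinIO's process collector.

```go
package main

import (
	"net/http"
	"os"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// processCollector is a minimal custom collector; the metric and its value
// source are illustrative placeholders.
type processCollector struct {
	openFDs *prometheus.Desc
}

func newProcessCollector() *processCollector {
	return &processCollector{
		openFDs: prometheus.NewDesc(
			"process_file_descriptor_open_total",
			"Number of open file descriptors (illustrative).",
			nil, nil,
		),
	}
}

func (c *processCollector) Describe(ch chan<- *prometheus.Desc) { ch <- c.openFDs }

func (c *processCollector) Collect(ch chan<- prometheus.Metric) {
	// Cheap stand-in for reading process stats (Linux-only path).
	entries, err := os.ReadDir("/proc/self/fd")
	if err != nil {
		return
	}
	ch <- prometheus.MustNewConstMetric(c.openFDs, prometheus.GaugeValue, float64(len(entries)))
}

func main() {
	reg := prometheus.NewRegistry()
	reg.MustRegister(newProcessCollector())
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	http.ListenAndServe(":2112", nil)
}
```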
Dennis Marttinen a658b976f5
helm: fix port types in CiliumNetworkPolicy (#19232) 2024-04-26 00:50:24 -07:00
Anis Eleuch 135874ebdc
heal: Avoid marking a bucket as done when remote drives are offline (#19587) 2024-04-25 23:32:14 -07:00
Harshavardhana f4f1c42cba
deprecate usage of sha256-simd (#19621)
go1.21 already implements the necessary optimizations
2024-04-25 23:31:35 -07:00
Poorna e7aa26dc29
fix: allow DeleteObject unversioned objects with insufficient read quorum (#19581)
Since the object is being permanently deleted, the lack of read quorum should not
matter as long as sufficient disks are online to complete the deletion with parity
requirements.

If several pools have the same object with insufficient read quorum, attempt to
delete the object from all the pools where it exists.
2024-04-25 17:31:12 -07:00
Harshavardhana c54ffde568
add metrics ioerror counter for alerts on I/O errors (#19618) 2024-04-25 15:01:31 -07:00
Anis Eleuch 9a3c992d7a
heal: Fix regression in healing a new fresh drive (#19615) 2024-04-25 14:55:41 -07:00
Aditya Manthramurthy 0c855638de
fix: LDAP init. issue when LDAP server is down (#19619)
At server startup, LDAP configuration is validated against the LDAP
server. If the LDAP server is down at that point, we need to cleanly
disable LDAP configuration. Previously, LDAP would remain configured but
error out in strange ways because initialization did not complete
without errors.
2024-04-25 14:28:16 -07:00
Cesar N 943d815783
Update Console UI to v1.3.0 (#19617) 2024-04-25 12:12:03 -07:00
Ramon de Klein 4c0acba62d
Fixes an internal error while force-deleting a bucket (#19614) 2024-04-25 09:27:27 -07:00
Aditya Manthramurthy 62c3cdee75
fix: IAM LDAP access key import bug (#19608)
When importing access keys (i.e. service accounts) for LDAP accounts,
we are requiring groups to exist under one of the configured group base
DNs. This is not correct. This change fixes this by only checking for
existence and storing the normalized form of the group DN - we do not
return an error if the group is not under a base DN.

Test is updated to illustrate an import failure that would happen
without this change.
2024-04-25 08:50:16 -07:00
Aditya Manthramurthy 3212d0c8cd
fix: IAM import for LDAP should replace mappings (#19607)
Existing IAM import logic for LDAP creates new mappings when the
normalized form of the mapping key differs from the existing mapping key
in storage. This change effectively replaces the existing mapping key by
first deleting it and then recreating with the normalized form of the
mapping key.

For e.g. if an older deployment had a policy mapped to a user DN -

`UID=alice1,OU=people,OU=hwengg,DC=min,DC=io`

instead of adding a mapping for the normalized form -

`uid=alice1,ou=people,ou=hwengg,dc=min,dc=io`

we should replace the existing mapping.

This ensures that duplicate mappings won't remain after the import.

Some additional cleanup cases are also covered. If there are multiple
mappings for the same normalized key, such as:

`UID=alice1,OU=people,OU=hwengg,DC=min,DC=io`
`uid=alice1,ou=people,ou=hwengg,DC=min,DC=io`
`uid=alice1,ou=people,ou=hwengg,dc=min,dc=io`

we check if the list of policies mapped to all these keys are exactly
the same, and if so remove all of them and create a single mapping with
the normalized key. However, if the policies mapped to such keys differ,
the import operation returns an error as the server cannot automatically
pick the "right" list of policies to map.
2024-04-25 08:49:53 -07:00
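A deliberately simplified sketch of the normalize-and-group idea in this commit, using the DN examples from the message above. Real DN normalization (done via the LDAP library) also handles escaped characters and attribute matching rules, so this is only an illustration.

```go
package main

import (
	"fmt"
	"strings"
)

// normalizeDN is a simplified illustration: lowercase each RDN and trim
// surrounding spaces. It does not handle escaping or matching rules.
func normalizeDN(dn string) string {
	parts := strings.Split(dn, ",")
	for i, p := range parts {
		parts[i] = strings.ToLower(strings.TrimSpace(p))
	}
	return strings.Join(parts, ",")
}

func main() {
	mappings := map[string][]string{
		"UID=alice1,OU=people,OU=hwengg,DC=min,DC=io": {"readwrite"},
		"uid=alice1,ou=people,ou=hwengg,DC=min,DC=io": {"readwrite"},
		"uid=alice1,ou=people,ou=hwengg,dc=min,dc=io": {"readwrite"},
	}

	// Group raw keys by their normalized form; if every key in a group maps
	// to the same policy list, they can be collapsed into a single mapping.
	grouped := map[string][]string{}
	for dn := range mappings {
		norm := normalizeDN(dn)
		grouped[norm] = append(grouped[norm], dn)
	}
	for norm, raw := range grouped {
		fmt.Printf("%s <- %d raw mapping(s)\n", norm, len(raw))
	}
}
```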
Harshavardhana 1d03bea965
support preserving renameData() on inlined content during overwrites (#19609)
extending #19548 to inlined-data as well.
2024-04-24 18:14:08 -07:00
Klaus Post fbfeb59658
xl-meta: Allow combining multiple unversioned objects (#19604)
When inspecting files like `.minio.sys/pool.bin` that may be present on multiple sets, use signature to separate them.

Also fixes null versions to actually be useful with `-export -combine`.
2024-04-24 10:56:22 -07:00
Ramon de Klein 701da1282a
Validates PostgreSQL table name (#19602) 2024-04-24 10:51:07 -07:00
jiuker df93ff92ba
fix: site-replication will reset group status when add user (#19594) 2024-04-24 08:54:24 -07:00
Shireesh Anjal 77d5331e85
Fix few wrongly defined metric types (#19586)
`minio_cluster_webhook_queue_length` was wrongly defined as `counter`
whereas it should be `gauge`.

Following were wrongly defined as `gauge` when they should actually be
`counter`:

- minio_bucket_replication_sent_bytes
- minio_bucket_replication_received_bytes
- minio_bucket_replication_total_failed_bytes
- minio_bucket_replication_total_failed_count
2024-04-23 23:19:40 -07:00
Bala FA 14cdadfb56
Add cluster notification metrics in metrics-v3 (#19533)
Signed-off-by: Bala.FA <bala@minio.io>
2024-04-23 21:10:35 -07:00
Harshavardhana f3a52cc195
simplify listener implementation setup customizations in right place (#19589) 2024-04-23 21:08:47 -07:00
Aditya Manthramurthy 7640cd24c9
fix: avoid some IAM import errors if LDAP enabled (#19591)
When LDAP is enabled, previously we were:

- rejecting creation of users and groups via the IAM import functionality

- throwing a `not a valid DN` error when non-LDAP group mappings are present

This change allows for these cases as we need to support situations
where the MinIO server contains users, groups and policy mappings
created before LDAP was enabled.
2024-04-23 18:23:08 -07:00
Shireesh Anjal f7b665347e
Add system CPU metrics to metrics-v3 (#19560)
endpoint: /minio/metrics/v3/system/cpu

metrics:
- minio_system_cpu_avg_idle
- minio_system_cpu_avg_iowait
- minio_system_cpu_load
- minio_system_cpu_load_perc
- minio_system_cpu_nice
- minio_system_cpu_steal
- minio_system_cpu_system
- minio_system_cpu_user
2024-04-23 16:56:12 -07:00
Harshavardhana 9693c382a8
make renameData() more defensive during overwrites (#19548)
Instead, upon any error in renameData(), we still
preserve the existing dataDir in some form for
recoverability in strange situations such as
out-of-disk-space type errors.

Bonus: avoid running list and heal(); instead allow
the versions disparity to return the actual versions and
UUIDs to heal. Currently limit this to 100 versions
and fewer disparate objects.

An undo now reverts the xl.meta from xl.meta.bkp
during overwrites on such flaky setups.

Bonus: save N depth syscalls by skipping the parents
upon overwrites and versioned updates.

Examples of flaky setups are stretch clusters with regular
packet drops etc.; we need to add some defensive code
around to avoid dangling objects.
2024-04-23 10:15:52 -07:00
jiuker ee1047bd52
fix: can't get total disksize for `decom status` (#19585) 2024-04-23 04:33:28 -07:00
Seiya 5ea5ab162b
Remove leading zero strings in return value of (*xlMetaV2)getDataDirs() (#19567)
remove leading zero strings in return value of getDataDirs()
2024-04-22 22:07:37 -07:00
Klaus Post b5a09ff96b
Fix RenameData data race (#19579)
RenameData could start operating on inline data after timing out 
and the call having returned due to WithDeadline.

This could cause a buffer write to race with the inline data being written.

Since no writes are in `RenameData` and the call is canceled, 
this doesn't present a corruption issue. But a race is a race and 
should be fixed.

Copy inline data to a fresh buffer.
2024-04-22 22:07:19 -07:00
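The general pattern behind "copy inline data to a fresh buffer": hand the asynchronous worker a private copy of a caller-owned buffer so later reuse of that buffer cannot race. This is an illustrative sketch; names are made up and it is not the RenameData code.

```go
package main

import (
	"fmt"
	"sync"
)

// persistAsync hands the background writer a private copy of data, so the
// caller is free to reuse its buffer immediately.
func persistAsync(wg *sync.WaitGroup, data []byte, sink *[]byte) {
	cp := make([]byte, len(data))
	copy(cp, data)
	wg.Add(1)
	go func() {
		defer wg.Done()
		*sink = cp // the goroutine only ever sees its own copy
	}()
}

func main() {
	var wg sync.WaitGroup
	var stored []byte

	buf := []byte("inline data v1")
	persistAsync(&wg, buf, &stored)

	// The caller reuses the buffer right away; without the copy above this
	// write could race with the goroutine.
	copy(buf, []byte("inline data v2"))

	wg.Wait()
	fmt.Println(string(stored)) // always "inline data v1"
}
```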
Harshavardhana 95c65f4e8f
do not panic on rebalance during server restarts (#19563)
This PR takes a feasible approach to handle all the scenarios
we must face to avoid returning a panic.

Instead, we must return "errServerNotInitialized" when a
bucketMetadataSys.Get() is called, allowing the caller to
retry their operation and wait.

Bonus: fix the way data-usage-cache stores the object.
Instead of storing usage-cache.bin with the bucket as
`.minio.sys/buckets`, the `buckets` must be relative
to the bucket `.minio.sys` as part of the object name.

Otherwise, there is no way to decommission entries at
`.minio.sys/buckets` and their final erasure set positions.

A bucket must never have a `/` in it. Add code to read()
from the existing data-usage.bin upon upgrade.
2024-04-22 10:49:30 -07:00
Harshavardhana 6bfff7532e
re-use transport and set stronger backwards compatible Ciphers (#19565)
This PR fixes a few things

- FIPS support was missing for remote transports, which could
  cause MinIO to end up using non-FIPS ciphers in FIPS mode

- Avoids too many transports; they all do the same thing, so
  re-use them to make connection pooling work properly.

- globalTCPOptions must be set before setting transport
  to make sure the client conn deadlines are honored properly.

- GCS warm tier must re-use our transport

- Re-enable trailing headers support.
2024-04-21 04:43:18 -07:00
Harshavardhana 1aa8896ad6 Revert "cleanup: Simplify usage of MinIOSourceProxyRequest (#19553)"
This reverts commit 928c0181bf.

This change was not correct; reverting.

We track 3 states with the ProxyRequest header - if the replication process wants
to know whether an object is already replicated with a HEAD, it shouldn't proxy back.
   - Poorna
2024-04-20 02:05:54 -07:00
Krishnan Parthasarathi 3e32ceb39f
Disable trailing header support for MinIO tiers (#19561)
AWS S3 trailing header support was recently enabled on the warm tier
client connection to MinIO type remote tiers. With this enabled, we are
seeing the following error message at the HTTP transport layer.

> Unsolicited response received on idle HTTP channel starting with "HTTP/1.1 400 Bad Request\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n400 Bad Request"; err=<nil>

This is an interim fix until we identify the root cause for this behaviour in the
minio-go client package.
2024-04-19 19:32:25 -07:00
dependabot[bot] ca1350b092
build(deps): bump golang.org/x/net from 0.19.0 to 0.23.0 in /docs/debugging/s3-verify (#19559)
build(deps): bump golang.org/x/net in /docs/debugging/s3-verify

Bumps [golang.org/x/net](https://github.com/golang/net) from 0.19.0 to 0.23.0.
- [Commits](https://github.com/golang/net/compare/v0.19.0...v0.23.0)

---
updated-dependencies:
- dependency-name: golang.org/x/net
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2024-04-19 14:24:47 -07:00
jiuker 9205434ed3
fix: ignore signaturev2 for policy header check (#19551) 2024-04-19 09:45:54 -07:00
Harshavardhana cd50e9b4bc
make LRU cache global for internode tokens (#19555) 2024-04-19 09:45:14 -07:00
Klaus Post ec816f3840
Reduce parallelReader allocs (#19558) 2024-04-19 09:44:59 -07:00
Klaus Post 5f774951b1
Store object EC in metadata header (#19534)
Keep the EC in header, so it can be retrieved easily for dynamic quorum calculations.

To not force a full metadata decode on every read the value will be 0/0 for data written in previous versions.

Size is expected to increase by 2 bytes per version, since all valid values can be represented with 1 byte each.

Example:
```
λ xl-meta xl.meta
{
  "Versions": [
    {
      "Header": {
        "EcM": 4,
        "EcN": 8,
        "Flags": 6,
        "ModTime": "2024-04-17T11:46:25.325613+02:00",
        "Signature": "0a409875",
        "Type": 1,
        "VersionID": "8e03504e11234957b2727bc53eda0d55"
      },
...
```

Not used for operations yet.
2024-04-19 09:43:43 -07:00
Harshavardhana 2ca9befd2a
add ILM + site-replication tests (#19554) 2024-04-19 05:48:19 -07:00
Harshavardhana 72f5cb577e
optimize ftp/sftp upload() implementations to avoid CPU load (#19552) 2024-04-19 05:23:42 -07:00
Robert Lützner 928c0181bf
cleanup: Simplify usage of MinIOSourceProxyRequest (#19553)
This replaces a convoluted condition that ultimately evaluated to

"is this HTTP header present in the request or not?"
2024-04-19 05:23:31 -07:00
Harshavardhana 03767d26da
fix: get rid of large buffers (#19549)
These lead to runaway memory usage
beyond what Go's GC can handle; we
have to revisit this differently, so remove
this for now.
2024-04-19 04:26:59 -07:00
Sveinn 108e6f92d4
updating tests to use new mc --enc flags (#19508) 2024-04-19 01:43:09 -07:00
Harshavardhana d653a59fc0 fix: flaky getHostIP test 2024-04-18 19:09:56 -07:00
Minio Trusted 01bfdf949a Update yaml files to latest version RELEASE.2024-04-18T19-09-19Z 2024-04-18 20:45:59 +00:00
Aditya Manthramurthy 98f7821eb3
fix: ldap: avoid unnecessary import errors (#19547)
Follow up for #19528

If there are multiple existing DN mappings for the same normalized DN
and they all have the same policy mapping value, we pick one of
them instead of returning an import error.
2024-04-18 12:09:19 -07:00
jiuker 2d3898e0d5
add ftp example to helm's values.yaml extraArgs field (#19541) 2024-04-18 08:57:22 -07:00
Aditya Manthramurthy ae46ce9937
ldap: Normalize DNs when importing (#19528)
This is a change to IAM export/import functionality. For LDAP enabled
setups, it performs additional validations:

- for policy mappings on LDAP users and groups, it ensures that the
corresponding user or group DN exists and if so uses a normalized form
of these DNs for storage

- for access keys (service accounts), it updates (i.e. validates
existence and normalizes) the internally stored parent user DN and group
DNs.

This allows for a migration path for setups in which LDAP mappings have
been stored in previous versions of the server, where the name of the
mapping file stored on drives is not in a normalized form.

An administrator needs to execute:

`mc admin iam export ALIAS`

followed by

`mc admin iam import ALIAS /path/to/export/file`

The validations are stricter and return errors when multiple
mappings are found for the same user/group DN. This is to ensure the
mappings stored by the server are unambiguous and to reduce the
potential for confusion.

Bonus **bug fix**: IAM export of access keys (service accounts) did not
export key name, description and expiration. This is fixed in this
change too.
2024-04-18 08:15:02 -07:00
Anis Eleuch dfc112c06b
list: Fix rare listing continuation freeze (#19524)
Reading the list metacache is not protected by a lock; the code retries when it fails
to read the metacache object, however, it forgot to re-read the metacache object
from the drives, which is necessary, especially if the metacache object is inlined.

This commit will ensure that we always re-read the metacache object from the drives
when it is retrying.
2024-04-17 21:42:11 -07:00
Shireesh Anjal ca5fab8656
Add cluster audit metrics in metrics-v3 (#19514)
endpoint: /minio/metrics/v3/cluster/audit
metrics:
- failed_messages (counter)
- total_messages (counter)
- target_queue_length (gauge)
2024-04-17 02:18:02 -07:00
Shireesh Anjal 6df76ca73c
Add system memory metrics in v3 (#19486)
Following memory metrics will be added under /system/memory

- available
- buffers
- cache
- free
- shared
- total
- used
- used_perc
2024-04-16 22:10:25 -07:00
Harshavardhana f65dd3e5a2
reload from drive tier-config when in-memory cache is not found (#19527)
avoid probing tier target while reloading() tier config
2024-04-16 22:09:58 -07:00
Harshavardhana a8d601b64a
allow detaching any non-normalized DN (#19525) 2024-04-16 17:36:43 -07:00
Viktor Szépe 73b4794cf7
Improve typos configuration (#19489) 2024-04-16 17:36:28 -07:00
Klaus Post e2709ea129
ftp: Return current time for prefixes/directories (#19519) 2024-04-16 17:35:55 -07:00
Allan Roger Reid 740ec80819
At server init, use the correct context when creating the KMS Master Key (#19526) 2024-04-16 17:34:45 -07:00
Harshavardhana d95e054282
update all deps regular cadence (#19523) 2024-04-16 11:48:56 -07:00
Allan Roger Reid 7c1f9667d1
Use GetDuration() helper for MINIO_KMS_KEY_CACHE_INTERVAL as time.Duration (#19512)
Bonus: Use default duration of 10 seconds if invalid input < time.Second is specified
2024-04-16 08:43:39 -07:00
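A sketch of the fallback rule, assuming a plain environment lookup rather than the server's config helpers:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

// keyCacheInterval parses MINIO_KMS_KEY_CACHE_INTERVAL as a time.Duration and
// falls back to 10s whenever the value is missing, unparsable, or < 1s.
func keyCacheInterval() time.Duration {
	const def = 10 * time.Second
	d, err := time.ParseDuration(os.Getenv("MINIO_KMS_KEY_CACHE_INTERVAL"))
	if err != nil || d < time.Second {
		return def
	}
	return d
}

func main() {
	os.Setenv("MINIO_KMS_KEY_CACHE_INTERVAL", "250ms") // below the minimum
	fmt.Println(keyCacheInterval())                    // 10s
}
```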
Klaus Post 9246990496
fix: ListObjectVersions returning duplicates when resuming with null version id (#19518)
When resuming a versioned listing where `version-id-marker=null`, the `null` object would 
always be returned, causing duplicate entries to be returned.

Add check against empty version
2024-04-16 08:41:27 -07:00
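A sketch of the duplicate-suppression guard, with the marker handling reduced to string comparisons (field names are illustrative):

```go
package main

import "fmt"

// shouldSkip reports whether an entry must be dropped when resuming a listing:
// if the caller resumed with version-id-marker=null, the null version of the
// marker object has already been returned and must not be emitted again.
func shouldSkip(name, versionID, keyMarker, versionIDMarker string) bool {
	return versionIDMarker == "null" && name == keyMarker &&
		(versionID == "" || versionID == "null")
}

func main() {
	fmt.Println(shouldSkip("obj", "null", "obj", "null")) // true: duplicate suppressed
	fmt.Println(shouldSkip("obj", "v2", "obj", "null"))   // false: newer version kept
}
```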
Markus Wagner 0cf3d93360
removed hardcoded datasource uid (#19477) 2024-04-15 03:03:01 -07:00
Harshavardhana cb06aee5ac
convert multipart-cleanup from a blocking unlink() to a rename to trash (#19495)
unlinking() at two different locations on a disk when there
is a lot to purge can lead to huge IO waits; instead,
rely on rename() to .trash to avoid running multiple unlinks()
in parallel.
2024-04-15 03:02:39 -07:00
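A sketch of the rename-to-trash approach, assuming a flat `.trash` directory on the same disk (the paths are illustrative, not the server's actual layout):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// moveToTrash replaces a potentially slow recursive unlink with a single
// rename into a .trash directory; a background sweeper can purge it later.
func moveToTrash(diskRoot, target string) error {
	trash := filepath.Join(diskRoot, ".trash")
	if err := os.MkdirAll(trash, 0o755); err != nil {
		return err
	}
	return os.Rename(target, filepath.Join(trash, filepath.Base(target)+".trash"))
}

func main() {
	root, _ := os.MkdirTemp("", "disk")
	obj := filepath.Join(root, "multipart-upload-dir")
	_ = os.MkdirAll(obj, 0o755)
	fmt.Println(moveToTrash(root, obj)) // <nil>
}
```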
Shubhendu 1c70e9ed1b
ILM expiry replication status only if enabled (#19503)
Report ILM expiry replication status only if at least one site has the
feature enabled.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-04-15 02:40:39 -07:00
jiuker f3d6a2dd37
code clean for dynamicSleeper (#19499) 2024-04-15 02:40:19 -07:00
Harshavardhana d1c58fc2eb
remove older deploymentID fix behavior to speed up startup (#19497)
since mid-2018 we do not have any deployments
without a deployment-id; it is time to put this
code to rest, and this PR removes it since it is
no longer valuable.

on setups with 1000's of drives these are all
quite expensive operations.
2024-04-15 01:25:46 -07:00
Allan Roger Reid b8f05b1471
Keep an up-to-date copy of the KMS master key (#19492) 2024-04-15 00:42:50 -07:00
Klaus Post e7baf78ee8
fix: list operations resuming when hitting different node (#19494)
The rest of the peer clients were not consistent across nodes. So, meta cache requests 
would not go to the same server if a continuation happens on a different node.
2024-04-12 11:13:36 -07:00
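Consistency here comes down to every node deriving the peer list in the same order. A minimal sketch under the assumption that sorting the endpoints is sufficient:

```go
package main

import (
	"fmt"
	"sort"
)

// orderedPeers returns the peer endpoints in a deterministic order so that a
// continuation request mapped to "peer i" lands on the same server no matter
// which node computes the list.
func orderedPeers(endpoints []string) []string {
	out := append([]string(nil), endpoints...)
	sort.Strings(out)
	return out
}

func main() {
	a := orderedPeers([]string{"http://node3:9000", "http://node1:9000", "http://node2:9000"})
	b := orderedPeers([]string{"http://node2:9000", "http://node3:9000", "http://node1:9000"})
	fmt.Println(a)
	fmt.Println(b) // identical ordering on every node
}
```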
guangwu 87299eba10
fix: close sessionPolicyFile in the sts-assume-role example (#19428) 2024-04-12 09:09:55 -07:00
Shubhendu d3a07c29ba
Correct sample for node scrape configuration (#19491)
As node metrics should be scraped on a per-node basis, use a sample
configuration listing all the nodes in targets.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-04-12 08:49:30 -07:00
Aditya Manthramurthy 8d39b715dc
Fix some CI warnings (#19482) 2024-04-12 02:25:58 -07:00
Harshavardhana 7e3166475d
simplify common functions in replication (#19480) 2024-04-11 17:27:32 -07:00
Klaus Post 5206c0e883
Inspect: Add error if no results (#19476)
When no results match or another error occurs, add an error to the stream. Keep the "inspect-input.txt" as the only thing in the zip for reference.

Example:

```
λ mc support inspect --airgap myminio/testbucket/fjghfjh/**
mc: Using public key from C:\Users\klaus\mc\support_public.pem
File data successfully downloaded as inspect-data.enc

λ inspect inspect-data.enc
Using private key from support_private.pem
output written to inspect-data.zip
2024/04/11 14:10:51 next stream: GetRawData: No files matched the given pattern

λ unzip -l inspect-data.zip
Archive:  inspect-data.zip
  Length      Date    Time    Name
---------  ---------- -----   ----
      222  2024-04-11 14:10   inspect-input.txt
---------                     -------
      222                     1 file

λ
```

Modifies inspect to read until end of stream to report the error.

Bonus: Add legacy commandline params
2024-04-11 14:22:47 -07:00
Harshavardhana 41ec038523
remove permission denied error from being treated as a drive error (#19478) 2024-04-11 14:22:15 -07:00
Shireesh Anjal 08d3d06a06
Add drive metrics in metrics-v3 (#19452)
Add following metrics:

- used_inodes
- total_inodes
- healing
- online
- reads_per_sec
- reads_kb_per_sec
- reads_await
- writes_per_sec
- writes_kb_per_sec
- writes_await
- perc_util

To be able to calculate the `per_sec` values, we capture the IOStats-related 
data in the beginning (along with the time at which they were captured), 
and compare them against the current values subsequently. This is because 
dividing by "time since server uptime." doesn't work in k8s environments.
2024-04-11 10:46:34 -07:00
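The rate calculation is a plain delta over the elapsed time between two captures. A minimal sketch (the snapshot fields are illustrative):

```go
package main

import (
	"fmt"
	"time"
)

type ioSnapshot struct {
	readSectors uint64
	takenAt     time.Time
}

// readsKBPerSec compares the current counters against the snapshot captured
// earlier, instead of dividing by time-since-process-start (which is wrong in
// k8s, where the node outlives the pod).
func readsKBPerSec(prev, curr ioSnapshot) float64 {
	elapsed := curr.takenAt.Sub(prev.takenAt).Seconds()
	if elapsed <= 0 {
		return 0
	}
	// 512-byte sectors -> KiB
	return float64(curr.readSectors-prev.readSectors) / 2 / elapsed
}

func main() {
	t0 := time.Now()
	prev := ioSnapshot{readSectors: 1000, takenAt: t0}
	curr := ioSnapshot{readSectors: 9000, takenAt: t0.Add(4 * time.Second)}
	fmt.Printf("%.1f KiB/s\n", readsKBPerSec(prev, curr)) // 1000.0 KiB/s
}
```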
Harshavardhana 074febd9e1
remove SetDiskLoc() rely on the endpoint values instead (#19475)
the disk location never changes in the lifetime of a
MinIO cluster; even if it did, validate this close to the
disk instead of at the higher layer.

Return appropriate errors indicating an invalid drive, so
that the drive is not recognized as a valid drive.
2024-04-11 10:45:28 -07:00
Harshavardhana aa8d25797b
update versioning tests to cover CopyObject() (#19472)
adds tests to cover #19468
2024-04-11 02:50:52 -07:00
Alex 8d7d4adb91
Updated Console UI to v1.2.0 (#19467) 2024-04-11 01:31:16 -07:00
Poorna ffa91f9794
fix CopyObject with replace overwriting inline status (#19468)
Fixes #19450 - internal inline-data header can get overwritten
during copy with replace before this fix.
2024-04-10 23:42:51 -07:00
Harshavardhana 0c31e61343
allow protection from invalid config values (#19460)
we have had numerous reports of some config
values not having default values, causing
features to misbehave because defaults were
not set properly.

This PR tries to address all these concerns
once and for all.

Each new sub-system that gets added

- must check for invalid keys
- must have default values set
- must not "return err" when being saved into
  a global state() instead collate as part of
  other subsystem errors allow other sub-systems
  to independently initialize.
2024-04-10 18:10:30 -07:00
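A sketch of this collect-don't-fail initialization pattern (the types here are hypothetical stand-ins for the server's config machinery):

```go
package main

import (
	"errors"
	"fmt"
)

type subSystem struct {
	name     string
	defaults map[string]string
	validate func(map[string]string) error
}

// initAll applies defaults, validates keys, and collates errors so one bad
// sub-system config does not prevent the others from initializing.
func initAll(subs []subSystem, cfg map[string]map[string]string) error {
	var errs []error
	for _, s := range subs {
		kv := cfg[s.name]
		if kv == nil {
			kv = map[string]string{}
			cfg[s.name] = kv
		}
		for k, v := range s.defaults {
			if _, ok := kv[k]; !ok {
				kv[k] = v // ensure every key has a default
			}
		}
		if err := s.validate(kv); err != nil {
			errs = append(errs, fmt.Errorf("%s: %w", s.name, err))
			continue // keep initializing the remaining sub-systems
		}
	}
	return errors.Join(errs...)
}

func main() {
	subs := []subSystem{{
		name:     "ilm",
		defaults: map[string]string{"expiry_workers": "100"},
		validate: func(kv map[string]string) error {
			if _, ok := kv["bogus_key"]; ok {
				return errors.New("invalid key: bogus_key")
			}
			return nil
		},
	}}
	cfg := map[string]map[string]string{"ilm": {"bogus_key": "x"}}
	fmt.Println(initAll(subs, cfg))
}
```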
Harshavardhana 9b926f7dbe
avoid busy loops in bad path component (#19466)
use it in places where we are looking
for such bad path components.
2024-04-10 18:08:52 -07:00
Harshavardhana 35d8728990
handle missing LDAP normalization in SetPolicy() API (#19465) 2024-04-10 15:37:42 -07:00
Allan Roger Reid f7ed9a75ba
Allow specifying the local server with env variable _MINIO_SERVER_LOCAL (#19453)
* Allow specifying the local server, with env variable _MINIO_SERVER_LOCAL, in systems where the hostname cannot be resolved to local IP

* Limit scope of the _MINIO_SERVER_LOCAL solution to only containerized implementations
2024-04-10 09:34:59 -07:00
jiuker 9496c17e13
doc: add Content-Type to s3zip (#19455) 2024-04-10 09:28:27 -07:00
jiuker ed64e91f06
fix: noHost for collectLocalMetric (#19457) 2024-04-10 09:28:08 -07:00
jiuker a481825ae1
fix: unknown contentType for ArchiveFileHandler (#19451) 2024-04-09 03:41:25 -07:00
Harshavardhana 7bb0f32332
make if-none-match PUT/POST RFC compliant (#19448)
fixes #19442
2024-04-09 01:17:49 -07:00
Anis Eleuch c6f8dc431e
Add a warning when the total size of an object versions exceeds 1 TiB (#19435) 2024-04-08 10:45:03 -07:00
Alexander Thaller 78f177b8ee
Allow setting readOnlyRootFilesystem in securityContext (#19437) 2024-04-08 09:31:05 -07:00
Anis Eleuch 787c44c39d
batch-repl: Do not allow both source/target to be remote (#19434)
Return an error when the user specifies endpoints for both source
and target. This can generate many types of errors, as the code considers
a deployment remote if its endpoint is specified.
2024-04-08 07:11:38 -07:00
Anis Eleuch f06fee0364
heal: Add more per disk healing result in the audit (#19427)
HealObject() does not return an error in some cases, for example, when
an object is successfully reconstructed on one disk but fails on other
disks; another case is when a disk that does not have the object is
temporarily disconnected.

Add the after-heal drives result to the audit output for better
analysis.
2024-04-08 02:26:14 -07:00
Harshavardhana c957e0d426
fix: increase the tiering part size to 128MiB (#19424)
also introduce an 8MiB buffer to read from for
bigger parts
2024-04-08 02:22:27 -07:00
Harshavardhana 04101d472f
fix: add fallbackDisks for disk healing (#19425) 2024-04-08 02:22:13 -07:00
Minio Trusted 51fc145161 Update yaml files to latest version RELEASE.2024-04-06T05-26-02Z 2024-04-06 06:44:30 +00:00
Taran Pelkey 9d63bb1b41
Added new API errors for LDAP (#19415)
* change internal errors to named errors

* Change names
2024-04-05 22:26:02 -07:00
Aditya Manthramurthy 8ff2a7a2b9
fix: IAM import/export: remove sts group handling (#19422)
There are no separate STS group mappings to be handled.

Also add tests for basic import/export sanity.
2024-04-05 20:13:35 -07:00
Harshavardhana 91f91d8f47
fix: a regression in IAM policy reload routine() (#19421)
all policy reloading has been broken since the last release, due to

48deccdc40

fixes #19417
2024-04-05 14:26:41 -07:00
Harshavardhana a207bd6790
turn-off Nlink readdir() optimization for NFS/CIFS (#19420)
fixes #19418
fixes #19416
2024-04-05 08:17:08 -07:00
Harshavardhana 96d226c0b1
remove frivolous log about abort-multipart failure in replication (#19413) 2024-04-05 04:39:55 -07:00
Krishnan Parthasarathi a86d98826d
Set object's original modTime when being restored (#19414)
Set object's modTime when being restored

restored here refers to making a temporary local copy in the hot tier
for a tiered object using the RestoreObject API
2024-04-05 04:39:31 -07:00
Harshavardhana 1bb670ecba
use new generics based LRU from hashicorp (#19409)
we have been using an LRU cache for internode
auth tokens; migrate to a typed implementation,
and also do not cache auth tokens on error.
2024-04-04 11:58:48 -07:00
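A sketch of the typed LRU usage together with the no-caching-of-errors rule, using `github.com/hashicorp/golang-lru/v2`; the surrounding function is illustrative:

```go
package main

import (
	"errors"
	"fmt"

	lru "github.com/hashicorp/golang-lru/v2"
)

var tokenCache, _ = lru.New[string, string](1024)

// authToken returns a cached internode token for a node; errors are returned
// but never stored, so a transient failure is not replayed from the cache.
func authToken(node string, mint func(string) (string, error)) (string, error) {
	if tok, ok := tokenCache.Get(node); ok {
		return tok, nil
	}
	tok, err := mint(node)
	if err != nil {
		return "", err // do not cache the error
	}
	tokenCache.Add(node, tok)
	return tok, nil
}

func main() {
	fails := func(string) (string, error) { return "", errors.New("temporary") }
	ok := func(string) (string, error) { return "token-abc", nil }
	_, err := authToken("node1", fails)
	fmt.Println(err)                    // temporary (nothing cached)
	fmt.Println(authToken("node1", ok)) // token-abc <nil> (now cached)
}
```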
Aditya Manthramurthy c9e9a8e2b9
fix: ldap: use validated base DNs (#19406)
This fixes a regression from #19358 which prevents policy mappings
created in the latest release from being displayed in policy entity
listing APIs.

This is due to the possibility that the base DNs in the LDAP config are
not in a normalized form, and #19358 introduced normalization of mapping
keys (user DNs and group DNs). When listing, we check if the policy
mappings are on entities that parse as valid DNs that are descendants of
the base DNs in the config.

Test added that demonstrates a failure without this fix.
2024-04-04 11:36:18 -07:00
jiuker 272367ccd2
feat: add memlimit flags for setMaxResources (#19400) 2024-04-04 05:06:57 -07:00
Anis Eleuch 95bf4a57b6
logging: Add subsystem to log API (#19002)
Create new code paths for multiple subsystems in the code. This will
make maintaining this easier later.

Also introduce bugLogIf() for errors that should not happen in the first
place.
2024-04-04 05:04:40 -07:00
Harshavardhana 2228eb61cb
Add more tests for ARN and its format (#19408)
Original work from #17566 modified to fit the new requirements
2024-04-04 01:31:34 -07:00
Alexander Thaller 5f07eb2d17
Add env variable MINIO_IDENTITY_OPENID_REDIRECT_URI to statefulset (#18949)
Using oidc.redirectUri in the values.yaml only works for the deployment.

When using the statefulset the environment variable
MINIO_IDENTITY_OPENID_REDIRECT_URI is not set. This leads to errors with
OIDC providers. For example, Keycloak throws the error 'invalid
redirect_uri'.

This pull request fixes that.
2024-04-03 23:34:45 -07:00
Shubhendu d96d696841
Don't use deprecated Angular (#19396)
Support for Angular will be dropped in newer versions of Grafana

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-04-03 19:01:53 -07:00
Harshavardhana e18c0ab9bf update vulncheck to go1.21.9
Signed-off-by: Harshavardhana <harsha@minio.io>
2024-04-03 19:00:59 -07:00
Andreas Auernhammer faeb2b7e79
use `GenerateKey` as more reliable KMS health-check (#19404)
This commit replaces the `KMS.Stat` API call with a
`KMS.GenerateKey` call. This approach is more reliable
since data key generation also works when the KMS backend
is unavailable (temp. offline), but KES has cached the
key. Ref: KES offline caching.

With this change, it is less likely that MinIO readiness
checks fail in cases where the KMS backend is offline.

Signed-off-by: Andreas Auernhammer <github@aead.dev>
2024-04-03 14:13:20 -07:00
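A sketch of a readiness probe built on `GenerateKey` rather than `Stat`; the KMS interface here is a hypothetical stand-in, not the server's actual client:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// keyManager is a hypothetical stand-in for the server's KMS client interface.
type keyManager interface {
	GenerateKey(ctx context.Context, keyID string) error
}

// kmsReady probes the KMS by generating a data key instead of calling Stat:
// with KES key caching, generation can still succeed while the KMS backend is
// temporarily offline, so readiness does not flap in that window.
func kmsReady(kms keyManager, keyID string) bool {
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	return kms.GenerateKey(ctx, keyID) == nil
}

// cachedKES pretends the key is served from the local KES key cache.
type cachedKES struct{}

func (cachedKES) GenerateKey(context.Context, string) error { return nil }

func main() {
	fmt.Println(kmsReady(cachedKES{}, "my-minio-key")) // true
}
```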
Anis Eleuch 97ce11cb6b
Avoid using a nil transport when the config is not initialized (#19405)
Make sure to avoid passing a nil transport pointer to minio-go when the API config
is not initialized; this ensures that we do not pass an interface
with a known type but a nil value.

This will also fix the update of the API remote_transport_deadline
configuration without requiring the cluster restart.
2024-04-03 11:27:05 -07:00
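The underlying Go pitfall: an interface holding a nil `*http.Transport` is itself non-nil, so the client would try to use it. A minimal demonstration:

```go
package main

import (
	"fmt"
	"net/http"
)

// pick converts a possibly-nil concrete transport into a clean interface value.
func pick(custom *http.Transport) http.RoundTripper {
	if custom == nil {
		return nil // truly nil interface: the client falls back to its default
	}
	return custom
}

func main() {
	var tr *http.Transport // config not initialized yet

	var wrong http.RoundTripper = tr // typed nil: interface is non-nil
	fmt.Println(wrong == nil)        // false -> client would try to use it

	right := pick(tr)
	fmt.Println(right == nil) // true -> client uses its own transport
}
```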
Harshavardhana d7daae4762
update to latest deps (#19399)
Signed-off-by: Harshavardhana <harsha@minio.io>
2024-04-03 09:28:32 -07:00
jiuker 3d86ae12bc
feat: support EdDSA/Ed25519 for oss (#19397) 2024-04-02 16:02:35 -07:00
Sveinn ba46ee5dfa
Adding console targets back into systemtarget log slice (#19398) 2024-04-02 15:56:14 -07:00
Klaus Post 912bbb2f1d
Always return slice with cap (#19395)
Documentation promised this, so we should do it as well. Try to get a pooled buffer, and stash it back if it isn't big enough.
2024-04-02 08:56:18 -07:00
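A sketch of the cap check against a `sync.Pool` (pool size and names are illustrative):

```go
package main

import (
	"fmt"
	"sync"
)

var bufPool = sync.Pool{New: func() any { b := make([]byte, 0, 32<<10); return &b }}

// getBuffer always hands back a slice with at least the requested capacity,
// as the documentation promises; a too-small pooled buffer is stashed back
// and a fresh allocation is returned instead.
func getBuffer(n int) []byte {
	bp := bufPool.Get().(*[]byte)
	if cap(*bp) >= n {
		return (*bp)[:0]
	}
	bufPool.Put(bp) // too small: return it to the pool and allocate
	return make([]byte, 0, n)
}

func main() {
	b := getBuffer(64 << 10)
	fmt.Println(cap(b) >= 64<<10) // true
}
```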
Harshavardhana 4f660a8eb7
fix: missing metrics for healed objects (#19392)
all successfully healed objects via queueHealTask
in a non-blocking heal weren't being reported
correctly; this PR fixes that comprehensively.
2024-04-01 23:48:36 -07:00
Praveen raj Mani ae4fb1b72e
Prioritize the bucket configs first during the decommissioning (#19393) 2024-04-01 23:48:26 -07:00
Klaus Post b435806d91
Reduce big message RPC allocations (#19390)
Use `ODirectPoolSmall` buffers for inline data in PutObject.

Add a separate call for inline data that will fetch a buffer for the inline data before unmarshal.
2024-04-01 16:42:09 -07:00
Minio Trusted 06929258bc Update yaml files to latest version RELEASE.2024-03-30T09-41-56Z 2024-03-30 18:32:38 +00:00
Harshavardhana cb577835d9 add curl to hotfix, release.fips 2024-03-30 02:41:56 -07:00
Harshavardhana 7f35f74f14 add missing curl for other platforms 2024-03-29 12:10:49 -07:00
Klaus Post 3d6194e93c
Remove empty replication stats (#19385)
When sending final stats upstream also trim empty ReplicationStats.
2024-03-29 11:57:52 -07:00
Harshavardhana 72c7845f7e
add static curl to container (#19383) 2024-03-29 08:08:52 -07:00
Harshavardhana 1c99597a06
update() inlineBlock settings properly in storageClass config (#19382) 2024-03-29 08:07:06 -07:00
Harshavardhana feb9d8480b
add auditing for healing objects (#19379) 2024-03-28 16:46:19 -07:00
Aditya Manthramurthy 4e670458b8
fix: CI warnings (#19380) 2024-03-28 16:44:49 -07:00
Aditya Manthramurthy 48deccdc40
fix: sts accounts map refresh and fewer list calls (#19376)
This fixes a bug where the STS accounts map accumulated accounts in memory
and never removed expired accounts, and the STS policy mappings were not
being refreshed.

The STS purge routine now runs with every IAM credentials load instead
of every 4th time.

The listing of IAM files is now cached on every IAM load operation to
prevent re-listing for STS accounts purging/reload.

Additionally, this change makes each server pick a time for IAM loading
that is randomly distributed over a 10-minute interval; this is to
prevent servers from thundering while performing the IAM load.

On average, IAM loading will happen between every 5-15min after the
previous IAM load operation completes.
2024-03-28 16:43:50 -07:00
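A sketch of the jittered schedule described above, assuming a uniform pick in the 5-15 minute window:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// nextIAMLoad picks a per-server delay uniformly in [5min, 15min) so that the
// servers do not all reload IAM at the same instant (no thundering herd).
func nextIAMLoad() time.Duration {
	return 5*time.Minute + time.Duration(rand.Int63n(int64(10*time.Minute)))
}

func main() {
	for i := 0; i < 3; i++ {
		fmt.Println(nextIAMLoad().Round(time.Second))
	}
}
```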
Andi Bräu 2eee744e34
Fix issue [#19314], resolve the absence of the sed command in ub… (#19315)
Fix issue [minio#19314], resolve the absence of the sed command in ubi-micro by replacing it with echo.

Signed-off-by: Andreas Bräu <ab@andi95.de>
Co-authored-by: jiuker <2818723467@qq.com>
2024-03-28 16:20:50 -07:00
Kaan Kabalak 3f72439b8a
Suppress error log for force-deleting object in locked bucket (#19378) 2024-03-28 14:37:42 -07:00
Shubhendu 468a9fae83
Enable replication of SSE-C objects (#19107)
If site replication is enabled across sites, replicate the SSE-C
objects as well. These objects can be read from target sites
using the same client encryption keys.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-03-28 10:44:56 -07:00
Shubhendu d87f91720b
Split the replication dashboard in cluster and node level (#19374)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-03-28 10:15:39 -07:00
Klaus Post aa0eec16ab
Remove empty replication stats when sending update (#19375)
When sending an update and there are no replication stats, remove the struct.

This removes an unneeded alloc on the receiver.
2024-03-28 10:13:07 -07:00
Shubhendu d63e603040
Pre populate the server names using a query (#19367)
Users don't need to remember and enter the server values;
rather, they can select from the pre-populated list.

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-03-28 08:14:26 -07:00
jiuker 8222a640ac
fix: slice append loses the data for NSScanner (#19373) 2024-03-28 08:13:36 -07:00
Aditya Manthramurthy 7e45d84ace
ldap: improve normalization of DN values (#19358)
Instead of relying on user input values, we use the DN value returned by
the LDAP server.

This handles cases like when a mapping is set on a DN value
`uid=svc.algorithm,OU=swengg,DC=min,DC=io` with a user input value (with
unicode variation) of `uid=svc﹒algorithm,OU=swengg,DC=min,DC=io`. The
LDAP server on lookup of this DN returns the normalized value where the
unicode dot character `SMALL FULL STOP` (in the user input) gets
replaced with a regular full stop.
2024-03-27 23:45:26 -07:00
Harshavardhana 139a606f0a
use bigger partSize per part for tiering to MinIO (#19361)
Bonus: remove persistent md5sum calculation and turn off
sha256 as well. Instead we always enable crc32c, which
is enough for payload verification, and also add support
for trailing-header checksums.
2024-03-27 23:45:08 -07:00
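A sketch of the checksum switch: CRC32-C (Castagnoli) from the Go standard library in place of MD5/SHA-256 for payload verification:

```go
package main

import (
	"fmt"
	"hash/crc32"
)

var castagnoli = crc32.MakeTable(crc32.Castagnoli)

// partChecksum computes the CRC32-C used for payload verification; it is far
// cheaper than MD5 or SHA-256 while still catching transfer corruption.
func partChecksum(p []byte) uint32 {
	return crc32.Checksum(p, castagnoli)
}

func main() {
	fmt.Printf("%08x\n", partChecksum([]byte("hello tiering part")))
}
```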
Harshavardhana 289223b6de
expire ILM all versions verify quorum on action (#19359) 2024-03-27 23:44:52 -07:00
Harshavardhana c61dd16a1e
fix: avoid fan-out DeletePrefix calls for batch-expire and ILM (#19365) 2024-03-27 20:18:15 -07:00
Harshavardhana 3e38fa54a5
set max versions to be IntMax to avoid premature failures (#19360)
let users/customers set relevant values; make the default
value non-applicable.
2024-03-27 18:08:07 -07:00
jiuker 4a02189ba0
feat: add env to choose which node to start decom (#19310)
add a temporary env _MINIO_DECOM_ENDPOINT to choose
the node to start decom from, for situations when the first node of
the first pool is not available.
2024-03-27 16:18:40 -07:00
Shubhendu 3d4fc28ec9
Render node graphs by node (#19356)
As total drive count and online vs offline are reported on a per-node basis,
it's correct to select the node for which graphs need to be rendered.

Set prometheus scrape jobs to fetch metrics from all nodes. A sample
scrape job for node metrics could be as below

```
- job_name: minio-job-node
  bearer_token: <token>
  metrics_path: /minio/v2/metrics/node
  scheme: https
  tls_config:
    insecure_skip_verify: true
  static_configs:
  - targets: [tenant1-ss-0-0.tenant1-hl.tenant-ns.svc.cluster.local:9000,tenant1-ss-0-1.tenant1-hl.tenant-ns.svc.cluster.local:9000,tenant1-ss-0-2.tenant1-hl.tenant-ns.svc.cluster.local:9000,tenant1-ss-0-3.tenant1-hl.tenant-ns.svc.cluster.local:9000]
```

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-03-27 10:41:08 -07:00
jiuker ec3a3bb10d
fix: Remove unnecessary loops for searchParent (#19353) 2024-03-27 08:12:14 -07:00
Harshavardhana 364d3a0ac9
fix: new staticheck and linter issues reported (#19340) 2024-03-27 08:10:40 -07:00
Harshavardhana cb536a73eb use pkger v2.2.9 2024-03-26 16:49:14 -07:00
Minio Trusted 428155add9 Update yaml files to latest version RELEASE.2024-03-26T22-10-45Z 2024-03-26 23:12:03 +00:00
Poorna 8bce123bba
fix: precondition check for multipart with existing object replication (#19349) 2024-03-26 15:10:45 -07:00
Harshavardhana 0a56dbde2f
allow configuring inline shard size value (#19336) 2024-03-26 15:06:19 -07:00
Klaus Post 7ff4164d65
Fix races in IAM cache lazy loading (#19346)
Fix races in IAM cache

Fixes #19344

On the top level we only grab a read lock, but we write to the cache if we manage to fetch it.

a03dac41eb/cmd/iam-store.go (L446) is also flipped to what it should be AFAICT.

Change the internal cache structure to a concurrency safe implementation.

Bonus: Also switch grid implementation.
2024-03-26 11:12:57 -07:00
Shubhendu 53a14c7301
Adding dashboard for MinIO node metrics (#19329)
Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
2024-03-26 08:01:28 -07:00
Harshavardhana dc45a5010d
bring back minor DNS cache for k8s setups (#19341)
k8s as it stands is flaky in DNS lookups;
bring this change back so that we can
cache DNS with at least a 30-second TTL.
2024-03-26 08:00:38 -07:00
jiuker 4b9192034c
fix: should return when an error happens (#19342) 2024-03-26 07:51:56 -07:00
Harshavardhana deeadd1a37
fix: convert multiple callers to use toStorageErr(err) correctly (#19339)
we must attempt to convert all errors at storage-rest-client
into StorageErr() regardless of which functionality is being
called; this PR fixes this for multiple callers, including
some internally used functions.
2024-03-25 23:24:59 -07:00
Sveinn 1fc4203c19
Webhook targets refactor and bug fixes (#19275)
- old version was unable to retain messages during config reload
- old version could not go from memory to disk during reload
- new version can batch disk queue entries to reduce I/O load
- error logging has been improved; the previous version would miss certain errors.
- logic for spawning/despawning additional workers has been adjusted to trigger when half capacity is reached, instead of when the log queue becomes full.
- old version would JSON-marshal twice and unmarshal once for every log item. Now we only marshal once, then GetRaw from the store and send it without having to re-marshal.
2024-03-25 09:44:20 -07:00
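A sketch of the marshal-once store described in the last bullet (types and method names are illustrative, not the actual store interface):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type entry struct {
	Level   string `json:"level"`
	Message string `json:"message"`
}

// rawStore marshals each log item exactly once on Put and hands the stored
// bytes back verbatim on GetRaw, so sending to the webhook needs no re-marshal.
type rawStore struct{ items [][]byte }

func (s *rawStore) Put(e entry) error {
	b, err := json.Marshal(e)
	if err != nil {
		return err
	}
	s.items = append(s.items, b)
	return nil
}

func (s *rawStore) GetRaw(i int) []byte { return s.items[i] }

func main() {
	var s rawStore
	_ = s.Put(entry{Level: "error", Message: "target down"})
	fmt.Println(string(s.GetRaw(0)))
}
```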
Minio Trusted 15b930be1f Update yaml files to latest version RELEASE.2024-03-21T23-13-43Z 2024-03-22 20:08:28 +00:00
466 changed files with 33422 additions and 16419 deletions

View File

@ -9,6 +9,6 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: 'Checkout Repository'
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: 'Dependency Review'
uses: actions/dependency-review-action@v1
uses: actions/dependency-review-action@v4

View File

@ -3,12 +3,11 @@ name: Crosscompile
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
@ -21,11 +20,11 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c # v3
- uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true

View File

@ -3,8 +3,7 @@ name: FIPS Build Test
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@ -21,11 +20,11 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}

View File

@ -3,8 +3,7 @@ name: Healing Functional Tests
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@ -21,11 +20,11 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true

View File

@ -3,12 +3,11 @@ name: Linters and Tests
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
concurrency:
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref }}
cancel-in-progress: true
@ -21,23 +20,24 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
os: [ubuntu-latest, windows-latest]
go-version: [1.22.x]
os: [ubuntu-latest, Windows]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true
- name: Build on ${{ matrix.os }}
if: matrix.os == 'windows-latest'
if: matrix.os == 'Windows'
env:
CGO_ENABLED: 0
GO111MODULE: on
run: |
Set-MpPreference -DisableRealtimeMonitoring $true
netsh int ipv4 set dynamicport tcp start=60000 num=61000
go build --ldflags="-s -w" -o %GOPATH%\bin\minio.exe
go test -v --timeout 50m ./...
go test -v --timeout 120m ./...
- name: Build on ${{ matrix.os }}
if: matrix.os == 'ubuntu-latest'
env:

View File

@ -3,8 +3,7 @@ name: Functional Tests
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@ -21,11 +20,11 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true

View File

@ -3,8 +3,7 @@ name: Helm Chart linting
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@ -20,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Install Helm
uses: azure/setup-helm@v3

View File

@ -3,8 +3,7 @@ name: IAM integration
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@ -62,7 +61,7 @@ jobs:
# are turned off - i.e. if ldap="", then ldap server is not enabled for
# the tests.
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
ldap: ["", "localhost:389"]
etcd: ["", "http://localhost:2379"]
openid: ["", "http://127.0.0.1:5556/dex"]
@ -76,8 +75,8 @@ jobs:
openid: "http://127.0.0.1:5556/dex"
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true
@ -112,6 +111,12 @@ jobs:
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
go run docs/iam/access-manager-plugin.go &
make test-iam
- name: Test MinIO Old Version data to IAM import current version
if: matrix.ldap == 'ldaphost:389'
env:
_MINIO_LDAP_TEST_SERVER: ${{ matrix.ldap }}
run: |
make test-iam-ldap-upgrade-import
- name: Test LDAP for automatic site replication
if: matrix.ldap == 'localhost:389'
run: |

View File

@ -4,7 +4,6 @@ on:
pull_request:
branches:
- master
- next
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@ -25,12 +24,12 @@ jobs:
sudo -S rm -rf ${GITHUB_WORKSPACE}
mkdir ${GITHUB_WORKSPACE}
- name: checkout-step
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: setup-go-step
uses: actions/setup-go@v2
uses: actions/setup-go@v5
with:
go-version: 1.21.x
go-version: 1.22.x
- name: github sha short
id: vars
@ -56,6 +55,10 @@ jobs:
run: |
${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "erasure" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
- name: resiliency
run: |
${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "resiliency" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
- name: The job must cleanup
if: ${{ always() }}
run: |

View File

@ -0,0 +1,78 @@
version: '3.7'
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: quay.io/minio/minio:${JOB_NAME}
command: server --console-address ":9001" http://minio{1...4}/rdata{1...2}
expose:
- "9000"
- "9001"
environment:
MINIO_CI_CD: "on"
MINIO_ROOT_USER: "minio"
MINIO_ROOT_PASSWORD: "minio123"
MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
MINIO_DRIVE_MAX_TIMEOUT: "5s"
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5
# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
minio1:
<<: *minio-common
hostname: minio1
volumes:
- rdata1-1:/rdata1
- rdata1-2:/rdata2
minio2:
<<: *minio-common
hostname: minio2
volumes:
- rdata2-1:/rdata1
- rdata2-2:/rdata2
minio3:
<<: *minio-common
hostname: minio3
volumes:
- rdata3-1:/rdata1
- rdata3-2:/rdata2
minio4:
<<: *minio-common
hostname: minio4
volumes:
- rdata4-1:/rdata1
- rdata4-2:/rdata2
nginx:
image: nginx:1.19.2-alpine
hostname: nginx
volumes:
- ./nginx-4-node.conf:/etc/nginx/nginx.conf:ro
ports:
- "9000:9000"
- "9001:9001"
depends_on:
- minio1
- minio2
- minio3
- minio4
## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
rdata1-1:
rdata1-2:
rdata2-1:
rdata2-2:
rdata3-1:
rdata3-2:
rdata4-1:
rdata4-2:

View File

@ -23,10 +23,9 @@ http {
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server minio1:9000;
server minio2:9000;
server minio3:9000;
server minio4:9000;
server minio1:9000 max_fails=1 fail_timeout=10s;
server minio2:9000 max_fails=1 fail_timeout=10s;
server minio3:9000 max_fails=1 fail_timeout=10s;
}
upstream console {

View File

@ -23,14 +23,14 @@ http {
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server minio1:9000;
server minio2:9000;
server minio3:9000;
server minio4:9000;
server minio5:9000;
server minio6:9000;
server minio7:9000;
server minio8:9000;
server minio1:9000 max_fails=1 fail_timeout=10s;
server minio2:9000 max_fails=1 fail_timeout=10s;
server minio3:9000 max_fails=1 fail_timeout=10s;
server minio4:9000 max_fails=1 fail_timeout=10s;
server minio5:9000 max_fails=1 fail_timeout=10s;
server minio6:9000 max_fails=1 fail_timeout=10s;
server minio7:9000 max_fails=1 fail_timeout=10s;
server minio8:9000 max_fails=1 fail_timeout=10s;
}
upstream console {

View File

@ -23,10 +23,10 @@ http {
# include /etc/nginx/conf.d/*.conf;
upstream minio {
server minio1:9000;
server minio2:9000;
server minio3:9000;
server minio4:9000;
server minio1:9000 max_fails=1 fail_timeout=10s;
server minio2:9000 max_fails=1 fail_timeout=10s;
server minio3:9000 max_fails=1 fail_timeout=10s;
server minio4:9000 max_fails=1 fail_timeout=10s;
}
upstream console {

View File

@ -24,11 +24,6 @@ if [ ! -f ./mc ]; then
chmod +x mc
fi
(
cd ./docs/debugging/s3-check-md5
go install -v
)
export RELEASE=RELEASE.2023-08-29T23-07-35Z
docker-compose -f docker-compose-site1.yaml up -d
@ -48,10 +43,10 @@ sleep 30s
sleep 5
s3-check-md5 -h
./s3-check-md5 -h
failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
if [ $failed_count_site1 -ne 0 ]; then
echo "failed with multipart on site1 uploads"
@ -67,8 +62,8 @@ fi
sleep 5
failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
## we do not need to fail here, since we are going to test
## upgrading to master, healing and being able to recover
@ -96,8 +91,8 @@ for i in $(seq 1 10); do
./mc admin heal -r --remove --json site2/ 2>&1 >/dev/null
done
failed_count_site1=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site1=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site1-nginx:9001 -bucket testbucket 2>&1 | grep FAILED | wc -l)
failed_count_site2=$(./s3-check-md5 -versions -access-key minioadmin -secret-key minioadmin -endpoint http://site2-nginx:9002 -bucket testbucket 2>&1 | grep FAILED | wc -l)
if [ $failed_count_site1 -ne 0 ]; then
echo "failed with multipart on site1 uploads"
@ -109,6 +104,43 @@ if [ $failed_count_site2 -ne 0 ]; then
exit 1
fi
# Add user group test
./mc admin user add site1 site-replication-issue-user site-replication-issue-password
./mc admin group add site1 site-replication-issue-group site-replication-issue-user
max_wait_attempts=30
wait_interval=5
attempt=1
while true; do
diff <(./mc admin group info site1 site-replication-issue-group) <(./mc admin group info site2 site-replication-issue-group)
if [[ $? -eq 0 ]]; then
echo "Outputs are consistent."
break
fi
remaining_attempts=$((max_wait_attempts - attempt))
if ((attempt >= max_wait_attempts)); then
echo "Outputs remain inconsistent after $max_wait_attempts attempts. Exiting with error."
exit 1
else
echo "Outputs are inconsistent. Waiting for $wait_interval seconds (attempt $attempt/$max_wait_attempts)."
sleep $wait_interval
fi
((attempt++))
done
status=$(./mc admin group info site1 site-replication-issue-group --json | jq .groupStatus | tr -d '"')
if [[ $status == "enabled" ]]; then
echo "Success"
else
echo "Expected status: enabled, actual status: $status"
exit 1
fi
cleanup
## change working directory

View File

@ -3,8 +3,7 @@ name: MinIO advanced tests
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@ -22,11 +21,11 @@ jobs:
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true
@ -36,6 +35,18 @@ jobs:
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-decom
- name: Test ILM
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-ilm
- name: Test PBAC
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0
sudo sysctl net.ipv6.conf.default.disable_ipv6=0
make test-pbac
- name: Test Config File
run: |
sudo sysctl net.ipv6.conf.all.disable_ipv6=0

View File

@ -3,8 +3,7 @@ name: Root lockdown tests
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@ -21,12 +20,12 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true

View File

@ -16,7 +16,7 @@ docker volume rm $(docker volume ls -f dangling=true) || true
cd .github/workflows/mint
docker-compose -f minio-${MODE}.yaml up -d
sleep 30s
sleep 1m
docker system prune -f || true
docker volume prune -f || true
@ -26,6 +26,9 @@ docker volume rm $(docker volume ls -q -f dangling=true) || true
[ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio2
[ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio6
# Pause one node, to check that all S3 calls work while one node goes wrong
[ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml pause minio4
docker run --rm --net=mint_default \
--name="mint-${MODE}-${JOB_NAME}" \
-e SERVER_ENDPOINT="nginx:9000" \
@ -35,6 +38,18 @@ docker run --rm --net=mint_default \
-e MINT_MODE="${MINT_MODE}" \
docker.io/minio/mint:edge
# FIXME: enable this after fixing aws-sdk-java-v2 tests
# # unpause the node, to check that all S3 calls work while one node goes wrong
# [ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml unpause minio4
# [ "${MODE}" == "resiliency" ] && docker run --rm --net=mint_default \
# --name="mint-${MODE}-${JOB_NAME}" \
# -e SERVER_ENDPOINT="nginx:9000" \
# -e ACCESS_KEY="${ACCESS_KEY}" \
# -e SECRET_KEY="${SECRET_KEY}" \
# -e ENABLE_HTTPS=0 \
# -e MINT_MODE="${MINT_MODE}" \
# docker.io/minio/mint:edge
docker-compose -f minio-${MODE}.yaml down || true
sleep 10s

View File

@ -3,8 +3,7 @@ name: Shell formatting checks
on:
pull_request:
branches:
- master
- next
- master
permissions:
contents: read
@ -14,7 +13,7 @@ jobs:
name: runner / shfmt
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
- uses: luizm/action-sh-checker@master
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

View File

@ -1,5 +1,5 @@
---
name: Test GitHub Action
name: Spelling
on: [pull_request]
jobs:

View File

@ -3,8 +3,7 @@ name: Upgrade old version tests
on:
pull_request:
branches:
- master
- next
- master
# This ensures that previous jobs for the PR are canceled when the PR is
# updated.
@ -21,12 +20,12 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
go-version: [1.21.x]
go-version: [1.22.x]
os: [ubuntu-latest]
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: ${{ matrix.go-version }}
check-latest: true

View File

@ -17,15 +17,15 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v3
uses: actions/checkout@v4
- name: Set up Go
uses: actions/setup-go@v3
uses: actions/setup-go@v5
with:
go-version: 1.21.8
go-version: 1.22.3
check-latest: true
- name: Get official govulncheck
run: go install golang.org/x/vuln/cmd/govulncheck@latest
shell: bash
- name: Run govulncheck
run: govulncheck ./...
run: govulncheck -show verbose ./...
shell: bash

11
.gitignore vendored
View File

@ -43,4 +43,13 @@ docs/debugging/inspect/inspect
docs/debugging/pprofgoparser/pprofgoparser
docs/debugging/reorder-disks/reorder-disks
docs/debugging/populate-hard-links/populate-hardlinks
docs/debugging/xattr/xattr
docs/debugging/xattr/xattr
hash-set
healing-bin
inspect
pprofgoparser
reorder-disks
s3-check-md5
s3-verify
xattr
xl-meta

View File

@ -12,20 +12,27 @@ extend-ignore-re = [
"[0-9A-Za-z/+=]{64}",
"ZXJuZXQxDjAMBgNVBA-some-junk-Q4wDAYDVQQLEwVNaW5pbzEOMAwGA1UEAxMF",
"eyJmb28iOiJiYXIifQ",
"eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.*",
"MIIDBTCCAe2gAwIBAgIQWHw7h.*",
'http\.Header\{"X-Amz-Server-Side-Encryptio":',
'sessionToken',
"ZoEoZdLlzVbOlT9rbhD7ZN7TLyiYXSAlB79uGEge",
]
[default.extend-words]
"encrypter" = "encrypter"
"kms" = "kms"
"requestor" = "requestor"
[default.extend-identifiers]
"bui" = "bui"
"toi" = "toi"
"ot" = "ot"
"dm2nd" = "dm2nd"
"HashiCorp" = "HashiCorp"
[type.go.extend-identifiers]
"bui" = "bui"
"dm2nd" = "dm2nd"
"ot" = "ot"
"ParseND" = "ParseND"
"ParseNDStream" = "ParseNDStream"
"pn" = "pn"
"TestGetPartialObjectMisAligned" = "TestGetPartialObjectMisAligned"
"thr" = "thr"
"toi" = "toi"

4531
CREDITS

File diff suppressed because it is too large

View File

@ -21,6 +21,11 @@ RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
chmod +x /go/bin/mc
RUN if [ "$TARGETARCH" = "amd64" ]; then \
curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
chmod +x /go/bin/curl; \
fi
# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
@ -49,6 +54,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/mc /usr/bin/mc
COPY --from=build /go/bin/cur* /usr/bin/
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

View File

@ -21,6 +21,11 @@ RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
chmod +x /go/bin/mc
RUN if [ "$TARGETARCH" = "amd64" ]; then \
curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
chmod +x /go/bin/curl; \
fi
# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
@ -49,6 +54,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/mc /usr/bin/mc
COPY --from=build /go/bin/cur* /usr/bin/
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

View File

@ -16,6 +16,11 @@ RUN curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archiv
curl -s -q https://dl.min.io/server/minio/release/linux-${TARGETARCH}/archive/minio.${RELEASE}.fips.minisig -o /go/bin/minio.minisig && \
chmod +x /go/bin/minio
RUN if [ "$TARGETARCH" = "amd64" ]; then \
curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
chmod +x /go/bin/curl; \
fi
# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
@ -41,6 +46,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/cur* /usr/bin/
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

View File

@ -21,6 +21,11 @@ RUN curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc -o /go
curl -s -q https://dl.min.io/client/mc/release/linux-${TARGETARCH}/mc.minisig -o /go/bin/mc.minisig && \
chmod +x /go/bin/mc
RUN if [ "$TARGETARCH" = "amd64" ]; then \
curl -L -s -q https://github.com/moparisthebest/static-curl/releases/latest/download/curl-${TARGETARCH} -o /go/bin/curl; \
chmod +x /go/bin/curl; \
fi
# Verify binary signature using public key "RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGavRUN"
RUN minisign -Vqm /go/bin/minio -x /go/bin/minio.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav && \
minisign -Vqm /go/bin/mc -x /go/bin/mc.minisig -P RWTx5Zr1tiHQLwG9keckT0c45M3AGeHD6IvimQHpyRywVWGbP1aVSGav
@ -49,6 +54,7 @@ ENV MINIO_ACCESS_KEY_FILE=access_key \
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /go/bin/minio /usr/bin/minio
COPY --from=build /go/bin/mc /usr/bin/mc
COPY --from=build /go/bin/cur* /usr/bin/
COPY CREDITS /licenses/CREDITS
COPY LICENSE /licenses/LICENSE

View File

@ -45,7 +45,7 @@ lint-fix: getdeps ## runs golangci-lint suite of linters with automatic fixes
@$(GOLANGCI) run --build-tags kqueue --timeout=10m --config ./.golangci.yml --fix
check: test
test: verifiers build build-debugging ## builds minio, runs linters, tests
test: verifiers build ## builds minio, runs linters, tests
@echo "Running unit tests"
@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -v -tags kqueue ./...
@ -53,12 +53,21 @@ test-root-disable: install-race
@echo "Running minio root lockdown tests"
@env bash $(PWD)/buildscripts/disable-root.sh
test-ilm: install-race
@echo "Running ILM tests"
@env bash $(PWD)/docs/bucket/replication/setup_ilm_expiry_replication.sh
test-pbac: install-race
@echo "Running bucket policies tests"
@env bash $(PWD)/docs/iam/policies/pbac-tests.sh
test-decom: install-race
@echo "Running minio decom tests"
@env bash $(PWD)/docs/distributed/decom.sh
@env bash $(PWD)/docs/distributed/decom-encrypted.sh
@env bash $(PWD)/docs/distributed/decom-encrypted-sse-s3.sh
@env bash $(PWD)/docs/distributed/decom-compressed-sse-s3.sh
@env bash $(PWD)/docs/distributed/decom-encrypted-kes.sh
test-versioning: install-race
@echo "Running minio versioning tests"
@ -81,6 +90,10 @@ test-iam: build ## verify IAM (external IDP, etcd backends)
@echo "Running tests for IAM (external IDP, etcd backends) with -race"
@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue -v -run TestIAM* ./cmd
test-iam-ldap-upgrade-import: build ## verify IAM (external LDAP IDP)
@echo "Running upgrade tests for IAM (LDAP backend)"
@env bash $(PWD)/buildscripts/minio-iam-ldap-upgrade-import-test.sh
test-sio-error:
@(env bash $(PWD)/docs/bucket/replication/sio-error.sh)
@ -107,37 +120,39 @@ test-site-replication-oidc: install-race ## verify automatic site replication
test-site-replication-minio: install-race ## verify automatic site replication
@echo "Running tests for automatic site replication of IAM (with MinIO IDP)"
@(env bash $(PWD)/docs/site-replication/run-multi-site-minio-idp.sh)
@echo "Running tests for automatic site replication of SSE-C objects"
@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication.sh)
@echo "Running tests for automatic site replication of SSE-C objects with SSE-KMS enabled for bucket"
@(env bash $(PWD)/docs/site-replication/run-sse-kms-object-replication.sh)
@echo "Running tests for automatic site replication of SSE-C objects with compression enabled for site"
@(env bash $(PWD)/docs/site-replication/run-ssec-object-replication-with-compression.sh)
verify: ## verify minio various setups
verify: install-race ## verify minio various setups
@echo "Verifying build with race"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-build.sh)
verify-healing: ## verify healing and replacing disks with minio binary
verify-healing: install-race ## verify healing and replacing disks with minio binary
@echo "Verify healing build with race"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-healing.sh)
@(env bash $(PWD)/buildscripts/verify-healing-empty-erasure-set.sh)
@(env bash $(PWD)/buildscripts/heal-inconsistent-versions.sh)
verify-healing-with-root-disks: ## verify healing root disks
verify-healing-with-root-disks: install-race ## verify healing root disks
@echo "Verify healing with root drives"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/verify-healing-with-root-disks.sh)
verify-healing-with-rewrite: ## verify healing to rewrite old xl.meta -> new xl.meta
verify-healing-with-rewrite: install-race ## verify healing to rewrite old xl.meta -> new xl.meta
@echo "Verify healing with rewrite"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/rewrite-old-new.sh)
verify-healing-inconsistent-versions: ## verify resolving inconsistent versions
verify-healing-inconsistent-versions: install-race ## verify resolving inconsistent versions
@echo "Verify resolving inconsistent versions build with race"
@GORACE=history_size=7 CGO_ENABLED=1 go build -race -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@(env bash $(PWD)/buildscripts/resolve-right-versions.sh)
build-debugging:
@(env bash $(PWD)/docs/debugging/build.sh)
build: checks ## builds minio to $(PWD)
build: checks build-debugging ## builds minio to $(PWD)
@echo "Building minio binary to './minio'"
@CGO_ENABLED=0 go build -tags kqueue -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@ -147,9 +162,9 @@ hotfix-vars:
$(eval VERSION := $(shell git describe --tags --abbrev=0).hotfix.$(shell git rev-parse --short HEAD))
hotfix: hotfix-vars clean install ## builds minio binary with hotfix tags
@wget -q -c https://github.com/minio/pkger/releases/download/v2.2.1/pkger_2.2.1_linux_amd64.deb
@wget -q -c https://github.com/minio/pkger/releases/download/v2.2.9/pkger_2.2.9_linux_amd64.deb
@wget -q -c https://raw.githubusercontent.com/minio/minio-service/v1.0.1/linux-systemd/distributed/minio.service
@sudo apt install ./pkger_2.2.1_linux_amd64.deb --yes
@sudo apt install ./pkger_2.2.9_linux_amd64.deb --yes
@mkdir -p minio-release/$(GOOS)-$(GOARCH)/archive
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio
@cp -af ./minio minio-release/$(GOOS)-$(GOARCH)/minio.$(VERSION)
@ -176,15 +191,15 @@ docker: build ## builds minio docker container
@echo "Building minio docker image '$(TAG)'"
@docker build -q --no-cache -t $(TAG) . -f Dockerfile
install-race: checks ## builds minio to $(PWD)
install-race: checks build-debugging ## builds minio to $(PWD)
@echo "Building minio binary with -race to './minio'"
@GORACE=history_size=7 CGO_ENABLED=1 go build -tags kqueue -race -trimpath --ldflags "$(LDFLAGS)" -o $(PWD)/minio 1>/dev/null
@echo "Installing minio binary with -race to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio
install: build ## builds minio and installs it to $GOPATH/bin.
@echo "Installing minio binary to '$(GOPATH)/bin/minio'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/minio $(GOPATH)/bin/minio
@mkdir -p $(GOPATH)/bin && cp -af $(PWD)/minio $(GOPATH)/bin/minio
@echo "Installation successful. To learn more, try \"minio --help\"."
clean: ## cleanup all generated assets

View File

@ -210,10 +210,6 @@ For deployments behind a load balancer, proxy, or ingress rule where the MinIO h
For example, consider a MinIO deployment behind a proxy `https://minio.example.net`, `https://console.minio.example.net` with rules for forwarding traffic on port :9000 and :9001 to MinIO and the MinIO Console respectively on the internal network. Set `MINIO_BROWSER_REDIRECT_URL` to `https://console.minio.example.net` to ensure the browser receives a valid reachable URL.
Similarly, if your TLS certificates do not have the IP SAN for the MinIO server host, the MinIO Console may fail to validate the connection to the server. Use the `MINIO_SERVER_URL` environment variable and specify the proxy-accessible hostname of the MinIO server to allow the Console to use the MinIO server API using the TLS certificate.
For example: `export MINIO_SERVER_URL="https://minio.example.net"`
| Dashboard | Creating a bucket |
| ------------- | ------------- |
| ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic1.png?raw=true) | ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic2.png?raw=true) |

View File

@ -32,6 +32,7 @@ fi
set +e
export MC_HOST_minioadm=http://minioadmin:minioadmin@localhost:9100/
./mc ready minioadm
./mc ls minioadm/
@ -56,7 +57,7 @@ done
set +e
sleep 10
./mc ready minioadm/
./mc ls minioadm/
if [ $? -ne 0 ]; then
@ -81,11 +82,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
sleep 20s
export MC_HOST_sitea=http://minioadmin:minioadmin@127.0.0.1:9001
export MC_HOST_siteb=http://minioadmin:minioadmin@127.0.0.1:9004
./mc ready sitea
./mc ready siteb
./mc admin replicate add sitea siteb
./mc admin user add sitea foobar foo12345
@ -109,11 +111,12 @@ minio server --address 127.0.0.1:9003 "http://127.0.0.1:9003/tmp/multisiteb/data
minio server --address 127.0.0.1:9004 "http://127.0.0.1:9003/tmp/multisiteb/data/disterasure/xl{1...4}" \
"http://127.0.0.1:9004/tmp/multisiteb/data/disterasure/xl{5...8}" >/tmp/siteb_2.log 2>&1 &
sleep 20s
export MC_HOST_sitea=http://foobar:foo12345@127.0.0.1:9001
export MC_HOST_siteb=http://foobar:foo12345@127.0.0.1:9004
./mc ready sitea
./mc ready siteb
./mc admin user add sitea foobar-admin foo12345
sleep 2s

View File

@ -0,0 +1,127 @@
#!/bin/bash
# This script is used to test the migration of IAM content from old minio
# instance to new minio instance.
#
# To run it locally, start the LDAP server in github.com/minio/minio-iam-testing
# repo (e.g. make podman-run), and then run this script.
#
# This script assumes that LDAP server is at:
#
# `localhost:1389`
#
# if this is not the case, set the environment variable
# `_MINIO_LDAP_TEST_SERVER`.
OLD_VERSION=RELEASE.2024-03-26T22-10-45Z
OLD_BINARY_LINK=https://dl.min.io/server/minio/release/linux-amd64/archive/minio.${OLD_VERSION}
__init__() {
if which curl &>/dev/null; then
echo "curl is already installed"
else
echo "Installing curl:"
sudo apt install curl -y
fi
export GOPATH=/tmp/gopath
export PATH="${PATH}":"${GOPATH}"/bin
if which mc &>/dev/null; then
echo "mc is already installed"
else
echo "Installing mc:"
go install github.com/minio/mc@latest
fi
if [ ! -x ./minio.${OLD_VERSION} ]; then
echo "Downloading minio.${OLD_VERSION} binary"
curl -o minio.${OLD_VERSION} ${OLD_BINARY_LINK}
chmod +x minio.${OLD_VERSION}
fi
if [ -z "$_MINIO_LDAP_TEST_SERVER" ]; then
export _MINIO_LDAP_TEST_SERVER=localhost:1389
echo "Using default LDAP endpoint: $_MINIO_LDAP_TEST_SERVER"
fi
rm -rf /tmp/data
}
create_iam_content_in_old_minio() {
echo "Creating IAM content in old minio instance."
MINIO_CI_CD=1 ./minio.${OLD_VERSION} server /tmp/data/{1...4} &
sleep 5
set -x
mc alias set old-minio http://localhost:9000 minioadmin minioadmin
mc ready old-minio
mc idp ldap add old-minio \
server_addr=localhost:1389 \
server_insecure=on \
lookup_bind_dn=cn=admin,dc=min,dc=io \
lookup_bind_password=admin \
user_dn_search_base_dn=dc=min,dc=io \
user_dn_search_filter="(uid=%s)" \
group_search_base_dn=ou=swengg,dc=min,dc=io \
group_search_filter="(&(objectclass=groupOfNames)(member=%d))"
mc admin service restart old-minio
mc idp ldap policy attach old-minio readwrite --user=UID=dillon,ou=people,ou=swengg,dc=min,dc=io
mc idp ldap policy attach old-minio readwrite --group=CN=project.c,ou=groups,ou=swengg,dc=min,dc=io
mc idp ldap policy entities old-minio
mc admin cluster iam export old-minio
set +x
mc admin service stop old-minio
}
import_iam_content_in_new_minio() {
echo "Importing IAM content in new minio instance."
# Assume current minio binary exists.
MINIO_CI_CD=1 ./minio server /tmp/data/{1...4} &
sleep 5
set -x
mc alias set new-minio http://localhost:9000 minioadmin minioadmin
echo "BEFORE IMPORT mappings:"
mc ready new-minio
mc idp ldap policy entities new-minio
mc admin cluster iam import new-minio ./old-minio-iam-info.zip
echo "AFTER IMPORT mappings:"
mc idp ldap policy entities new-minio
set +x
# mc admin service stop new-minio
}
verify_iam_content_in_new_minio() {
output=$(mc idp ldap policy entities new-minio --json)
groups=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .groups[]')
if [ "$groups" != "cn=project.c,ou=groups,ou=swengg,dc=min,dc=io" ]; then
echo "Failed to verify groups: $groups"
exit 1
fi
users=$(echo "$output" | jq -r '.result.policyMappings[] | select(.policy == "readwrite") | .users[]')
if [ "$users" != "uid=dillon,ou=people,ou=swengg,dc=min,dc=io" ]; then
echo "Failed to verify users: $users"
exit 1
fi
mc admin service stop new-minio
}
main() {
create_iam_content_in_old_minio
import_iam_content_in_new_minio
verify_iam_content_in_new_minio
}
(__init__ "$@" && main "$@")

View File

@ -55,6 +55,11 @@ __init__() {
go install github.com/minio/mc@latest
## this is needed because GitHub Actions don't have
## docker-compose on all runners
go install github.com/docker/compose/v2/cmd@latest
mv -v /tmp/gopath/bin/cmd /tmp/gopath/bin/docker-compose
TAG=minio/minio:dev make docker
MINIO_VERSION=RELEASE.2019-12-19T22-52-26Z docker-compose \

View File

@ -45,7 +45,8 @@ function verify_rewrite() {
"${MINIO_OLD[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10
"${WORK_DIR}/mc" ready minio/
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
@ -77,7 +78,8 @@ function verify_rewrite() {
"${MINIO[@]}" --address ":$start_port" "${WORK_DIR}/xl{1...16}" >"${WORK_DIR}/server1.log" 2>&1 &
pid=$!
disown $pid
sleep 10
"${WORK_DIR}/mc" ready minio/
if ! ps -p ${pid} 1>&2 >/dev/null; then
echo "server1 log:"
@ -87,17 +89,12 @@ function verify_rewrite() {
exit 1
fi
(
cd ./docs/debugging/s3-check-md5
go install -v
)
if ! s3-check-md5 \
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \
-secret-key minio123 \
-endpoint http://127.0.0.1:${start_port}/ 2>&1 | grep INTACT; then
-endpoint "http://127.0.0.1:${start_port}/" 2>&1 | grep INTACT; then
echo "server1 log:"
cat "${WORK_DIR}/server1.log"
echo "FAILED"
@ -117,7 +114,7 @@ function verify_rewrite() {
go run ./buildscripts/heal-manual.go "127.0.0.1:${start_port}" "minio" "minio123"
sleep 1
if ! s3-check-md5 \
if ! ./s3-check-md5 \
-debug \
-versions \
-access-key minio \

View File

@ -15,13 +15,14 @@ WORK_DIR="$PWD/.verify-$RANDOM"
export MINT_MODE=core
export MINT_DATA_DIR="$WORK_DIR/data"
export SERVER_ENDPOINT="127.0.0.1:9000"
export MC_HOST_verify="http://minio:minio123@${SERVER_ENDPOINT}/"
export MC_HOST_verify_ipv6="http://minio:minio123@[::1]:9000/"
export ACCESS_KEY="minio"
export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOGC=25
export ENABLE_ADMIN=1
export MINIO_CI_CD=1
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
@ -36,18 +37,21 @@ function start_minio_fs() {
export MINIO_ROOT_USER=$ACCESS_KEY
export MINIO_ROOT_PASSWORD=$SECRET_KEY
"${MINIO[@]}" server "${WORK_DIR}/fs-disk" >"$WORK_DIR/fs-minio.log" 2>&1 &
sleep 10
"${WORK_DIR}/mc" ready verify
}
function start_minio_erasure() {
"${MINIO[@]}" server "${WORK_DIR}/erasure-disk1" "${WORK_DIR}/erasure-disk2" "${WORK_DIR}/erasure-disk3" "${WORK_DIR}/erasure-disk4" >"$WORK_DIR/erasure-minio.log" 2>&1 &
sleep 15
"${WORK_DIR}/mc" ready verify
}
function start_minio_erasure_sets() {
export MINIO_ENDPOINTS="${WORK_DIR}/erasure-disk-sets{1...32}"
"${MINIO[@]}" server >"$WORK_DIR/erasure-minio-sets.log" 2>&1 &
sleep 15
"${WORK_DIR}/mc" ready verify
}
function start_minio_pool_erasure_sets() {
@ -57,7 +61,7 @@ function start_minio_pool_erasure_sets() {
"${MINIO[@]}" server --address ":9000" >"$WORK_DIR/pool-minio-9000.log" 2>&1 &
"${MINIO[@]}" server --address ":9001" >"$WORK_DIR/pool-minio-9001.log" 2>&1 &
sleep 40
"${WORK_DIR}/mc" ready verify
}
function start_minio_pool_erasure_sets_ipv6() {
@ -67,7 +71,7 @@ function start_minio_pool_erasure_sets_ipv6() {
"${MINIO[@]}" server --address="[::1]:9000" >"$WORK_DIR/pool-minio-ipv6-9000.log" 2>&1 &
"${MINIO[@]}" server --address="[::1]:9001" >"$WORK_DIR/pool-minio-ipv6-9001.log" 2>&1 &
sleep 40
"${WORK_DIR}/mc" ready verify_ipv6
}
function start_minio_dist_erasure() {
@ -78,7 +82,7 @@ function start_minio_dist_erasure() {
"${MINIO[@]}" server --address ":900${i}" >"$WORK_DIR/dist-minio-900${i}.log" 2>&1 &
done
sleep 40
"${WORK_DIR}/mc" ready verify
}
function run_test_fs() {
@ -222,7 +226,7 @@ function __init__() {
exit 1
fi
(cd "${MC_BUILD_DIR}" && go build -o "$WORK_DIR/mc")
(cd "${MC_BUILD_DIR}" && go build -o "${WORK_DIR}/mc")
# remove mc source.
purge "${MC_BUILD_DIR}"

View File

@ -0,0 +1,166 @@
#!/bin/bash -e
#
set -E
set -o pipefail
if [ ! -x "$PWD/minio" ]; then
echo "minio executable binary not found in current directory"
exit 1
fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
function start_minio_3_node() {
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
export MINIO_CI_CD=1
start_port=$1
args=""
for i in $(seq 1 3); do
args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
done
"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
pid1=$!
disown ${pid1}
"${MINIO[@]}" --address ":$((start_port + 2))" $args >"${WORK_DIR}/dist-minio-server2.log" 2>&1 &
pid2=$!
disown $pid2
"${MINIO[@]}" --address ":$((start_port + 3))" $args >"${WORK_DIR}/dist-minio-server3.log" 2>&1 &
pid3=$!
disown $pid3
export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"
/tmp/mc ready myminio
# Wait for all drives to be online and formatted
while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].state | select(. != "ok")' | wc -l) -gt 0 ]; do sleep 1; done
# Wait for all drives to be healed
while [ $(/tmp/mc admin info --json myminio | jq '.info.servers[].drives[].healing | select(. != null) | select(. == true)' | wc -l) -gt 0 ]; do sleep 1; done
# Wait for Status: in MinIO output
while true; do
rv=$(check_online)
if [ "$rv" != "1" ]; then
# success
break
fi
# Check if we should retry
retry=$((retry + 1))
if [ $retry -le 20 ]; then
sleep 5
continue
fi
# Failure
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
pkill -9 minio
echo "FAILED"
purge "$WORK_DIR"
exit 1
done
if ! ps -p $pid1 1>&2 >/dev/null; then
echo "server1 log:"
cat "${WORK_DIR}/dist-minio-server1.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! ps -p $pid2 1>&2 >/dev/null; then
echo "server2 log:"
cat "${WORK_DIR}/dist-minio-server2.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! ps -p $pid3 1>&2 >/dev/null; then
echo "server3 log:"
cat "${WORK_DIR}/dist-minio-server3.log"
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
if ! pkill minio; then
for i in $(seq 1 3); do
echo "server$i log:"
cat "${WORK_DIR}/dist-minio-server$i.log"
done
echo "FAILED"
purge "$WORK_DIR"
exit 1
fi
sleep 1
if pgrep minio; then
# forcibly killing, to proceed further properly.
if ! pkill -9 minio; then
echo "no minio process running anymore, proceed."
fi
fi
}
function check_online() {
if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
fi
}
function purge() {
echo rm -rf "$1"
}
function __init__() {
echo "Initializing environment"
mkdir -p "$WORK_DIR"
mkdir -p "$MINIO_CONFIG_DIR"
## version is purposefully set to '3' for minio to migrate the configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"
if [ ! -f /tmp/mc ]; then
wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x /tmp/mc
fi
}
function perform_test() {
start_minio_3_node $2
echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"
rm -rf ${WORK_DIR}/${1}/*/
set -x
start_minio_3_node $2
}
function main() {
# use same ports for all tests
start_port=$(shuf -i 10000-65000 -n 1)
perform_test "2" ${start_port}
perform_test "1" ${start_port}
perform_test "3" ${start_port}
}
(__init__ "$@" && main "$@")
rv=$?
purge "$WORK_DIR"
exit "$rv"

View File

@ -12,17 +12,26 @@ fi
WORK_DIR="$PWD/.verify-$RANDOM"
MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=("$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" server)
GOPATH=/tmp/gopath
function start_minio_3_node() {
for i in $(seq 1 3); do
rm "${WORK_DIR}/dist-minio-server$i.log"
done
export MINIO_ROOT_USER=minio
export MINIO_ROOT_PASSWORD=minio123
export MINIO_ERASURE_SET_DRIVE_COUNT=6
export MINIO_CI_CD=1
start_port=$2
first_time=$(find ${WORK_DIR}/ | grep format.json | wc -l)
start_port=$1
args=""
for i in $(seq 1 3); do
args="$args http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/1/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/2/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/3/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/4/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/5/ http://127.0.0.1:$((start_port + i))${WORK_DIR}/$i/6/"
for d in $(seq 1 3 5); do
args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
d=$((d + 1))
args="$args http://127.0.0.1:$((start_port + 1))${WORK_DIR}/1/${d}/ http://127.0.0.1:$((start_port + 2))${WORK_DIR}/2/${d}/ http://127.0.0.1:$((start_port + 3))${WORK_DIR}/3/${d}/ "
done
"${MINIO[@]}" --address ":$((start_port + 1))" $args >"${WORK_DIR}/dist-minio-server1.log" 2>&1 &
@ -37,7 +46,11 @@ function start_minio_3_node() {
pid3=$!
disown $pid3
sleep "$1"
export MC_HOST_myminio="http://minio:minio123@127.0.0.1:$((start_port + 1))"
/tmp/mc ready myminio
[ ${first_time} -eq 0 ] && upload_objects
[ ${first_time} -ne 0 ] && sleep 120
if ! ps -p $pid1 1>&2 >/dev/null; then
echo "server1 log:"
@ -82,10 +95,23 @@ function start_minio_3_node() {
fi
}
function check_online() {
function check_heal() {
if ! grep -q 'Status:' ${WORK_DIR}/dist-minio-*.log; then
echo "1"
return 1
fi
for ((i = 0; i < 20; i++)); do
test -f ${WORK_DIR}/$1/1/.minio.sys/format.json
v1=$?
nextInES=$(($1 + 1)) && [ $nextInES -gt 3 ] && nextInES=1
foundFiles1=$(find ${WORK_DIR}/$1/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
foundFiles2=$(find ${WORK_DIR}/$nextInES/1/ | grep -v .minio.sys | grep xl.meta | wc -l)
test $foundFiles1 -eq $foundFiles2
v2=$?
[ $v1 == 0 -a $v2 == 0 ] && return 0
sleep 10
done
return 1
}
function purge() {
@ -99,20 +125,35 @@ function __init__() {
## version is purposefully set to '3' for minio to migrate the configuration file
echo '{"version": "3", "credential": {"accessKey": "minio", "secretKey": "minio123"}, "region": "us-east-1"}' >"$MINIO_CONFIG_DIR/config.json"
if [ ! -f /tmp/mc ]; then
wget --quiet -O /tmp/mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
chmod +x /tmp/mc
fi
}
function upload_objects() {
/tmp/mc mb myminio/testbucket/
for ((i = 0; i < 20; i++)); do
echo "my content" | /tmp/mc pipe myminio/testbucket/file-$i
done
}
function perform_test() {
start_minio_3_node 120 $2
start_port=$2
start_minio_3_node $start_port
echo "Testing Distributed Erasure setup healing of drives"
echo "Remove the contents of the disks belonging to '${1}' erasure set"
echo "Remove the contents of the disks belonging to '${1}' node"
rm -rf ${WORK_DIR}/${1}/*/
set -x
start_minio_3_node 120 $2
start_minio_3_node $start_port
rv=$(check_online)
check_heal ${1}
rv=$?
if [ "$rv" == "1" ]; then
for i in $(seq 1 3); do
echo "server$i log:"

View File

@ -25,7 +25,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// Data types used for returning dummy access control

View File

@ -39,9 +39,8 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
const (
@ -99,7 +98,7 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
}
// Call site replication hook.
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta))
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, bucketMeta))
// Write success response.
writeSuccessResponseHeadersOnly(w)
@ -431,7 +430,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
case bucketNotificationConfig:
config, err := globalBucketMetadataSys.GetNotificationConfig(bucket)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
@ -447,7 +446,7 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r *
if errors.Is(err, BucketLifecycleNotFound{Bucket: bucket}) {
continue
}
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponse(ctx, w, exportError(ctx, err, cfgFile, bucket), r.URL)
return
}
@ -736,7 +735,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
rpt.SetStatus(bucket, fileName, fmt.Errorf("An Object Lock configuration is present on this bucket, so the versioning state cannot be suspended."))
continue
}
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
if rcfg, _ := getReplicationConfig(ctx, bucket); rcfg != nil && v.Suspended() {
rpt.SetStatus(bucket, fileName, fmt.Errorf("A replication configuration is present on this bucket, so the versioning state cannot be suspended."))
continue
}
@ -784,7 +783,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
}
switch fileName {
case bucketNotificationConfig:
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region, globalEventNotifier.targetList)
config, err := event.ParseConfig(io.LimitReader(reader, sz), globalSite.Region(), globalEventNotifier.targetList)
if err != nil {
rpt.SetStatus(bucket, fileName, fmt.Errorf("%s (%s)", errorCodes[ErrMalformedXML].Description, err))
continue
@ -838,9 +837,13 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
rpt.SetStatus(bucket, fileName, err)
continue
}
rcfg, err := globalBucketObjectLockSys.Get(bucket)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
// Validate the received bucket policy document
if err = bucketLifecycle.Validate(); err != nil {
if err = bucketLifecycle.Validate(rcfg); err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
}
@ -875,8 +878,10 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
}
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: kmsKey,
AssociatedData: kms.Context{"MinIO admin API": "ServerInfoHandler"}, // Context for a test key operation
})
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
rpt.SetStatus(bucket, fileName, errKMSKeyNotFound)

View File

@ -27,7 +27,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/config"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// validateAdminReq will validate request against and return whether it is allowed.

View File

@ -37,7 +37,7 @@ import (
"github.com/minio/minio/internal/config/subnet"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// DelConfigKVHandler - DELETE /minio/admin/v3/del-config-kv
@ -58,7 +58,7 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.ErrorKind)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
@ -162,7 +162,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.ErrorKind)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
@ -443,7 +443,7 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
password := cred.SecretKey
kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.ErrorKind)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}

View File

@ -31,10 +31,9 @@ import (
"github.com/minio/minio/internal/config"
cfgldap "github.com/minio/minio/internal/config/identity/ldap"
"github.com/minio/minio/internal/config/identity/openid"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/ldap"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/ldap"
"github.com/minio/pkg/v3/policy"
)
func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, isUpdate bool) {
@ -60,7 +59,7 @@ func addOrUpdateIDPHandler(ctx context.Context, w http.ResponseWriter, r *http.R
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.ErrorKind)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}

View File

@ -20,16 +20,15 @@ package cmd
import (
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
xldap "github.com/minio/pkg/v3/ldap"
"github.com/minio/pkg/v3/policy"
)
// ListLDAPPolicyMappingEntities lists users/groups mapped to given/all policies.
@ -105,6 +104,12 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
return
}
// fail if ldap is not enabled
if !globalIAMSys.LDAPConfig.Enabled() {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminLDAPNotEnabled), r.URL)
return
}
if r.ContentLength > maxEConfigJSONSize || r.ContentLength == -1 {
// More than maxConfigSize bytes were available
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigTooLarge), r.URL)
@ -132,7 +137,7 @@ func (a adminAPIHandlers) AttachDetachPolicyLDAP(w http.ResponseWriter, r *http.
password := cred.SecretKey
reqBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err, logger.ErrorKind)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
@ -192,7 +197,8 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
// fail if ldap is not enabled
if !globalIAMSys.LDAPConfig.Enabled() {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errors.New("LDAP not enabled")), r.URL)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminLDAPNotEnabled), r.URL)
return
}
// Find the user for the request sender (as it may be sent via a service
@ -217,9 +223,8 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
err error
)
// If we are creating svc account for request sender, ensure
// that targetUser is a real user (i.e. not derived
// credentials).
// If we are creating svc account for request sender, ensure that targetUser
// is a real user (i.e. not derived credentials).
if isSvcAccForRequestor {
if requestorIsDerivedCredential {
if requestorParentUser == "" {
@ -232,12 +237,12 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
targetGroups = requestorGroups
// Deny if the target user is not LDAP
isLDAP, err := globalIAMSys.LDAPConfig.DoesUsernameExist(targetUser)
foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(targetUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
if isLDAP == "" {
if foundResult == nil {
err := errors.New("Specified user does not exist on LDAP server")
APIErr := errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err)
writeErrorResponseJSON(ctx, w, APIErr, r.URL)
@ -253,20 +258,36 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
opts.claims[k] = v
}
} else {
isDN := globalIAMSys.LDAPConfig.IsLDAPUserDN(targetUser)
// We still need to ensure that the target user is a valid LDAP user.
//
// The target user may be supplied as a (short) username or a DN.
// However, for now, we only support using the short username.
isDN := globalIAMSys.LDAPConfig.ParsesAsDN(targetUser)
opts.claims[ldapUserN] = targetUser // simple username
targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
var lookupResult *xldap.DNSearchResult
lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
if err != nil {
// if not found, check if DN
if strings.Contains(err.Error(), "not found") && isDN {
// warn user that DNs are not allowed
err = fmt.Errorf("Must use short username to add service account. %w", err)
if strings.Contains(err.Error(), "User DN not found for:") {
if isDN {
// warn user that DNs are not allowed
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminLDAPExpectedLoginName, err), r.URL)
} else {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrAdminNoSuchUser, err), r.URL)
}
}
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
targetUser = lookupResult.NormDN
opts.claims[ldapUser] = targetUser // DN
opts.claims[ldapActualUser] = lookupResult.ActualDN
// Add LDAP attributes that were looked up into the claims.
for attribKey, attribValue := range lookupResult.Attributes {
opts.claims[ldapAttribPrefix+attribKey] = attribValue
}
}
newCred, updatedAt, err := globalIAMSys.NewServiceAccount(ctx, targetUser, targetGroups, opts)
@ -300,7 +321,7 @@ func (a adminAPIHandlers) AddServiceAccountLDAP(w http.ResponseWriter, r *http.R
// Call hook for cluster-replication if the service account is not for a
// root user.
if newCred.ParentUser != globalActiveCred.AccessKey {
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Create: &madmin.SRSvcAccCreate{
@ -373,14 +394,16 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Requ
}
}
targetAccount, err := globalIAMSys.LDAPConfig.DoesUsernameExist(userDN)
dnResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
} else if userDN == "" {
}
if dnResult == nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errNoSuchUser), r.URL)
return
}
targetAccount := dnResult.NormDN
listType := r.Form.Get("listType")
if listType != "sts-only" && listType != "svcacc-only" && listType != "" {
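Assuming the standard `mc` client, the constraint added above (service accounts for other LDAP users must be addressed by the short login name, not a DN) would look roughly like this; the alias and user names are illustrative:

```bash
# Accepted: target user given as the short LDAP username.
mc admin user svcacct add myminio dillon

# Expected to be rejected (ErrAdminLDAPExpectedLoginName): target user given as a DN.
mc admin user svcacct add myminio "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
```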

View File

@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@ -18,6 +18,7 @@
package cmd
import (
"context"
"encoding/json"
"errors"
"fmt"
@ -25,9 +26,9 @@ import (
"strconv"
"strings"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/policy"
)
var (
@ -106,21 +107,12 @@ func (a adminAPIHandlers) StartDecommission(w http.ResponseWriter, r *http.Reque
poolIndices = append(poolIndices, idx)
}
if len(poolIndices) > 0 && !globalEndpoints[poolIndices[0]].Endpoints[0].IsLocal {
ep := globalEndpoints[poolIndices[0]].Endpoints[0]
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
return
}
}
if len(poolIndices) == 0 || !proxyDecommissionRequest(ctx, globalEndpoints[poolIndices[0]].Endpoints[0], w, r) {
if err := z.Decommission(r.Context(), poolIndices...); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
if err := z.Decommission(r.Context(), poolIndices...); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Request) {
@ -162,20 +154,12 @@ func (a adminAPIHandlers) CancelDecommission(w http.ResponseWriter, r *http.Requ
return
}
if ep := globalEndpoints[idx].Endpoints[0]; !ep.IsLocal {
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == ep.Host {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
return
}
}
if !proxyDecommissionRequest(ctx, globalEndpoints[idx].Endpoints[0], w, r) {
if err := pools.DecommissionCancel(ctx, idx); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
if err := pools.DecommissionCancel(ctx, idx); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
@ -225,7 +209,7 @@ func (a adminAPIHandlers) StatusPool(w http.ResponseWriter, r *http.Request) {
return
}
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(&status))
adminLogIf(r.Context(), json.NewEncoder(w).Encode(&status))
}
func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
@ -258,7 +242,7 @@ func (a adminAPIHandlers) ListPools(w http.ResponseWriter, r *http.Request) {
poolsStatus[idx] = status
}
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
adminLogIf(r.Context(), json.NewEncoder(w).Encode(poolsStatus))
}
func (a adminAPIHandlers) RebalanceStart(w http.ResponseWriter, r *http.Request) {
@ -365,11 +349,11 @@ func (a adminAPIHandlers) RebalanceStatus(w http.ResponseWriter, r *http.Request
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminRebalanceNotStarted), r.URL)
return
}
logger.LogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
adminLogIf(ctx, fmt.Errorf("failed to fetch rebalance status: %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
logger.LogIf(r.Context(), json.NewEncoder(w).Encode(rs))
adminLogIf(r.Context(), json.NewEncoder(w).Encode(rs))
}
func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request) {
@ -389,5 +373,20 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request)
// Cancel any ongoing rebalance operation
globalNotificationSys.StopRebalance(r.Context())
writeSuccessResponseHeadersOnly(w)
logger.LogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
adminLogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
}
func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {
host := env.Get("_MINIO_DECOM_ENDPOINT_HOST", defaultEndPoint.Host)
if host == "" {
return
}
for nodeIdx, proxyEp := range globalProxyEndpoints {
if proxyEp.Endpoint.Host == host && !proxyEp.IsLocal {
if proxyRequestByNodeIndex(ctx, w, r, nodeIdx) {
return true
}
}
}
return
}
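A hedged sketch of how the new `_MINIO_DECOM_ENDPOINT_HOST` override in `proxyDecommissionRequest` could be exercised; the host names and pool layout are illustrative, and the variable is read by the MinIO server processes, not by `mc`:

```bash
# On each MinIO server: pick the node that should serve decommission/cancel
# requests when the first endpoint of the target pool is not local.
export _MINIO_DECOM_ENDPOINT_HOST=node3.example.com:9000
minio server http://node{1...4}.example.com:9000/data{1...4} \
             http://node{5...8}.example.com:9000/data{1...4}

# From a client, start decommissioning the first pool as usual.
mc admin decommission start myminio/ "http://node{1...4}.example.com:9000/data{1...4}"
```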

View File

@ -32,9 +32,8 @@ import (
"github.com/dustin/go-humanize"
"github.com/minio/madmin-go/v3"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// SiteReplicationAdd - PUT /minio/admin/v3/site-replication/add
@ -55,7 +54,7 @@ func (a adminAPIHandlers) SiteReplicationAdd(w http.ResponseWriter, r *http.Requ
opts := getSRAddOptions(r)
status, err := globalSiteReplicationSys.AddPeerClusters(ctx, sites, opts)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -93,7 +92,7 @@ func (a adminAPIHandlers) SRPeerJoin(w http.ResponseWriter, r *http.Request) {
}
if err := globalSiteReplicationSys.PeerJoinReq(ctx, joinArg); err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -140,7 +139,7 @@ func (a adminAPIHandlers) SRPeerBucketOps(w http.ResponseWriter, r *http.Request
globalSiteReplicationSys.purgeDeletedBucket(ctx, objectAPI, bucket)
}
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -192,7 +191,7 @@ func (a adminAPIHandlers) SRPeerReplicateIAMItem(w http.ResponseWriter, r *http.
err = globalSiteReplicationSys.PeerGroupInfoChangeHandler(ctx, item.GroupInfo, item.UpdatedAt)
}
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -263,7 +262,7 @@ func (a adminAPIHandlers) SRPeerReplicateBucketItem(w http.ResponseWriter, r *ht
err = globalSiteReplicationSys.PeerBucketLCConfigHandler(ctx, item.Bucket, item.ExpiryLCConfig, item.UpdatedAt)
}
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -316,7 +315,6 @@ func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptio
if encryptionKey != "" {
data, err = madmin.DecryptData(encryptionKey, bytes.NewReader(data))
if err != nil {
logger.LogIf(ctx, err)
return SRError{
Cause: err,
Code: ErrSiteReplicationInvalidRequest,
@ -349,6 +347,18 @@ func (a adminAPIHandlers) SiteReplicationStatus(w http.ResponseWriter, r *http.R
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Report the ILMExpiryStats only if at least one site has replication of ILM expiry enabled
var replicateILMExpiry bool
for _, site := range info.Sites {
if site.ReplicateILMExpiry {
replicateILMExpiry = true
break
}
}
if !replicateILMExpiry {
// explicitly send nil for ILMExpiryStats
info.ILMExpiryStats = nil
}
if err = json.NewEncoder(w).Encode(info); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
@ -396,7 +406,7 @@ func (a adminAPIHandlers) SiteReplicationEdit(w http.ResponseWriter, r *http.Req
opts := getSREditOptions(r)
status, err := globalSiteReplicationSys.EditPeerCluster(ctx, site, opts)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -433,7 +443,7 @@ func (a adminAPIHandlers) SRPeerEdit(w http.ResponseWriter, r *http.Request) {
}
if err := globalSiteReplicationSys.PeerEditReq(ctx, pi); err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -456,7 +466,7 @@ func (a adminAPIHandlers) SRStateEdit(w http.ResponseWriter, r *http.Request) {
return
}
if err := globalSiteReplicationSys.PeerStateEditReq(ctx, state); err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -493,7 +503,7 @@ func (a adminAPIHandlers) SiteReplicationRemove(w http.ResponseWriter, r *http.R
}
status, err := globalSiteReplicationSys.RemovePeerCluster(ctx, objectAPI, rreq)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -524,7 +534,7 @@ func (a adminAPIHandlers) SRPeerRemove(w http.ResponseWriter, r *http.Request) {
}
if err := globalSiteReplicationSys.InternalRemoveReq(ctx, objectAPI, req); err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -586,7 +596,7 @@ func (a adminAPIHandlers) SiteReplicationDevNull(w http.ResponseWriter, r *http.
// If there is a disconnection before globalNetPerfMinDuration (we give a margin of error of 1 sec),
// it would mean the network is not stable. Logging here will help in debugging network issues.
if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
}
}
if err != nil {
@ -609,5 +619,5 @@ func (a adminAPIHandlers) SiteReplicationNetPerf(w http.ResponseWriter, r *http.
duration = globalNetPerfMinDuration
}
result := siteNetperf(r.Context(), duration)
logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result))
adminLogIf(r.Context(), gob.NewEncoder(w).Encode(result))
}

View File

@ -32,7 +32,7 @@ import (
"github.com/minio/madmin-go/v3"
minio "github.com/minio/minio-go/v7"
"github.com/minio/pkg/v2/sync/errgroup"
"github.com/minio/pkg/v3/sync/errgroup"
)
func runAllIAMConcurrencyTests(suite *TestSuiteIAM, c *check) {

View File

@ -29,15 +29,17 @@ import (
"sort"
"strconv"
"time"
"unicode/utf8"
"github.com/klauspost/compress/zip"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/cachevalue"
"github.com/minio/minio/internal/config/dns"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
xldap "github.com/minio/pkg/v3/ldap"
"github.com/minio/pkg/v3/policy"
"github.com/puzpuzpuz/xsync/v3"
)
// RemoveUser - DELETE /minio/admin/v3/remove-user?accessKey=<access_key>
@ -74,7 +76,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
return
}
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemIAMUser,
IAMUser: &madmin.SRIAMUser{
AccessKey: accessKey,
@ -271,14 +273,21 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
return
}
}
updatedAt, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)
if globalIAMSys.LDAPConfig.Enabled() {
// We don't allow internal group manipulation in this API when LDAP
// is enabled for now.
err = errIAMActionNotAllowed
} else {
updatedAt, err = globalIAMSys.AddUsersToGroup(ctx, updReq.Group, updReq.Members)
}
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemGroupInfo,
GroupInfo: &madmin.SRGroupInfo{
UpdateReq: updReq,
@ -368,7 +377,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
return
}
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemGroupInfo,
GroupInfo: &madmin.SRGroupInfo{
UpdateReq: madmin.GroupAddRemove{
@ -406,7 +415,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
return
}
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemIAMUser,
IAMUser: &madmin.SRIAMUser{
AccessKey: accessKey,
@ -466,6 +475,11 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
return
}
if !utf8.ValidString(accessKey) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAddUserValidUTF), r.URL)
return
}
checkDenyOnly := false
if accessKey == cred.AccessKey {
// Check that there is no explicit deny - otherwise it's allowed
@ -495,25 +509,31 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
password := cred.SecretKey
configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
var ureq madmin.AddOrUpdateUserReq
if err = json.Unmarshal(configBytes, &ureq); err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
return
}
// We don't allow internal user creation with LDAP enabled for now.
if globalIAMSys.LDAPConfig.Enabled() {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, errIAMActionNotAllowed), r.URL)
return
}
updatedAt, err := globalIAMSys.CreateUser(ctx, accessKey, ureq)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemIAMUser,
IAMUser: &madmin.SRIAMUser{
AccessKey: accessKey,
@ -687,12 +707,20 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
// In case of LDAP we need to resolve the targetUser to a DN and
// query their groups:
opts.claims[ldapUserN] = targetUser // simple username
targetUser, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
var lookupResult *xldap.DNSearchResult
lookupResult, targetGroups, err = globalIAMSys.LDAPConfig.LookupUserDN(targetUser)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
targetUser = lookupResult.NormDN
opts.claims[ldapUser] = targetUser // username DN
opts.claims[ldapActualUser] = lookupResult.ActualDN
// Add LDAP attributes that were looked up into the claims.
for attribKey, attribValue := range lookupResult.Attributes {
opts.claims[ldapAttribPrefix+attribKey] = attribValue
}
// NOTE: if not using LDAP, then internal IDP or open ID is
// being used - in the former, group info is enforced when
@ -731,7 +759,7 @@ func (a adminAPIHandlers) AddServiceAccount(w http.ResponseWriter, r *http.Reque
// Call hook for cluster-replication if the service account is not for a
// root user.
if newCred.ParentUser != globalActiveCred.AccessKey {
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Create: &madmin.SRSvcAccCreate{
@ -802,7 +830,11 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
}
condValues := getConditionValues(r, "", cred)
addExpirationToCondValues(updateReq.NewExpiration, condValues)
err = addExpirationToCondValues(updateReq.NewExpiration, condValues)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
// Permission checks:
//
@ -853,7 +885,7 @@ func (a adminAPIHandlers) UpdateServiceAccount(w http.ResponseWriter, r *http.Re
// Call site replication hook - non-root user accounts are replicated.
if svcAccount.ParentUser != globalActiveCred.AccessKey {
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Update: &madmin.SRSvcAccUpdate{
@ -1026,8 +1058,13 @@ func (a adminAPIHandlers) ListServiceAccounts(w http.ResponseWriter, r *http.Req
for _, svc := range serviceAccounts {
expiryTime := svc.Expiration
serviceAccountList = append(serviceAccountList, madmin.ServiceAccountInfo{
AccessKey: svc.AccessKey,
Expiration: &expiryTime,
Description: svc.Description,
ParentUser: svc.ParentUser,
Name: svc.Name,
AccountStatus: svc.Status,
AccessKey: svc.AccessKey,
ImpliedPolicy: svc.IsImpliedPolicy(),
Expiration: &expiryTime,
})
}
@ -1115,7 +1152,7 @@ func (a adminAPIHandlers) DeleteServiceAccount(w http.ResponseWriter, r *http.Re
// Call site replication hook - non-root user accounts are replicated.
if svcAccount.ParentUser != "" && svcAccount.ParentUser != globalActiveCred.AccessKey {
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemSvcAcc,
SvcAccChange: &madmin.SRSvcAccChange{
Delete: &madmin.SRSvcAccDelete{
@ -1199,9 +1236,9 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
}
bucketStorageCache.InitOnce(10*time.Second,
cachevalue.Opts{ReturnLastGood: true, NoWait: true},
func() (DataUsageInfo, error) {
ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
cachevalue.Opts{ReturnLastGood: true},
func(ctx context.Context) (DataUsageInfo, error) {
ctx, done := context.WithTimeout(ctx, 2*time.Second)
defer done()
return loadDataUsageFromBackend(ctx, objectAPI)
@ -1273,7 +1310,7 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ
default:
policies, err := globalIAMSys.PolicyDBGet(accountName, cred.Groups...)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -1425,7 +1462,7 @@ func (a adminAPIHandlers) ListBucketPolicies(w http.ResponseWriter, r *http.Requ
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
continue
}
newPolicies[name] = p
@ -1455,7 +1492,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
for name, p := range policies {
_, err = json.Marshal(p)
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
continue
}
newPolicies[name] = p
@ -1485,7 +1522,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
// Call cluster-replication policy creation hook to replicate policy deletion to
// other minio clusters.
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicy,
Name: policyName,
UpdatedAt: UTCNow(),
@ -1548,7 +1585,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
// Call cluster-replication policy creation hook to replicate policy to
// other minio clusters.
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicy,
Name: policyName,
Policy: iamPolicyBytes,
@ -1556,7 +1593,12 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
}))
}
// SetPolicyForUserOrGroup - PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
// SetPolicyForUserOrGroup - sets a policy on a user or a group.
//
// PUT /minio/admin/v3/set-policy?policy=xxx&user-or-group=?[&is-group]
//
// Deprecated: This API is replaced by the attach/detach policy APIs for specific
// types of users (builtin or LDAP).
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@ -1608,6 +1650,32 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
userType := regUser
if globalIAMSys.GetUsersSysType() == LDAPUsersSysType {
userType = stsUser
// Validate that the user or group exists in LDAP and use the normalized
// form of the entityName (which will be an LDAP DN).
var err error
if isGroup {
var foundGroupDN *xldap.DNSearchResult
var underBaseDN bool
if foundGroupDN, underBaseDN, err = globalIAMSys.LDAPConfig.GetValidatedGroupDN(nil, entityName); err != nil {
iamLogIf(ctx, err)
} else if foundGroupDN == nil || !underBaseDN {
err = errNoSuchGroup
}
entityName = foundGroupDN.NormDN
} else {
var foundUserDN *xldap.DNSearchResult
if foundUserDN, err = globalIAMSys.LDAPConfig.GetValidatedDNForUsername(entityName); err != nil {
iamLogIf(ctx, err)
} else if foundUserDN == nil {
err = errNoSuchUser
}
entityName = foundUserDN.NormDN
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
}
updatedAt, err := globalIAMSys.PolicyDBSet(ctx, entityName, policyName, userType, isGroup)
@ -1616,7 +1684,7 @@ func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http
return
}
logger.LogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
replLogIf(ctx, globalSiteReplicationSys.IAMChangeHook(ctx, madmin.SRIAMItem{
Type: madmin.SRIAMItemPolicyMapping,
PolicyMapping: &madmin.SRPolicyMapping{
UserOrGroup: entityName,
@ -1756,17 +1824,27 @@ func (a adminAPIHandlers) AttachDetachPolicyBuiltin(w http.ResponseWriter, r *ht
}
const (
allPoliciesFile = "policies.json"
allUsersFile = "users.json"
allGroupsFile = "groups.json"
allSvcAcctsFile = "svcaccts.json"
userPolicyMappingsFile = "user_mappings.json"
groupPolicyMappingsFile = "group_mappings.json"
stsUserPolicyMappingsFile = "stsuser_mappings.json"
stsGroupPolicyMappingsFile = "stsgroup_mappings.json"
iamAssetsDir = "iam-assets"
allPoliciesFile = "policies.json"
allUsersFile = "users.json"
allGroupsFile = "groups.json"
allSvcAcctsFile = "svcaccts.json"
userPolicyMappingsFile = "user_mappings.json"
groupPolicyMappingsFile = "group_mappings.json"
stsUserPolicyMappingsFile = "stsuser_mappings.json"
iamAssetsDir = "iam-assets"
)
var iamExportFiles = []string{
allPoliciesFile,
allUsersFile,
allGroupsFile,
allSvcAcctsFile,
userPolicyMappingsFile,
groupPolicyMappingsFile,
stsUserPolicyMappingsFile,
}
// ExportIAMHandler - exports all iam info as a zipped file
func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
@ -1790,38 +1868,28 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
sys: nil,
})
if zerr != nil {
logger.LogIf(ctx, zerr)
adminLogIf(ctx, zerr)
return nil
}
header.Method = zip.Deflate
zwriter, zerr := zipWriter.CreateHeader(header)
if zerr != nil {
logger.LogIf(ctx, zerr)
adminLogIf(ctx, zerr)
return nil
}
if _, err := io.Copy(zwriter, r); err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
}
return nil
}
iamFiles := []string{
allPoliciesFile,
allUsersFile,
allGroupsFile,
allSvcAcctsFile,
userPolicyMappingsFile,
groupPolicyMappingsFile,
stsUserPolicyMappingsFile,
stsGroupPolicyMappingsFile,
}
for _, f := range iamFiles {
for _, f := range iamExportFiles {
iamFile := pathJoin(iamAssetsDir, f)
switch f {
case allPoliciesFile:
allPolicies, err := globalIAMSys.ListPolicies(ctx, "")
if err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
@ -1900,7 +1968,7 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
_, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.Credentials.AccessKey)
sa, policy, err := globalIAMSys.GetServiceAccount(ctx, acc.Credentials.AccessKey)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
@ -1922,6 +1990,9 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
Claims: claims,
SessionPolicy: json.RawMessage(policyJSON),
Status: acc.Credentials.Status,
Name: sa.Name,
Description: sa.Description,
Expiration: &sa.Expiration,
}
}
@ -1936,13 +2007,13 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
return
}
case userPolicyMappingsFile:
userPolicyMap := make(map[string]MappedPolicy)
userPolicyMap := xsync.NewMapOf[string, MappedPolicy]()
err := globalIAMSys.store.loadMappedPolicies(ctx, regUser, false, userPolicyMap)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
userPolData, err := json.Marshal(userPolicyMap)
userPolData, err := json.Marshal(mappedPoliciesToMap(userPolicyMap))
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
@ -1953,13 +2024,13 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
return
}
case groupPolicyMappingsFile:
groupPolicyMap := make(map[string]MappedPolicy)
groupPolicyMap := xsync.NewMapOf[string, MappedPolicy]()
err := globalIAMSys.store.loadMappedPolicies(ctx, regUser, true, groupPolicyMap)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
grpPolData, err := json.Marshal(groupPolicyMap)
grpPolData, err := json.Marshal(mappedPoliciesToMap(groupPolicyMap))
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
@ -1970,13 +2041,13 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
return
}
case stsUserPolicyMappingsFile:
userPolicyMap := make(map[string]MappedPolicy)
userPolicyMap := xsync.NewMapOf[string, MappedPolicy]()
err := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, false, userPolicyMap)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
userPolData, err := json.Marshal(userPolicyMap)
userPolData, err := json.Marshal(mappedPoliciesToMap(userPolicyMap))
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
@ -1985,22 +2056,6 @@ func (a adminAPIHandlers) ExportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
case stsGroupPolicyMappingsFile:
groupPolicyMap := make(map[string]MappedPolicy)
err := globalIAMSys.store.loadMappedPolicies(ctx, stsUser, true, groupPolicyMap)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
grpPolData, err := json.Marshal(groupPolicyMap)
if err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
if err = rawDataFn(bytes.NewReader(grpPolData), iamFile, len(grpPolData)); err != nil {
writeErrorResponse(ctx, w, exportError(ctx, err, iamFile, ""), r.URL)
return
}
}
}
}
@ -2170,12 +2225,12 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
// If the group does not exist, check whether the group name has leading or trailing
// space characters; we reject such group names.
if errors.Is(gerr, errNoSuchGroup) && hasSpaceBE(group) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, err, allGroupsFile, group), r.URL)
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminResourceInvalidArgument, gerr, allGroupsFile, group), r.URL)
return
}
}
if _, gerr := globalIAMSys.AddUsersToGroup(ctx, group, grpInfo.Members); gerr != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allGroupsFile, group), r.URL)
writeErrorResponseJSON(ctx, w, importError(ctx, gerr, allGroupsFile, group), r.URL)
return
}
}
@ -2202,6 +2257,16 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, allSvcAcctsFile, ""), r.URL)
return
}
// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
err := globalIAMSys.NormalizeLDAPAccessKeypairs(ctx, serviceAcctReqs)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, ""), r.URL)
return
}
}
for user, svcAcctReq := range serviceAcctReqs {
var sp *policy.Policy
var err error
@ -2212,7 +2277,8 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
return
}
}
// service account access key cannot have space characters beginning and end of the string.
// service account access key cannot have space characters at the
// beginning or end of the string.
if hasSpaceBE(svcAcctReq.AccessKey) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument), r.URL)
return
@ -2238,20 +2304,14 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
updateReq = false
}
if updateReq {
opts := updateServiceAccountOpts{
secretKey: svcAcctReq.SecretKey,
status: svcAcctReq.Status,
name: svcAcctReq.Name,
description: svcAcctReq.Description,
expiration: svcAcctReq.Expiration,
sessionPolicy: sp,
}
_, err = globalIAMSys.UpdateServiceAccount(ctx, svcAcctReq.AccessKey, opts)
// If the service account exists, we remove it to ensure a
// clean import.
err := globalIAMSys.DeleteServiceAccount(ctx, svcAcctReq.AccessKey, true)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
delErr := fmt.Errorf("failed to delete existing service account(%s) before importing it: %w", svcAcctReq.AccessKey, err)
writeErrorResponseJSON(ctx, w, importError(ctx, delErr, allSvcAcctsFile, user), r.URL)
return
}
continue
}
opts := newServiceAccountOpts{
accessKey: user,
@ -2264,18 +2324,6 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
allowSiteReplicatorAccount: false,
}
// In case of LDAP we need to resolve the targetUser to a DN and
// query their groups:
if globalIAMSys.LDAPConfig.Enabled() {
opts.claims[ldapUserN] = svcAcctReq.AccessKey // simple username
targetUser, _, err := globalIAMSys.LDAPConfig.LookupUserDN(svcAcctReq.AccessKey)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
return
}
opts.claims[ldapUser] = targetUser // username DN
}
if _, _, err = globalIAMSys.NewServiceAccount(ctx, svcAcctReq.Parent, svcAcctReq.Groups, opts); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, allSvcAcctsFile, user), r.URL)
return
@ -2344,6 +2392,17 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, groupPolicyMappingsFile, ""), r.URL)
return
}
// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
isGroup := true
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, isGroup, grpPolicyMap)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, ""), r.URL)
return
}
}
for g, pm := range grpPolicyMap {
if _, err := globalIAMSys.PolicyDBSet(ctx, g, pm.Policies, unknownIAMUserType, true); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, groupPolicyMappingsFile, g), r.URL)
@ -2373,6 +2432,16 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, stsUserPolicyMappingsFile, ""), r.URL)
return
}
// Validations for LDAP enabled deployments.
if globalIAMSys.LDAPConfig.Enabled() {
isGroup := true
err := globalIAMSys.NormalizeLDAPMappingImport(ctx, !isGroup, userPolicyMap)
if err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, ""), r.URL)
return
}
}
for u, pm := range userPolicyMap {
// disallow setting policy mapping if user is a temporary user
ok, _, err := globalIAMSys.IsTempUser(u)
@ -2384,6 +2453,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, importError(ctx, errIAMActionNotAllowed, stsUserPolicyMappingsFile, u), r.URL)
return
}
if _, err := globalIAMSys.PolicyDBSet(ctx, u, pm.Policies, stsUser, false); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsUserPolicyMappingsFile, u), r.URL)
return
@ -2391,42 +2461,18 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
}
}
}
// import sts group policy mappings
{
f, err := zr.Open(pathJoin(iamAssetsDir, stsGroupPolicyMappingsFile))
switch {
case errors.Is(err, os.ErrNotExist):
case err != nil:
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, stsGroupPolicyMappingsFile, ""), r.URL)
return
default:
defer f.Close()
var grpPolicyMap map[string]MappedPolicy
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, stsGroupPolicyMappingsFile, ""), r.URL)
return
}
if err = json.Unmarshal(data, &grpPolicyMap); err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrAdminConfigBadJSON, err, stsGroupPolicyMappingsFile, ""), r.URL)
return
}
for g, pm := range grpPolicyMap {
if _, err := globalIAMSys.PolicyDBSet(ctx, g, pm.Policies, unknownIAMUserType, true); err != nil {
writeErrorResponseJSON(ctx, w, importError(ctx, err, stsGroupPolicyMappingsFile, g), r.URL)
return
}
}
}
}
}
func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) {
if exp == nil {
return
func addExpirationToCondValues(exp *time.Time, condValues map[string][]string) error {
if exp == nil || exp.IsZero() || exp.Equal(timeSentinel) {
return nil
}
condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(exp.Sub(time.Now()).Seconds()), 10)}
dur := exp.Sub(time.Now())
if dur <= 0 {
return errors.New("unsupported expiration time")
}
condValues["DurationSeconds"] = []string{strconv.FormatInt(int64(dur.Seconds()), 10)}
return nil
}
func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials, newServiceAccountOpts, madmin.AddServiceAccountReq, string, APIError) {
@ -2454,6 +2500,12 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErrWithErr(ErrAdminConfigBadJSON, err)
}
if createReq.Expiration != nil && !createReq.Expiration.IsZero() {
// truncate the expiration to second precision.
truncateTime := createReq.Expiration.Truncate(time.Second)
createReq.Expiration = &truncateTime
}
// service account access key cannot have space characters beginning and end of the string.
if hasSpaceBE(createReq.AccessKey) {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", errorCodes.ToAPIErr(ErrAdminResourceInvalidArgument)
@ -2485,7 +2537,10 @@ func commonAddServiceAccount(r *http.Request) (context.Context, auth.Credentials
}
condValues := getConditionValues(r, "", cred)
addExpirationToCondValues(createReq.Expiration, condValues)
err = addExpirationToCondValues(createReq.Expiration, condValues)
if err != nil {
return ctx, auth.Credentials{}, newServiceAccountOpts{}, madmin.AddServiceAccountReq{}, "", toAdminAPIErr(ctx, err)
}
// Check if action is allowed if creating access key for another user
// Check if action is explicitly denied if for self

View File

@ -39,7 +39,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/signer"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)
const (

View File

@ -36,6 +36,7 @@ import (
"net/url"
"os"
"path"
"path/filepath"
"regexp"
"runtime"
"sort"
@ -49,6 +50,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/madmin-go/v3/estream"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/dsync"
"github.com/minio/minio/internal/grid"
"github.com/minio/minio/internal/handlers"
@ -57,9 +59,9 @@ import (
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/logger/message/log"
xnet "github.com/minio/pkg/v2/net"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/logger/message/log"
xnet "github.com/minio/pkg/v3/net"
"github.com/minio/pkg/v3/policy"
"github.com/secure-io/sio-go"
"github.com/zeebo/xxh3"
)
@ -130,7 +132,7 @@ func (a adminAPIHandlers) ServerUpdateV2Handler(w http.ResponseWriter, r *http.R
// Download Binary Once
binC, bin, err := downloadBinary(u, mode)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -354,7 +356,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
// Download Binary Once
binC, bin, err := downloadBinary(u, mode)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -368,7 +370,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
StatusCode: http.StatusInternalServerError,
}
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -376,7 +378,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
err = verifyBinary(u, sha256Sum, releaseInfo, mode, bytes.NewReader(bin))
if err != nil {
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -389,7 +391,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
StatusCode: http.StatusInternalServerError,
}
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -397,7 +399,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
err = commitBinary()
if err != nil {
logger.LogIf(ctx, fmt.Errorf("server update failed with %w", err))
adminLogIf(ctx, fmt.Errorf("server update failed with %w", err))
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -420,7 +422,7 @@ func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Req
for _, nerr := range globalNotificationSys.SignalService(serviceRestart) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
adminLogIf(ctx, nerr.Err)
}
}
@ -451,7 +453,7 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
case madmin.ServiceActionUnfreeze:
serviceSig = serviceUnFreeze
default:
logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
adminLogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
return
}
@ -473,7 +475,7 @@ func (a adminAPIHandlers) ServiceHandler(w http.ResponseWriter, r *http.Request)
for _, nerr := range globalNotificationSys.SignalService(serviceSig) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
adminLogIf(ctx, nerr.Err)
}
}
@ -534,7 +536,7 @@ func (a adminAPIHandlers) ServiceV2Handler(w http.ResponseWriter, r *http.Reques
case madmin.ServiceActionUnfreeze:
serviceSig = serviceUnFreeze
default:
logger.LogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
adminLogIf(ctx, fmt.Errorf("Unrecognized service action %s requested", action), logger.ErrorKind)
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
return
}
@ -1239,7 +1241,7 @@ func extractHealInitParams(vars map[string]string, qParams url.Values, r io.Read
if hip.clientToken == "" {
jerr := json.NewDecoder(r).Decode(&hip.hs)
if jerr != nil {
logger.LogIf(GlobalContext, jerr, logger.ErrorKind)
adminLogIf(GlobalContext, jerr, logger.ErrorKind)
err = ErrRequestBodyParse
return
}
@ -1429,11 +1431,11 @@ func getAggregatedBackgroundHealState(ctx context.Context, o ObjectLayer) (madmi
if globalIsDistErasure {
// Get heal status from other peers
peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus()
peersHealStates, nerrs := globalNotificationSys.BackgroundHealStatus(ctx)
var errCount int
for _, nerr := range nerrs {
if nerr.Err != nil {
logger.LogIf(ctx, nerr.Err)
adminLogIf(ctx, nerr.Err)
errCount++
}
}
@ -1561,7 +1563,7 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request)
if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
// would mean the network is not stable. Logging here will help in debugging network issues.
if time.Since(connectTime) < (globalNetPerfMinDuration - time.Second) {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
}
}
totalRx += n
@ -1575,7 +1577,8 @@ func (a adminAPIHandlers) ClientDevNull(w http.ResponseWriter, r *http.Request)
// NetperfHandler - perform mesh style network throughput test
func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
if objectAPI == nil {
@ -1596,6 +1599,15 @@ func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request)
ctx = lkctx.Context()
defer nsLock.Unlock(lkctx)
// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)
// Unfreeze as soon as request context is canceled or when the function returns.
go func() {
<-ctx.Done()
globalNotificationSys.ServiceFreeze(ctx, false)
}()
durationStr := r.Form.Get(peerRESTDuration)
duration, err := time.ParseDuration(durationStr)
if err != nil {
@ -1616,18 +1628,73 @@ func (a adminAPIHandlers) NetperfHandler(w http.ResponseWriter, r *http.Request)
}
}
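The freeze/unfreeze change in this handler (and the matching edits in the object and drive speed-test handlers below) swaps a deferred unfreeze for a goroutine tied to the request context, so S3 traffic resumes as soon as the client disconnects rather than only when the handler returns. A rough, self-contained sketch of that pattern, assuming stub freeze/unfreeze functions in place of the MinIO notification calls:

```
package main

import (
	"context"
	"fmt"
	"time"
)

// freeze/unfreeze are stand-ins for globalNotificationSys.ServiceFreeze(ctx, true/false).
func freeze()   { fmt.Println("S3 calls frozen") }
func unfreeze() { fmt.Println("S3 calls unfrozen") }

func handler(reqCtx context.Context) {
	// Derive a cancelable context and cancel it when the handler returns,
	// mirroring ctx, cancel := context.WithCancel(r.Context()); defer cancel().
	ctx, cancel := context.WithCancel(reqCtx)
	defer cancel()

	freeze()
	// Unfreeze as soon as the request context is canceled (client went away)
	// or when the handler returns (the deferred cancel fires).
	go func() {
		<-ctx.Done()
		unfreeze()
	}()

	// ... the long-running test would go here ...
	time.Sleep(50 * time.Millisecond)
}

func main() {
	handler(context.Background())
	time.Sleep(10 * time.Millisecond) // give the cleanup goroutine time to run
}
```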
func isAllowedRWAccess(r *http.Request, cred auth.Credentials, bucketName string) (rd, wr bool) {
owner := cred.AccessKey == globalActiveCred.AccessKey
// Set prefix value for "s3:prefix" policy conditionals.
r.Header.Set("prefix", "")
// Set delimiter value for "s3:delimiter" policy conditionals.
r.Header.Set("delimiter", SlashSeparator)
isAllowedAccess := func(bucketName string) (rd, wr bool) {
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.GetObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
ObjectName: "",
Claims: cred.Claims,
}) {
rd = true
}
if globalIAMSys.IsAllowed(policy.Args{
AccountName: cred.AccessKey,
Groups: cred.Groups,
Action: policy.PutObjectAction,
BucketName: bucketName,
ConditionValues: getConditionValues(r, "", cred),
IsOwner: owner,
ObjectName: "",
Claims: cred.Claims,
}) {
wr = true
}
return rd, wr
}
return isAllowedAccess(bucketName)
}
// ObjectSpeedTestHandler - reports maximum speed of a cluster by performing PUT and
// GET operations on the server, supports auto tuning by default by automatically
// increasing concurrency and stopping when we have reached the limits on the
// system.
func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
objectAPI, creds := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
if objectAPI == nil {
return
}
if !globalAPIConfig.permitRootAccess() {
rd, wr := isAllowedRWAccess(r, creds, globalObjectPerfBucket)
if !rd || !wr {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, AdminError{
Code: "XMinioSpeedtestInsufficientPermissions",
Message: fmt.Sprintf("%s does not have read and write access to '%s' bucket", creds.AccessKey,
globalObjectPerfBucket),
StatusCode: http.StatusForbidden,
}), r.URL)
return
}
}
sizeStr := r.Form.Get(peerRESTSize)
durationStr := r.Form.Get(peerRESTDuration)
concurrentStr := r.Form.Get(peerRESTConcurrent)
@ -1636,6 +1703,7 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
autotune := r.Form.Get("autotune") == "true"
noClear := r.Form.Get("noclear") == "true"
enableSha256 := r.Form.Get("enableSha256") == "true"
enableMultipart := r.Form.Get("enableMultipart") == "true"
size, err := strconv.Atoi(sizeStr)
if err != nil {
@ -1691,8 +1759,11 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)
// unfreeze all incoming S3 API calls after speedtest.
defer globalNotificationSys.ServiceFreeze(ctx, false)
// Unfreeze as soon as request context is canceled or when the function returns.
go func() {
<-ctx.Done()
globalNotificationSys.ServiceFreeze(ctx, false)
}()
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()
@ -1706,6 +1777,8 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
storageClass: storageClass,
bucketName: customBucket,
enableSha256: enableSha256,
enableMultipart: enableMultipart,
creds: creds,
})
var prevResult madmin.SpeedTestResult
for {
@ -1792,7 +1865,8 @@ func validateObjPerfOptions(ctx context.Context, storageInfo madmin.StorageInfo,
// DriveSpeedtestHandler - reports throughput of drives available in the cluster
func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.HealthInfoAdminAction)
if objectAPI == nil {
@ -1802,8 +1876,11 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R
// Freeze all incoming S3 API calls before running speedtest.
globalNotificationSys.ServiceFreeze(ctx, true)
// unfreeze all incoming S3 API calls after speedtest.
defer globalNotificationSys.ServiceFreeze(ctx, false)
// Unfreeze as soon as request context is canceled or when the function returns.
go func() {
<-ctx.Done()
globalNotificationSys.ServiceFreeze(ctx, false)
}()
serial := r.Form.Get("serial") == "true"
blockSizeStr := r.Form.Get("blocksize")
@ -2096,7 +2173,9 @@ func (a adminAPIHandlers) KMSCreateKeyHandler(w http.ResponseWriter, r *http.Req
return
}
if err := GlobalKMS.CreateKey(ctx, r.Form.Get("key-id")); err != nil {
if err := GlobalKMS.CreateKey(ctx, &kms.CreateKeyRequest{
Name: r.Form.Get("key-id"),
}); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -2117,22 +2196,12 @@ func (a adminAPIHandlers) KMSStatusHandler(w http.ResponseWriter, r *http.Reques
return
}
stat, err := GlobalKMS.Stat(ctx)
stat, err := GlobalKMS.Status(ctx)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
status := madmin.KMSStatus{
Name: stat.Name,
DefaultKeyID: stat.DefaultKey,
Endpoints: make(map[string]madmin.ItemState, len(stat.Endpoints)),
}
for _, endpoint := range stat.Endpoints {
status.Endpoints[endpoint] = madmin.ItemOnline // TODO(aead): Implement an online check for mTLS
}
resp, err := json.Marshal(status)
resp, err := json.Marshal(stat)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
@ -2154,15 +2223,9 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
return
}
stat, err := GlobalKMS.Stat(ctx)
if err != nil {
writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInternalError), err.Error(), r.URL)
return
}
keyID := r.Form.Get("key-id")
if keyID == "" {
keyID = stat.DefaultKey
keyID = GlobalKMS.DefaultKey
}
response := madmin.KMSKeyStatus{
KeyID: keyID,
@ -2170,7 +2233,10 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
kmsContext := kms.Context{"MinIO admin API": "KMSKeyStatusHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsContext)
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: keyID,
AssociatedData: kmsContext,
})
if err != nil {
response.EncryptionErr = err.Error()
resp, err := json.Marshal(response)
@ -2183,7 +2249,11 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
}
// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
decryptedKey, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{
Name: key.KeyID,
Ciphertext: key.Ciphertext,
AssociatedData: kmsContext,
})
if err != nil {
response.DecryptionErr = err.Error()
resp, err := json.Marshal(response)
@ -2277,7 +2347,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
notifyTarget := fetchLambdaInfo()
local := getLocalServerProperty(globalEndpoints, r, metrics)
servers := globalNotificationSys.ServerInfo(metrics)
servers := globalNotificationSys.ServerInfo(ctx, metrics)
servers = append(servers, local)
var poolsInfo map[int]map[int]madmin.ErasureSetInfo
@ -2336,8 +2406,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
domain := globalDomainNames
services := madmin.Services{
KMS: fetchKMSStatus(),
KMSStatus: fetchKMSStatusV2(ctx),
KMSStatus: fetchKMSStatus(ctx),
LDAP: ldap,
Logger: log,
Audit: audit,
@ -2347,7 +2416,7 @@ func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) ma
return madmin.InfoMessage{
Mode: string(mode),
Domain: domain,
Region: globalSite.Region,
Region: globalSite.Region(),
SQSARN: globalEventNotifier.GetARNList(false),
DeploymentID: globalDeploymentID(),
Buckets: buckets,
@ -2375,7 +2444,7 @@ func getKubernetesInfo(dctx context.Context) madmin.KubernetesInfo {
}
client := &http.Client{
Transport: globalHealthChkTransport,
Transport: globalRemoteTargetTransport,
Timeout: 10 * time.Second,
}
@ -2800,7 +2869,7 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
w.Header().Get(xhttp.AmzRequestID), w.Header().Get(xhttp.AmzRequestHostID))
encodedErrorResponse := encodeResponse(errorResponse)
healthInfo.Error = string(encodedErrorResponse)
logger.LogIf(ctx, enc.Encode(healthInfo))
adminLogIf(ctx, enc.Encode(healthInfo))
}
deadline := 10 * time.Second // Default deadline is 10secs for health diagnostics.
@ -2947,66 +3016,25 @@ func fetchLambdaInfo() []map[string][]madmin.TargetIDStatus {
return notify
}
// fetchKMSStatus fetches KMS-related status information.
func fetchKMSStatus() madmin.KMS {
kmsStat := madmin.KMS{}
if GlobalKMS == nil {
kmsStat.Status = "disabled"
return kmsStat
}
stat, err := GlobalKMS.Stat(context.Background())
if err != nil {
kmsStat.Status = string(madmin.ItemOffline)
return kmsStat
}
if len(stat.Endpoints) == 0 {
kmsStat.Status = stat.Name
return kmsStat
}
kmsStat.Status = string(madmin.ItemOnline)
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
// 1. Generate a new key using the KMS.
key, err := GlobalKMS.GenerateKey(context.Background(), "", kmsContext)
if err != nil {
kmsStat.Encrypt = fmt.Sprintf("Encryption failed: %v", err)
} else {
kmsStat.Encrypt = "success"
}
// 2. Verify that we can indeed decrypt the (encrypted) key
decryptedKey, err := GlobalKMS.DecryptKey(key.KeyID, key.Ciphertext, kmsContext)
switch {
case err != nil:
kmsStat.Decrypt = fmt.Sprintf("Decryption failed: %v", err)
case subtle.ConstantTimeCompare(key.Plaintext, decryptedKey) != 1:
kmsStat.Decrypt = "Decryption failed: decrypted key does not match generated key"
default:
kmsStat.Decrypt = "success"
}
return kmsStat
}
// fetchKMSStatusV2 fetches KMS-related status information for all instances
func fetchKMSStatusV2(ctx context.Context) []madmin.KMS {
// fetchKMSStatus fetches KMS-related status information for all instances
func fetchKMSStatus(ctx context.Context) []madmin.KMS {
if GlobalKMS == nil {
return []madmin.KMS{}
}
results := GlobalKMS.Verify(ctx)
stats := []madmin.KMS{}
for _, result := range results {
stats = append(stats, madmin.KMS{
Status: result.Status,
Endpoint: result.Endpoint,
Encrypt: result.Encrypt,
Decrypt: result.Decrypt,
Version: result.Version,
})
stat, err := GlobalKMS.Status(ctx)
if err != nil {
kmsLogIf(ctx, err, "failed to fetch KMS status information")
return []madmin.KMS{}
}
stats := make([]madmin.KMS, 0, len(stat.Endpoints))
for endpoint, state := range stat.Endpoints {
stats = append(stats, madmin.KMS{
Status: string(state),
Endpoint: endpoint,
})
}
return stats
}
@ -3113,7 +3141,7 @@ func getClusterMetaInfo(ctx context.Context) []byte {
case ci := <-resultCh:
out, err := json.MarshalIndent(ci, "", " ")
if err != nil {
logger.LogIf(ctx, err)
bugLogIf(ctx, err)
return nil
}
return out
@ -3172,11 +3200,11 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}
file = strings.ReplaceAll(file, string(os.PathSeparator), "/")
file = filepath.ToSlash(file)
// Reject attempts to traverse parent or absolute paths.
if strings.Contains(file, "..") || strings.Contains(volume, "..") {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
if hasBadPathComponent(volume) || hasBadPathComponent(file) {
writeErrorResponse(r.Context(), w, errorCodes.ToAPIErr(ErrInvalidResourceName), r.URL)
return
}
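The hunk above normalizes the path with filepath.ToSlash and validates it component by component instead of substring-matching on "..". A simplified illustration of a component-wise check; this hasBadPathComponent is written for the example and is not the server's implementation:

```
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// hasBadPathComponent is an illustrative re-implementation: it rejects any
// path containing "." or ".." as a full component, which a plain
// strings.Contains(path, "..") check would conflate with names like "a..b".
func hasBadPathComponent(p string) bool {
	p = filepath.ToSlash(p) // normalize Windows separators first
	for _, part := range strings.Split(p, "/") {
		switch strings.TrimSpace(part) {
		case "..", ".":
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(hasBadPathComponent("bucket/../etc/passwd")) // true: traversal attempt
	fmt.Println(hasBadPathComponent("bucket/a..b/object"))   // false: legitimate name
}
```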
@ -3195,6 +3223,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
return
}
}
addErr := func(msg string) {}
// Write a version for making *incompatible* changes.
// The AdminClient will reject any version it does not know.
@ -3206,18 +3235,18 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
clusterKey, err := bytesToPublicKey(getSubnetAdminPublicKey())
if err != nil {
logger.LogIf(ctx, stream.AddError(err.Error()))
bugLogIf(ctx, stream.AddError(err.Error()))
return
}
err = stream.AddKeyEncrypted(clusterKey)
if err != nil {
logger.LogIf(ctx, stream.AddError(err.Error()))
bugLogIf(ctx, stream.AddError(err.Error()))
return
}
if b := getClusterMetaInfo(ctx); len(b) > 0 {
w, err := stream.AddEncryptedStream("cluster.info", nil)
if err != nil {
logger.LogIf(ctx, err)
bugLogIf(ctx, err)
return
}
w.Write(b)
@ -3226,14 +3255,19 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
// Add new key for inspect data.
if err := stream.AddKeyEncrypted(publicKey); err != nil {
logger.LogIf(ctx, stream.AddError(err.Error()))
bugLogIf(ctx, stream.AddError(err.Error()))
return
}
encStream, err := stream.AddEncryptedStream("inspect.zip", nil)
if err != nil {
logger.LogIf(ctx, stream.AddError(err.Error()))
bugLogIf(ctx, stream.AddError(err.Error()))
return
}
addErr = func(msg string) {
inspectZipW.Close()
encStream.Close()
stream.AddError(msg)
}
defer encStream.Close()
inspectZipW = zip.NewWriter(encStream)
@ -3244,7 +3278,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
// MUST use crypto/rand
n, err := crand.Read(key[:])
if err != nil || n != len(key) {
logger.LogIf(ctx, err)
bugLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
@ -3258,7 +3292,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
stream, err := sio.AES_256_GCM.Stream(key[:])
if err != nil {
logger.LogIf(ctx, err)
bugLogIf(ctx, err)
return
}
// Zero nonce, we only use each key once, and 32 bytes is plenty.
@ -3272,7 +3306,7 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
defer inspectZipW.Close()
if b := getClusterMetaInfo(ctx); len(b) > 0 {
logger.LogIf(ctx, embedFileInZip(inspectZipW, "cluster.info", b, 0o600))
adminLogIf(ctx, embedFileInZip(inspectZipW, "cluster.info", b, 0o600))
}
}
@ -3300,32 +3334,20 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
sys: nil,
})
if zerr != nil {
logger.LogIf(ctx, zerr)
bugLogIf(ctx, zerr)
return nil
}
header.Method = zip.Deflate
zwriter, zerr := inspectZipW.CreateHeader(header)
if zerr != nil {
logger.LogIf(ctx, zerr)
bugLogIf(ctx, zerr)
return nil
}
if _, err := io.Copy(zwriter, r); err != nil {
logger.LogIf(ctx, err)
adminLogIf(ctx, err)
}
return nil
}
err := o.GetRawData(ctx, volume, file, rawDataFn)
if !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
}
// save the format.json as part of inspect by default
if !(volume == minioMetaBucket && file == formatConfigFile) {
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
}
if !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
}
// save args passed to inspect command
var sb bytes.Buffer
@ -3336,7 +3358,25 @@ func (a adminAPIHandlers) InspectDataHandler(w http.ResponseWriter, r *http.Requ
sb.WriteString(pool.CmdLine)
}
sb.WriteString("\n")
logger.LogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600))
adminLogIf(ctx, embedFileInZip(inspectZipW, "inspect-input.txt", sb.Bytes(), 0o600))
err := o.GetRawData(ctx, volume, file, rawDataFn)
if err != nil {
if errors.Is(err, errFileNotFound) {
addErr("GetRawData: No files matched the given pattern")
return
}
embedFileInZip(inspectZipW, "GetRawData-err.txt", []byte(err.Error()), 0o600)
adminLogIf(ctx, err)
}
// save the format.json as part of inspect by default
if !(volume == minioMetaBucket && file == formatConfigFile) {
err = o.GetRawData(ctx, minioMetaBucket, formatConfigFile, rawDataFn)
}
if !errors.Is(err, errFileNotFound) {
adminLogIf(ctx, err)
}
scheme := "https"
if !globalIsTLS {
@ -3370,7 +3410,7 @@ function main() {
}
main "$@"`, scheme)
logger.LogIf(ctx, embedFileInZip(inspectZipW, "start-minio.sh", scrb.Bytes(), 0o755))
adminLogIf(ctx, embedFileInZip(inspectZipW, "start-minio.sh", scrb.Bytes(), 0o755))
}
func getSubnetAdminPublicKey() []byte {

View File

@ -63,8 +63,8 @@ const (
)
var (
errHealIdleTimeout = fmt.Errorf("healing results were not consumed for too long")
errHealStopSignalled = fmt.Errorf("heal stop signaled")
errHealIdleTimeout = errors.New("healing results were not consumed for too long")
errHealStopSignalled = errors.New("heal stop signaled")
errFnHealFromAPIErr = func(ctx context.Context, err error) error {
apiErr := toAdminAPIErr(ctx, err)
@ -329,21 +329,25 @@ func (ahs *allHealState) LaunchNewHealSequence(h *healSequence, objAPI ObjectLay
// Add heal state and start sequence
ahs.healSeqMap[hpath] = h
// Launch top-level background heal go-routine
go h.healSequenceStart(objAPI)
clientToken := h.clientToken
if globalIsDistErasure {
clientToken = fmt.Sprintf("%s:%d", h.clientToken, GetProxyEndpointLocalIndex(globalProxyEndpoints))
}
if h.clientToken == bgHealingUUID {
// For background heal do nothing, do not spawn an unnecessary goroutine.
} else {
// Launch top-level background heal go-routine
go h.healSequenceStart(objAPI)
}
b, err := json.Marshal(madmin.HealStartSuccess{
ClientToken: clientToken,
ClientAddress: h.clientAddress,
StartTime: h.startTime,
})
if err != nil {
logger.LogIf(h.ctx, err)
bugLogIf(h.ctx, err)
return nil, toAdminAPIErr(h.ctx, err), ""
}
return b, noError, ""
@ -390,7 +394,7 @@ func (ahs *allHealState) PopHealStatusJSON(hpath string,
if err != nil {
h.currentStatus.Items = nil
logger.LogIf(h.ctx, err)
bugLogIf(h.ctx, err)
return nil, ErrInternalError
}
@ -451,8 +455,8 @@ type healSequence struct {
// Number of total items healed against item type
healedItemsMap map[madmin.HealItemType]int64
// Number of total items where healing failed against endpoint and drive state
healFailedItemsMap map[string]int64
// Number of total items where healing failed against item type
healFailedItemsMap map[madmin.HealItemType]int64
// The time of the last scan/heal activity
lastHealActivity time.Time
@ -493,7 +497,7 @@ func newHealSequence(ctx context.Context, bucket, objPrefix, clientAddr string,
ctx: ctx,
scannedItemsMap: make(map[madmin.HealItemType]int64),
healedItemsMap: make(map[madmin.HealItemType]int64),
healFailedItemsMap: make(map[string]int64),
healFailedItemsMap: make(map[madmin.HealItemType]int64),
}
}
@ -537,14 +541,14 @@ func (h *healSequence) getHealedItemsMap() map[madmin.HealItemType]int64 {
return retMap
}
// gethealFailedItemsMap - returns map of all items where heal failed against
// getHealFailedItemsMap - returns map of all items where heal failed against
// drive endpoint and status
func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
func (h *healSequence) getHealFailedItemsMap() map[madmin.HealItemType]int64 {
h.mutex.RLock()
defer h.mutex.RUnlock()
// Make a copy before returning the value
retMap := make(map[string]int64, len(h.healFailedItemsMap))
retMap := make(map[madmin.HealItemType]int64, len(h.healFailedItemsMap))
for k, v := range h.healFailedItemsMap {
retMap[k] = v
}
@ -552,6 +556,30 @@ func (h *healSequence) gethealFailedItemsMap() map[string]int64 {
return retMap
}
func (h *healSequence) countFailed(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()
h.healFailedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}
func (h *healSequence) countScanned(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}
func (h *healSequence) countHealed(healType madmin.HealItemType) {
h.mutex.Lock()
defer h.mutex.Unlock()
h.healedItemsMap[healType]++
h.lastHealActivity = UTCNow()
}
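These helpers fold the previously open-coded mutex sections into one place: every scanned, healed, or failed item increments a per-type counter and refreshes the last-activity timestamp under the same lock. A self-contained sketch of that shape, with HealItemType standing in for the madmin type:

```
package main

import (
	"fmt"
	"sync"
	"time"
)

// HealItemType is a stand-in for madmin.HealItemType.
type HealItemType string

type healCounters struct {
	mu               sync.Mutex
	healedItemsMap   map[HealItemType]int64
	healFailedItems  map[HealItemType]int64
	lastHealActivity time.Time
}

func (h *healCounters) countHealed(t HealItemType) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.healedItemsMap[t]++
	h.lastHealActivity = time.Now().UTC()
}

func (h *healCounters) countFailed(t HealItemType) {
	h.mu.Lock()
	defer h.mu.Unlock()
	h.healFailedItems[t]++
	h.lastHealActivity = time.Now().UTC()
}

func main() {
	h := &healCounters{
		healedItemsMap:  map[HealItemType]int64{},
		healFailedItems: map[HealItemType]int64{},
	}
	h.countHealed("object")
	h.countFailed("object")
	fmt.Println(h.healedItemsMap, h.healFailedItems)
}
```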
// isQuitting - determines if the heal sequence is quitting (due to an
// external signal)
func (h *healSequence) isQuitting() bool {
@ -704,10 +732,7 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
task.opts.ScanMode = madmin.HealNormalScan
}
h.mutex.Lock()
h.scannedItemsMap[healType]++
h.lastHealActivity = UTCNow()
h.mutex.Unlock()
h.countScanned(healType)
if source.noWait {
select {
@ -739,37 +764,21 @@ func (h *healSequence) queueHealTask(source healSource, healType madmin.HealItem
// task queued, now wait for the response.
select {
case res := <-task.respCh:
if res.err == nil {
h.countHealed(healType)
} else {
h.countFailed(healType)
}
if !h.reportProgress {
if errors.Is(res.err, errSkipFile) { // this is only sent usually by nopHeal
return nil
}
h.mutex.Lock()
defer h.mutex.Unlock()
// Progress is not reported in case of background heal processing.
// Instead we increment relevant counter based on the heal result
// for prometheus reporting.
if res.err != nil {
for _, d := range res.result.After.Drives {
// For failed items we report the endpoint and drive state
// This will help users take corrective actions for drives
h.healFailedItemsMap[d.Endpoint+","+d.State]++
}
} else {
// Only object type reported for successful healing
h.healedItemsMap[res.result.Type]++
}
// Report any failure to the caller
return res.err
}
res.result.Type = healType
if res.err != nil {
// Only report object error
if healType != madmin.HealItemObject {
return res.err
}
res.result.Detail = res.err.Error()
}
return h.pushHealResultItem(res.result)

View File

@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@ -18,7 +18,6 @@
package cmd
import (
"context"
"math"
"net/http"
"os"
@ -31,7 +30,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
xnet "github.com/minio/pkg/v3/net"
)
// getLocalServerProperty - returns madmin.ServerProperties for only the
@ -65,9 +64,11 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
network[nodeName] = string(madmin.ItemOnline)
} else {
network[nodeName] = string(madmin.ItemOffline)
// log once the error
logger.LogOnceIf(context.Background(), err, nodeName)
if xnet.IsNetworkOrHostDown(err, false) {
network[nodeName] = string(madmin.ItemOffline)
} else if xnet.IsNetworkOrHostDown(err, true) {
network[nodeName] = "connection attempt timedout"
}
}
}
}

View File

@ -67,7 +67,7 @@ type ObjectToDelete struct {
ReplicateDecisionStr string `xml:"-"`
}
// createBucketConfiguration container for bucket configuration request from client.
// createBucketLocationConfiguration container for bucket configuration request from client.
// Used for parsing the location from the request body for Makebucket.
type createBucketLocationConfiguration struct {
XMLName xml.Name `xml:"CreateBucketConfiguration" json:"-"`

View File

@ -48,7 +48,7 @@ import (
levent "github.com/minio/minio/internal/config/lambda/event"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/hash"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// APIError structure
@ -56,19 +56,23 @@ type APIError struct {
Code string
Description string
HTTPStatusCode int
ObjectSize string
RangeRequested string
}
// APIErrorResponse - error response format
type APIErrorResponse struct {
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
Key string `xml:"Key,omitempty" json:"Key,omitempty"`
BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
Resource string
Region string `xml:"Region,omitempty" json:"Region,omitempty"`
RequestID string `xml:"RequestId" json:"RequestId"`
HostID string `xml:"HostId" json:"HostId"`
XMLName xml.Name `xml:"Error" json:"-"`
Code string
Message string
Key string `xml:"Key,omitempty" json:"Key,omitempty"`
BucketName string `xml:"BucketName,omitempty" json:"BucketName,omitempty"`
Resource string
Region string `xml:"Region,omitempty" json:"Region,omitempty"`
RequestID string `xml:"RequestId" json:"RequestId"`
HostID string `xml:"HostId" json:"HostId"`
ActualObjectSize string `xml:"ActualObjectSize,omitempty" json:"ActualObjectSize,omitempty"`
RangeRequested string `xml:"RangeRequested,omitempty" json:"RangeRequested,omitempty"`
}
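The two optional fields added above let range errors echo the object's actual size and the requested range back to the client. A trimmed sketch of how such omit-when-empty fields serialize; the struct below is a cut-down stand-in, not the full server type:

```
package main

import (
	"encoding/xml"
	"fmt"
)

// errorResponse mirrors only the new range-related fields added above.
type errorResponse struct {
	XMLName          xml.Name `xml:"Error"`
	Code             string
	Message          string
	ActualObjectSize string `xml:"ActualObjectSize,omitempty"`
	RangeRequested   string `xml:"RangeRequested,omitempty"`
}

func main() {
	out, _ := xml.MarshalIndent(errorResponse{
		Code:             "InvalidRange",
		Message:          "The requested range is not satisfiable",
		ActualObjectSize: "1024",
		RangeRequested:   "2048-4096",
	}, "", "  ")
	fmt.Println(string(out))
}
```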
// APIErrorCode type of error status.
@ -263,6 +267,7 @@ const (
ErrInvalidResourceName
ErrInvalidLifecycleQueryParameter
ErrServerNotInitialized
ErrBucketMetadataNotInitialized
ErrRequestTimedout
ErrClientDisconnected
ErrTooManyRequests
@ -278,9 +283,11 @@ const (
ErrMalformedJSON
ErrAdminNoSuchUser
ErrAdminNoSuchUserLDAPWarn
ErrAdminLDAPExpectedLoginName
ErrAdminNoSuchGroup
ErrAdminGroupNotEmpty
ErrAdminGroupDisabled
ErrAdminInvalidGroupName
ErrAdminNoSuchJob
ErrAdminNoSuchPolicy
ErrAdminPolicyChangeAlreadyApplied
@ -300,6 +307,7 @@ const (
ErrAdminConfigIDPCfgNameDoesNotExist
ErrInsecureClientRequest
ErrObjectTampered
ErrAdminLDAPNotEnabled
// Site-Replication errors
ErrSiteReplicationInvalidRequest
@ -418,6 +426,7 @@ const (
ErrAdminProfilerNotEnabled
ErrInvalidDecompressedSize
ErrAddUserInvalidArgument
ErrAddUserValidUTF
ErrAdminResourceInvalidArgument
ErrAdminAccountNotEligible
ErrAccountNotEligible
@ -449,9 +458,9 @@ func (e errorCodeMap) ToAPIErrWithErr(errCode APIErrorCode, err error) APIError
if err != nil {
apiErr.Description = fmt.Sprintf("%s (%s)", apiErr.Description, err)
}
if globalSite.Region != "" {
if region := globalSite.Region(); region != "" {
if errCode == ErrAuthorizationHeaderMalformed {
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
apiErr.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", region)
return apiErr
}
}
@ -1293,7 +1302,12 @@ var errorCodes = errorCodeMap{
},
ErrServerNotInitialized: {
Code: "XMinioServerNotInitialized",
Description: "Server not initialized, please try again.",
Description: "Server not initialized yet, please try again.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrBucketMetadataNotInitialized: {
Code: "XMinioBucketMetadataNotInitialized",
Description: "Bucket metadata not initialized yet, please try again.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrMalformedJSON: {
@ -1466,7 +1480,7 @@ var errorCodes = errorCodeMap{
ErrTooManyRequests: {
Code: "TooManyRequests",
Description: "Deadline exceeded while waiting in incoming queue, please reduce your request rate",
HTTPStatusCode: http.StatusServiceUnavailable,
HTTPStatusCode: http.StatusTooManyRequests,
},
ErrUnsupportedMetadata: {
Code: "InvalidArgument",
@ -2079,7 +2093,26 @@ var errorCodes = errorCodeMap{
Description: "Invalid attribute name specified.",
HTTPStatusCode: http.StatusBadRequest,
},
// Add your error structure here.
ErrAdminLDAPNotEnabled: {
Code: "XMinioLDAPNotEnabled",
Description: "LDAP is not enabled. LDAP must be enabled to make LDAP requests.",
HTTPStatusCode: http.StatusNotImplemented,
},
ErrAdminLDAPExpectedLoginName: {
Code: "XMinioLDAPExpectedLoginName",
Description: "Expected LDAP short username but was given full DN.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminInvalidGroupName: {
Code: "XMinioInvalidGroupName",
Description: "The group name is invalid.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAddUserValidUTF: {
Code: "XMinioInvalidUTF",
Description: "Invalid UTF-8 character detected.",
HTTPStatusCode: http.StatusBadRequest,
},
}
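For readers skimming these entries: each APIErrorCode resolves to a code string, a description, and an HTTP status, and handlers look the triple up before writing the error response. A bare-bones sketch of that registry shape, with simplified stand-in types rather than the server's own:

```
package main

import (
	"fmt"
	"net/http"
)

type apiErrorCode int

const (
	errNone apiErrorCode = iota
	errAdminInvalidGroupName
	errTooManyRequests
)

type apiError struct {
	Code           string
	Description    string
	HTTPStatusCode int
}

// errorCodes plays the role of the map extended in the hunks above.
var errorCodes = map[apiErrorCode]apiError{
	errAdminInvalidGroupName: {
		Code:           "XMinioInvalidGroupName",
		Description:    "The group name is invalid.",
		HTTPStatusCode: http.StatusBadRequest,
	},
	errTooManyRequests: {
		Code:           "TooManyRequests",
		Description:    "Deadline exceeded while waiting in incoming queue, please reduce your request rate",
		HTTPStatusCode: http.StatusTooManyRequests,
	},
}

func main() {
	e := errorCodes[errTooManyRequests]
	fmt.Printf("%d %s: %s\n", e.HTTPStatusCode, e.Code, e.Description)
}
```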
// toAPIErrorCode - Converts embedded errors. Convenience
@ -2119,6 +2152,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrAdminNoSuchGroup
case errGroupNotEmpty:
apiErr = ErrAdminGroupNotEmpty
case errGroupNameContainsReservedChars:
apiErr = ErrAdminInvalidGroupName
case errNoSuchJob:
apiErr = ErrAdminNoSuchJob
case errNoPolicyToAttachOrDetach:
@ -2133,6 +2168,8 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrEntityTooSmall
case errAuthentication:
apiErr = ErrAccessDenied
case auth.ErrContainsReservedChars:
apiErr = ErrAdminInvalidAccessKey
case auth.ErrInvalidAccessKeyLength:
apiErr = ErrAdminInvalidAccessKey
case auth.ErrInvalidSecretKeyLength:
@ -2200,6 +2237,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrInvalidMaxParts
case ioutil.ErrOverread:
apiErr = ErrExcessData
case errServerNotInitialized:
apiErr = ErrServerNotInitialized
case errBucketMetadataNotInitialized:
apiErr = ErrBucketMetadataNotInitialized
}
// Compression errors
@ -2391,10 +2432,9 @@ func toAPIError(ctx context.Context, err error) APIError {
apiErr := errorCodes.ToAPIErr(toAPIErrorCode(ctx, err))
switch apiErr.Code {
case "NotImplemented":
desc := fmt.Sprintf("%s (%v)", apiErr.Description, err)
apiErr = APIError{
Code: apiErr.Code,
Description: desc,
Description: fmt.Sprintf("%s (%v)", apiErr.Description, err),
HTTPStatusCode: apiErr.HTTPStatusCode,
}
case "XMinioBackendDown":
@ -2406,12 +2446,24 @@ func toAPIError(ctx context.Context, err error) APIError {
switch e := err.(type) {
case kms.Error:
apiErr = APIError{
Description: e.Err.Error(),
Code: e.APICode,
HTTPStatusCode: e.HTTPStatusCode,
Description: e.Err,
HTTPStatusCode: e.Code,
}
case batchReplicationJobError:
apiErr = APIError(e)
apiErr = APIError{
Description: e.Description,
Code: e.Code,
HTTPStatusCode: e.HTTPStatusCode,
}
case InvalidRange:
apiErr = APIError{
Code: "InvalidRange",
Description: e.Error(),
HTTPStatusCode: errorCodes[ErrInvalidRange].HTTPStatusCode,
ObjectSize: strconv.FormatInt(e.ResourceSize, 10),
RangeRequested: fmt.Sprintf("%d-%d", e.OffsetBegin, e.OffsetEnd),
}
case InvalidArgument:
apiErr = APIError{
Code: "InvalidArgument",
@ -2519,7 +2571,7 @@ func toAPIError(ctx context.Context, err error) APIError {
// Make sure to log the errors which we cannot translate
// to a meaningful S3 API errors. This is added to aid in
// debugging unexpected/unhandled errors.
logger.LogIf(ctx, err)
internalLogIf(ctx, err)
}
return apiErr
@ -2538,13 +2590,15 @@ func getAPIError(code APIErrorCode) APIError {
func getAPIErrorResponse(ctx context.Context, err APIError, resource, requestID, hostID string) APIErrorResponse {
reqInfo := logger.GetReqInfo(ctx)
return APIErrorResponse{
Code: err.Code,
Message: err.Description,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
Resource: resource,
Region: globalSite.Region,
RequestID: requestID,
HostID: hostID,
Code: err.Code,
Message: err.Description,
BucketName: reqInfo.BucketName,
Key: reqInfo.ObjectName,
Resource: resource,
Region: globalSite.Region(),
RequestID: requestID,
HostID: hostID,
ActualObjectSize: err.ObjectSize,
RangeRequested: err.RangeRequested,
}
}

View File

@ -19,6 +19,7 @@ package cmd
import (
"bytes"
"context"
"encoding/json"
"encoding/xml"
"fmt"
@ -30,7 +31,6 @@ import (
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
xxml "github.com/minio/xxml"
)
@ -54,7 +54,7 @@ func setCommonHeaders(w http.ResponseWriter) {
// Set `x-amz-bucket-region` only if region is set on the server
// by default minio uses an empty region.
if region := globalSite.Region; region != "" {
if region := globalSite.Region(); region != "" {
w.Header().Set(xhttp.AmzBucketRegion, region)
}
w.Header().Set(xhttp.AcceptRanges, "bytes")
@ -68,7 +68,7 @@ func encodeResponse(response interface{}) []byte {
var buf bytes.Buffer
buf.WriteString(xml.Header)
if err := xml.NewEncoder(&buf).Encode(response); err != nil {
logger.LogIf(GlobalContext, err)
bugLogIf(GlobalContext, err)
return nil
}
return buf.Bytes()
@ -86,7 +86,7 @@ func encodeResponseList(response interface{}) []byte {
var buf bytes.Buffer
buf.WriteString(xxml.Header)
if err := xxml.NewEncoder(&buf).Encode(response); err != nil {
logger.LogIf(GlobalContext, err)
bugLogIf(GlobalContext, err)
return nil
}
return buf.Bytes()
@ -108,7 +108,7 @@ func setPartsCountHeaders(w http.ResponseWriter, objInfo ObjectInfo) {
}
// Write object header
func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
func setObjectHeaders(ctx context.Context, w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSpec, opts ObjectOptions) (err error) {
// set common headers
setCommonHeaders(w)
@ -136,7 +136,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
// Set tag count if object has tags
if len(objInfo.UserTags) > 0 {
tags, _ := tags.ParseObjectTags(objInfo.UserTags)
if tags.Count() > 0 {
if tags != nil && tags.Count() > 0 {
w.Header()[xhttp.AmzTagCount] = []string{strconv.Itoa(tags.Count())}
if opts.Tagging {
// This is MinIO only extension to return back tags along with the count.
@ -213,7 +213,7 @@ func setObjectHeaders(w http.ResponseWriter, objInfo ObjectInfo, rs *HTTPRangeSp
if objInfo.IsRemote() {
// Check if object is being restored. For more information on x-amz-restore header see
// https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html#API_HeadObject_ResponseSyntax
w.Header()[xhttp.AmzStorageClass] = []string{objInfo.TransitionedObject.Tier}
w.Header()[xhttp.AmzStorageClass] = []string{filterStorageClass(ctx, objInfo.TransitionedObject.Tier)}
}
if lc, err := globalLifecycleSys.Get(objInfo.Bucket); err == nil {

View File

@ -35,7 +35,7 @@ import (
"github.com/minio/minio/internal/hash"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
xxml "github.com/minio/xxml"
)
@ -544,7 +544,7 @@ func cleanReservedKeys(metadata map[string]string) map[string]string {
}
// generates an ListBucketVersions response for the said bucket with other enumerated options.
func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
func generateListVersionsResponse(ctx context.Context, bucket, prefix, marker, versionIDMarker, delimiter, encodingType string, maxKeys int, resp ListObjectVersionsInfo, metadata metaCheckFn) ListVersionsResponse {
versions := make([]ObjectVersion, 0, len(resp.Objects))
owner := &Owner{
@ -573,7 +573,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@ -634,7 +634,7 @@ func generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delim
}
// generates an ListObjectsV1 response for the said bucket with other enumerated options.
func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
func generateListObjectsV1Response(ctx context.Context, bucket, prefix, marker, delimiter, encodingType string, maxKeys int, resp ListObjectsInfo) ListObjectsResponse {
contents := make([]Object, 0, len(resp.Objects))
owner := &Owner{
ID: globalMinioDefaultOwnerID,
@ -654,7 +654,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@ -683,7 +683,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
}
// generates an ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
func generateListObjectsV2Response(ctx context.Context, bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata metaCheckFn) ListObjectsV2Response {
contents := make([]Object, 0, len(objects))
var owner *Owner
if fetchOwner {
@ -707,7 +707,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
}
content.Size = object.Size
if object.StorageClass != "" {
content.StorageClass = object.StorageClass
content.StorageClass = filterStorageClass(ctx, object.StorageClass)
} else {
content.StorageClass = globalMinioDefaultStorageClass
}
@ -891,7 +891,7 @@ func writeResponse(w http.ResponseWriter, statusCode int, response []byte, mType
}
// Similar check to http.checkWriteHeaderCode
if statusCode < 100 || statusCode > 999 {
logger.LogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode))
bugLogIf(context.Background(), fmt.Errorf("invalid WriteHeader code %v", statusCode))
statusCode = http.StatusInternalServerError
}
setCommonHeaders(w)
@ -954,14 +954,14 @@ func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError
switch err.Code {
case "InvalidRegion":
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region)
err.Description = fmt.Sprintf("Region does not match; expecting '%s'.", globalSite.Region())
case "AuthorizationHeaderMalformed":
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region)
err.Description = fmt.Sprintf("The authorization header is malformed; the region is wrong; expecting '%s'.", globalSite.Region())
}
// Similar check to http.checkWriteHeaderCode
if err.HTTPStatusCode < 100 || err.HTTPStatusCode > 999 {
logger.LogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
bugLogIf(ctx, fmt.Errorf("invalid WriteHeader code %v from %v", err.HTTPStatusCode, err.Code))
err.HTTPStatusCode = http.StatusInternalServerError
}

View File

@ -24,7 +24,7 @@ import (
consoleapi "github.com/minio/console/api"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/mux"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v3/wildcard"
"github.com/rs/cors"
)
@ -64,7 +64,7 @@ func setObjectLayer(o ObjectLayer) {
globalObjLayerMutex.Unlock()
}
// objectAPIHandler implements and provides http handlers for S3 API.
// objectAPIHandlers implements and provides http handlers for S3 API.
type objectAPIHandlers struct {
ObjectAPI func() ObjectLayer
}

File diff suppressed because one or more lines are too long

View File

@ -41,7 +41,7 @@ import (
xjwt "github.com/minio/minio/internal/jwt"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/mcontext"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// Verify if request has JWT.
@ -126,7 +126,7 @@ func getRequestAuthType(r *http.Request) (at authType) {
var err error
r.Form, err = url.ParseQuery(r.URL.RawQuery)
if err != nil {
logger.LogIf(r.Context(), err)
authNLogIf(r.Context(), err)
return authTypeUnknown
}
}
@ -178,7 +178,7 @@ func validateAdminSignature(ctx context.Context, r *http.Request, region string)
logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = globalSite.Region
logger.GetReqInfo(ctx).Region = globalSite.Region()
return cred, owner, ErrNone
}
@ -257,7 +257,7 @@ func getClaimsFromTokenWithSecret(token, secret string) (map[string]interface{},
if err != nil {
// Base64 decoding fails, we should log to indicate
// something is malforming the request sent by client.
logger.LogIf(GlobalContext, err, logger.ErrorKind)
authNLogIf(GlobalContext, err, logger.ErrorKind)
return nil, errAuthentication
}
claims.MapClaims[sessionPolicyNameExtracted] = string(spBytes)
@ -353,7 +353,7 @@ func checkRequestAuthTypeWithVID(ctx context.Context, r *http.Request, action po
func authenticateRequest(ctx context.Context, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
if logger.GetReqInfo(ctx) == nil {
logger.LogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind)
bugLogIf(ctx, errors.New("unexpected context.Context does not have a logger.ReqInfo"), logger.ErrorKind)
return ErrAccessDenied
}
@ -368,7 +368,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypeSigned, authTypePresigned:
region := globalSite.Region
region := globalSite.Region()
switch action {
case policy.GetBucketLocationAction, policy.ListAllMyBucketsAction:
region = ""
@ -384,7 +384,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
logger.GetReqInfo(ctx).Cred = cred
logger.GetReqInfo(ctx).Owner = owner
logger.GetReqInfo(ctx).Region = globalSite.Region
logger.GetReqInfo(ctx).Region = globalSite.Region()
// region is valid only for CreateBucketAction.
var region string
@ -392,7 +392,7 @@ func authenticateRequest(ctx context.Context, r *http.Request, action policy.Act
// To extract region from XML in request body, get copy of request body.
payload, err := io.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
if err != nil {
logger.LogIf(ctx, err, logger.ErrorKind)
authZLogIf(ctx, err, logger.ErrorKind)
return ErrMalformedXML
}
@ -684,7 +684,7 @@ func validateSignature(atype authType, r *http.Request) (auth.Credentials, bool,
}
cred, owner, s3Err = getReqAccessKeyV2(r)
case authTypePresigned, authTypeSigned:
region := globalSite.Region
region := globalSite.Region()
if s3Err = isReqAuthenticated(GlobalContext, r, region, serviceS3); s3Err != ErrNone {
return cred, owner, s3Err
}
@ -745,7 +745,7 @@ func isPutRetentionAllowed(bucketName, objectName string, retDays int, retDate t
func isPutActionAllowed(ctx context.Context, atype authType, bucketName, objectName string, r *http.Request, action policy.Action) (s3Err APIErrorCode) {
var cred auth.Credentials
var owner bool
region := globalSite.Region
region := globalSite.Region()
switch atype {
case authTypeUnknown:
return ErrSignatureVersionNotSupported

View File

@ -28,7 +28,7 @@ import (
"time"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
type nullReader struct{}
@ -403,7 +403,7 @@ func TestIsReqAuthenticated(t *testing.T) {
// Validates all testcases.
for i, testCase := range testCases {
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region(), serviceS3)
if s3Error != testCase.s3Error {
if _, err := io.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
@ -443,7 +443,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
{Request: mustNewPresignedRequest(http.MethodGet, "http://127.0.0.1:9000", 0, nil, t), ErrCode: ErrAccessDenied},
}
for i, testCase := range testCases {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region); s3Error != testCase.ErrCode {
if _, s3Error := checkAdminRequestAuth(ctx, testCase.Request, policy.AllAdminActions, globalSite.Region()); s3Error != testCase.ErrCode {
t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
}
}

View File

@ -25,8 +25,7 @@ import (
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)
// healTask represents what to heal along with options
@ -101,16 +100,17 @@ func waitForLowHTTPReq() {
}
func initBackgroundHealing(ctx context.Context, objAPI ObjectLayer) {
bgSeq := newBgHealSequence()
// Run the background healer
for i := 0; i < globalBackgroundHealRoutine.workers; i++ {
go globalBackgroundHealRoutine.AddWorker(ctx, objAPI)
go globalBackgroundHealRoutine.AddWorker(ctx, objAPI, bgSeq)
}
globalBackgroundHealState.LaunchNewHealSequence(newBgHealSequence(), objAPI)
globalBackgroundHealState.LaunchNewHealSequence(bgSeq, objAPI)
}
// Wait for heal requests and process them
func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {
func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer, bgSeq *healSequence) {
for {
select {
case task, ok := <-h.tasks:
@ -135,8 +135,18 @@ func (h *healRoutine) AddWorker(ctx context.Context, objAPI ObjectLayer) {
if task.respCh != nil {
task.respCh <- healResult{result: res, err: err}
continue
}
// when respCh is not set, the caller is not waiting, but we
// still update the relevant heal metrics on their behalf
if bgSeq != nil {
if err == nil {
bgSeq.countHealed(res.Type)
} else {
bgSeq.countFailed(res.Type)
}
}
case <-ctx.Done():
return
}
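initBackgroundHealing now builds the background heal sequence once and hands it to every worker, so results nobody is waiting on still feed the healed/failed counters. A minimal standalone sketch of that worker-loop shape; the task and counter types below are invented for the example:

```
package main

import (
	"context"
	"fmt"
	"sync"
)

type healTask struct {
	object string
	respCh chan error // nil when the caller does not wait for the result
}

type counters struct {
	mu             sync.Mutex
	healed, failed int
}

func worker(ctx context.Context, tasks <-chan healTask, c *counters) {
	for {
		select {
		case task, ok := <-tasks:
			if !ok {
				return
			}
			err := heal(task.object) // stand-in for the real heal call
			if task.respCh != nil {
				task.respCh <- err // caller is waiting, hand the result back
				continue
			}
			// Nobody is waiting: account for the result ourselves.
			c.mu.Lock()
			if err == nil {
				c.healed++
			} else {
				c.failed++
			}
			c.mu.Unlock()
		case <-ctx.Done():
			return
		}
	}
}

func heal(string) error { return nil }

func main() {
	tasks := make(chan healTask, 1)
	c := &counters{}
	tasks <- healTask{object: "bucket/object"}
	close(tasks)
	worker(context.Background(), tasks, c)
	fmt.Println("healed:", c.healed, "failed:", c.failed)
}
```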
@ -148,7 +158,7 @@ func newHealRoutine() *healRoutine {
if envHealWorkers := env.Get("_MINIO_HEAL_WORKERS", ""); envHealWorkers != "" {
if numHealers, err := strconv.Atoi(envHealWorkers); err != nil {
logger.LogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err))
bugLogIf(context.Background(), fmt.Errorf("invalid _MINIO_HEAL_WORKERS value: %w", err))
} else {
workers = numHealers
}

View File

@ -33,8 +33,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)
const (
@ -409,11 +408,11 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
if errors.Is(err, errFileNotFound) {
return nil
}
logger.LogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err))
healingLogIf(ctx, fmt.Errorf("Unable to load healing tracker on '%s': %w, re-initializing..", disk, err))
tracker = initHealingTracker(disk, mustGetUUID())
}
logger.Event(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint)
healingLogEvent(ctx, "Healing drive '%s' - 'mc admin heal alias/ --verbose' to check the current status.", endpoint)
buckets, _ := z.ListBuckets(ctx, BucketOptions{})
// Buckets data are dispersed in multiple pools/sets, make
@ -452,11 +451,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
return err
}
logger.Event(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
if len(tracker.QueuedBuckets) > 0 {
return fmt.Errorf("not all buckets were healed: %v", tracker.QueuedBuckets)
}
healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
if serverDebugLog {
tracker.printTo(os.Stdout)
@ -464,7 +459,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
}
if tracker.HealID == "" { // HealID was empty only before Feb 2023
logger.LogIf(ctx, tracker.delete(ctx))
bugLogIf(ctx, tracker.delete(ctx))
return nil
}
@ -482,7 +477,7 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint
t, err := loadHealingTracker(ctx, disk)
if err != nil {
if !errors.Is(err, errFileNotFound) {
logger.LogIf(ctx, err)
healingLogIf(ctx, err)
}
continue
}
@ -517,7 +512,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
// Reformat disks immediately
_, err := z.HealFormat(context.Background(), false)
if err != nil && !errors.Is(err, errNoHealRequired) {
logger.LogIf(ctx, err)
healingLogIf(ctx, err)
// Reset for next interval.
diskCheckTimer.Reset(defaultMonitorNewDiskInterval)
continue

View File

@ -33,10 +33,9 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
xhttp "github.com/minio/minio/internal/http"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v2/workers"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/wildcard"
"github.com/minio/pkg/v3/workers"
"gopkg.in/yaml.v3"
)
@ -156,7 +155,7 @@ func (ef BatchJobExpireFilter) Matches(obj ObjectInfo, now time.Time) bool {
}
default:
// we should never come here, Validate should have caught this.
logger.LogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type)
batchLogOnceIf(context.Background(), fmt.Errorf("invalid filter type: %s", ef.Type), ef.Type)
return false
}
@ -321,7 +320,7 @@ func (r BatchJobExpire) Notify(ctx context.Context, body io.Reader) error {
req.Header.Set("Authorization", r.NotificationCfg.Token)
}
clnt := http.Client{Transport: getRemoteInstanceTransport}
clnt := http.Client{Transport: getRemoteInstanceTransport()}
resp, err := clnt.Do(req)
if err != nil {
return err
@ -428,11 +427,12 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
}
stopFn := globalBatchJobsMetrics.trace(batchJobMetricExpire, ri.JobID, attempts)
_, err := api.DeleteObject(ctx, exp.Bucket, encodeDirObject(exp.Name), ObjectOptions{
DeletePrefix: true,
DeletePrefix: true,
DeletePrefixObject: true, // use prefix delete on exact object (this is an optimization to avoid fan-out calls)
})
if err != nil {
stopFn(exp, err)
logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts))
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", toExpire[i].Bucket, toExpire[i].Name, toExpire[i].VersionID, err, attempts))
} else {
stopFn(exp, err)
success = true
@ -470,19 +470,19 @@ func batchObjsForDelete(ctx context.Context, r *BatchJobExpire, ri *batchJobInfo
for i, err := range errs {
if err != nil {
stopFn(toDelCopy[i], err)
logger.LogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts))
batchLogIf(ctx, fmt.Errorf("Failed to expire %s/%s versionID=%s due to %v (attempts=%d)", ri.Bucket, toDelCopy[i].ObjectName, toDelCopy[i].VersionID, err, attempts))
failed++
if attempts == retryAttempts { // all retry attempts failed, record failure
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, false)
}
} else {
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, false, attempts)
}
if attempts != retryAttempts {
// retry
toDel = append(toDel, toDelCopy[i])
}
} else {
stopFn(toDelCopy[i], nil)
if oi, ok := oiCache.Get(toDelCopy[i]); ok {
ri.trackCurrentBucketObject(r.Bucket, *oi, true)
ri.trackCurrentBucketObject(r.Bucket, *oi, true, attempts)
}
}
}
@ -537,7 +537,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
ctx, cancel := context.WithCancel(ctx)
defer cancel()
results := make(chan ObjectInfo, workerSize)
results := make(chan itemOrErr[ObjectInfo], workerSize)
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
Marker: lastObject,
LatestOnly: false, // we need to visit all versions of the object to implement purge: retainVersions
@ -556,16 +556,16 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
select {
case <-saveTicker.C:
// persist in-memory state to disk after every 10secs.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
case <-ctx.Done():
// persist in-memory state immediately before exiting due to context cancellation.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return
case <-saverQuitCh:
// persist in-memory state immediately to disk.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
return
}
}
@ -584,11 +584,18 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
versionsCount int
toDel []expireObjInfo
)
failed := true
for result := range results {
if result.Err != nil {
failed = true
batchLogIf(ctx, result.Err)
continue
}
// Apply filter to find the matching rule to apply expiry
// actions accordingly.
// nolint:gocritic
if result.IsLatest {
if result.Item.IsLatest {
// send down filtered entries to be deleted using
// DeleteObjects method
if len(toDel) > 10 { // batch up to 10 objects/versions to be expired simultaneously.
@ -609,7 +616,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
var match BatchJobExpireFilter
var found bool
for _, rule := range r.Rules {
if rule.Matches(result, now) {
if rule.Matches(result.Item, now) {
match = rule
found = true
break
@ -619,18 +626,18 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
continue
}
prevObj = result
prevObj = result.Item
matchedFilter = match
versionsCount = 1
// Include the latest version
if matchedFilter.Purge.RetainVersions == 0 {
toDel = append(toDel, expireObjInfo{
ObjectInfo: result,
ObjectInfo: result.Item,
ExpireAll: true,
})
continue
}
} else if prevObj.Name == result.Name {
} else if prevObj.Name == result.Item.Name {
if matchedFilter.Purge.RetainVersions == 0 {
continue // including latest version in toDel suffices, skipping other versions
}
@ -643,7 +650,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
continue // retain versions
}
toDel = append(toDel, expireObjInfo{
ObjectInfo: result,
ObjectInfo: result.Item,
})
}
// Send any remaining objects downstream
@ -658,8 +665,8 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
<-expireDoneCh // waits for the expire goroutine to complete
wk.Wait() // waits for all expire workers to retire
ri.Complete = ri.ObjectsFailed == 0
ri.Failed = ri.ObjectsFailed > 0
ri.Complete = !failed && ri.ObjectsFailed == 0
ri.Failed = failed || ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)
// Close the saverQuitCh - this also triggers saving in-memory state
@ -669,7 +676,7 @@ func (r *BatchJobExpire) Start(ctx context.Context, api ObjectLayer, job BatchJo
// Notify expire jobs final status to the configured endpoint
buf, _ := json.Marshal(ri)
if err := r.Notify(context.Background(), bytes.NewReader(buf)); err != nil {
logger.LogIf(context.Background(), fmt.Errorf("unable to notify %v", err))
batchLogIf(context.Background(), fmt.Errorf("unable to notify %v", err))
}
return nil
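Editor's note: the listing channels in this file (and the ones that follow) switch from `chan ObjectInfo` to `chan itemOrErr[ObjectInfo]`, so the producer can surface a listing error instead of silently dropping it, and the consumer can mark the job as failed. The sketch below is a self-contained reimplementation of that idea under hypothetical names; the real `itemOrErr` type lives inside the MinIO codebase.

```go
package main

import (
	"errors"
	"fmt"
)

// itemOrErr carries either a successfully listed item or the error that
// interrupted the listing.
type itemOrErr[T any] struct {
	Item T
	Err  error
}

func main() {
	results := make(chan itemOrErr[string], 4)

	// Producer: emit a few items, then an error, then stop.
	go func() {
		defer close(results)
		results <- itemOrErr[string]{Item: "bucket/object-1"}
		results <- itemOrErr[string]{Item: "bucket/object-2"}
		results <- itemOrErr[string]{Err: errors.New("drive offline")}
	}()

	failed := false
	for res := range results {
		if res.Err != nil {
			failed = true // listing was incomplete; the job must not report success
			fmt.Println("listing error:", res.Err)
			continue
		}
		fmt.Println("processing", res.Item)
	}
	fmt.Println("complete without errors:", !failed)
}
```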

View File

@ -48,11 +48,10 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/console"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/workers"
"github.com/minio/pkg/v3/console"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/policy"
"github.com/minio/pkg/v3/workers"
"gopkg.in/yaml.v3"
)
@ -92,7 +91,7 @@ func notifyEndpoint(ctx context.Context, ri *batchJobInfo, endpoint, token strin
}
req.Header.Set("Content-Type", "application/json")
clnt := http.Client{Transport: getRemoteInstanceTransport}
clnt := http.Client{Transport: getRemoteInstanceTransport()}
resp, err := clnt.Do(req)
if err != nil {
return err
@ -206,7 +205,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a
if aerr == nil {
return
}
logger.LogIf(ctx,
batchLogIf(ctx,
fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster",
humanize.Ordinal(attempts), res.UploadID, tgtBucket, tgtObject, aerr))
attempts++
@ -351,7 +350,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
c, err := miniogo.New(u.Host, &miniogo.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport,
Transport: getRemoteInstanceTransport(),
BucketLookup: lookupStyle(r.Source.Path),
})
if err != nil {
@ -402,7 +401,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
} else {
if !isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) &&
!isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) {
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
}
continue
}
@ -414,7 +413,7 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
} else {
if !isErrMethodNotAllowed(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) &&
!isErrObjectNotFound(ErrorRespToObjectError(err, r.Source.Bucket, obj.Key)) {
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
}
continue
}
@ -443,15 +442,15 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
return
}
stopFn(oi, err)
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
success = false
} else {
stopFn(oi, nil)
}
ri.trackCurrentBucketObject(r.Target.Bucket, oi, success)
ri.trackCurrentBucketObject(r.Target.Bucket, oi, success, attempts)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
if wait := globalBatchConfig.ReplicationWait(); wait > 0 {
time.Sleep(wait)
@ -466,10 +465,10 @@ func (r *BatchJobReplicateV1) StartFromSource(ctx context.Context, api ObjectLay
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
if err := r.Notify(ctx, ri); err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
batchLogIf(ctx, fmt.Errorf("unable to notify %v", err))
}
cancel()
@ -553,7 +552,7 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa
VersionID: entry.VersionID,
})
if err != nil {
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
continue
}
@ -572,7 +571,7 @@ func (r BatchJobReplicateV1) writeAsArchive(ctx context.Context, objAPI ObjectLa
opts, err := batchReplicationOpts(ctx, "", gr.ObjInfo)
if err != nil {
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
continue
}
@ -691,6 +690,7 @@ type batchJobInfo struct {
StartTime time.Time `json:"startTime" msg:"st"`
LastUpdate time.Time `json:"lastUpdate" msg:"lu"`
RetryAttempts int `json:"retryAttempts" msg:"ra"`
Attempts int `json:"attempts" msg:"at"`
Complete bool `json:"complete" msg:"cmp"`
Failed bool `json:"failed" msg:"fld"`
@ -834,13 +834,15 @@ func (ri *batchJobInfo) clone() *batchJobInfo {
ObjectsFailed: ri.ObjectsFailed,
BytesTransferred: ri.BytesTransferred,
BytesFailed: ri.BytesFailed,
Attempts: ri.Attempts,
}
}
func (ri *batchJobInfo) countItem(size int64, dmarker, success bool) {
func (ri *batchJobInfo) countItem(size int64, dmarker, success bool, attempt int) {
if ri == nil {
return
}
ri.Attempts++
if success {
if dmarker {
ri.DeleteMarkers++
@ -848,7 +850,19 @@ func (ri *batchJobInfo) countItem(size int64, dmarker, success bool) {
ri.Objects++
ri.BytesTransferred += size
}
if attempt > 1 {
if dmarker {
ri.DeleteMarkersFailed--
} else {
ri.ObjectsFailed--
ri.BytesFailed += size
}
}
} else {
if attempt > 1 {
// Only count first attempt
return
}
if dmarker {
ri.DeleteMarkersFailed++
} else {
@ -922,7 +936,7 @@ func (ri *batchJobInfo) trackMultipleObjectVersions(bucket string, info ObjectIn
}
}
func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, success bool) {
func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo, success bool, attempt int) {
if ri == nil {
return
}
@ -932,7 +946,7 @@ func (ri *batchJobInfo) trackCurrentBucketObject(bucket string, info ObjectInfo,
ri.Bucket = bucket
ri.Object = info.Name
ri.countItem(info.Size, info.DeleteMarker, success)
ri.countItem(info.Size, info.DeleteMarker, success, attempt)
}
func (ri *batchJobInfo) trackCurrentBucketBatch(bucket string, batch []ObjectInfo) {
@ -946,7 +960,7 @@ func (ri *batchJobInfo) trackCurrentBucketBatch(bucket string, batch []ObjectInf
ri.Bucket = bucket
for i := range batch {
ri.Object = batch[i].Name
ri.countItem(batch[i].Size, batch[i].DeleteMarker, true)
ri.countItem(batch[i].Size, batch[i].DeleteMarker, true, 1)
}
}
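Editor's note: the `countItem` change above threads the attempt number through the accounting so retries do not inflate the counters: a failure is only recorded on the first attempt, and a later successful retry backs the earlier failure out. A small sketch of that bookkeeping with simplified counters (the struct here is illustrative, not the project's `batchJobInfo`):

```go
package main

import "fmt"

type jobCounters struct {
	Attempts      int
	Objects       int64
	ObjectsFailed int64
	Bytes         int64
	BytesFailed   int64
}

// countItem mirrors the attempt-aware accounting: failures count once (on the
// first attempt) and a successful retry reverses the earlier failure.
func (c *jobCounters) countItem(size int64, success bool, attempt int) {
	c.Attempts++
	if success {
		c.Objects++
		c.Bytes += size
		if attempt > 1 {
			// An earlier attempt already counted this object as failed; undo it.
			c.ObjectsFailed--
			c.BytesFailed -= size
		}
		return
	}
	if attempt > 1 {
		return // only the first failed attempt is counted
	}
	c.ObjectsFailed++
	c.BytesFailed += size
}

func main() {
	var c jobCounters
	c.countItem(1024, false, 1) // first attempt fails
	c.countItem(1024, true, 2)  // retry succeeds, failure is backed out
	fmt.Printf("%+v\n", c)      // ObjectsFailed and BytesFailed end up at 0
}
```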
@ -1048,7 +1062,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
c, err := miniogo.NewCore(u.Host, &miniogo.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport,
Transport: getRemoteInstanceTransport(),
BucketLookup: lookupStyle(r.Target.Path),
})
if err != nil {
@ -1058,8 +1072,8 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
c.SetAppInfo("minio-"+batchJobPrefix, r.APIVersion+" "+job.ID)
var (
walkCh = make(chan ObjectInfo, 100)
slowCh = make(chan ObjectInfo, 100)
walkCh = make(chan itemOrErr[ObjectInfo], 100)
slowCh = make(chan itemOrErr[ObjectInfo], 100)
)
if !*r.Source.Snowball.Disable && r.Source.Type.isMinio() && r.Target.Type.isMinio() {
@ -1068,11 +1082,11 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
cl, err := miniogo.New(u.Host, &miniogo.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport,
Transport: getRemoteInstanceTransport(),
BucketLookup: lookupStyle(r.Target.Path),
})
if err != nil {
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
return
}
@ -1083,25 +1097,25 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
writeFn := func(batch []ObjectInfo) {
if len(batch) > 0 {
if err := r.writeAsArchive(ctx, api, cl, batch); err != nil {
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
for _, b := range batch {
slowCh <- b
slowCh <- itemOrErr[ObjectInfo]{Item: b}
}
} else {
ri.trackCurrentBucketBatch(r.Source.Bucket, batch)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
}
}
}
for obj := range walkCh {
if obj.DeleteMarker || !obj.VersionPurgeStatus.Empty() || obj.Size >= int64(smallerThan) {
if obj.Item.DeleteMarker || !obj.Item.VersionPurgeStatus.Empty() || obj.Item.Size >= int64(smallerThan) {
slowCh <- obj
continue
}
batch = append(batch, obj)
batch = append(batch, obj.Item)
if len(batch) < *r.Source.Snowball.Batch {
continue
@ -1154,8 +1168,13 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
prevObj := ""
skipReplicate := false
for result := range slowCh {
result := result
for res := range slowCh {
if res.Err != nil {
ri.Failed = true
batchLogIf(ctx, res.Err)
continue
}
result := res.Item
if result.Name != prevObj {
prevObj = result.Name
skipReplicate = result.DeleteMarker && s3Type
@ -1179,15 +1198,15 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
return
}
stopFn(result, err)
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
success = false
} else {
stopFn(result, nil)
}
ri.trackCurrentBucketObject(r.Source.Bucket, result, success)
ri.trackCurrentBucketObject(r.Source.Bucket, result, success, attempts)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
if wait := globalBatchConfig.ReplicationWait(); wait > 0 {
time.Sleep(wait)
@ -1202,10 +1221,10 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
if err := r.Notify(ctx, ri); err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
batchLogIf(ctx, fmt.Errorf("unable to notify %v", err))
}
cancel()
@ -1232,6 +1251,7 @@ type batchReplicationJobError struct {
Code string
Description string
HTTPStatusCode int
ObjectSize int64
}
func (e batchReplicationJobError) Error() string {
@ -1248,9 +1268,18 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
return errInvalidArgument
}
if r.Source.Bucket == "" {
if r.Source.Endpoint != "" && r.Target.Endpoint != "" {
return errInvalidArgument
}
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
return errInvalidArgument
}
if r.Source.Bucket == "" || r.Target.Bucket == "" {
return errInvalidArgument
}
var isRemoteToLocal bool
localBkt := r.Source.Bucket
if r.Source.Endpoint != "" {
@ -1275,9 +1304,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
if err := r.Source.Snowball.Validate(); err != nil {
return err
}
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
return errInvalidArgument
}
if !r.Source.Creds.Empty() {
if err := r.Source.Creds.Validate(); err != nil {
@ -1299,9 +1325,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
if r.Target.Endpoint != "" && !r.Target.Type.isMinio() && !r.Target.ValidPath() {
return errInvalidArgument
}
if r.Target.Bucket == "" {
return errInvalidArgument
}
if !r.Target.Creds.Empty() {
if err := r.Target.Creds.Validate(); err != nil {
@ -1309,10 +1332,6 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
}
}
if r.Source.Creds.Empty() && r.Target.Creds.Empty() {
return errInvalidArgument
}
if err := r.Target.Type.Validate(); err != nil {
return err
}
@ -1354,7 +1373,7 @@ func (r *BatchJobReplicateV1) Validate(ctx context.Context, job BatchJobRequest,
c, err := miniogo.NewCore(u.Host, &miniogo.Options{
Creds: credentials.NewStaticV4(cred.AccessKey, cred.SecretKey, cred.SessionToken),
Secure: u.Scheme == "https",
Transport: getRemoteInstanceTransport,
Transport: getRemoteInstanceTransport(),
BucketLookup: lookupStyle(pathStyle),
})
if err != nil {
@ -1485,7 +1504,7 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
jobType = string(madmin.BatchJobReplicate)
}
resultCh := make(chan ObjectInfo)
resultCh := make(chan itemOrErr[ObjectInfo])
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@ -1497,10 +1516,14 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
listResult := madmin.ListBatchJobsResult{}
for result := range resultCh {
if result.Err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, result.Err), r.URL)
return
}
req := &BatchJobRequest{}
if err := req.load(ctx, objectAPI, result.Name); err != nil {
if err := req.load(ctx, objectAPI, result.Item.Name); err != nil {
if !errors.Is(err, errNoSuchJob) {
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
}
continue
}
@ -1516,7 +1539,7 @@ func (a adminAPIHandlers) ListBatchJobs(w http.ResponseWriter, r *http.Request)
}
}
logger.LogIf(ctx, json.NewEncoder(w).Encode(&listResult))
batchLogIf(ctx, json.NewEncoder(w).Encode(&listResult))
}
var errNoSuchJob = errors.New("no such job")
@ -1539,7 +1562,7 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques
req := &BatchJobRequest{}
if err := req.load(ctx, objectAPI, pathJoin(batchJobPrefix, jobID)); err != nil {
if !errors.Is(err, errNoSuchJob) {
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
}
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
@ -1548,7 +1571,7 @@ func (a adminAPIHandlers) DescribeBatchJob(w http.ResponseWriter, r *http.Reques
buf, err := yaml.Marshal(req)
if err != nil {
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@ -1703,21 +1726,25 @@ func newBatchJobPool(ctx context.Context, o ObjectLayer, workers int) *BatchJobP
}
func (j *BatchJobPool) resume() {
results := make(chan ObjectInfo, 100)
results := make(chan itemOrErr[ObjectInfo], 100)
ctx, cancel := context.WithCancel(j.ctx)
defer cancel()
if err := j.objLayer.Walk(ctx, minioMetaBucket, batchJobPrefix, results, WalkOptions{}); err != nil {
logger.LogIf(j.ctx, err)
batchLogIf(j.ctx, err)
return
}
for result := range results {
if result.Err != nil {
batchLogIf(j.ctx, result.Err)
continue
}
// ignore batch-replicate.bin and batch-rotate.bin entries
if strings.HasSuffix(result.Name, slashSeparator) {
if strings.HasSuffix(result.Item.Name, slashSeparator) {
continue
}
req := &BatchJobRequest{}
if err := req.load(ctx, j.objLayer, result.Name); err != nil {
logger.LogIf(ctx, err)
if err := req.load(ctx, j.objLayer, result.Item.Name); err != nil {
batchLogIf(ctx, err)
continue
}
_, nodeIdx := parseRequestToken(req.ID)
@ -1726,7 +1753,7 @@ func (j *BatchJobPool) resume() {
continue
}
if err := j.queueJob(req); err != nil {
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
continue
}
}
@ -1750,7 +1777,7 @@ func (j *BatchJobPool) AddWorker() {
if job.Replicate.RemoteToLocal() {
if err := job.Replicate.StartFromSource(job.ctx, j.objLayer, *job); err != nil {
if !isErrBucketNotFound(err) {
logger.LogIf(j.ctx, err)
batchLogIf(j.ctx, err)
j.canceler(job.ID, false)
continue
}
@ -1759,7 +1786,7 @@ func (j *BatchJobPool) AddWorker() {
} else {
if err := job.Replicate.Start(job.ctx, j.objLayer, *job); err != nil {
if !isErrBucketNotFound(err) {
logger.LogIf(j.ctx, err)
batchLogIf(j.ctx, err)
j.canceler(job.ID, false)
continue
}
@ -1769,14 +1796,14 @@ func (j *BatchJobPool) AddWorker() {
case job.KeyRotate != nil:
if err := job.KeyRotate.Start(job.ctx, j.objLayer, *job); err != nil {
if !isErrBucketNotFound(err) {
logger.LogIf(j.ctx, err)
batchLogIf(j.ctx, err)
continue
}
}
case job.Expire != nil:
if err := job.Expire.Start(job.ctx, j.objLayer, *job); err != nil {
if !isErrBucketNotFound(err) {
logger.LogIf(j.ctx, err)
batchLogIf(j.ctx, err)
continue
}
}
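Editor's note: several call sites in this file change from passing a shared `getRemoteInstanceTransport` value to calling `getRemoteInstanceTransport()`, i.e. the transport is now produced by a function. A hedged sketch of one common way to back such a function, lazily building a single shared `*http.Transport` with `sync.OnceValue`; the initialization settings here are assumptions for illustration, not the project's actual configuration.

```go
package main

import (
	"fmt"
	"net/http"
	"sync"
	"time"
)

// getRemoteInstanceTransport returns a process-wide shared transport,
// constructed on first use. sync.OnceValue (Go 1.21+) guarantees the
// constructor runs exactly once even under concurrent callers.
var getRemoteInstanceTransport = sync.OnceValue(func() *http.Transport {
	return &http.Transport{
		MaxIdleConnsPerHost:   16,
		IdleConnTimeout:       90 * time.Second,
		ResponseHeaderTimeout: time.Minute,
	}
})

func main() {
	clnt := http.Client{Transport: getRemoteInstanceTransport()}
	fmt.Printf("client ready: %T\n", clnt.Transport)
}
```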

View File

@ -419,6 +419,12 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "at":
z.Attempts, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Attempts")
return
}
case "cmp":
z.Complete, err = dc.ReadBool()
if err != nil {
@ -492,9 +498,9 @@ func (z *batchJobInfo) DecodeMsg(dc *msgp.Reader) (err error) {
// EncodeMsg implements msgp.Encodable
func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 16
// map header, size 17
// write "v"
err = en.Append(0xde, 0x0, 0x10, 0xa1, 0x76)
err = en.Append(0xde, 0x0, 0x11, 0xa1, 0x76)
if err != nil {
return
}
@ -553,6 +559,16 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
err = msgp.WrapError(err, "RetryAttempts")
return
}
// write "at"
err = en.Append(0xa2, 0x61, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.Attempts)
if err != nil {
err = msgp.WrapError(err, "Attempts")
return
}
// write "cmp"
err = en.Append(0xa3, 0x63, 0x6d, 0x70)
if err != nil {
@ -659,9 +675,9 @@ func (z *batchJobInfo) EncodeMsg(en *msgp.Writer) (err error) {
// MarshalMsg implements msgp.Marshaler
func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 16
// map header, size 17
// string "v"
o = append(o, 0xde, 0x0, 0x10, 0xa1, 0x76)
o = append(o, 0xde, 0x0, 0x11, 0xa1, 0x76)
o = msgp.AppendInt(o, z.Version)
// string "jid"
o = append(o, 0xa3, 0x6a, 0x69, 0x64)
@ -678,6 +694,9 @@ func (z *batchJobInfo) MarshalMsg(b []byte) (o []byte, err error) {
// string "ra"
o = append(o, 0xa2, 0x72, 0x61)
o = msgp.AppendInt(o, z.RetryAttempts)
// string "at"
o = append(o, 0xa2, 0x61, 0x74)
o = msgp.AppendInt(o, z.Attempts)
// string "cmp"
o = append(o, 0xa3, 0x63, 0x6d, 0x70)
o = msgp.AppendBool(o, z.Complete)
@ -765,6 +784,12 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
err = msgp.WrapError(err, "RetryAttempts")
return
}
case "at":
z.Attempts, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Attempts")
return
}
case "cmp":
z.Complete, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
@ -839,6 +864,6 @@ func (z *batchJobInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *batchJobInfo) Msgsize() (s int) {
s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
s = 3 + 2 + msgp.IntSize + 4 + msgp.StringPrefixSize + len(z.JobID) + 3 + msgp.StringPrefixSize + len(z.JobType) + 3 + msgp.TimeSize + 3 + msgp.TimeSize + 3 + msgp.IntSize + 3 + msgp.IntSize + 4 + msgp.BoolSize + 4 + msgp.BoolSize + 5 + msgp.StringPrefixSize + len(z.Bucket) + 5 + msgp.StringPrefixSize + len(z.Object) + 3 + msgp.Int64Size + 3 + msgp.Int64Size + 4 + msgp.Int64Size + 4 + msgp.Int64Size + 3 + msgp.Int64Size + 3 + msgp.Int64Size
return
}
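Editor's note: the regenerated msgp code grows the MessagePack map header from 16 to 17 entries (0xde 0x00 0x10 → 0xde 0x00 0x11) and appends the new "at" key as a fixstr (0xa2 'a' 't') followed by the integer value. The tiny standard-library-only sketch below spells out those literal bytes, which are the same constants visible in the generated code above.

```go
package main

import "fmt"

func main() {
	// map16 header: 0xde + big-endian 16-bit entry count.
	oldHeader := []byte{0xde, 0x00, 0x10} // 16 fields, before the change
	newHeader := []byte{0xde, 0x00, 0x11} // 17 fields, with "at" added

	// fixstr: 0xa0 | length, followed by the raw bytes of the key.
	atKey := []byte{0xa2, 'a', 't'} // 0xa2 = fixstr of length 2

	fmt.Printf("old header % x -> %d entries\n", oldHeader, int(oldHeader[1])<<8|int(oldHeader[2]))
	fmt.Printf("new header % x -> %d entries\n", newHeader, int(newHeader[1])<<8|int(newHeader[2]))
	fmt.Printf("key bytes  % x -> %q (len %d)\n", atKey, atKey[1:], atKey[0]&0x1f)
}
```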

View File

@ -23,7 +23,7 @@ import (
"time"
"github.com/dustin/go-humanize"
"github.com/minio/pkg/v2/wildcard"
"github.com/minio/pkg/v3/wildcard"
"gopkg.in/yaml.v3"
)

View File

@ -33,9 +33,8 @@ import (
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v2/workers"
"github.com/minio/pkg/v3/env"
"github.com/minio/pkg/v3/workers"
)
// keyrotate:
@ -96,6 +95,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
if e.Type == ssekms && spaces {
return crypto.ErrInvalidEncryptionKeyID
}
if e.Type == ssekms && GlobalKMS != nil {
ctx := kms.Context{}
if e.Context != "" {
@ -114,7 +114,7 @@ func (e BatchJobKeyRotateEncryption) Validate() error {
e.kmsContext[k] = v
}
ctx["MinIO batch API"] = "batchrotate" // Context for a test key operation
if _, err := GlobalKMS.GenerateKey(GlobalContext, e.Key, ctx); err != nil {
if _, err := GlobalKMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{Name: e.Key, AssociatedData: ctx}); err != nil {
return err
}
}
@ -357,7 +357,7 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
retryAttempts := ri.RetryAttempts
ctx, cancel := context.WithCancel(ctx)
results := make(chan ObjectInfo, 100)
results := make(chan itemOrErr[ObjectInfo], 100)
if err := api.Walk(ctx, r.Bucket, r.Prefix, results, WalkOptions{
Marker: lastObject,
Filter: selectObj,
@ -366,9 +366,14 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
// Do not need to retry if we can't list objects on source.
return err
}
for result := range results {
result := result
failed := false
for res := range results {
if res.Err != nil {
failed = true
batchLogIf(ctx, res.Err)
break
}
result := res.Item
sseKMS := crypto.S3KMS.IsEncrypted(result.UserDefined)
sseS3 := crypto.S3.IsEncrypted(result.UserDefined)
if !sseKMS && !sseS3 { // neither sse-s3 nor sse-kms disallowed
@ -378,21 +383,19 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
go func() {
defer wk.Give()
for attempts := 1; attempts <= retryAttempts; attempts++ {
attempts := attempts
stopFn := globalBatchJobsMetrics.trace(batchJobMetricKeyRotation, job.ID, attempts)
success := true
if err := r.KeyRotate(ctx, api, result); err != nil {
stopFn(result, err)
logger.LogIf(ctx, err)
batchLogIf(ctx, err)
success = false
} else {
stopFn(result, nil)
}
ri.trackCurrentBucketObject(r.Bucket, result, success)
ri.RetryAttempts = attempts
ri.trackCurrentBucketObject(r.Bucket, result, success, attempts)
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk after every 10secs.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
batchLogIf(ctx, ri.updateAfter(ctx, api, 10*time.Second, job))
if success {
break
}
@ -408,14 +411,14 @@ func (r *BatchJobKeyRotateV1) Start(ctx context.Context, api ObjectLayer, job Ba
}
wk.Wait()
ri.Complete = ri.ObjectsFailed == 0
ri.Failed = ri.ObjectsFailed > 0
ri.Complete = !failed && ri.ObjectsFailed == 0
ri.Failed = failed || ri.ObjectsFailed > 0
globalBatchJobsMetrics.save(job.ID, ri)
// persist in-memory state to disk.
logger.LogIf(ctx, ri.updateAfter(ctx, api, 0, job))
batchLogIf(ctx, ri.updateAfter(ctx, api, 0, job))
if err := r.Notify(ctx, ri); err != nil {
logger.LogIf(ctx, fmt.Errorf("unable to notify %v", err))
batchLogIf(ctx, fmt.Errorf("unable to notify %v", err))
}
cancel()
@ -476,8 +479,5 @@ func (r *BatchJobKeyRotateV1) Validate(ctx context.Context, job BatchJobRequest,
}
}
if err := r.Flags.Retry.Validate(); err != nil {
return err
}
return nil
return r.Flags.Retry.Validate()
}

View File

@ -26,15 +26,17 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/ringbuffer"
)
// Calculates bitrot in chunks and writes the hash into the stream.
type streamingBitrotWriter struct {
iow io.WriteCloser
closeWithErr func(err error) error
closeWithErr func(err error)
h hash.Hash
shardSize int64
canClose *sync.WaitGroup
byteBuf []byte
}
func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
@ -62,7 +64,10 @@ func (b *streamingBitrotWriter) Write(p []byte) (int, error) {
}
func (b *streamingBitrotWriter) Close() error {
// Close the underlying writer.
// This will also flush the ring buffer if used.
err := b.iow.Close()
// Wait for all data to be written before returning else it causes race conditions.
// Race condition is because of io.PipeWriter implementation. i.e. consider the following
// sequence of operations:
@ -73,29 +78,34 @@ func (b *streamingBitrotWriter) Close() error {
if b.canClose != nil {
b.canClose.Wait()
}
// Recycle the buffer.
if b.byteBuf != nil {
globalBytePoolCap.Load().Put(b.byteBuf)
b.byteBuf = nil
}
return err
}
// newStreamingBitrotWriterBuffer returns streaming bitrot writer implementation.
// The output is written to the supplied writer w.
func newStreamingBitrotWriterBuffer(w io.Writer, algo BitrotAlgorithm, shardSize int64) io.Writer {
return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) error {
// Similar to CloseWithError on pipes we always return nil.
return nil
}}
return &streamingBitrotWriter{iow: ioutil.NopCloser(w), h: algo.New(), shardSize: shardSize, canClose: nil, closeWithErr: func(err error) {}}
}
// Returns streaming bitrot writer implementation.
func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
r, w := io.Pipe()
h := algo.New()
buf := globalBytePoolCap.Load().Get()
rb := ringbuffer.NewBuffer(buf[:cap(buf)]).SetBlocking(true)
bw := &streamingBitrotWriter{
iow: ioutil.NewDeadlineWriter(w, globalDriveConfig.GetMaxTimeout()),
closeWithErr: w.CloseWithError,
iow: ioutil.NewDeadlineWriter(rb.WriteCloser(), globalDriveConfig.GetMaxTimeout()),
closeWithErr: rb.CloseWithError,
h: h,
shardSize: shardSize,
canClose: &sync.WaitGroup{},
byteBuf: buf,
}
bw.canClose.Add(1)
go func() {
@ -106,7 +116,7 @@ func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath stri
bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
totalFileSize = bitrotSumsTotalSize + length
}
r.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, r))
rb.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, rb))
}()
return bw
}
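Editor's note: `newStreamingBitrotWriter` now feeds `disk.CreateFile` from a blocking ring buffer backed by a pooled byte slice instead of an `io.Pipe`, and `Close` returns the buffer to the pool only after the background goroutine has drained it. The ring buffer and deadline writer are internal MinIO packages, so the stand-in below uses `io.Pipe` and only illustrates the `canClose`/`byteBuf` lifecycle shown in the diff; it is a sketch, not the project's implementation.

```go
package main

import (
	"fmt"
	"hash/crc32"
	"io"
	"sync"
)

// bufPool stands in for the global byte pool the diff recycles buffers into.
var bufPool = sync.Pool{New: func() any { return make([]byte, 32<<10) }}

type streamWriter struct {
	iow      io.WriteCloser
	canClose *sync.WaitGroup
	byteBuf  []byte
}

func (w *streamWriter) Write(p []byte) (int, error) { return w.iow.Write(p) }

// Close closes the producer side, then waits for the consumer goroutine to
// drain everything before the pooled buffer is recycled.
func (w *streamWriter) Close() error {
	err := w.iow.Close()
	w.canClose.Wait()
	if w.byteBuf != nil {
		bufPool.Put(w.byteBuf)
		w.byteBuf = nil
	}
	return err
}

func newStreamWriter(sink io.Writer) io.WriteCloser {
	r, pw := io.Pipe()
	w := &streamWriter{iow: pw, canClose: &sync.WaitGroup{}, byteBuf: bufPool.Get().([]byte)}
	w.canClose.Add(1)
	go func() {
		defer w.canClose.Done()
		// The consumer (disk.CreateFile in the diff) reads until EOF.
		_, err := io.CopyBuffer(sink, r, w.byteBuf)
		r.CloseWithError(err)
	}()
	return w
}

func main() {
	h := crc32.NewIEEE()
	w := newStreamWriter(h)
	fmt.Fprint(w, "hello bitrot")
	w.Close()
	fmt.Printf("checksum: %08x\n", h.Sum32())
}
```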

View File

@ -30,7 +30,7 @@ import (
"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio/internal/grid"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)
// To abstract a node over network.
@ -198,7 +198,7 @@ func verifyServerSystemConfig(ctx context.Context, endpointServerPools EndpointS
if err != nil {
bootstrapTraceMsg(fmt.Sprintf("clnt.Verify: %v, endpoint: %s", err, clnt))
if !isNetworkError(err) {
logger.LogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String())
bootLogOnceIf(context.Background(), fmt.Errorf("%s has incorrect configuration: %w", clnt, err), "incorrect_"+clnt.String())
incorrectConfigs = append(incorrectConfigs, fmt.Errorf("%s has incorrect configuration: %w", clnt, err))
} else {
offlineEndpoints = append(offlineEndpoints, fmt.Errorf("%s is unreachable: %w", clnt, err))

View File

@ -30,7 +30,7 @@ import (
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
const (
@ -85,7 +85,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
kmsKey := encConfig.KeyID()
if kmsKey != "" {
kmsContext := kms.Context{"MinIO admin API": "ServerInfoHandler"} // Context for a test key operation
_, err := GlobalKMS.GenerateKey(ctx, kmsKey, kmsContext)
_, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{Name: kmsKey, AssociatedData: kmsContext})
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
writeErrorResponse(ctx, w, toAPIError(ctx, errKMSKeyNotFound), r.URL)
@ -114,7 +114,7 @@ func (api objectAPIHandlers) PutBucketEncryptionHandler(w http.ResponseWriter, r
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: &cfgStr,
@ -203,7 +203,7 @@ func (api objectAPIHandlers) DeleteBucketEncryptionHandler(w http.ResponseWriter
}
// Call site replication hook.
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeSSEConfig,
Bucket: bucket,
SSEConfig: nil,

View File

@ -61,8 +61,8 @@ import (
"github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/sync/errgroup"
"github.com/minio/pkg/v3/policy"
"github.com/minio/pkg/v3/sync/errgroup"
)
const (
@ -72,6 +72,8 @@ const (
xMinIOErrCodeHeader = "x-minio-error-code"
xMinIOErrDescHeader = "x-minio-error-desc"
postPolicyBucketTagging = "tagging"
)
// Check if there are buckets on server without corresponding entry in etcd backend and
@ -98,7 +100,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// Get buckets in the DNS
dnsBuckets, err := globalDNSConfig.List()
if err != nil && !IsErrIgnored(err, dns.ErrNoEntriesFound, dns.ErrNotImplemented, dns.ErrDomainMissing) {
logger.LogIf(GlobalContext, err)
dnsLogIf(GlobalContext, err)
return
}
@ -160,13 +162,13 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
ctx := GlobalContext
for _, err := range g.Wait() {
if err != nil {
logger.LogIf(ctx, err)
dnsLogIf(ctx, err)
return
}
}
for _, bucket := range bucketsInConflict.ToSlice() {
logger.LogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
dnsLogIf(ctx, fmt.Errorf("Unable to add bucket DNS entry for bucket %s, an entry exists for the same bucket by a different tenant. This local bucket will be ignored. Bucket names are globally unique in federated deployments. Use path style requests on following addresses '%v' to access this bucket", bucket, globalDomainIPs.ToSlice()))
}
var wg sync.WaitGroup
@ -187,7 +189,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
// We go to here, so we know the bucket no longer exists,
// but is registered in DNS to this server
if err := globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
dnsLogIf(GlobalContext, fmt.Errorf("Failed to remove DNS entry for %s due to %w",
bucket, err))
}
}(bucket)
@ -227,7 +229,7 @@ func (api objectAPIHandlers) GetBucketLocationHandler(w http.ResponseWriter, r *
// Generate response.
encodedSuccessResponse := encodeResponse(LocationResponse{})
// Get current region.
region := globalSite.Region
region := globalSite.Region()
if region != globalMinioDefaultRegion {
encodedSuccessResponse = encodeResponse(LocationResponse{
Location: region,
@ -790,7 +792,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
// check if client is attempting to create more buckets, complain about it.
if currBuckets := globalBucketMetadataSys.Count(); currBuckets+1 > maxBuckets {
logger.LogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets))
internalLogIf(ctx, fmt.Errorf("Please avoid creating more buckets %d beyond recommended %d", currBuckets+1, maxBuckets), logger.WarningKind)
}
opts := MakeBucketOptions{
@ -871,7 +873,7 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
globalNotificationSys.LoadBucketMetadata(GlobalContext, bucket)
// Call site replication hook
logger.LogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts))
replLogIf(ctx, globalSiteReplicationSys.MakeBucketHook(ctx, bucket, opts))
// Make sure to add Location information here only for bucket
w.Header().Set(xhttp.Location, pathJoin(SlashSeparator, bucket))
@ -1218,11 +1220,6 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}
if crypto.SSEC.IsRequested(r.Header) && isReplicationEnabled(ctx, bucket) {
writeErrorResponse(ctx, w, toAPIError(ctx, errInvalidEncryptionParametersSSEC), r.URL)
return
}
var (
reader io.Reader
keyID string
@ -1420,6 +1417,19 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}
if formValues.Get(postPolicyBucketTagging) != "" {
tags, err := tags.ParseObjectXML(strings.NewReader(formValues.Get(postPolicyBucketTagging)))
if err != nil {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedPOSTRequest), r.URL)
return
}
tagsStr := tags.String()
opts.UserDefined[xhttp.AmzObjectTagging] = tagsStr
} else {
// avoid user set an invalid tag using `X-Amz-Tagging`
delete(opts.UserDefined, xhttp.AmzObjectTagging)
}
objInfo, err := objectAPI.PutObject(ctx, bucket, object, pReader, opts)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
@ -1666,7 +1676,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
case rcfg.HasActiveRules("", true):
case rcfg != nil && rcfg.HasActiveRules("", true):
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL)
return
}
@ -1698,7 +1708,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
if globalDNSConfig != nil {
if err := globalDNSConfig.Delete(bucket); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err))
dnsLogIf(ctx, fmt.Errorf("Unable to delete bucket DNS entry %w, please delete it manually, bucket on MinIO no longer exists", err))
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@ -1708,7 +1718,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
globalReplicationPool.deleteResyncMetadata(ctx, bucket)
// Call site replication hook.
logger.LogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete))
replLogIf(ctx, globalSiteReplicationSys.DeleteBucketHook(ctx, bucket, forceDelete))
// Write success response.
writeSuccessNoContent(w)
@ -1781,7 +1791,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeObjectLockConfig,
Bucket: bucket,
ObjectLockConfig: &cfgStr,
@ -1885,7 +1895,7 @@ func (api objectAPIHandlers) PutBucketTaggingHandler(w http.ResponseWriter, r *h
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
Tags: &cfgStr,
@ -1961,7 +1971,7 @@ func (api objectAPIHandlers) DeleteBucketTaggingHandler(w http.ResponseWriter, r
return
}
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeTags,
Bucket: bucket,
UpdatedAt: updatedAt,
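Editor's note: the POST policy handling added above lets uploads carry object tags through a `tagging` form field containing a standard S3 Tagging XML document; the handler parses it with the minio-go tags helper and stores the canonical, query-encoded form under `X-Amz-Tagging`. A small client-side sketch of what that field contains and how the same helper canonicalizes it, assuming the `minio-go/v7` tags package (the form-field name comes from the diff):

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/minio/minio-go/v7/pkg/tags"
)

func main() {
	// Example value a client would place in the POST policy "tagging" form field.
	taggingXML := `<Tagging><TagSet>` +
		`<Tag><Key>project</Key><Value>alpha</Value></Tag>` +
		`<Tag><Key>tier</Key><Value>hot</Value></Tag>` +
		`</TagSet></Tagging>`

	t, err := tags.ParseObjectXML(strings.NewReader(taggingXML))
	if err != nil {
		log.Fatalln("invalid tagging document:", err)
	}
	// String() yields the query-encoded form stored under X-Amz-Tagging.
	fmt.Println(t.String()) // e.g. project=alpha&tier=hot
}
```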

View File

@ -28,7 +28,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
const (
@ -64,7 +64,8 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
}
// Check if bucket exists.
if _, err := objAPI.GetBucketInfo(ctx, bucket, BucketOptions{}); err != nil {
rcfg, err := globalBucketObjectLockSys.Get(bucket)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
@ -76,7 +77,7 @@ func (api objectAPIHandlers) PutBucketLifecycleHandler(w http.ResponseWriter, r
}
// Validate the received bucket policy document
if err = bucketLifecycle.Validate(); err != nil {
if err = bucketLifecycle.Validate(rcfg); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}

View File

@ -40,7 +40,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/s3select"
xnet "github.com/minio/pkg/v2/net"
xnet "github.com/minio/pkg/v3/net"
"github.com/zeebo/xxh3"
)
@ -277,6 +277,10 @@ func (es *expiryState) getWorkerCh(h uint64) chan<- expiryOp {
}
func (es *expiryState) ResizeWorkers(n int) {
if n == 0 {
n = 100
}
// Lock to avoid multiple resizes to happen at the same time.
es.mu.Lock()
defer es.mu.Unlock()
@ -336,7 +340,7 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
case newerNoncurrentTask:
deleteObjectVersions(es.ctx, es.objAPI, v.bucket, v.versions, v.event)
case jentry:
logger.LogIf(es.ctx, deleteObjectFromRemoteTier(es.ctx, v.ObjName, v.VersionID, v.TierName))
transitionLogIf(es.ctx, deleteObjectFromRemoteTier(es.ctx, v.ObjName, v.VersionID, v.TierName))
case freeVersionTask:
oi := v.ObjectInfo
traceFn := globalLifecycleSys.trace(oi)
@ -355,7 +359,7 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
// Remove the remote object
err := deleteObjectFromRemoteTier(es.ctx, oi.TransitionedObject.Name, oi.TransitionedObject.VersionID, oi.TransitionedObject.Tier)
if ignoreNotFoundErr(err) != nil {
logger.LogIf(es.ctx, err)
transitionLogIf(es.ctx, err)
return
}
@ -368,10 +372,10 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
auditLogLifecycle(es.ctx, oi, ILMFreeVersionDelete, nil, traceFn)
}
if ignoreNotFoundErr(err) != nil {
logger.LogIf(es.ctx, err)
transitionLogIf(es.ctx, err)
}
default:
logger.LogIf(es.ctx, fmt.Errorf("Invalid work type - %v", v))
bugLogIf(es.ctx, fmt.Errorf("Invalid work type - %v", v))
}
}
}
@ -486,7 +490,7 @@ func (t *transitionState) worker(objectAPI ObjectLayer) {
if err := transitionObject(t.ctx, objectAPI, task.objInfo, newLifecycleAuditEvent(task.src, task.event)); err != nil {
if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) && !xnet.IsNetworkOrHostDown(err, false) {
if !strings.Contains(err.Error(), "use of closed network connection") {
logger.LogIf(t.ctx, fmt.Errorf("Transition to %s failed for %s/%s version:%s with %w",
transitionLogIf(t.ctx, fmt.Errorf("Transition to %s failed for %s/%s version:%s with %w",
task.event.StorageClass, task.objInfo.Bucket, task.objInfo.Name, task.objInfo.VersionID, err))
}
}
@ -538,6 +542,10 @@ func (t *transitionState) UpdateWorkers(n int) {
}
func (t *transitionState) updateWorkers(n int) {
if n == 0 {
n = 100
}
for t.numWorkers < n {
go t.worker(t.objAPI)
t.numWorkers++
@ -614,7 +622,7 @@ func expireTransitionedObject(ctx context.Context, objectAPI ObjectLayer, oi *Ob
// remote object
opts.SkipFreeVersion = true
} else {
logger.LogIf(ctx, err)
transitionLogIf(ctx, err)
}
// Now, delete object from hot-tier namespace
@ -663,11 +671,12 @@ func genTransitionObjName(bucket string) (string, error) {
// is moved to the transition tier. Note that in the case of encrypted objects, entire encrypted stream is moved
// to the transition tier without decrypting or re-encrypting.
func transitionObject(ctx context.Context, objectAPI ObjectLayer, oi ObjectInfo, lae lcAuditEvent) (err error) {
timeILM := globalScannerMetrics.timeILM(lae.Action)
defer func() {
if err != nil {
return
}
globalScannerMetrics.timeILM(lae.Action)(1)
timeILM(1)
}()
opts := ObjectOptions{
@ -721,9 +730,9 @@ func auditTierActions(ctx context.Context, tier string, bytes int64) func(err er
// getTransitionedObjectReader returns a reader from the transitioned tier.
func getTransitionedObjectReader(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, oi ObjectInfo, opts ObjectOptions) (gr *GetObjectReader, err error) {
tgtClient, err := globalTierConfigMgr.getDriver(oi.TransitionedObject.Tier)
tgtClient, err := globalTierConfigMgr.getDriver(ctx, oi.TransitionedObject.Tier)
if err != nil {
return nil, fmt.Errorf("transition storage class not configured")
return nil, fmt.Errorf("transition storage class not configured: %w", err)
}
fn, off, length, err := NewGetObjectReader(rs, oi, opts)
@ -879,7 +888,7 @@ func postRestoreOpts(ctx context.Context, r *http.Request, bucket, object string
if vid != "" && vid != nullVersionID {
_, err := uuid.Parse(vid)
if err != nil {
logger.LogIf(ctx, err)
s3LogIf(ctx, err)
return opts, InvalidVersionID{
Bucket: bucket,
Object: object,
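Editor's note: `transitionObject` now captures the ILM timer up front (`timeILM := globalScannerMetrics.timeILM(...)`) and only invokes it from the deferred block, so the recorded duration covers the transition itself rather than starting at defer time. A tiny generic sketch of that start-early/record-in-defer pattern using a `time.Since` closure; the metrics sink here is hypothetical.

```go
package main

import (
	"fmt"
	"time"
)

// timeOp starts the clock immediately and returns a function that records the
// elapsed time when called (typically from a defer).
func timeOp(name string) func(n int) {
	start := time.Now()
	return func(n int) {
		fmt.Printf("%s: %d item(s) in %v\n", name, n, time.Since(start))
	}
}

func transitionObject() (err error) {
	timeILM := timeOp("ilm-transition") // clock starts here, before the work
	defer func() {
		if err != nil {
			return // failed transitions are not timed, matching the diff
		}
		timeILM(1)
	}()

	time.Sleep(50 * time.Millisecond) // stand-in for moving the object to the tier
	return nil
}

func main() { _ = transitionObject() }
```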

View File

@ -26,7 +26,7 @@ import (
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// Validate all the ListObjects query arguments, returns an APIErrorCode
@ -124,7 +124,7 @@ func (api objectAPIHandlers) listObjectVersionsHandler(w http.ResponseWriter, r
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
response := generateListVersionsResponse(bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo, checkObjMeta)
response := generateListVersionsResponse(ctx, bucket, prefix, marker, versionIDMarker, delimiter, encodingType, maxkeys, listObjectVersionsInfo, checkObjMeta)
// Write success response.
writeSuccessResponseXML(w, encodeResponseList(response))
@ -219,7 +219,7 @@ func (api objectAPIHandlers) listObjectsV2Handler(ctx context.Context, w http.Re
return
}
response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
response := generateListObjectsV2Response(ctx, bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, checkObjMeta)
@ -318,7 +318,7 @@ func (api objectAPIHandlers) ListObjectsV1Handler(w http.ResponseWriter, r *http
return
}
response := generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
response := generateListObjectsV1Response(ctx, bucket, prefix, marker, delimiter, encodingType, maxKeys, listObjectsInfo)
// Write success response.
writeSuccessResponseXML(w, encodeResponseList(response))

View File

@ -37,8 +37,8 @@ import (
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/sync/errgroup"
"github.com/minio/pkg/v3/policy"
"github.com/minio/pkg/v3/sync/errgroup"
)
// BucketMetadataSys captures all bucket metadata for a given cluster.
@ -46,6 +46,7 @@ type BucketMetadataSys struct {
objAPI ObjectLayer
sync.RWMutex
initialized bool
metadataMap map[string]BucketMetadata
}
@ -433,6 +434,8 @@ func (sys *BucketMetadataSys) GetConfigFromDisk(ctx context.Context, bucket stri
return loadBucketMetadata(ctx, objAPI, bucket)
}
var errBucketMetadataNotInitialized = errors.New("bucket metadata not initialized yet")
// GetConfig returns a specific configuration from the bucket metadata.
// The returned object may not be modified.
// reloaded will be true if metadata refreshed from disk
@ -454,6 +457,10 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (met
}
meta, err = loadBucketMetadata(ctx, objAPI, bucket)
if err != nil {
if !sys.Initialized() {
// bucket metadata not yet initialized
return newBucketMetadata(bucket), reloaded, errBucketMetadataNotInitialized
}
return meta, reloaded, err
}
sys.Lock()
@ -498,9 +505,10 @@ func (sys *BucketMetadataSys) concurrentLoad(ctx context.Context, buckets []Buck
}
errs := g.Wait()
for _, err := range errs {
for index, err := range errs {
if err != nil {
logger.LogIf(ctx, err)
internalLogOnceIf(ctx, fmt.Errorf("Unable to load bucket metadata, will be retried: %w", err),
"load-bucket-metadata-"+buckets[index].Name, logger.WarningKind)
}
}
@ -542,7 +550,7 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
case <-t.C:
buckets, err := sys.objAPI.ListBuckets(ctx, BucketOptions{})
if err != nil {
logger.LogIf(ctx, err)
internalLogIf(ctx, err, logger.WarningKind)
break
}
@ -560,7 +568,7 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
meta, err := loadBucketMetadata(ctx, sys.objAPI, buckets[i].Name)
if err != nil {
logger.LogIf(ctx, err)
internalLogIf(ctx, err, logger.WarningKind)
wait() // wait to proceed to next entry.
continue
}
@ -583,6 +591,14 @@ func (sys *BucketMetadataSys) refreshBucketsMetadataLoop(ctx context.Context, fa
}
}
// Initialized indicates if bucket metadata sys is initialized atleast once.
func (sys *BucketMetadataSys) Initialized() bool {
sys.RLock()
defer sys.RUnlock()
return sys.initialized
}
// Loads bucket metadata for all buckets into BucketMetadataSys.
func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
count := 100 // load 100 bucket metadata at a time.
@ -596,6 +612,10 @@ func (sys *BucketMetadataSys) init(ctx context.Context, buckets []BucketInfo) {
buckets = buckets[count:]
}
sys.Lock()
sys.initialized = true
sys.Unlock()
if globalIsDistErasure {
go sys.refreshBucketsMetadataLoop(ctx, failedBuckets)
}

View File

@ -41,7 +41,7 @@ import (
"github.com/minio/minio/internal/fips"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
"github.com/minio/sio"
)
@ -145,7 +145,7 @@ func (b *BucketMetadata) SetCreatedAt(createdAt time.Time) {
// If an error is returned the returned metadata will be default initialized.
func readBucketMetadata(ctx context.Context, api ObjectLayer, name string) (BucketMetadata, error) {
if name == "" {
logger.LogIf(ctx, errors.New("bucket name cannot be empty"))
internalLogIf(ctx, errors.New("bucket name cannot be empty"), logger.WarningKind)
return BucketMetadata{}, errInvalidArgument
}
b := newBucketMetadata(name)
@ -400,7 +400,7 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj
for legacyFile := range configs {
configFile := path.Join(bucketMetaPrefix, b.Name, legacyFile)
if err := deleteConfig(ctx, objectAPI, configFile); err != nil && !errors.Is(err, errConfigNotFound) {
logger.LogIf(ctx, err)
internalLogIf(ctx, err, logger.WarningKind)
}
}
@ -490,7 +490,7 @@ func encryptBucketMetadata(ctx context.Context, bucket string, input []byte, kms
}
metadata := make(map[string]string)
key, err := GlobalKMS.GenerateKey(ctx, "", kmsContext)
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{AssociatedData: kmsContext})
if err != nil {
return
}
@ -519,7 +519,11 @@ func decryptBucketMetadata(input []byte, bucket string, meta map[string]string,
if err != nil {
return nil, err
}
extKey, err := GlobalKMS.DecryptKey(keyID, kmsKey, kmsContext)
extKey, err := GlobalKMS.Decrypt(context.TODO(), &kms.DecryptRequest{
Name: keyID,
Ciphertext: kmsKey,
AssociatedData: kmsContext,
})
if err != nil {
return nil, err
}
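Editor's note: the KMS calls above move from positional arguments to request structs: `GenerateKey` takes a `*kms.GenerateKeyRequest{Name, AssociatedData}` and `Decrypt` takes a `*kms.DecryptRequest{Name, Ciphertext, AssociatedData}`. Below is a hedged sketch of the envelope-encryption flow around those shapes; the `kmsClient` interface, the toy `fakeKMS`, and the `Plaintext`/`Ciphertext` fields on the returned key are illustrative assumptions, not the exact internal types.

```go
package main

import (
	"context"
	"fmt"
)

// Request/response shapes mirroring the calls in the diff (illustrative only).
type GenerateKeyRequest struct {
	Name           string
	AssociatedData map[string]string
}

type DecryptRequest struct {
	Name           string
	Ciphertext     []byte
	AssociatedData map[string]string
}

type DEK struct {
	Plaintext  []byte // used to encrypt the payload, never persisted
	Ciphertext []byte // stored next to the object metadata
}

type kmsClient interface {
	GenerateKey(ctx context.Context, req *GenerateKeyRequest) (DEK, error)
	Decrypt(ctx context.Context, req *DecryptRequest) ([]byte, error)
}

// sealMetadata shows the envelope flow: generate a data key bound to the
// bucket via AssociatedData, keep only the ciphertext form of the key.
func sealMetadata(ctx context.Context, k kmsClient, bucket string) ([]byte, error) {
	assoc := map[string]string{"bucket": bucket}
	key, err := k.GenerateKey(ctx, &GenerateKeyRequest{AssociatedData: assoc})
	if err != nil {
		return nil, err
	}
	_ = key.Plaintext // encrypt the metadata with this key (omitted)
	return key.Ciphertext, nil
}

// fakeKMS is a toy in-memory KMS: it "wraps" keys by reversing their bytes.
type fakeKMS struct{}

func (fakeKMS) GenerateKey(_ context.Context, _ *GenerateKeyRequest) (DEK, error) {
	plain := []byte("0123456789abcdef0123456789abcdef") // 256-bit data key
	return DEK{Plaintext: plain, Ciphertext: reverse(plain)}, nil
}

func (fakeKMS) Decrypt(_ context.Context, req *DecryptRequest) ([]byte, error) {
	return reverse(req.Ciphertext), nil
}

func reverse(b []byte) []byte {
	out := make([]byte, len(b))
	for i := range b {
		out[len(b)-1-i] = b[i]
	}
	return out
}

func main() {
	ctx := context.Background()
	var k kmsClient = fakeKMS{}
	wrapped, _ := sealMetadata(ctx, k, "photos")
	plain, _ := k.Decrypt(ctx, &DecryptRequest{Ciphertext: wrapped})
	fmt.Printf("recovered data key: %s\n", plain)
}
```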

View File

@ -26,7 +26,7 @@ import (
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
const (
@ -66,8 +66,9 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return
}
config.SetRegion(globalSite.Region)
if err = config.Validate(globalSite.Region, globalEventNotifier.targetList); err != nil {
region := globalSite.Region()
config.SetRegion(region)
if err = config.Validate(region, globalEventNotifier.targetList); err != nil {
arnErr, ok := err.(*event.ErrARNNotFound)
if ok {
for i, queue := range config.QueueList {
@ -134,7 +135,7 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
return
}
config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region, globalEventNotifier.targetList)
config, err := event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalSite.Region(), globalEventNotifier.targetList)
if err != nil {
apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
if event.IsEventError(err) {

View File

@ -28,7 +28,7 @@ import (
"github.com/minio/minio/internal/bucket/replication"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// BucketObjectLockSys - map of bucket and retention configuration.
@ -44,7 +44,6 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention,
if errors.Is(err, errInvalidArgument) {
return r, err
}
logger.CriticalIf(context.Background(), err)
return r, err
}
return config.ToRetention(), nil
@ -66,7 +65,7 @@ func enforceRetentionForDeletion(ctx context.Context, objInfo ObjectInfo) (locke
if ret.Mode.Valid() && (ret.Mode == objectlock.RetCompliance || ret.Mode == objectlock.RetGovernance) {
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
internalLogIf(ctx, err, logger.WarningKind)
return true
}
if ret.RetainUntilDate.After(t) {
@ -114,7 +113,7 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke
// duration of the retention period.
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
internalLogIf(ctx, err, logger.WarningKind)
return ObjectLocked{}
}
@ -140,7 +139,7 @@ func enforceRetentionBypassForDelete(ctx context.Context, r *http.Request, bucke
if !byPassSet {
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
internalLogIf(ctx, err, logger.WarningKind)
return ObjectLocked{}
}
@ -170,7 +169,7 @@ func enforceRetentionBypassForPut(ctx context.Context, r *http.Request, oi Objec
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
internalLogIf(ctx, err, logger.WarningKind)
return ObjectLocked{Bucket: oi.Bucket, Object: oi.Name, VersionID: oi.VersionID}
}
@ -277,7 +276,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob
r := objectlock.GetObjectRetentionMeta(objInfo.UserDefined)
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
internalLogIf(ctx, err, logger.WarningKind)
return mode, retainDate, legalHold, ErrObjectLocked
}
if r.Mode == objectlock.RetCompliance && r.RetainUntilDate.After(t) {
@ -324,7 +323,7 @@ func checkPutObjectLockAllowed(ctx context.Context, rq *http.Request, bucket, ob
t, err := objectlock.UTCNowNTP()
if err != nil {
logger.LogIf(ctx, err)
internalLogIf(ctx, err, logger.WarningKind)
return mode, retainDate, legalHold, ErrObjectLocked
}

View File

@ -27,7 +27,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
const (
@ -113,7 +113,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
}
// Call site replication hook.
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypePolicy,
Bucket: bucket,
Policy: bucketPolicyBytes,
@ -157,7 +157,7 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r
}
// Call site replication hook.
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypePolicy,
Bucket: bucket,
UpdatedAt: updatedAt,

View File

@ -29,8 +29,8 @@ import (
"testing"
"github.com/minio/minio/internal/auth"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v2/policy/condition"
"github.com/minio/pkg/v3/policy"
"github.com/minio/pkg/v3/policy/condition"
)
func getAnonReadOnlyBucketPolicy(bucketName string) *policy.BucketPolicy {

View File

@ -32,7 +32,7 @@ import (
"github.com/minio/minio/internal/handlers"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// PolicySys - policy subsystem.
@ -53,7 +53,7 @@ func (sys *PolicySys) IsAllowed(args policy.BucketPolicyArgs) bool {
// Log unhandled errors.
if _, ok := err.(BucketPolicyNotFound); !ok {
logger.LogIf(GlobalContext, err)
internalLogIf(GlobalContext, err, logger.WarningKind)
}
// As policy is not available for given bucket name, returns IsOwner i.e.

View File

@ -49,8 +49,8 @@ var bucketStorageCache = cachevalue.New[DataUsageInfo]()
func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
bucketStorageCache.InitOnce(10*time.Second,
cachevalue.Opts{ReturnLastGood: true, NoWait: true},
func() (DataUsageInfo, error) {
ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
func(ctx context.Context) (DataUsageInfo, error) {
ctx, done := context.WithTimeout(ctx, 2*time.Second)
defer done()
return loadDataUsageFromBackend(ctx, objAPI)
@ -59,14 +59,14 @@ func (sys *BucketQuotaSys) Init(objAPI ObjectLayer) {
}
// GetBucketUsageInfo return bucket usage info for a given bucket
func (sys *BucketQuotaSys) GetBucketUsageInfo(bucket string) (BucketUsageInfo, error) {
dui, err := bucketStorageCache.Get()
func (sys *BucketQuotaSys) GetBucketUsageInfo(ctx context.Context, bucket string) (BucketUsageInfo, error) {
dui, err := bucketStorageCache.GetWithCtx(ctx)
timedout := OperationTimedOut{}
if err != nil && !errors.Is(err, context.DeadlineExceeded) && !errors.As(err, &timedout) {
if len(dui.BucketsUsage) > 0 {
logger.LogOnceIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err), "bucket-usage-cache-"+bucket)
internalLogOnceIf(GlobalContext, fmt.Errorf("unable to retrieve usage information for bucket: %s, relying on older value cached in-memory: err(%v)", bucket, err), "bucket-usage-cache-"+bucket)
} else {
logger.LogOnceIf(GlobalContext, errors.New("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced"), "bucket-usage-empty-"+bucket)
internalLogOnceIf(GlobalContext, errors.New("unable to retrieve usage information for bucket: %s, no reliable usage value available - quota will not be enforced"), "bucket-usage-empty-"+bucket)
}
}
@ -87,7 +87,7 @@ func parseBucketQuota(bucket string, data []byte) (quotaCfg *madmin.BucketQuota,
}
if !quotaCfg.IsValid() {
if quotaCfg.Type == "fifo" {
logger.LogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"))
internalLogIf(GlobalContext, errors.New("Detected older 'fifo' quota config, 'fifo' feature is removed and not supported anymore. Please clear your quota configs using 'mc admin bucket quota alias/bucket --clear' and use 'mc ilm add' for expiration of objects"), logger.WarningKind)
return quotaCfg, fmt.Errorf("invalid quota type 'fifo'")
}
return quotaCfg, fmt.Errorf("Invalid quota config %#v", quotaCfg)
@ -118,7 +118,7 @@ func (sys *BucketQuotaSys) enforceQuotaHard(ctx context.Context, bucket string,
return BucketQuotaExceeded{Bucket: bucket}
}
bui, err := sys.GetBucketUsageInfo(bucket)
bui, err := sys.GetBucketUsageInfo(ctx, bucket)
if err != nil {
return err
}
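In the quota hunk above, `bucketStorageCache.Get()` becomes `GetWithCtx(ctx)` and the refresh callback now receives a `context.Context`, so backend reads inherit the caller's deadline instead of always starting from `context.Background()`. The full `cachevalue` API is not shown here; the following is a minimal, hypothetical single-value TTL cache that illustrates the same idea.

```go
package main

import (
	"context"
	"sync"
	"time"
)

// Cache holds a single value and refreshes it at most once per TTL.
// It is a simplified, hypothetical stand-in for the cachevalue usage above.
type Cache[T any] struct {
	mu      sync.Mutex
	ttl     time.Duration
	update  func(ctx context.Context) (T, error)
	last    T
	expires time.Time
}

func New[T any](ttl time.Duration, update func(ctx context.Context) (T, error)) *Cache[T] {
	return &Cache[T]{ttl: ttl, update: update}
}

// GetWithCtx returns the cached value, refreshing it with the caller's
// context once the TTL has elapsed. On refresh failure it returns the
// previous value alongside the error (ReturnLastGood-style behaviour).
func (c *Cache[T]) GetWithCtx(ctx context.Context) (T, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if time.Now().Before(c.expires) {
		return c.last, nil
	}
	v, err := c.update(ctx)
	if err == nil {
		c.last = v
		c.expires = time.Now().Add(c.ttl)
	}
	return c.last, err
}

func main() {
	cache := New(10*time.Second, func(ctx context.Context) (int, error) {
		// Bound the backend read by the request context plus a hard cap,
		// mirroring the 2*time.Second timeout in the hunk above.
		ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
		defer cancel()
		_ = ctx // a real refresher would pass ctx to the backend read
		return 42, nil
	})
	_, _ = cache.GetWithCtx(context.Background())
}
```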

View File

@ -34,7 +34,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// PutBucketReplicationConfigHandler - PUT Bucket replication configuration.

View File

@ -51,6 +51,8 @@ import (
"github.com/minio/minio/internal/logger"
"github.com/tinylib/msgp/msgp"
"github.com/zeebo/xxh3"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
)
const (
@ -76,21 +78,13 @@ const (
ReplicationWorkerMultiplier = 1.5
)
func isReplicationEnabled(ctx context.Context, bucketName string) bool {
rc, _ := getReplicationConfig(ctx, bucketName)
return rc != nil
}
// gets replication config associated to a given bucket name.
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) {
rCfg, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName)
if err != nil {
if errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucketName}) || errors.Is(err, errInvalidArgument) {
return rCfg, err
}
logger.CriticalIf(ctx, err)
if err != nil && !errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucketName}) {
return rCfg, err
}
return rCfg, err
return rCfg, nil
}
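`getReplicationConfig` now distinguishes "configuration missing" from real failures: the not-found case returns a nil config with a nil error, and the callers in the hunks below nil-check the config (`if cfg == nil { return }`). A small sketch of that contract, assuming a comparable error type so `errors.Is` can match a struct value the way the hunk matches `BucketReplicationConfigNotFound{Bucket: bucketName}`; all names below are hypothetical.

```go
package main

import (
	"errors"
	"fmt"
)

// ConfigNotFound is a comparable error type, so errors.Is can match it
// against a struct value, as the hunk above does.
type ConfigNotFound struct{ Bucket string }

func (e ConfigNotFound) Error() string { return "no replication config for bucket " + e.Bucket }

var store = map[string]string{"photos": "arn:minio:replication::hypothetical-target"}

// rawLookup stands in for the metadata-layer call that can fail with ConfigNotFound.
func rawLookup(bucket string) (string, error) {
	cfg, ok := store[bucket]
	if !ok {
		return "", ConfigNotFound{Bucket: bucket}
	}
	return cfg, nil
}

// getConfig mirrors the new contract: absence is not an error,
// anything else is surfaced to the caller.
func getConfig(bucket string) (string, error) {
	cfg, err := rawLookup(bucket)
	if err != nil && !errors.Is(err, ConfigNotFound{Bucket: bucket}) {
		return "", err
	}
	return cfg, nil
}

func main() {
	cfg, err := getConfig("docs")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	if cfg == "" { // callers must now check for an absent config themselves
		fmt.Println("replication not configured, nothing to do")
		return
	}
	fmt.Println("replicate to", cfg)
}
```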
// validateReplicationDestination returns error if replication destination bucket missing or not configured
@ -264,10 +258,16 @@ func mustReplicate(ctx context.Context, bucket, object string, mopts mustReplica
if mopts.replicationRequest { // incoming replication request on target cluster
return
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
replLogOnceIf(ctx, err, bucket)
return
}
if cfg == nil {
return
}
opts := replication.ObjectOpts{
Name: object,
SSEC: crypto.SSEC.IsEncrypted(mopts.meta),
@ -315,6 +315,7 @@ var standardHeaders = []string{
func hasReplicationRules(ctx context.Context, bucket string, objects []ObjectToDelete) bool {
c, err := getReplicationConfig(ctx, bucket)
if err != nil || c == nil {
replLogOnceIf(ctx, err, bucket)
return false
}
for _, obj := range objects {
@ -334,6 +335,7 @@ func isStandardHeader(matchHeaderKey string) bool {
func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelete, oi ObjectInfo, delOpts ObjectOptions, gerr error) (dsc ReplicateDecision) {
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
replLogOnceIf(ctx, err, bucket)
return
}
// If incoming request is a replication request, it does not need to be re-replicated.
@ -426,7 +428,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil || rcfg == nil {
logger.LogOnceIf(ctx, fmt.Errorf("unable to obtain replication config for bucket: %s: err: %s", bucket, err), bucket)
replLogOnceIf(ctx, fmt.Errorf("unable to obtain replication config for bucket: %s: err: %s", bucket, err), bucket)
sendEvent(eventArgs{
BucketName: bucket,
Object: ObjectInfo{
@ -443,7 +445,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
}
dsc, err := parseReplicateDecision(ctx, bucket, dobj.ReplicationState.ReplicateDecisionStr)
if err != nil {
logger.LogOnceIf(ctx, fmt.Errorf("unable to parse replication decision parameters for bucket: %s, err: %s, decision: %s",
replLogOnceIf(ctx, fmt.Errorf("unable to parse replication decision parameters for bucket: %s, err: %s, decision: %s",
bucket, err, dobj.ReplicationState.ReplicateDecisionStr), dobj.ReplicationState.ReplicateDecisionStr)
sendEvent(eventArgs{
BucketName: bucket,
@ -497,7 +499,7 @@ func replicateDelete(ctx context.Context, dobj DeletedObjectReplicationInfo, obj
tgtClnt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtEntry.Arn)
if tgtClnt == nil {
// Skip stale targets if any and log them to be missing at least once.
logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtEntry.Arn), tgtEntry.Arn)
replLogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtEntry.Arn), tgtEntry.Arn)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
@ -609,7 +611,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
return
}
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN)
replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s", dobj.Bucket, tgt.ARN), "replication-target-offline-delete-"+tgt.ARN)
sendEvent(eventArgs{
BucketName: dobj.Bucket,
Object: ObjectInfo{
@ -684,7 +686,7 @@ func replicateDeleteToTarget(ctx context.Context, dobj DeletedObjectReplicationI
} else {
rinfo.VersionPurgeStatus = Failed
}
logger.LogIf(ctx, fmt.Errorf("unable to replicate delete marker to %s: %s/%s(%s): %w", tgt.EndpointURL(), tgt.Bucket, dobj.ObjectName, versionID, rmErr))
replLogIf(ctx, fmt.Errorf("unable to replicate delete marker to %s: %s/%s(%s): %w", tgt.EndpointURL(), tgt.Bucket, dobj.ObjectName, versionID, rmErr))
if rmErr != nil && minio.IsNetworkOrHostDown(rmErr, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
globalBucketTargetSys.markOffline(tgt.EndpointURL())
}
@ -764,13 +766,20 @@ func (m caseInsensitiveMap) Lookup(key string) (string, bool) {
func putReplicationOpts(ctx context.Context, sc string, objInfo ObjectInfo) (putOpts minio.PutObjectOptions, err error) {
meta := make(map[string]string)
for k, v := range objInfo.UserDefined {
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
continue
// In case of SSE-C objects copy the allowed internal headers as well
if !crypto.SSEC.IsEncrypted(objInfo.UserDefined) || !slices.Contains(maps.Keys(validSSEReplicationHeaders), k) {
if stringsHasPrefixFold(k, ReservedMetadataPrefixLower) {
continue
}
if isStandardHeader(k) {
continue
}
}
if isStandardHeader(k) {
continue
if slices.Contains(maps.Keys(validSSEReplicationHeaders), k) {
meta[validSSEReplicationHeaders[k]] = v
} else {
meta[k] = v
}
meta[k] = v
}
if sc == "" && (objInfo.StorageClass == storageclass.STANDARD || objInfo.StorageClass == storageclass.RRS) {
@ -989,8 +998,8 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
object := ri.Name
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
logger.LogOnceIf(ctx, err, "get-replication-config-"+bucket)
if err != nil || cfg == nil {
replLogOnceIf(ctx, err, "get-replication-config-"+bucket)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
@ -1029,7 +1038,7 @@ func replicateObject(ctx context.Context, ri ReplicateObjectInfo, objectAPI Obje
for _, tgtArn := range tgtArns {
tgt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtArn)
if tgt == nil {
logger.LogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtArn), tgtArn)
replLogOnceIf(ctx, fmt.Errorf("failed to get target for bucket:%s arn:%s", bucket, tgtArn), tgtArn)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
@ -1151,7 +1160,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
}
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline"+tgt.ARN)
replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline"+tgt.ARN)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
@ -1166,9 +1175,10 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, ObjectOptions{
VersionID: ri.VersionID,
Versioned: versioned,
VersionSuspended: versionSuspended,
VersionID: ri.VersionID,
Versioned: versioned,
VersionSuspended: versionSuspended,
ReplicationRequest: true,
})
if err != nil {
if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
@ -1180,7 +1190,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
logger.LogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID)
replLogOnceIf(ctx, fmt.Errorf("unable to read source object %s/%s(%s): %w", bucket, object, objInfo.VersionID, err), object+":"+objInfo.VersionID)
}
return
}
@ -1193,7 +1203,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
size, err := objInfo.GetActualSize()
if err != nil {
logger.LogIf(ctx, err)
replLogIf(ctx, err)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
@ -1205,7 +1215,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
}
if tgt.Bucket == "" {
logger.LogIf(ctx, fmt.Errorf("unable to replicate object %s(%s), bucket is empty for target %s", objInfo.Name, objInfo.VersionID, tgt.EndpointURL()))
replLogIf(ctx, fmt.Errorf("unable to replicate object %s(%s), bucket is empty for target %s", objInfo.Name, objInfo.VersionID, tgt.EndpointURL()))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
@ -1231,7 +1241,7 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
putOpts, err := putReplicationOpts(ctx, tgt.StorageClass, objInfo)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err))
replLogIf(ctx, fmt.Errorf("failure setting options for replication bucket:%s err:%w", bucket, err))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
@ -1262,23 +1272,19 @@ func (ri ReplicateObjectInfo) replicateObject(ctx context.Context, objectAPI Obj
}
r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
if objInfo.isMultipart() {
if rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object,
r, objInfo, putOpts); rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL()))
}
}
rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts)
} else {
if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): %s (target: %s)", bucket, objInfo.Name, objInfo.VersionID, rinfo.Err, tgt.EndpointURL()))
}
}
_, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts)
}
if rinfo.Err != nil && minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
globalBucketTargetSys.markOffline(tgt.EndpointURL())
if rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s): to (target: %s): %w",
bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
}
if minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
globalBucketTargetSys.markOffline(tgt.EndpointURL())
}
}
return
}
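The multipart and single-PUT upload paths above now share one error block: any failure other than `PreconditionFailed` marks the attempt failed, and network/host-down failures additionally take the target offline. A condensed sketch of that classification, reusing the same minio-go helpers the hunk calls (`minio.ToErrorResponse`, `minio.IsNetworkOrHostDown`); the result struct is a simplification of `rinfo`.

```go
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7"
)

// replicationOutcome is a simplified stand-in for the rinfo fields set above.
type replicationOutcome struct {
	Failed        bool
	MarkedOffline bool
}

// classifyReplicationErr applies the shared post-upload error handling:
// PreconditionFailed means the target already has this version (not a
// failure); network/host-down errors also take the target offline.
func classifyReplicationErr(err error, targetOnline bool) replicationOutcome {
	var out replicationOutcome
	if err == nil {
		return out
	}
	if minio.ToErrorResponse(err).Code != "PreconditionFailed" {
		out.Failed = true
	}
	if minio.IsNetworkOrHostDown(err, true) && targetOnline {
		out.MarkedOffline = true
	}
	return out
}

func main() {
	err := minio.ErrorResponse{Code: "PreconditionFailed", Message: "newer version exists"}
	fmt.Printf("%+v\n", classifyReplicationErr(err, true))
}
```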
@ -1308,7 +1314,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
}
if globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
logger.LogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline-heal"+tgt.ARN)
replLogOnceIf(ctx, fmt.Errorf("remote target is offline for bucket:%s arn:%s retry:%d", bucket, tgt.ARN, ri.RetryCount), "replication-target-offline-heal"+tgt.ARN)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
@ -1322,11 +1328,13 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
versioned := globalBucketVersioningSys.PrefixEnabled(bucket, object)
versionSuspended := globalBucketVersioningSys.PrefixSuspended(bucket, object)
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{}, ObjectOptions{
VersionID: ri.VersionID,
Versioned: versioned,
VersionSuspended: versionSuspended,
})
gr, err := objectAPI.GetObjectNInfo(ctx, bucket, object, nil, http.Header{},
ObjectOptions{
VersionID: ri.VersionID,
Versioned: versioned,
VersionSuspended: versionSuspended,
ReplicationRequest: true,
})
if err != nil {
if !isErrVersionNotFound(err) && !isErrObjectNotFound(err) {
objInfo := ri.ToObjectInfo()
@ -1337,13 +1345,14 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
UserAgent: "Internal: [Replication]",
Host: globalLocalNodeName,
})
logger.LogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err))
replLogIf(ctx, fmt.Errorf("unable to replicate to target %s for %s/%s(%s): %w", tgt.EndpointURL(), bucket, object, objInfo.VersionID, err))
}
return
}
defer gr.Close()
objInfo := gr.ObjInfo
// make sure we have the latest metadata for metrics calculation
rinfo.PrevReplicationStatus = objInfo.TargetReplicationStatus(tgt.ARN)
@ -1356,7 +1365,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
size, err := objInfo.GetActualSize()
if err != nil {
logger.LogIf(ctx, err)
replLogIf(ctx, err)
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
@ -1367,8 +1376,13 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
return
}
// Set the encrypted size for SSE-C objects
if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
size = objInfo.Size
}
if tgt.Bucket == "" {
logger.LogIf(ctx, fmt.Errorf("unable to replicate object %s(%s) to %s, target bucket is missing", objInfo.Name, objInfo.VersionID, tgt.EndpointURL()))
replLogIf(ctx, fmt.Errorf("unable to replicate object %s(%s) to %s, target bucket is missing", objInfo.Name, objInfo.VersionID, tgt.EndpointURL()))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
@ -1398,7 +1412,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
if rAction == replicateNone {
if ri.OpType == replication.ExistingObjectReplicationType &&
objInfo.ModTime.Unix() > oi.LastModified.Unix() && objInfo.VersionID == nullVersionID {
logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (null). Newer version exists on target %s", bucket, object, tgt.EndpointURL()))
replLogIf(ctx, fmt.Errorf("unable to replicate %s/%s (null). Newer version exists on target %s", bucket, object, tgt.EndpointURL()))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
@ -1438,7 +1452,7 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
rAction = replicateAll
default:
rinfo.Err = cerr
logger.LogIf(ctx, fmt.Errorf("unable to replicate %s/%s (%s). Target (%s) returned %s error on HEAD",
replLogIf(ctx, fmt.Errorf("unable to replicate %s/%s (%s). Target (%s) returned %s error on HEAD",
bucket, object, objInfo.VersionID, tgt.EndpointURL(), cerr))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
@ -1488,13 +1502,13 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
}
if _, rinfo.Err = c.CopyObject(ctx, tgt.Bucket, object, tgt.Bucket, object, getCopyObjMetadata(objInfo, tgt.StorageClass), srcOpts, dstOpts); rinfo.Err != nil {
rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate metadata for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
replLogIf(ctx, fmt.Errorf("unable to replicate metadata for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
}
} else {
var putOpts minio.PutObjectOptions
putOpts, err = putReplicationOpts(ctx, tgt.StorageClass, objInfo)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err))
replLogIf(ctx, fmt.Errorf("failed to set replicate options for object %s/%s(%s) (target %s) err:%w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), err))
sendEvent(eventArgs{
EventName: event.ObjectReplicationNotTracked,
BucketName: bucket,
@ -1524,27 +1538,19 @@ func (ri ReplicateObjectInfo) replicateAll(ctx context.Context, objectAPI Object
}
r := bandwidth.NewMonitoredReader(newCtx, globalBucketMonitor, gr, opts)
if objInfo.isMultipart() {
if rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object,
r, objInfo, putOpts); rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
} else {
rinfo.ReplicationStatus = replication.Completed
}
}
rinfo.Err = replicateObjectWithMultipart(ctx, c, tgt.Bucket, object, r, objInfo, putOpts)
} else {
if _, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts); rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
logger.LogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w", bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
} else {
rinfo.ReplicationStatus = replication.Completed
}
}
_, rinfo.Err = c.PutObject(ctx, tgt.Bucket, object, r, size, "", "", putOpts)
}
if rinfo.Err != nil && minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
globalBucketTargetSys.markOffline(tgt.EndpointURL())
if rinfo.Err != nil {
if minio.ToErrorResponse(rinfo.Err).Code != "PreconditionFailed" {
rinfo.ReplicationStatus = replication.Failed
replLogIf(ctx, fmt.Errorf("unable to replicate for object %s/%s(%s) to target %s: %w",
bucket, objInfo.Name, objInfo.VersionID, tgt.EndpointURL(), rinfo.Err))
}
if minio.IsNetworkOrHostDown(rinfo.Err, true) && !globalBucketTargetSys.isOffline(tgt.EndpointURL()) {
globalBucketTargetSys.markOffline(tgt.EndpointURL())
}
}
}
return
@ -1564,7 +1570,7 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
break
}
if minio.ToErrorResponse(err).Code == "PreconditionFailed" {
return err
return nil
}
attempts++
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
@ -1580,14 +1586,10 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
for attempts <= 3 {
actx, acancel := context.WithTimeout(ctx, time.Minute)
aerr := c.AbortMultipartUpload(actx, bucket, object, uploadID)
acancel()
if aerr == nil {
acancel()
return
}
acancel()
logger.LogIf(actx,
fmt.Errorf("trying %s: Unable to cleanup failed multipart replication %s on remote %s/%s: %w - this may consume space on remote cluster",
humanize.Ordinal(attempts), uploadID, bucket, object, aerr))
attempts++
time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
}
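The abort-multipart cleanup above now cancels each attempt's context as soon as the call returns and retries up to three times with a random sub-second sleep. Generalized, the shape looks roughly like this; the helper name and signature are made up for the sketch.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"math/rand"
	"time"
)

// retryWithJitter runs op with a fresh per-attempt timeout, cancelling the
// attempt context as soon as the call returns (as the hunk above now does),
// and sleeps a random sub-second duration between attempts.
func retryWithJitter(ctx context.Context, attempts int, perAttempt time.Duration, op func(context.Context) error) error {
	var lastErr error
	for i := 0; i < attempts; i++ {
		actx, cancel := context.WithTimeout(ctx, perAttempt)
		lastErr = op(actx)
		cancel() // release the timer immediately; do not defer inside the loop
		if lastErr == nil {
			return nil
		}
		time.Sleep(time.Duration(rand.Int63n(int64(time.Second))))
	}
	return fmt.Errorf("gave up after %d attempts: %w", attempts, lastErr)
}

func main() {
	calls := 0
	err := retryWithJitter(context.Background(), 3, time.Minute, func(ctx context.Context) error {
		calls++
		if calls < 2 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println(calls, err)
}
```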
@ -1599,21 +1601,35 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
pInfo minio.ObjectPart
)
var objectSize int64
for _, partInfo := range objInfo.Parts {
hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.ActualSize), partInfo.ActualSize, "", "", partInfo.ActualSize)
if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.Size), partInfo.Size, "", "", partInfo.ActualSize)
} else {
hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.ActualSize), partInfo.ActualSize, "", "", partInfo.ActualSize)
}
if err != nil {
return err
}
cHeader := http.Header{}
cHeader.Add(xhttp.MinIOSourceReplicationRequest, "true")
popts := minio.PutObjectPartOptions{
SSE: opts.ServerSideEncryption,
SSE: opts.ServerSideEncryption,
CustomHeader: cHeader,
}
pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.ActualSize, popts)
if crypto.SSEC.IsEncrypted(objInfo.UserDefined) {
objectSize += partInfo.Size
pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.Size, popts)
} else {
objectSize += partInfo.ActualSize
pInfo, err = c.PutObjectPart(ctx, bucket, object, uploadID, partInfo.Number, hr, partInfo.ActualSize, popts)
}
if err != nil {
return err
}
if pInfo.Size != partInfo.ActualSize {
if !crypto.SSEC.IsEncrypted(objInfo.UserDefined) && pInfo.Size != partInfo.ActualSize {
return fmt.Errorf("Part size mismatch: got %d, want %d", pInfo.Size, partInfo.ActualSize)
}
uploadedParts = append(uploadedParts, minio.CompletePart{
@ -1621,11 +1637,15 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob
ETag: pInfo.ETag,
})
}
// really big value but it's okay on heavily loaded systems. This is just the tail-end timeout.
cctx, ccancel := context.WithTimeout(ctx, 10*time.Minute)
defer ccancel()
_, err = c.CompleteMultipartUpload(cctx, bucket, object, uploadID, uploadedParts, minio.PutObjectOptions{
UserMetadata: map[string]string{validSSEReplicationHeaders[ReservedMetadataPrefix+"Actual-Object-Size"]: objInfo.UserDefined[ReservedMetadataPrefix+"actual-size"]},
Internal: minio.AdvancedPutOptions{
SourceMTime: objInfo.ModTime,
SourceETag: objInfo.ETag,
// always set this to distinguish between `mc mirror` replication and serverside
ReplicationRequest: true,
},
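The two hunks above handle SSE-C multipart sources by streaming each part at its stored (encrypted) size while still reporting the plaintext size to the hash reader, and by skipping the plaintext size check on the uploaded part. A tiny sketch of that size selection, with a simplified part type standing in for the server's part info.

```go
package main

import "fmt"

// part is a simplified stand-in for the object part info used above:
// Size is the stored (possibly encrypted) length, ActualSize the plaintext length.
type part struct {
	Number     int
	Size       int64
	ActualSize int64
}

// replicationSizes returns the byte count to read and upload for a part and
// the amount to add to the running object size, following the SSE-C branch
// in the hunk above.
func replicationSizes(ssec bool, p part) (upload, accumulate int64) {
	if ssec {
		// ciphertext travels as-is; the target sees the encrypted length
		return p.Size, p.Size
	}
	return p.ActualSize, p.ActualSize
}

func main() {
	p := part{Number: 1, Size: 5*1024*1024 + 32, ActualSize: 5 * 1024 * 1024}
	up, acc := replicationSizes(true, p)
	fmt.Println(up, acc)
}
```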
@ -1838,7 +1858,7 @@ func (p *ReplicationPool) AddMRFWorker() {
globalReplicationStats.decQ(v.Bucket, v.Size, v.DeleteMarker, v.OpType)
default:
logger.LogOnceIf(p.ctx, fmt.Errorf("unknown mrf replication type: %T", oi), "unknown-mrf-replicate-type")
bugLogIf(p.ctx, fmt.Errorf("unknown mrf replication type: %T", oi), "unknown-mrf-replicate-type")
}
case <-p.mrfWorkerKillCh:
return
@ -1882,7 +1902,7 @@ func (p *ReplicationPool) AddWorker(input <-chan ReplicationWorkerOperation, opT
atomic.AddInt32(opTracker, -1)
}
default:
logger.LogOnceIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type")
bugLogIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type")
}
}
}
@ -1921,7 +1941,7 @@ func (p *ReplicationPool) AddLargeWorker(input <-chan ReplicationWorkerOperation
case DeletedObjectReplicationInfo:
replicateDelete(p.ctx, v, p.objLayer)
default:
logger.LogOnceIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type")
bugLogIf(p.ctx, fmt.Errorf("unknown replication type: %T", oi), "unknown-replicate-type")
}
}
}
@ -2068,9 +2088,9 @@ func (p *ReplicationPool) queueReplicaTask(ri ReplicateObjectInfo) {
p.mu.RUnlock()
switch prio {
case "fast":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming traffic"), string(replicationSubsystem))
replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming traffic"), string(replicationSubsystem), logger.WarningKind)
case "slow":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming traffic - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem))
replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming traffic - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem), logger.WarningKind)
default:
maxWorkers = min(maxWorkers, WorkerMaxLimit)
if p.ActiveWorkers() < maxWorkers {
@ -2125,9 +2145,9 @@ func (p *ReplicationPool) queueReplicaDeleteTask(doi DeletedObjectReplicationInf
p.mu.RUnlock()
switch prio {
case "fast":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming deletes"), string(replicationSubsystem))
replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming deletes"), string(replicationSubsystem), logger.WarningKind)
case "slow":
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to keep up with incoming deletes - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem))
replLogOnceIf(GlobalContext, fmt.Errorf("Unable to keep up with incoming deletes - we recommend increasing replication priority with `mc admin config set api replication_priority=auto`"), string(replicationSubsystem), logger.WarningKind)
default:
maxWorkers = min(maxWorkers, WorkerMaxLimit)
if p.ActiveWorkers() < maxWorkers {
@ -2219,6 +2239,8 @@ func getProxyTargets(ctx context.Context, bucket, object string, opts ObjectOpti
}
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil || cfg == nil {
replLogOnceIf(ctx, err, bucket)
return &madmin.BucketTargets{}
}
topts := replication.ObjectOpts{Name: object}
@ -2260,7 +2282,7 @@ func proxyHeadToRepTarget(ctx context.Context, bucket, object string, rs *HTTPRa
if rs != nil {
h, err := rs.ToHeader()
if err != nil {
logger.LogIf(ctx, fmt.Errorf("invalid range header for %s/%s(%s) - %w", bucket, object, opts.VersionID, err))
replLogIf(ctx, fmt.Errorf("invalid range header for %s/%s(%s) - %w", bucket, object, opts.VersionID, err))
continue
}
gopts.Set(xhttp.Range, h)
@ -2628,7 +2650,7 @@ func (s *replicationResyncer) PersistToDisk(ctx context.Context, objectAPI Objec
}
if updt {
if err := saveResyncStatus(ctx, bucket, brs, objectAPI); err != nil {
logger.LogIf(ctx, fmt.Errorf("could not save resync metadata to drive for %s - %w", bucket, err))
replLogIf(ctx, fmt.Errorf("could not save resync metadata to drive for %s - %w", bucket, err))
} else {
lastResyncStatusSave[bucket] = brs.LastUpdate
}
@ -2713,15 +2735,15 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
s.workerCh <- struct{}{}
}()
// Allocate new results channel to receive ObjectInfo.
objInfoCh := make(chan ObjectInfo)
objInfoCh := make(chan itemOrErr[ObjectInfo])
cfg, err := getReplicationConfig(ctx, opts.bucket)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed with %w", opts.bucket, opts.arn, err))
replLogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed with %w", opts.bucket, opts.arn, err))
return
}
tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, opts.bucket)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed %w", opts.bucket, opts.arn, err))
replLogIf(ctx, fmt.Errorf("replication resync of %s for arn %s failed %w", opts.bucket, opts.arn, err))
return
}
rcfg := replicationConfig{
@ -2734,12 +2756,12 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
TargetArn: opts.arn,
})
if len(tgtArns) != 1 {
logger.LogIf(ctx, fmt.Errorf("replication resync failed for %s - arn specified %s is missing in the replication config", opts.bucket, opts.arn))
replLogIf(ctx, fmt.Errorf("replication resync failed for %s - arn specified %s is missing in the replication config", opts.bucket, opts.arn))
return
}
tgt := globalBucketTargetSys.GetRemoteTargetClient(opts.bucket, opts.arn)
if tgt == nil {
logger.LogIf(ctx, fmt.Errorf("replication resync failed for %s - target could not be created for arn %s", opts.bucket, opts.arn))
replLogIf(ctx, fmt.Errorf("replication resync failed for %s - target could not be created for arn %s", opts.bucket, opts.arn))
return
}
// mark resync status as resync started
@ -2750,7 +2772,7 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
// Walk through all object versions - Walk() is always in ascending order needed to ensure
// delete marker replicated to target after object version is first created.
if err := objectAPI.Walk(ctx, opts.bucket, "", objInfoCh, WalkOptions{}); err != nil {
logger.LogIf(ctx, err)
replLogIf(ctx, err)
return
}
@ -2848,7 +2870,12 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
}
}(ctx, i)
}
for obj := range objInfoCh {
for res := range objInfoCh {
if res.Err != nil {
resyncStatus = ResyncFailed
replLogIf(ctx, res.Err)
return
}
select {
case <-s.resyncCancelCh:
resyncStatus = ResyncCanceled
@ -2857,11 +2884,11 @@ func (s *replicationResyncer) resyncBucket(ctx context.Context, objectAPI Object
return
default:
}
if heal && lastCheckpoint != "" && lastCheckpoint != obj.Name {
if heal && lastCheckpoint != "" && lastCheckpoint != res.Item.Name {
continue
}
lastCheckpoint = ""
roi := getHealReplicateObjectInfo(obj, rcfg)
roi := getHealReplicateObjectInfo(res.Item, rcfg)
if !roi.ExistingObjResync.mustResync() {
continue
}
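Walk results now arrive as `itemOrErr[ObjectInfo]`, so a failed listing surfaces as an explicit error on the channel instead of a silent early close; `resyncBucket` marks the resync failed and `getReplicationDiff` forwards the error to its consumer. The generic shape, reconstructed from the `res.Item`/`res.Err` usage above (the real type lives inside the server), might look like the following.

```go
package main

import (
	"errors"
	"fmt"
)

// itemOrErr carries either a walked item or a terminal error on one channel,
// matching the res.Item / res.Err usage in the hunks above.
type itemOrErr[T any] struct {
	Item T
	Err  error
}

// walk emits a few items and then fails, closing the channel afterwards.
func walk(out chan<- itemOrErr[string]) {
	defer close(out)
	for _, name := range []string{"a.txt", "b.txt"} {
		out <- itemOrErr[string]{Item: name}
	}
	out <- itemOrErr[string]{Err: errors.New("listing interrupted: drive offline")}
}

func main() {
	ch := make(chan itemOrErr[string], 4)
	go walk(ch)
	for res := range ch {
		if res.Err != nil {
			// A consumer can now distinguish "walk finished" from "walk failed"
			// and mark the whole operation failed, as resyncBucket does above.
			fmt.Println("resync failed:", res.Err)
			return
		}
		fmt.Println("processing", res.Item)
	}
}
```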
@ -3025,7 +3052,7 @@ func (p *ReplicationPool) loadResync(ctx context.Context, buckets []BucketInfo,
meta, err := loadBucketResyncMetadata(ctx, bucket, objAPI)
if err != nil {
if !errors.Is(err, errVolumeNotFound) {
logger.LogIf(ctx, err)
replLogIf(ctx, err)
}
continue
}
@ -3112,18 +3139,18 @@ func saveResyncStatus(ctx context.Context, bucket string, brs BucketReplicationR
func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string, opts madmin.ReplDiffOpts) (chan madmin.DiffInfo, error) {
cfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
replLogOnceIf(ctx, err, bucket)
return nil, err
}
tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
if err != nil {
logger.LogIf(ctx, err)
replLogIf(ctx, err)
return nil, err
}
objInfoCh := make(chan ObjectInfo, 10)
objInfoCh := make(chan itemOrErr[ObjectInfo], 10)
if err := objAPI.Walk(ctx, bucket, opts.Prefix, objInfoCh, WalkOptions{}); err != nil {
logger.LogIf(ctx, err)
replLogIf(ctx, err)
return nil, err
}
rcfg := replicationConfig{
@ -3133,11 +3160,17 @@ func getReplicationDiff(ctx context.Context, objAPI ObjectLayer, bucket string,
diffCh := make(chan madmin.DiffInfo, 4000)
go func() {
defer xioutil.SafeClose(diffCh)
for obj := range objInfoCh {
for res := range objInfoCh {
if res.Err != nil {
diffCh <- madmin.DiffInfo{Err: res.Err}
return
}
if contextCanceled(ctx) {
// Just consume input...
continue
}
obj := res.Item
// Ignore object prefixes which are excluded
// from versioning via the MinIO bucket versioning extension.
if globalBucketVersioningSys.PrefixSuspended(bucket, obj.Name) {
@ -3205,7 +3238,11 @@ func QueueReplicationHeal(ctx context.Context, bucket string, oi ObjectInfo, ret
if oi.ModTime.IsZero() {
return
}
rcfg, _ := getReplicationConfig(ctx, bucket)
rcfg, err := getReplicationConfig(ctx, bucket)
if err != nil {
replLogOnceIf(ctx, err, bucket)
return
}
tgts, _ := globalBucketTargetSys.ListBucketTargets(ctx, bucket)
queueReplicationHeal(ctx, bucket, oi, replicationConfig{
Config: rcfg,
@ -3507,7 +3544,7 @@ func (p *ReplicationPool) processMRF() {
continue
}
if err := p.queueMRFHeal(); err != nil && !osIsNotExist(err) {
logger.LogIf(p.ctx, err)
replLogIf(p.ctx, err)
}
pTimer.Reset(mrfQueueInterval)
case <-p.ctx.Done():

View File

@ -20,7 +20,6 @@ package cmd
import (
"context"
"errors"
"fmt"
"net/url"
"sync"
"time"
@ -32,7 +31,6 @@ import (
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/crypto"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
)
const (
@ -131,7 +129,7 @@ func (sys *BucketTargetSys) initHC(ep *url.URL) {
func newHCClient() *madmin.AnonymousClient {
clnt, e := madmin.NewAnonymousClientNoEndpoint()
if e != nil {
logger.LogOnceIf(GlobalContext, fmt.Errorf("WARNING: Unable to initialize health check client"), string(replicationSubsystem))
bugLogIf(GlobalContext, errors.New("Unable to initialize health check client"))
return nil
}
clnt.SetCustomTransport(globalRemoteTargetTransport)
@ -430,7 +428,7 @@ func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr str
if arn.Type == madmin.ReplicationService {
// reject removal of remote target if replication configuration is present
rcfg, err := getReplicationConfig(ctx, bucket)
if err == nil {
if err == nil && rcfg != nil {
for _, tgtArn := range rcfg.FilterTargetArns(replication.ObjectOpts{OpType: replication.AllReplicationType}) {
if err == nil && (tgtArn == arnStr || rcfg.RoleArn == arnStr) {
sys.RLock()
@ -624,7 +622,7 @@ func (sys *BucketTargetSys) set(bucket BucketInfo, meta BucketMetadata) {
for _, tgt := range cfg.Targets {
tgtClient, err := sys.getRemoteTargetClient(&tgt)
if err != nil {
logger.LogIf(GlobalContext, err)
replLogIf(GlobalContext, err)
continue
}
sys.arnRemotesMap[tgt.Arn] = arnTarget{Client: tgtClient}

View File

@ -28,7 +28,7 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
const (
@ -82,7 +82,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
}, r.URL)
return
}
if _, err := getReplicationConfig(ctx, bucket); err == nil && v.Suspended() {
if rc, _ := getReplicationConfig(ctx, bucket); rc != nil && v.Suspended() {
writeErrorResponse(ctx, w, APIError{
Code: "InvalidBucketState",
Description: "A replication configuration is present on this bucket, bucket wide versioning cannot be suspended.",
@ -108,7 +108,7 @@ func (api objectAPIHandlers) PutBucketVersioningHandler(w http.ResponseWriter, r
// We encode the xml bytes as base64 to ensure there are no encoding
// errors.
cfgStr := base64.StdEncoding.EncodeToString(configData)
logger.LogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
replLogIf(ctx, globalSiteReplicationSys.BucketMetaHook(ctx, madmin.SRBucketMeta{
Type: madmin.SRBucketMetaTypeVersionConfig,
Bucket: bucket,
Versioning: &cfgStr,

View File

@ -65,5 +65,5 @@ var (
MinioBannerName = "MinIO Object Storage Server"
// MinioLicense - MinIO server license.
MinioLicense = "GNU AGPLv3 <https://www.gnu.org/licenses/agpl-3.0.html>"
MinioLicense = "GNU AGPLv3 - https://www.gnu.org/licenses/agpl-3.0.html"
)

View File

@ -29,7 +29,6 @@ import (
"time"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
)
var callhomeLeaderLockTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)
@ -81,6 +80,9 @@ func runCallhome(ctx context.Context, objAPI ObjectLayer) bool {
ctx = lkctx.Context()
defer locker.Unlock(lkctx)
// Perform callhome once and then keep running it at regular intervals.
performCallhome(ctx)
callhomeTimer := time.NewTimer(globalCallhomeConfig.FrequencyDur())
defer callhomeTimer.Stop()
@ -112,7 +114,7 @@ func performCallhome(ctx context.Context) {
deadline := 10 * time.Second // Default deadline is 10secs for callhome
objectAPI := newObjectLayerFn()
if objectAPI == nil {
logger.LogIf(ctx, errors.New("Callhome: object layer not ready"))
internalLogIf(ctx, errors.New("Callhome: object layer not ready"))
return
}
@ -142,11 +144,14 @@ func performCallhome(ctx context.Context) {
select {
case hi, hasMore := <-healthInfoCh:
if !hasMore {
auditOptions := AuditLogOptions{Event: "callhome:diagnostics"}
// Received all data. Send to SUBNET and return
err := sendHealthInfo(ctx, healthInfo)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err))
internalLogIf(ctx, fmt.Errorf("Unable to perform callhome: %w", err))
auditOptions.Error = err.Error()
}
auditLogInternal(ctx, auditOptions)
return
}
healthInfo = hi
@ -180,12 +185,12 @@ func createHealthJSONGzip(ctx context.Context, healthInfo madmin.HealthInfo) []b
enc := json.NewEncoder(gzWriter)
if e := enc.Encode(header); e != nil {
logger.LogIf(ctx, fmt.Errorf("Could not encode health info header: %w", e))
internalLogIf(ctx, fmt.Errorf("Could not encode health info header: %w", e))
return nil
}
if e := enc.Encode(healthInfo); e != nil {
logger.LogIf(ctx, fmt.Errorf("Could not encode health info: %w", e))
internalLogIf(ctx, fmt.Errorf("Could not encode health info: %w", e))
return nil
}
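`runCallhome` now performs one callhome immediately before arming the timer, and each completed run emits an audit event with the error attached on failure. The scheduling part, run once up front and then on a fixed interval until cancelled, is a small reusable pattern; a sketch under that assumption:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// runPeriodic performs task once up front, then keeps re-running it every
// interval until ctx is cancelled, mirroring the immediate performCallhome
// call added before the timer in the hunk above.
func runPeriodic(ctx context.Context, interval time.Duration, task func(context.Context)) {
	task(ctx)

	t := time.NewTimer(interval)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
			task(ctx)
			t.Reset(interval)
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond)
	defer cancel()
	runPeriodic(ctx, 100*time.Millisecond, func(context.Context) { fmt.Println("callhome tick") })
}
```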

View File

@ -21,10 +21,8 @@ import (
"bufio"
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"encoding/gob"
"encoding/pem"
"errors"
"fmt"
"net"
@ -49,7 +47,6 @@ import (
"github.com/minio/console/api/operations"
consoleoauth2 "github.com/minio/console/pkg/auth/idp/oauth2"
consoleCerts "github.com/minio/console/pkg/certs"
"github.com/minio/kms-go/kes"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/set"
@ -58,19 +55,28 @@ import (
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/kms"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/certs"
"github.com/minio/pkg/v2/console"
"github.com/minio/pkg/v2/ellipses"
"github.com/minio/pkg/v2/env"
xnet "github.com/minio/pkg/v2/net"
"github.com/minio/pkg/v3/certs"
"github.com/minio/pkg/v3/console"
"github.com/minio/pkg/v3/env"
xnet "github.com/minio/pkg/v3/net"
"golang.org/x/term"
)
// serverDebugLog will enable debug printing
var serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
var currentReleaseTime time.Time
var (
serverDebugLog = env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn
currentReleaseTime time.Time
orchestrated = IsKubernetes() || IsDocker()
)
func init() {
if !term.IsTerminal(int(os.Stdout.Fd())) || !term.IsTerminal(int(os.Stderr.Fd())) {
color.TurnOff()
}
if env.Get("NO_COLOR", "") != "" || env.Get("TERM", "") == "dumb" {
color.TurnOff()
}
if runtime.GOOS == "windows" {
if mousetrap.StartedByExplorer() {
fmt.Printf("Don't double-click %s\n", os.Args[0])
@ -129,6 +135,9 @@ func minioConfigToConsoleFeatures() {
os.Setenv("CONSOLE_LOG_QUERY_AUTH_TOKEN", value)
}
}
if value := env.Get(config.EnvBrowserRedirectURL, ""); value != "" {
os.Setenv("CONSOLE_BROWSER_REDIRECT_URL", value)
}
// pass the console subpath configuration
if globalBrowserRedirectURL != nil {
subPath := path.Clean(pathJoin(strings.TrimSpace(globalBrowserRedirectURL.Path), SlashSeparator))
@ -167,7 +176,10 @@ func minioConfigToConsoleFeatures() {
os.Setenv("CONSOLE_STS_DURATION", valueSession)
}
os.Setenv("CONSOLE_MINIO_REGION", globalSite.Region)
os.Setenv("CONSOLE_MINIO_SITE_NAME", globalSite.Name())
os.Setenv("CONSOLE_MINIO_SITE_REGION", globalSite.Region())
os.Setenv("CONSOLE_MINIO_REGION", globalSite.Region())
os.Setenv("CONSOLE_CERT_PASSWD", env.Get("MINIO_CERT_PASSWD", ""))
// This section sets Browser (console) stored config
@ -390,20 +402,38 @@ func buildServerCtxt(ctx *cli.Context, ctxt *serverCtxt) (err error) {
ctxt.certsDirSet = true
}
memAvailable := availableMemory()
if ctx.IsSet("memlimit") || ctx.GlobalIsSet("memlimit") {
memlimit := ctx.String("memlimit")
if memlimit == "" {
memlimit = ctx.GlobalString("memlimit")
}
mlimit, err := humanize.ParseBytes(memlimit)
if err != nil {
return err
}
if mlimit > memAvailable {
logger.Info("WARNING: maximum memory available (%s) smaller than specified --memlimit=%s, ignoring --memlimit value",
humanize.IBytes(memAvailable), memlimit)
}
ctxt.MemLimit = mlimit
} else {
ctxt.MemLimit = memAvailable
}
if memAvailable < ctxt.MemLimit {
ctxt.MemLimit = memAvailable
}
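The block above parses `--memlimit` with `humanize.ParseBytes`, warns when the requested limit exceeds detected memory, and finally clamps the value to what is actually available. A condensed version of that resolution logic; the available-memory figure is passed in here as a stand-in for `availableMemory()`.

```go
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

// resolveMemLimit mirrors the hunk above: parse a human-readable limit such
// as "16GiB", warn when it exceeds what the host offers, and never return
// more than the detected available memory.
func resolveMemLimit(flag string, memAvailable uint64) (uint64, error) {
	limit := memAvailable
	if flag != "" {
		parsed, err := humanize.ParseBytes(flag)
		if err != nil {
			return 0, err
		}
		if parsed > memAvailable {
			fmt.Printf("WARNING: maximum memory available (%s) smaller than specified --memlimit=%s, ignoring --memlimit value\n",
				humanize.IBytes(memAvailable), flag)
		}
		limit = parsed
	}
	if memAvailable < limit {
		limit = memAvailable
	}
	return limit, nil
}

func main() {
	got, err := resolveMemLimit("64GiB", 32<<30) // 32 GiB actually available
	fmt.Println(humanize.IBytes(got), err)       // clamped to 32 GiB
}
```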
ctxt.FTP = ctx.StringSlice("ftp")
ctxt.SFTP = ctx.StringSlice("sftp")
ctxt.Interface = ctx.String("interface")
ctxt.UserTimeout = ctx.Duration("conn-user-timeout")
ctxt.ConnReadDeadline = ctx.Duration("conn-read-deadline")
ctxt.ConnWriteDeadline = ctx.Duration("conn-write-deadline")
ctxt.ConnClientReadDeadline = ctx.Duration("conn-client-read-deadline")
ctxt.ConnClientWriteDeadline = ctx.Duration("conn-client-write-deadline")
ctxt.ShutdownTimeout = ctx.Duration("shutdown-timeout")
ctxt.SendBufSize = ctx.Int("send-buf-size")
ctxt.RecvBufSize = ctx.Int("recv-buf-size")
ctxt.IdleTimeout = ctx.Duration("idle-timeout")
ctxt.ReadHeaderTimeout = ctx.Duration("read-header-timeout")
ctxt.MaxIdleConnsPerHost = ctx.Int("max-idle-conns-per-host")
ctxt.UserTimeout = ctx.Duration("conn-user-timeout")
ctxt.ShutdownTimeout = ctx.Duration("shutdown-timeout")
if conf := ctx.String("config"); len(conf) > 0 {
err = mergeServerCtxtFromConfigFile(conf, ctxt)
@ -494,7 +524,11 @@ func runDNSCache(ctx *cli.Context) {
dnsTTL := ctx.Duration("dns-cache-ttl")
// Check if we have configured a custom DNS cache TTL.
if dnsTTL <= 0 {
dnsTTL = 10 * time.Minute
if orchestrated {
dnsTTL = 30 * time.Second
} else {
dnsTTL = 10 * time.Minute
}
}
// Call to refresh will refresh names in cache.
@ -757,12 +791,7 @@ func serverHandleEnvVars() {
for _, endpoint := range minioEndpoints {
if net.ParseIP(endpoint) == nil {
// Checking if the IP is a DNS entry.
lookupHost := globalDNSCache.LookupHost
if IsKubernetes() || IsDocker() {
lookupHost = net.DefaultResolver.LookupHost
}
addrs, err := lookupHost(GlobalContext, endpoint)
addrs, err := globalDNSCache.LookupHost(GlobalContext, endpoint)
if err != nil {
logger.FatalIf(err, "Unable to initialize MinIO server with [%s] invalid entry found in MINIO_PUBLIC_IPS", endpoint)
}
@ -851,125 +880,28 @@ func loadRootCredentials() {
// Initialize KMS global variable after validating and loading the configuration.
// It depends on KMS env variables and global cli flags.
func handleKMSConfig() {
if env.IsSet(kms.EnvKMSSecretKey) && env.IsSet(kms.EnvKESEndpoint) {
logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKMSSecretKey, kms.EnvKESEndpoint))
present, err := kms.IsPresent()
if err != nil {
logger.Fatal(err, "Invalid KMS configuration specified")
}
if !present {
return
}
if env.IsSet(kms.EnvKMSSecretKey) {
KMS, err := kms.Parse(env.Get(kms.EnvKMSSecretKey, ""))
if err != nil {
logger.Fatal(err, "Unable to parse the KMS secret key inherited from the shell environment")
}
GlobalKMS = KMS
KMS, err := kms.Connect(GlobalContext, &kms.ConnectionOptions{
CADir: globalCertsCADir.Get(),
})
if err != nil {
logger.Fatal(err, "Failed to connect to KMS")
}
if env.IsSet(kms.EnvKESEndpoint) {
if env.IsSet(kms.EnvKESAPIKey) {
if env.IsSet(kms.EnvKESClientKey) {
logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientKey))
}
if env.IsSet(kms.EnvKESClientCert) {
logger.Fatal(errors.New("ambiguous KMS configuration"), fmt.Sprintf("The environment contains %q as well as %q", kms.EnvKESAPIKey, kms.EnvKESClientCert))
}
}
if !env.IsSet(kms.EnvKESKeyName) {
logger.Fatal(errors.New("Invalid KES configuration"), fmt.Sprintf("The mandatory environment variable %q not set", kms.EnvKESKeyName))
}
var endpoints []string
for _, endpoint := range strings.Split(env.Get(kms.EnvKESEndpoint, ""), ",") {
if strings.TrimSpace(endpoint) == "" {
continue
}
if !ellipses.HasEllipses(endpoint) {
endpoints = append(endpoints, endpoint)
continue
}
patterns, err := ellipses.FindEllipsesPatterns(endpoint)
if err != nil {
logger.Fatal(err, fmt.Sprintf("Invalid KES endpoint %q", endpoint))
}
for _, lbls := range patterns.Expand() {
endpoints = append(endpoints, strings.Join(lbls, ""))
}
}
rootCAs, err := certs.GetRootCAs(env.Get(kms.EnvKESServerCA, globalCertsCADir.Get()))
if err != nil {
logger.Fatal(err, fmt.Sprintf("Unable to load X.509 root CAs for KES from %q", env.Get(kms.EnvKESServerCA, globalCertsCADir.Get())))
}
var kmsConf kms.Config
if env.IsSet(kms.EnvKESAPIKey) {
key, err := kes.ParseAPIKey(env.Get(kms.EnvKESAPIKey, ""))
if err != nil {
logger.Fatal(err, fmt.Sprintf("Failed to parse KES API key from %q", env.Get(kms.EnvKESAPIKey, "")))
}
kmsConf = kms.Config{
Endpoints: endpoints,
DefaultKeyID: env.Get(kms.EnvKESKeyName, ""),
APIKey: key,
RootCAs: rootCAs,
}
} else {
loadX509KeyPair := func(certFile, keyFile string) (tls.Certificate, error) {
// Manually load the certificate and private key into memory.
// We need to check whether the private key is encrypted, and
// if so, decrypt it using the user-provided password.
certBytes, err := os.ReadFile(certFile)
if err != nil {
return tls.Certificate{}, fmt.Errorf("Unable to load KES client certificate as specified by the shell environment: %v", err)
}
keyBytes, err := os.ReadFile(keyFile)
if err != nil {
return tls.Certificate{}, fmt.Errorf("Unable to load KES client private key as specified by the shell environment: %v", err)
}
privateKeyPEM, rest := pem.Decode(bytes.TrimSpace(keyBytes))
if len(rest) != 0 {
return tls.Certificate{}, errors.New("Unable to load KES client private key as specified by the shell environment: private key contains additional data")
}
if x509.IsEncryptedPEMBlock(privateKeyPEM) {
keyBytes, err = x509.DecryptPEMBlock(privateKeyPEM, []byte(env.Get(kms.EnvKESClientPassword, "")))
if err != nil {
return tls.Certificate{}, fmt.Errorf("Unable to decrypt KES client private key as specified by the shell environment: %v", err)
}
keyBytes = pem.EncodeToMemory(&pem.Block{Type: privateKeyPEM.Type, Bytes: keyBytes})
}
certificate, err := tls.X509KeyPair(certBytes, keyBytes)
if err != nil {
return tls.Certificate{}, fmt.Errorf("Unable to load KES client certificate as specified by the shell environment: %v", err)
}
return certificate, nil
}
reloadCertEvents := make(chan tls.Certificate, 1)
certificate, err := certs.NewCertificate(env.Get(kms.EnvKESClientCert, ""), env.Get(kms.EnvKESClientKey, ""), loadX509KeyPair)
if err != nil {
logger.Fatal(err, "Failed to load KES client certificate")
}
certificate.Watch(context.Background(), 15*time.Minute, syscall.SIGHUP)
certificate.Notify(reloadCertEvents)
kmsConf = kms.Config{
Endpoints: endpoints,
DefaultKeyID: env.Get(kms.EnvKESKeyName, ""),
Certificate: certificate,
ReloadCertEvents: reloadCertEvents,
RootCAs: rootCAs,
}
}
KMS, err := kms.NewWithConfig(kmsConf)
if err != nil {
logger.Fatal(err, "Unable to initialize a connection to KES as specified by the shell environment")
}
// We check that the default key ID exists or try to create it otherwise.
// This implicitly checks that we can communicate to KES. We don't treat
// a policy error as failure condition since MinIO may not have the permission
// to create keys - just to generate/decrypt data encryption keys.
if err = KMS.CreateKey(context.Background(), env.Get(kms.EnvKESKeyName, "")); err != nil && !errors.Is(err, kes.ErrKeyExists) && !errors.Is(err, kes.ErrNotAllowed) {
logger.Fatal(err, "Unable to initialize a connection to KES as specified by the shell environment")
}
GlobalKMS = KMS
if _, err = KMS.GenerateKey(GlobalContext, &kms.GenerateKeyRequest{}); errors.Is(err, kms.ErrKeyNotFound) {
err = KMS.CreateKey(GlobalContext, &kms.CreateKeyRequest{Name: KMS.DefaultKey})
}
if err != nil && !errors.Is(err, kms.ErrKeyExists) && !errors.Is(err, kms.ErrPermission) {
logger.Fatal(err, "Failed to connect to KMS")
}
GlobalKMS = KMS
}
func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secureConn bool, err error) {
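The KMS bootstrap above collapses the hand-rolled KES environment handling into three steps: detect a configured KMS (`kms.IsPresent`), connect (`kms.Connect`), then make sure the default key exists, probing with `GenerateKey`, creating it on `ErrKeyNotFound`, and tolerating `ErrKeyExists`/`ErrPermission`. Since the server's internal `kms` package cannot be imported outside the repo, the sketch below reproduces only the ensure-default-key shape against a hypothetical client interface.

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Hypothetical sentinels standing in for kms.ErrKeyNotFound, kms.ErrKeyExists
// and kms.ErrPermission from the hunk above.
var (
	errKeyNotFound = errors.New("kms: key not found")
	errKeyExists   = errors.New("kms: key already exists")
	errPermission  = errors.New("kms: not allowed")
)

// KMS is a minimal, hypothetical client surface for this sketch.
type KMS interface {
	DefaultKey() string
	GenerateKey(ctx context.Context, keyID string) error
	CreateKey(ctx context.Context, keyID string) error
}

// ensureDefaultKey mirrors the new flow: probe by generating a data key with
// the default key; if the key does not exist, try to create it; treat
// "already exists" and "no permission to create" as success.
func ensureDefaultKey(ctx context.Context, k KMS) error {
	err := k.GenerateKey(ctx, k.DefaultKey())
	if errors.Is(err, errKeyNotFound) {
		err = k.CreateKey(ctx, k.DefaultKey())
	}
	if err != nil && !errors.Is(err, errKeyExists) && !errors.Is(err, errPermission) {
		return fmt.Errorf("failed to connect to KMS: %w", err)
	}
	return nil
}

// fakeKMS exercises the "key missing, creation allowed" path.
type fakeKMS struct{ created bool }

func (f *fakeKMS) DefaultKey() string { return "minio-default-key" }
func (f *fakeKMS) GenerateKey(ctx context.Context, keyID string) error {
	if !f.created {
		return errKeyNotFound
	}
	return nil
}
func (f *fakeKMS) CreateKey(ctx context.Context, keyID string) error { f.created = true; return nil }

func main() {
	fmt.Println(ensureDefaultKey(context.Background(), &fakeKMS{}))
}
```

Treating "permission denied on create" as success keeps deployments working where MinIO may only generate and decrypt data keys but is not allowed to create master keys.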
@ -1043,7 +975,7 @@ func getTLSConfig() (x509Certs []*x509.Certificate, manager *certs.Manager, secu
}
if err = manager.AddCertificate(certFile, keyFile); err != nil {
err = fmt.Errorf("Unable to load TLS certificate '%s,%s': %w", certFile, keyFile, err)
logger.LogIf(GlobalContext, err, logger.ErrorKind)
bootLogIf(GlobalContext, err, logger.ErrorKind)
}
}
secureConn = true

View File

@ -56,7 +56,7 @@ import (
"github.com/minio/minio/internal/crypto"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/env"
"github.com/minio/pkg/v3/env"
)
func initHelp() {
@ -240,6 +240,11 @@ func initHelp() {
Description: "manage Browser HTTP specific features, such as Security headers, etc.",
Optional: true,
},
config.HelpKV{
Key: config.ILMSubSys,
Description: "manage ILM settings for expiration and transition workers",
Optional: true,
},
}
if globalIsErasure {
@ -288,6 +293,7 @@ func initHelp() {
config.DriveSubSys: drive.HelpDrive,
config.CacheSubSys: cache.Help,
config.BrowserSubSys: browser.Help,
config.ILMSubSys: ilm.Help,
}
config.RegisterHelpSubSys(helpMap)
@ -362,7 +368,7 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
}
case config.IdentityOpenIDSubSys:
if _, err := openid.LookupConfig(s,
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil {
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region()); err != nil {
return err
}
case config.IdentityLDAPSubSys:
@ -383,7 +389,7 @@ func validateSubSysConfig(ctx context.Context, s config.Config, subSys string, o
}
case config.IdentityPluginSubSys:
if _, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default],
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil {
NewHTTPTransport(), xhttp.DrainBody, globalSite.Region()); err != nil {
return err
}
case config.SubnetSubSys:
@ -479,7 +485,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
dnsURL, dnsUser, dnsPass, err := env.LookupEnv(config.EnvDNSWebhook)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
configLogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
}
if err == nil && dnsURL != "" {
bootstrapTraceMsg("initialize remote bucket DNS store")
@ -487,27 +493,27 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
dns.Authentication(dnsUser, dnsPass),
dns.RootCAs(globalRootCAs))
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
configLogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err))
}
}
etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
configLogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
}
if etcdCfg.Enabled {
bootstrapTraceMsg("initialize etcd store")
globalEtcdClient, err = etcd.New(etcdCfg)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
configLogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err))
}
if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
if globalDNSConfig != nil {
// if global DNS is already configured, indicate with a warning, in case
// users are confused.
logger.LogIf(ctx, fmt.Errorf("DNS store is already configured with %s, etcd is not used for DNS store", globalDNSConfig))
configLogIf(ctx, fmt.Errorf("DNS store is already configured with %s, etcd is not used for DNS store", globalDNSConfig))
} else {
globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config,
dns.DomainNames(globalDomainNames),
@ -516,7 +522,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
dns.CoreDNSPath(etcdCfg.CoreDNSPath),
)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w",
configLogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w",
globalDomainNames, err))
}
}
@ -530,10 +536,11 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
// but not federation.
globalBucketFederation = etcdCfg.PathPrefix == "" && etcdCfg.Enabled
globalSite, err = config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default])
siteCfg, err := config.LookupSite(s[config.SiteSubSys][config.Default], s[config.RegionSubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err))
configLogIf(ctx, fmt.Errorf("Invalid site configuration: %w", err))
}
globalSite.Update(siteCfg)
globalAutoEncryption = crypto.LookupAutoEncryption() // Enable auto-encryption if enabled
if globalAutoEncryption && GlobalKMS == nil {
@ -545,19 +552,19 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) {
bootstrapTraceMsg("initialize the event notification targets")
globalNotifyTargetList, err = notify.FetchEnabledTargets(GlobalContext, s, transport)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
configLogIf(ctx, fmt.Errorf("Unable to initialize notification target(s): %w", err))
}
bootstrapTraceMsg("initialize the lambda targets")
globalLambdaTargetList, err = lambda.FetchEnabledTargets(GlobalContext, s, transport)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize lambda target(s): %w", err))
configLogIf(ctx, fmt.Errorf("Unable to initialize lambda target(s): %w", err))
}
bootstrapTraceMsg("applying the dynamic configuration")
// Apply dynamic config values
if err := applyDynamicConfig(ctx, objAPI, s); err != nil {
logger.LogIf(ctx, err)
configLogIf(ctx, err)
}
}
@ -566,21 +573,18 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
return errServerNotInitialized
}
var errs []error
setDriveCounts := objAPI.SetDriveCounts()
switch subSys {
case config.APISubSys:
apiConfig, err := api.LookupConfig(s[config.APISubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
configLogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
}
globalAPIConfig.init(apiConfig, setDriveCounts)
globalAPIConfig.init(apiConfig, setDriveCounts, objAPI.Legacy())
autoGenerateRootCredentials() // Generate the KMS root credentials here since we don't know whether API root access is disabled until now.
// Initialize remote instance transport once.
getRemoteInstanceTransportOnce.Do(func() {
getRemoteInstanceTransport = NewHTTPTransportWithTimeout(apiConfig.RemoteTransportDeadline)
})
setRemoteInstanceTransport(NewHTTPTransportWithTimeout(apiConfig.RemoteTransportDeadline))
case config.CompressionSubSys:
cmpCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default])
if err != nil {
@ -592,95 +596,96 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
case config.HealSubSys:
healCfg, err := heal.LookupConfig(s[config.HealSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply heal config: %w", err)
errs = append(errs, fmt.Errorf("Unable to apply heal config: %w", err))
} else {
globalHealConfig.Update(healCfg)
}
globalHealConfig.Update(healCfg)
case config.BatchSubSys:
batchCfg, err := batch.LookupConfig(s[config.BatchSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply batch config: %w", err)
errs = append(errs, fmt.Errorf("Unable to apply batch config: %w", err))
} else {
globalBatchConfig.Update(batchCfg)
}
globalBatchConfig.Update(batchCfg)
case config.ScannerSubSys:
scannerCfg, err := scanner.LookupConfig(s[config.ScannerSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply scanner config: %w", err)
errs = append(errs, fmt.Errorf("Unable to apply scanner config: %w", err))
} else {
// update dynamic scanner values.
scannerIdleMode.Store(scannerCfg.IdleMode)
scannerCycle.Store(scannerCfg.Cycle)
scannerExcessObjectVersions.Store(scannerCfg.ExcessVersions)
scannerExcessFolders.Store(scannerCfg.ExcessFolders)
configLogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
}
// update dynamic scanner values.
scannerIdleMode.Store(scannerCfg.IdleMode)
scannerCycle.Store(scannerCfg.Cycle)
scannerExcessObjectVersions.Store(scannerCfg.ExcessVersions)
scannerExcessFolders.Store(scannerCfg.ExcessFolders)
logger.LogIf(ctx, scannerSleeper.Update(scannerCfg.Delay, scannerCfg.MaxWait))
case config.LoggerWebhookSubSys:
loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.LoggerWebhookSubSys)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load logger webhook config: %w", err))
configLogIf(ctx, fmt.Errorf("Unable to load logger webhook config: %w", err))
}
userAgent := getUserAgent(getMinioMode())
for n, l := range loggerCfg.HTTP {
if l.Enabled {
l.LogOnce = logger.LogOnceConsoleIf
l.LogOnceIf = configLogOnceConsoleIf
l.UserAgent = userAgent
l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
}
loggerCfg.HTTP[n] = l
}
if errs := logger.UpdateSystemTargets(ctx, loggerCfg); len(errs) > 0 {
logger.LogIf(ctx, fmt.Errorf("Unable to update logger webhook config: %v", errs))
if errs := logger.UpdateHTTPWebhooks(ctx, loggerCfg.HTTP); len(errs) > 0 {
configLogIf(ctx, fmt.Errorf("Unable to update logger webhook config: %v", errs))
}
case config.AuditWebhookSubSys:
loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.AuditWebhookSubSys)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load audit webhook config: %w", err))
configLogIf(ctx, fmt.Errorf("Unable to load audit webhook config: %w", err))
}
userAgent := getUserAgent(getMinioMode())
for n, l := range loggerCfg.AuditWebhook {
if l.Enabled {
l.LogOnce = logger.LogOnceConsoleIf
l.LogOnceIf = configLogOnceConsoleIf
l.UserAgent = userAgent
l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey)
}
loggerCfg.AuditWebhook[n] = l
}
if errs := logger.UpdateAuditWebhookTargets(ctx, loggerCfg); len(errs) > 0 {
logger.LogIf(ctx, fmt.Errorf("Unable to update audit webhook targets: %v", errs))
if errs := logger.UpdateAuditWebhooks(ctx, loggerCfg.AuditWebhook); len(errs) > 0 {
configLogIf(ctx, fmt.Errorf("Unable to update audit webhook targets: %v", errs))
}
case config.AuditKafkaSubSys:
loggerCfg, err := logger.LookupConfigForSubSys(ctx, s, config.AuditKafkaSubSys)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load audit kafka config: %w", err))
configLogIf(ctx, fmt.Errorf("Unable to load audit kafka config: %w", err))
}
for n, l := range loggerCfg.AuditKafka {
if l.Enabled {
if l.TLS.Enable {
l.TLS.RootCAs = globalRootCAs
}
l.LogOnce = logger.LogOnceIf
l.LogOnce = configLogOnceIf
loggerCfg.AuditKafka[n] = l
}
}
if errs := logger.UpdateAuditKafkaTargets(ctx, loggerCfg); len(errs) > 0 {
logger.LogIf(ctx, fmt.Errorf("Unable to update audit kafka targets: %v", errs))
configLogIf(ctx, fmt.Errorf("Unable to update audit kafka targets: %v", errs))
}
case config.StorageClassSubSys:
for i, setDriveCount := range setDriveCounts {
sc, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default], setDriveCount)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
configLogIf(ctx, fmt.Errorf("Unable to initialize storage class config: %w", err))
break
}
// if we validated all setDriveCounts and it was successful
// proceed to store the correct storage class globally.
if i == len(setDriveCounts)-1 {
if i == 0 {
globalStorageClass.Update(sc)
}
}
case config.SubnetSubSys:
subnetConfig, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default], globalProxyTransport)
subnetConfig, err := subnet.LookupConfig(s[config.SubnetSubSys][config.Default], globalRemoteTargetTransport)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err))
configLogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err))
} else {
globalSubnetConfig.Update(subnetConfig, globalIsCICD)
globalSubnetConfig.ApplyEnv() // update environment settings for Console UI
@ -688,7 +693,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
case config.CallhomeSubSys:
callhomeCfg, err := callhome.LookupConfig(s[config.CallhomeSubSys][config.Default])
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load callhome config: %w", err))
configLogIf(ctx, fmt.Errorf("Unable to load callhome config: %w", err))
} else {
enable := callhomeCfg.Enable && !globalCallhomeConfig.Enabled()
globalCallhomeConfig.Update(callhomeCfg)
@ -697,45 +702,50 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
}
}
case config.DriveSubSys:
if driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default]); err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err))
driveConfig, err := drive.LookupConfig(s[config.DriveSubSys][config.Default])
if err != nil {
configLogIf(ctx, fmt.Errorf("Unable to load drive config: %w", err))
} else {
err := globalDriveConfig.Update(driveConfig)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err))
if err = globalDriveConfig.Update(driveConfig); err != nil {
configLogIf(ctx, fmt.Errorf("Unable to update drive config: %v", err))
}
}
case config.CacheSubSys:
cacheCfg, err := cache.LookupConfig(s[config.CacheSubSys][config.Default], globalRemoteTargetTransport)
if err != nil {
logger.LogIf(ctx, fmt.Errorf("Unable to load cache config: %w", err))
configLogIf(ctx, fmt.Errorf("Unable to load cache config: %w", err))
} else {
globalCacheConfig.Update(cacheCfg)
}
case config.BrowserSubSys:
browserCfg, err := browser.LookupConfig(s[config.BrowserSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply browser config: %w", err)
errs = append(errs, fmt.Errorf("Unable to apply browser config: %w", err))
} else {
globalBrowserConfig.Update(browserCfg)
}
globalBrowserConfig.Update(browserCfg)
case config.ILMSubSys:
ilmCfg, err := ilm.LookupConfig(s[config.ILMSubSys][config.Default])
if err != nil {
return fmt.Errorf("Unable to apply ilm config: %w", err)
errs = append(errs, fmt.Errorf("Unable to apply ilm config: %w", err))
} else {
if globalTransitionState != nil {
globalTransitionState.UpdateWorkers(ilmCfg.TransitionWorkers)
}
if globalExpiryState != nil {
globalExpiryState.ResizeWorkers(ilmCfg.ExpirationWorkers)
}
globalILMConfig.update(ilmCfg)
}
if globalTransitionState != nil {
globalTransitionState.UpdateWorkers(ilmCfg.TransitionWorkers)
}
if globalExpiryState != nil {
globalExpiryState.ResizeWorkers(ilmCfg.ExpirationWorkers)
}
globalILMConfig.update(ilmCfg)
}
globalServerConfigMu.Lock()
defer globalServerConfigMu.Unlock()
if globalServerConfig != nil {
globalServerConfig[subSys] = s[subSys]
}
if len(errs) > 0 {
return errors.Join(errs...)
}
return nil
}
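The rewritten applyDynamicConfigForSubSys above no longer bails out on the first bad sub-system; it accumulates failures in errs and reports them together via errors.Join, so one misconfigured sub-system no longer blocks the remaining dynamic settings from being applied. A minimal sketch of that accumulate-then-join pattern using only the standard library (applyA/applyB are hypothetical stand-ins for the heal/batch/browser/ilm lookups):

```
package main

import (
	"errors"
	"fmt"
)

// applyA and applyB are hypothetical stand-ins for per-sub-system
// config lookups such as heal, batch, browser or ilm handling.
func applyA() error { return nil }
func applyB() error { return errors.New("invalid batch config") }

// applyAll collects every failure instead of aborting on the first
// one, so one bad sub-system does not block the others from updating.
func applyAll() error {
	var errs []error
	if err := applyA(); err != nil {
		errs = append(errs, fmt.Errorf("unable to apply A config: %w", err))
	}
	if err := applyB(); err != nil {
		errs = append(errs, fmt.Errorf("unable to apply B config: %w", err))
	}
	if len(errs) > 0 {
		return errors.Join(errs...) // reports all collected failures at once
	}
	return nil
}

func main() {
	fmt.Println(applyAll())
}
```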
@ -750,41 +760,33 @@ func autoGenerateRootCredentials() {
return
}
if manager, ok := GlobalKMS.(kms.KeyManager); ok {
stat, err := GlobalKMS.Stat(GlobalContext)
if err != nil {
logger.LogIf(GlobalContext, err, "Unable to generate root credentials using KMS")
return
}
aKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root access key")})
if errors.Is(err, kes.ErrNotAllowed) || errors.Is(err, errors.ErrUnsupported) {
return // If we don't have permission to compute the HMAC, don't change the cred.
}
if err != nil {
logger.Fatal(err, "Unable to generate root access key using KMS")
}
aKey, err := manager.HMAC(GlobalContext, stat.DefaultKey, []byte("root access key"))
if errors.Is(err, kes.ErrNotAllowed) {
return // If we don't have permission to compute the HMAC, don't change the cred.
}
if err != nil {
logger.Fatal(err, "Unable to generate root access key using KMS")
}
sKey, err := GlobalKMS.MAC(GlobalContext, &kms.MACRequest{Message: []byte("root secret key")})
if err != nil {
// Here, we must have permission. Otherwise, we would have failed earlier.
logger.Fatal(err, "Unable to generate root secret key using KMS")
}
sKey, err := manager.HMAC(GlobalContext, stat.DefaultKey, []byte("root secret key"))
if err != nil {
// Here, we must have permission. Otherwise, we would have failed earlier.
logger.Fatal(err, "Unable to generate root secret key using KMS")
}
accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey))
if err != nil {
logger.Fatal(err, "Unable to generate root access key")
}
secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey))
if err != nil {
logger.Fatal(err, "Unable to generate root secret key")
}
accessKey, err := auth.GenerateAccessKey(20, bytes.NewReader(aKey))
if err != nil {
logger.Fatal(err, "Unable to generate root access key")
}
secretKey, err := auth.GenerateSecretKey(32, bytes.NewReader(sKey))
if err != nil {
logger.Fatal(err, "Unable to generate root secret key")
}
logger.Info("Automatically generated root access key and secret key with the KMS")
globalActiveCred = auth.Credentials{
AccessKey: accessKey,
SecretKey: secretKey,
}
logger.Info("Automatically generated root access key and secret key with the KMS")
globalActiveCred = auth.Credentials{
AccessKey: accessKey,
SecretKey: secretKey,
}
}
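autoGenerateRootCredentials above derives the root access and secret keys from KMS-computed MACs, so every node talking to the same KMS deterministically arrives at the same credentials. A rough, self-contained illustration of that idea using the standard library; crypto/hmac stands in for the KMS MAC call, and the key-shaping rules of minio/pkg's auth helpers are not reproduced here:

```
package main

import (
	"crypto/hmac"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

// deriveCred deterministically maps a secret shared via the KMS to a
// fixed-length credential string. Every node computing the same MAC
// ends up with the same access/secret key pair.
func deriveCred(kmsKey []byte, label string, length int) string {
	mac := hmac.New(sha256.New, kmsKey)
	mac.Write([]byte(label))
	sum := mac.Sum(nil)
	enc := base64.RawURLEncoding.EncodeToString(sum)
	if len(enc) > length {
		enc = enc[:length]
	}
	return enc
}

func main() {
	kmsKey := []byte("example-kms-default-key-material") // hypothetical key material
	fmt.Println("access key:", deriveCred(kmsKey, "root access key", 20))
	fmt.Println("secret key:", deriveCred(kmsKey, "root secret key", 40))
}
```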


@ -39,8 +39,8 @@ func TestServerConfig(t *testing.T) {
t.Fatalf("Init Test config failed")
}
if globalSite.Region != globalMinioDefaultRegion {
t.Errorf("Expecting region `us-east-1` found %s", globalSite.Region)
if globalSite.Region() != globalMinioDefaultRegion {
t.Errorf("Expecting region `us-east-1` found %s", globalSite.Region())
}
// Set new region and verify.
@ -52,8 +52,8 @@ func TestServerConfig(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if site.Region != "us-west-1" {
t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region)
if site.Region() != "us-west-1" {
t.Errorf("Expecting region `us-west-1` found %s", globalSite.Region())
}
if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {


@ -33,8 +33,8 @@ import (
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/event/target"
"github.com/minio/minio/internal/logger"
xnet "github.com/minio/pkg/v2/net"
"github.com/minio/pkg/v2/quick"
xnet "github.com/minio/pkg/v3/net"
"github.com/minio/pkg/v3/quick"
)
// Save config file to corresponding backend


@ -27,7 +27,7 @@ import (
"github.com/minio/minio/internal/config/policy/opa"
"github.com/minio/minio/internal/config/storageclass"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/quick"
"github.com/minio/pkg/v3/quick"
)
// FileLogger is introduced to workaround the dependency about logrus


@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@ -20,6 +20,7 @@ package cmd
import (
"container/ring"
"context"
"io"
"sync"
"sync/atomic"
@ -28,8 +29,8 @@ import (
"github.com/minio/minio/internal/logger/target/console"
"github.com/minio/minio/internal/logger/target/types"
"github.com/minio/minio/internal/pubsub"
"github.com/minio/pkg/v2/logger/message/log"
xnet "github.com/minio/pkg/v2/net"
"github.com/minio/pkg/v3/logger/message/log"
xnet "github.com/minio/pkg/v3/net"
)
// number of log messages to buffer
@ -49,10 +50,10 @@ type HTTPConsoleLoggerSys struct {
// NewConsoleLogger - creates new HTTPConsoleLoggerSys with all nodes subscribed to
// the console logging pub sub system
func NewConsoleLogger(ctx context.Context) *HTTPConsoleLoggerSys {
func NewConsoleLogger(ctx context.Context, w io.Writer) *HTTPConsoleLoggerSys {
return &HTTPConsoleLoggerSys{
pubsub: pubsub.New[log.Info, madmin.LogMask](8),
console: console.New(),
console: console.New(w),
logBuf: ring.New(defaultLogBufferCount),
}
}
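NewConsoleLogger now accepts an io.Writer instead of writing straight to the process output, which makes the console target injectable in tests. A tiny sketch of that injection pattern; consoleLogger below is a hypothetical stand-in, not the MinIO type:

```
package main

import (
	"bytes"
	"fmt"
	"io"
)

// consoleLogger shows why the constructor takes an io.Writer: tests can
// capture output in a buffer instead of asserting against os.Stdout.
type consoleLogger struct{ w io.Writer }

func newConsoleLogger(w io.Writer) *consoleLogger { return &consoleLogger{w: w} }

func (c *consoleLogger) Send(msg string) error {
	_, err := fmt.Fprintln(c.w, msg)
	return err
}

func main() {
	var buf bytes.Buffer
	l := newConsoleLogger(&buf)
	_ = l.Send("startup banner")
	fmt.Print(buf.String()) // captured: "startup banner"
}
```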


@ -36,13 +36,14 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/object/lock"
objectlock "github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/replication"
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config/heal"
"github.com/minio/minio/internal/event"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/console"
"github.com/minio/pkg/v3/console"
uatomic "go.uber.org/atomic"
)
@ -52,7 +53,7 @@ const (
dataScannerCompactLeastObject = 500 // Compact when there is less than this many objects in a branch.
dataScannerCompactAtChildren = 10000 // Compact when there are this many children in a branch.
dataScannerCompactAtFolders = dataScannerCompactAtChildren / 4 // Compact when this many subfolders in a single folder.
dataScannerForceCompactAtFolders = 1_000_000 // Compact when this many subfolders in a single folder (even top level).
dataScannerForceCompactAtFolders = 250_000 // Compact when this many subfolders in a single folder (even top level).
dataScannerStartDelay = 1 * time.Minute // Time to wait on startup and between cycles.
healDeleteDangling = true
@ -63,11 +64,12 @@ var (
globalHealConfig heal.Config
// Sleeper values are updated when config is loaded.
scannerSleeper = newDynamicSleeper(2, time.Second, true) // Keep defaults same as config defaults
scannerCycle = uatomic.NewDuration(dataScannerStartDelay)
scannerIdleMode = uatomic.NewInt32(0) // default is throttled when idle
scannerExcessObjectVersions = uatomic.NewInt64(100)
scannerExcessFolders = uatomic.NewInt64(50000)
scannerSleeper = newDynamicSleeper(2, time.Second, true) // Keep defaults same as config defaults
scannerCycle = uatomic.NewDuration(dataScannerStartDelay)
scannerIdleMode = uatomic.NewInt32(0) // default is throttled when idle
scannerExcessObjectVersions = uatomic.NewInt64(100)
scannerExcessObjectVersionsTotalSize = uatomic.NewInt64(1024 * 1024 * 1024 * 1024) // 1 TB
scannerExcessFolders = uatomic.NewInt64(50000)
)
// initDataScanner will start the scanner in the background.
@ -122,13 +124,13 @@ func readBackgroundHealInfo(ctx context.Context, objAPI ObjectLayer) backgroundH
buf, err := readConfig(ctx, objAPI, backgroundHealInfoPath)
if err != nil {
if !errors.Is(err, errConfigNotFound) {
logger.LogOnceIf(ctx, err, backgroundHealInfoPath)
internalLogOnceIf(ctx, err, backgroundHealInfoPath)
}
return backgroundHealInfo{}
}
var info backgroundHealInfo
if err = json.Unmarshal(buf, &info); err != nil {
logger.LogOnceIf(ctx, err, backgroundHealInfoPath)
bugLogIf(ctx, err, backgroundHealInfoPath)
}
return info
}
@ -140,13 +142,13 @@ func saveBackgroundHealInfo(ctx context.Context, objAPI ObjectLayer, info backgr
b, err := json.Marshal(info)
if err != nil {
logger.LogIf(ctx, err)
bugLogIf(ctx, err)
return
}
// Get last healing information
err = saveConfig(ctx, objAPI, backgroundHealInfoPath, b)
if err != nil {
logger.LogIf(ctx, err)
internalLogIf(ctx, err)
}
}
@ -167,7 +169,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
cycleInfo.next = binary.LittleEndian.Uint64(buf[:8])
buf = buf[8:]
_, err := cycleInfo.UnmarshalMsg(buf)
logger.LogIf(ctx, err)
bugLogIf(ctx, err)
}
scannerTimer := time.NewTimer(scannerCycle.Load())
@ -204,7 +206,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
results := make(chan DataUsageInfo, 1)
go storeDataUsageInBackend(ctx, objAPI, results)
err := objAPI.NSScanner(ctx, results, uint32(cycleInfo.current), scanMode)
logger.LogOnceIf(ctx, err, "ns-scanner")
scannerLogIf(ctx, err)
res := map[string]string{"cycle": strconv.FormatUint(cycleInfo.current, 10)}
if err != nil {
res["error"] = err.Error()
@ -224,7 +226,7 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
binary.LittleEndian.PutUint64(tmp, cycleInfo.next)
tmp, _ = cycleInfo.MarshalMsg(tmp)
err = saveConfig(ctx, objAPI, dataUsageBloomNamePath, tmp)
logger.LogOnceIf(ctx, err, dataUsageBloomNamePath)
scannerLogIf(ctx, err, dataUsageBloomNamePath)
}
}
}
@ -347,6 +349,7 @@ func scanDataFolder(ctx context.Context, disks []StorageAPI, basePath string, ca
// No useful information...
return cache, err
}
s.newCache.forceCompact(dataScannerCompactAtChildren)
s.newCache.Info.LastUpdate = UTCNow()
s.newCache.Info.NextCycle = cache.Info.NextCycle
return s.newCache, nil
@ -752,7 +755,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
versionID: "",
}, madmin.HealItemObject)
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogOnceIf(ctx, err, entry.name)
scannerLogIf(ctx, err)
}
foundObjs = foundObjs || err == nil
return
@ -769,7 +772,7 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
}, madmin.HealItemObject)
stopFn(int(ver.Size))
if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
logger.LogOnceIf(ctx, err, fiv.Name)
scannerLogIf(ctx, err, fiv.Name)
}
if err == nil {
successVersions++
@ -945,17 +948,39 @@ func (i *scannerItem) applyHealing(ctx context.Context, o ObjectLayer, oi Object
func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi ObjectInfo) (action lifecycle.Action, size int64) {
size, err := oi.GetActualSize()
if i.debug {
logger.LogIf(ctx, err)
scannerLogIf(ctx, err)
}
if i.lifeCycle == nil {
return action, size
}
versionID := oi.VersionID
vcfg, _ := globalBucketVersioningSys.Get(i.bucket)
rCfg, _ := globalBucketObjectLockSys.Get(i.bucket)
replcfg, _ := getReplicationConfig(ctx, i.bucket)
lcEvt := evalActionFromLifecycle(ctx, *i.lifeCycle, rCfg, replcfg, oi)
var vc *versioning.Versioning
var lr objectlock.Retention
var rcfg *replication.Config
if !isMinioMetaBucketName(i.bucket) {
vc, err = globalBucketVersioningSys.Get(i.bucket)
if err != nil {
scannerLogOnceIf(ctx, err, i.bucket)
return
}
// Check if bucket is object locked.
lr, err = globalBucketObjectLockSys.Get(i.bucket)
if err != nil {
scannerLogOnceIf(ctx, err, i.bucket)
return
}
rcfg, err = getReplicationConfig(ctx, i.bucket)
if err != nil {
scannerLogOnceIf(ctx, err, i.bucket)
return
}
}
lcEvt := evalActionFromLifecycle(ctx, *i.lifeCycle, lr, rcfg, oi)
if i.debug {
if versionID != "" {
console.Debugf(applyActionsLogPrefix+" lifecycle: %q (version-id=%s), Initial scan: %v\n", i.objectPath(), versionID, lcEvt.Action)
@ -969,11 +994,11 @@ func (i *scannerItem) applyLifecycle(ctx context.Context, o ObjectLayer, oi Obje
// This can happen when,
// - ExpireObjectAllVersions flag is enabled
// - NoncurrentVersionExpiration is applicable
case lifecycle.DeleteVersionAction, lifecycle.DeleteAllVersionsAction:
case lifecycle.DeleteVersionAction, lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
size = 0
case lifecycle.DeleteAction:
// On a non-versioned bucket, DeleteObject removes the only version permanently.
if !vcfg.PrefixEnabled(oi.Name) {
if !vc.PrefixEnabled(oi.Name) {
size = 0
}
}
@ -1066,7 +1091,7 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi
}
// Check if we have many versions after applyNewerNoncurrentVersionLimit.
if len(objInfos) > int(scannerExcessObjectVersions.Load()) {
if len(objInfos) >= int(scannerExcessObjectVersions.Load()) {
// Notify object accessed via a GET request.
sendEvent(eventArgs{
EventName: event.ObjectManyVersions,
@ -1090,6 +1115,39 @@ func (i *scannerItem) applyVersionActions(ctx context.Context, o ObjectLayer, fi
})
}
cumulativeSize := int64(0)
for _, objInfo := range objInfos {
cumulativeSize += objInfo.Size
}
// Check if the cumulative size of all versions of this object is high.
if cumulativeSize >= scannerExcessObjectVersionsTotalSize.Load() {
// Notify object accessed via a GET request.
sendEvent(eventArgs{
EventName: event.ObjectLargeVersions,
BucketName: i.bucket,
Object: ObjectInfo{
Name: i.objectPath(),
},
UserAgent: "Scanner",
Host: globalLocalNodeName,
RespElements: map[string]string{
"x-minio-versions-count": strconv.Itoa(len(objInfos)),
"x-minio-versions-size": strconv.FormatInt(cumulativeSize, 10),
},
})
auditLogInternal(context.Background(), AuditLogOptions{
Event: "scanner:largeversions",
APIName: "Scanner",
Bucket: i.bucket,
Object: i.objectPath(),
Tags: map[string]interface{}{
"x-minio-versions-count": strconv.Itoa(len(objInfos)),
"x-minio-versions-size": strconv.FormatInt(cumulativeSize, 10),
},
})
}
return objInfos, nil
}
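applyVersionActions now flags an object both when it carries too many versions and when the versions' cumulative size crosses scannerExcessObjectVersionsTotalSize (1 TiB by default, i.e. 1024*1024*1024*1024 bytes). A small sketch of that dual-threshold check; the version type, thresholds and names below are illustrative only:

```
package main

import "fmt"

type version struct{ size int64 }

const (
	excessVersions          = 100           // illustrative count threshold
	excessVersionsTotalSize = int64(1) << 40 // 1 TiB, the documented default
)

// checkExcess reports whether an object should be flagged for having
// too many versions or too much cumulative version data.
func checkExcess(versions []version) (tooMany, tooBig bool, total int64) {
	for _, v := range versions {
		total += v.size
	}
	return len(versions) >= excessVersions, total >= excessVersionsTotalSize, total
}

func main() {
	vs := make([]version, 120)
	for i := range vs {
		vs[i] = version{size: 64 << 20} // 64 MiB each
	}
	many, big, total := checkExcess(vs)
	fmt.Println(many, big, total) // true false 8053063680
}
```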
@ -1105,7 +1163,7 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi Object
// Note: objDeleted is true if and only if action ==
// lifecycle.DeleteAllVersionsAction
if action == lifecycle.DeleteAllVersionsAction {
if action.DeleteAll() {
return true, 0
}
@ -1123,7 +1181,7 @@ func (i *scannerItem) applyActions(ctx context.Context, o ObjectLayer, oi Object
err := o.CheckAbandonedParts(ctx, i.bucket, i.objectPath(), madmin.HealOpts{Remove: healDeleteDangling})
done()
if err != nil {
logger.LogOnceIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", i.bucket, i.objectPath(), err), i.objectPath())
healingLogIf(ctx, fmt.Errorf("unable to check object %s/%s for abandoned data: %w", i.bucket, i.objectPath(), err), i.objectPath())
}
}
}
@ -1142,17 +1200,15 @@ func evalActionFromLifecycle(ctx context.Context, lc lifecycle.Lifecycle, lr loc
console.Debugf(applyActionsLogPrefix+" lifecycle: Secondary scan: %v\n", event.Action)
}
if event.Action == lifecycle.NoneAction {
return event
}
if obj.IsLatest && event.Action == lifecycle.DeleteAllVersionsAction {
if lr.LockEnabled && enforceRetentionForDeletion(ctx, obj) {
switch event.Action {
case lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
// Skip if bucket has object locking enabled; To prevent the
// possibility of violating an object retention on one of the
// noncurrent versions of this object.
if lr.LockEnabled {
return lifecycle.Event{Action: lifecycle.NoneAction}
}
}
switch event.Action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteRestoredVersionAction:
// Defensive code, should never happen
if obj.VersionID == "" {
@ -1184,29 +1240,22 @@ func applyTransitionRule(event lifecycle.Event, src lcEventSrc, obj ObjectInfo)
return true
}
func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) bool {
var err error
defer func() {
if err != nil {
return
}
// Note: DeleteAllVersions action is not supported for
// transitioned objects
globalScannerMetrics.timeILM(lcEvent.Action)(1)
}()
if err = expireTransitionedObject(ctx, objLayer, &obj, lcEvent, src); err != nil {
func applyExpiryOnTransitionedObject(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) (ok bool) {
timeILM := globalScannerMetrics.timeILM(lcEvent.Action)
if err := expireTransitionedObject(ctx, objLayer, &obj, lcEvent, src); err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
return false
}
logger.LogOnceIf(ctx, err, obj.Name)
ilmLogIf(ctx, err)
return false
}
timeILM(1)
// Notification already sent in *expireTransitionedObject*, just return 'true' here.
return true
}
func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) bool {
func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer, obj ObjectInfo, lcEvent lifecycle.Event, src lcEventSrc) (ok bool) {
traceFn := globalLifecycleSys.trace(obj)
opts := ObjectOptions{
Expiration: ExpirationOptions{Expire: true},
@ -1221,22 +1270,26 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
if lcEvent.Action.DeleteAll() {
opts.DeletePrefix = true
// use prefix delete on exact object (this is an optimization to avoid fan-out calls)
opts.DeletePrefixObject = true
}
var (
dobj ObjectInfo
err error
)
timeILM := globalScannerMetrics.timeILM(lcEvent.Action)
defer func() {
if err != nil {
if !ok {
return
}
if lcEvent.Action != lifecycle.NoneAction {
numVersions := uint64(1)
if lcEvent.Action == lifecycle.DeleteAllVersionsAction {
if lcEvent.Action.DeleteAll() {
numVersions = uint64(obj.NumVersions)
}
globalScannerMetrics.timeILM(lcEvent.Action)(numVersions)
timeILM(numVersions)
}
}()
@ -1246,7 +1299,7 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
return false
}
// Assume it is still there.
logger.LogOnceIf(ctx, err, "non-transition-expiry")
ilmLogOnceIf(ctx, err, "non-transition-expiry")
return false
}
if dobj.Name == "" {
@ -1261,8 +1314,11 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLay
if obj.DeleteMarker {
eventName = event.ObjectRemovedDeleteMarkerCreated
}
if lcEvent.Action.DeleteAll() {
switch lcEvent.Action {
case lifecycle.DeleteAllVersionsAction:
eventName = event.ObjectRemovedDeleteAllVersions
case lifecycle.DelMarkerDeleteAllVersionsAction:
eventName = event.ILMDelMarkerExpirationDelete
}
// Notify object deleted event.
sendEvent(eventArgs{
@ -1287,7 +1343,7 @@ func applyLifecycleAction(event lifecycle.Event, src lcEventSrc, obj ObjectInfo)
switch action := event.Action; action {
case lifecycle.DeleteVersionAction, lifecycle.DeleteAction,
lifecycle.DeleteRestoredAction, lifecycle.DeleteRestoredVersionAction,
lifecycle.DeleteAllVersionsAction:
lifecycle.DeleteAllVersionsAction, lifecycle.DelMarkerDeleteAllVersionsAction:
success = applyExpiryRule(event, src, obj)
case lifecycle.TransitionAction, lifecycle.TransitionVersionAction:
success = applyTransitionRule(event, src, obj)
@ -1387,48 +1443,7 @@ func (d *dynamicSleeper) Timer(ctx context.Context) func() {
t := time.Now()
return func() {
doneAt := time.Now()
for {
// Grab current values
d.mu.RLock()
minWait, maxWait := d.minSleep, d.maxSleep
factor := d.factor
cycle := d.cycle
d.mu.RUnlock()
elapsed := doneAt.Sub(t)
// Don't sleep for really small amount of time
wantSleep := time.Duration(float64(elapsed) * factor)
if wantSleep <= minWait {
return
}
if maxWait > 0 && wantSleep > maxWait {
wantSleep = maxWait
}
timer := time.NewTimer(wantSleep)
select {
case <-ctx.Done():
if !timer.Stop() {
<-timer.C
}
if d.isScanner {
globalScannerMetrics.incTime(scannerMetricYield, wantSleep)
}
return
case <-timer.C:
if d.isScanner {
globalScannerMetrics.incTime(scannerMetricYield, wantSleep)
}
return
case <-cycle:
if !timer.Stop() {
// We expired.
<-timer.C
if d.isScanner {
globalScannerMetrics.incTime(scannerMetricYield, wantSleep)
}
return
}
}
}
d.Sleep(ctx, doneAt.Sub(t))
}
}
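The Timer refactor above folds the inline wait loop into dynamicSleeper.Sleep. A compact sketch of the underlying idea, sleeping proportionally to how long the measured work took, bounded by a minimum and maximum, and aborting early on context cancellation; this is a simplified stand-in, not the MinIO type:

```
package main

import (
	"context"
	"fmt"
	"time"
)

// proportionalSleep waits factor*elapsed, skipping waits below minWait
// and capping them at maxWait; a cancelled context cuts the wait short.
func proportionalSleep(ctx context.Context, elapsed time.Duration, factor float64, minWait, maxWait time.Duration) {
	want := time.Duration(float64(elapsed) * factor)
	if want <= minWait {
		return
	}
	if maxWait > 0 && want > maxWait {
		want = maxWait
	}
	t := time.NewTimer(want)
	defer t.Stop()
	select {
	case <-ctx.Done():
	case <-t.C:
	}
}

func main() {
	start := time.Now()
	time.Sleep(20 * time.Millisecond) // the "work"
	proportionalSleep(context.Background(), time.Since(start), 2.0, time.Millisecond, time.Second)
	fmt.Println("done after", time.Since(start).Round(time.Millisecond))
}
```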


@ -20,12 +20,15 @@ package cmd
import (
"context"
"encoding/xml"
"fmt"
"strings"
"sync"
"testing"
"time"
"github.com/google/uuid"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/bucket/object/lock"
"github.com/minio/minio/internal/bucket/versioning"
)
@ -141,3 +144,96 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
}
}
}
func TestEvalActionFromLifecycle(t *testing.T) {
// Tests cover only ExpiredObjectDeleteAllVersions and DelMarkerExpiration actions
obj := ObjectInfo{
Name: "foo",
ModTime: time.Now().Add(-31 * 24 * time.Hour),
Size: 100 << 20,
VersionID: uuid.New().String(),
IsLatest: true,
NumVersions: 4,
}
delMarker := ObjectInfo{
Name: "foo-deleted",
ModTime: time.Now().Add(-61 * 24 * time.Hour),
Size: 0,
VersionID: uuid.New().String(),
IsLatest: true,
DeleteMarker: true,
NumVersions: 4,
}
deleteAllILM := `<LifecycleConfiguration>
<Rule>
<Expiration>
<Days>30</Days>
<ExpiredObjectAllVersions>true</ExpiredObjectAllVersions>
</Expiration>
<Filter></Filter>
<Status>Enabled</Status>
<ID>DeleteAllVersions</ID>
</Rule>
</LifecycleConfiguration>`
delMarkerILM := `<LifecycleConfiguration>
<Rule>
<ID>DelMarkerExpiration</ID>
<Filter></Filter>
<Status>Enabled</Status>
<DelMarkerExpiration>
<Days>60</Days>
</DelMarkerExpiration>
</Rule>
</LifecycleConfiguration>`
deleteAllLc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(deleteAllILM))
if err != nil {
t.Fatalf("Failed to parse deleteAllILM test ILM policy %v", err)
}
delMarkerLc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(delMarkerILM))
if err != nil {
t.Fatalf("Failed to parse delMarkerILM test ILM policy %v", err)
}
tests := []struct {
ilm lifecycle.Lifecycle
retention lock.Retention
obj ObjectInfo
want lifecycle.Action
}{
{
// with object locking
ilm: *deleteAllLc,
retention: lock.Retention{LockEnabled: true},
obj: obj,
want: lifecycle.NoneAction,
},
{
// without object locking
ilm: *deleteAllLc,
retention: lock.Retention{},
obj: obj,
want: lifecycle.DeleteAllVersionsAction,
},
{
// with object locking
ilm: *delMarkerLc,
retention: lock.Retention{LockEnabled: true},
obj: delMarker,
want: lifecycle.NoneAction,
},
{
// without object locking
ilm: *delMarkerLc,
retention: lock.Retention{},
obj: delMarker,
want: lifecycle.DelMarkerDeleteAllVersionsAction,
},
}
for i, test := range tests {
t.Run(fmt.Sprintf("TestEvalAction-%d", i), func(t *testing.T) {
if got := evalActionFromLifecycle(context.TODO(), test.ilm, test.retention, nil, test.obj); got.Action != test.want {
t.Fatalf("Expected %v but got %v", test.want, got)
}
})
}
}


@ -18,7 +18,6 @@
package cmd
import (
"bytes"
"context"
"errors"
"fmt"
@ -36,8 +35,6 @@ import (
"github.com/klauspost/compress/zstd"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/bucket/lifecycle"
"github.com/minio/minio/internal/hash"
"github.com/minio/minio/internal/logger"
"github.com/tinylib/msgp/msgp"
"github.com/valyala/bytebufferpool"
)
@ -191,6 +188,22 @@ type replicationAllStatsV1 struct {
ReplicaCount uint64 `msg:"ReplicaCount,omitempty"`
}
// empty returns true if the replicationAllStats is empty (contains no entries).
func (r *replicationAllStats) empty() bool {
if r == nil {
return true
}
if r.ReplicaSize != 0 || r.ReplicaCount != 0 {
return false
}
for _, v := range r.Targets {
if !v.Empty() {
return false
}
}
return true
}
// clone creates a deep-copy clone.
func (r *replicationAllStats) clone() *replicationAllStats {
if r == nil {
@ -523,20 +536,18 @@ func (d *dataUsageCache) searchParent(h dataUsageHash) *dataUsageHash {
want := h.Key()
if idx := strings.LastIndexByte(want, '/'); idx >= 0 {
if v := d.find(want[:idx]); v != nil {
for child := range v.Children {
if child == want {
found := hashPath(want[:idx])
return &found
}
_, ok := v.Children[want]
if ok {
found := hashPath(want[:idx])
return &found
}
}
}
for k, v := range d.Cache {
for child := range v.Children {
if child == want {
found := dataUsageHash(k)
return &found
}
_, ok := v.Children[want]
if ok {
found := dataUsageHash(k)
return &found
}
}
return nil
@ -621,7 +632,7 @@ func (d *dataUsageCache) copyWithChildren(src *dataUsageCache, hash dataUsageHas
d.Cache[hash.Key()] = e
for ch := range e.Children {
if ch == hash.Key() {
logger.LogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference"))
scannerLogIf(GlobalContext, errors.New("dataUsageCache.copyWithChildren: Circular reference"))
return
}
d.copyWithChildren(src, dataUsageHash(ch), &hash)
@ -718,6 +729,53 @@ func (d *dataUsageCache) reduceChildrenOf(path dataUsageHash, limit int, compact
}
}
// forceCompact will force compact the cache of the top entry.
// If the number of children is more than limit*100, it will compact self.
// When above the limit a cleanup will also be performed to remove any possible abandoned entries.
func (d *dataUsageCache) forceCompact(limit int) {
if d == nil || len(d.Cache) <= limit {
return
}
top := hashPath(d.Info.Name).Key()
topE := d.find(top)
if topE == nil {
scannerLogIf(GlobalContext, errors.New("forceCompact: root not found"))
return
}
// If off by 2 orders of magnitude, compact self and log error.
if len(topE.Children) > dataScannerForceCompactAtFolders {
// If we still have too many children, compact self.
scannerLogOnceIf(GlobalContext, fmt.Errorf("forceCompact: %q has %d children. Force compacting. Expect reduced scanner performance", d.Info.Name, len(topE.Children)), d.Info.Name)
d.reduceChildrenOf(hashPath(d.Info.Name), limit, true)
}
if len(d.Cache) <= limit {
return
}
// Check for abandoned entries.
found := make(map[string]struct{}, len(d.Cache))
// Mark all children recursively
var mark func(entry dataUsageEntry)
mark = func(entry dataUsageEntry) {
for k := range entry.Children {
found[k] = struct{}{}
if ch, ok := d.Cache[k]; ok {
mark(ch)
}
}
}
found[top] = struct{}{}
mark(*topE)
// Delete all entries not found.
for k := range d.Cache {
if _, ok := found[k]; !ok {
delete(d.Cache, k)
}
}
}
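forceCompact above finishes with a mark-and-sweep pass: it marks every key reachable from the root's children and deletes the rest. A minimal, generic sketch of that reachability cleanup over a flat map; the node type and keys are illustrative, not the dataUsageCache types:

```
package main

import "fmt"

type node struct{ children []string }

// sweepUnreachable removes every entry that cannot be reached from root
// by following child links, mirroring the abandoned-entry cleanup.
func sweepUnreachable(cache map[string]node, root string) {
	found := map[string]struct{}{root: {}}
	var mark func(string)
	mark = func(key string) {
		for _, child := range cache[key].children {
			if _, ok := found[child]; ok {
				continue // already visited
			}
			found[child] = struct{}{}
			mark(child)
		}
	}
	mark(root)
	for key := range cache {
		if _, ok := found[key]; !ok {
			delete(cache, key)
		}
	}
}

func main() {
	cache := map[string]node{
		"root":     {children: []string{"root/a"}},
		"root/a":   {},
		"orphan/x": {}, // abandoned entry, gets swept
	}
	sweepUnreachable(cache, "root")
	fmt.Println(len(cache)) // 2
}
```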
// StringAll returns a detailed string representation of all entries in the cache.
func (d *dataUsageCache) StringAll() string {
// Remove bloom filter from print.
@ -901,6 +959,9 @@ func (d *dataUsageCache) sizeRecursive(path string) *dataUsageEntry {
return root
}
flat := d.flatten(*root)
if flat.ReplicationStats.empty() {
flat.ReplicationStats = nil
}
return &flat
}
@ -989,11 +1050,23 @@ func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string)
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
r, err := store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, ObjectOptions{NoLock: true})
r, err := store.GetObjectNInfo(ctx, minioMetaBucket, pathJoin(bucketMetaPrefix, name), nil, http.Header{}, ObjectOptions{NoLock: true})
if err != nil {
switch err.(type) {
case ObjectNotFound, BucketNotFound:
return false, nil
r, err = store.GetObjectNInfo(ctx, dataUsageBucket, name, nil, http.Header{}, ObjectOptions{NoLock: true})
if err != nil {
switch err.(type) {
case ObjectNotFound, BucketNotFound:
return false, nil
case InsufficientReadQuorum, StorageErr:
return true, nil
}
return false, err
}
err = d.deserialize(r)
r.Close()
return err != nil, nil
case InsufficientReadQuorum, StorageErr:
return true, nil
}
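The load path above first tries the cache object under the bucket metadata prefix and only falls back to the legacy dataUsageBucket location when that read fails, so pre-upgrade on-disk caches remain readable. A rough sketch of that try-new-then-legacy pattern; loadFrom, the store map and the key names are purely illustrative:

```
package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found")

// loadFrom is a purely illustrative loader for a single location.
func loadFrom(store map[string][]byte, key string) ([]byte, error) {
	if b, ok := store[key]; ok {
		return b, nil
	}
	return nil, errNotFound
}

// loadWithFallback prefers the new key and quietly falls back to the
// legacy one, so upgraded servers can still read pre-upgrade state.
func loadWithFallback(store map[string][]byte, newKey, legacyKey string) ([]byte, error) {
	if b, err := loadFrom(store, newKey); err == nil {
		return b, nil
	}
	return loadFrom(store, legacyKey)
}

func main() {
	store := map[string][]byte{"legacy/.usage-cache.bin": []byte("old cache")}
	b, err := loadWithFallback(store, "buckets/.usage-cache.bin", "legacy/.usage-cache.bin")
	fmt.Println(string(b), err) // old cache <nil>
}
```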
@ -1024,7 +1097,7 @@ func (d *dataUsageCache) load(ctx context.Context, store objectIO, name string)
}
if retries == 5 {
logger.LogOnceIf(ctx, fmt.Errorf("maximum retry reached to load the data usage cache `%s`", name), "retry-loading-data-usage-cache")
scannerLogOnceIf(ctx, fmt.Errorf("maximum retry reached to load the data usage cache `%s`", name), "retry-loading-data-usage-cache")
}
return nil
@ -1054,24 +1127,11 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string)
}
save := func(name string, timeout time.Duration) error {
hr, err := hash.NewReader(ctx, bytes.NewReader(buf.Bytes()), int64(buf.Len()), "", "", int64(buf.Len()))
if err != nil {
return err
}
// Abandon if more than a minute, so we don't hold up scanner.
ctx, cancel := context.WithTimeout(ctx, timeout)
defer cancel()
_, err = store.PutObject(ctx,
dataUsageBucket,
name,
NewPutObjReader(hr),
ObjectOptions{NoLock: true})
if isErrBucketNotFound(err) {
return nil
}
return err
return saveConfig(ctx, store, pathJoin(bucketMetaPrefix, name), buf.Bytes())
}
defer save(name+".bkp", 5*time.Second) // Keep a backup as well


@ -25,7 +25,6 @@ import (
jsoniter "github.com/json-iterator/go"
"github.com/minio/minio/internal/cachevalue"
"github.com/minio/minio/internal/logger"
)
const (
@ -49,7 +48,7 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
json := jsoniter.ConfigCompatibleWithStandardLibrary
dataUsageJSON, err := json.Marshal(dataUsageInfo)
if err != nil {
logger.LogIf(ctx, err)
scannerLogIf(ctx, err)
continue
}
if attempts > 10 {
@ -57,7 +56,7 @@ func storeDataUsageInBackend(ctx context.Context, objAPI ObjectLayer, dui <-chan
attempts = 1
}
if err = saveConfig(ctx, objAPI, dataUsageObjNamePath, dataUsageJSON); err != nil {
logger.LogOnceIf(ctx, err, dataUsageObjNamePath)
scannerLogOnceIf(ctx, err, dataUsageObjNamePath)
}
attempts++
}
@ -80,12 +79,12 @@ func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket
prefixUsageCache.InitOnce(30*time.Second,
// No need to fail upon Update() error, fallback to old value.
cachevalue.Opts{ReturnLastGood: true, NoWait: true},
func() (map[string]uint64, error) {
func(ctx context.Context) (map[string]uint64, error) {
m := make(map[string]uint64)
for _, pool := range z.serverPools {
for _, er := range pool.sets {
// Load bucket usage prefixes
ctx, done := context.WithTimeout(context.Background(), 2*time.Second)
ctx, done := context.WithTimeout(ctx, 2*time.Second)
ok := cache.load(ctx, er, bucket+slashSeparator+dataUsageCacheName) == nil
done()
if ok {
@ -108,7 +107,7 @@ func loadPrefixUsageFromBackend(ctx context.Context, objAPI ObjectLayer, bucket
},
)
return prefixUsageCache.Get()
return prefixUsageCache.GetWithCtx(ctx)
}
func loadDataUsageFromBackend(ctx context.Context, objAPI ObjectLayer) (DataUsageInfo, error) {


@ -364,6 +364,7 @@ func TestDataUsageUpdatePrefix(t *testing.T) {
}
if e == nil {
t.Fatal("got nil result")
return
}
if e.Size != int64(w.size) {
t.Error("got size", e.Size, "want", w.size)


@ -22,7 +22,7 @@ import (
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/policy"
"github.com/minio/pkg/v3/policy"
)
// Data types used for returning dummy tagging XML.


@ -110,7 +110,7 @@ func kmsKeyIDFromMetadata(metadata map[string]string) string {
//
// DecryptETags uses a KMS bulk decryption API, if available, which
// is more efficient than decrypting ETags sequentially.
func DecryptETags(ctx context.Context, k kms.KMS, objects []ObjectInfo) error {
func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
const BatchSize = 250 // We process the objects in batches - 250 is a reasonable default.
var (
metadata = make([]map[string]string, 0, BatchSize)
@ -267,7 +267,11 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
if err != nil {
return err
}
oldKey, err := GlobalKMS.DecryptKey(keyID, kmsKey, kms.Context{bucket: path.Join(bucket, object)})
oldKey, err := GlobalKMS.Decrypt(ctx, &kms.DecryptRequest{
Name: keyID,
Ciphertext: kmsKey,
AssociatedData: kms.Context{bucket: path.Join(bucket, object)},
})
if err != nil {
return err
}
@ -276,7 +280,10 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
return err
}
newKey, err := GlobalKMS.GenerateKey(ctx, "", kms.Context{bucket: path.Join(bucket, object)})
newKey, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: GlobalKMS.DefaultKey,
AssociatedData: kms.Context{bucket: path.Join(bucket, object)},
})
if err != nil {
return err
}
@ -312,7 +319,10 @@ func rotateKey(ctx context.Context, oldKey []byte, newKeyID string, newKey []byt
if _, ok := kmsCtx[bucket]; !ok {
kmsCtx[bucket] = path.Join(bucket, object)
}
newKey, err := GlobalKMS.GenerateKey(ctx, newKeyID, kmsCtx)
newKey, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: newKeyID,
AssociatedData: kmsCtx,
})
if err != nil {
return err
}
@ -352,7 +362,9 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
if GlobalKMS == nil {
return crypto.ObjectKey{}, errKMSNotConfigured
}
key, err := GlobalKMS.GenerateKey(ctx, "", kms.Context{bucket: path.Join(bucket, object)})
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
AssociatedData: kms.Context{bucket: path.Join(bucket, object)},
})
if err != nil {
return crypto.ObjectKey{}, err
}
@ -379,7 +391,10 @@ func newEncryptMetadata(ctx context.Context, kind crypto.Type, keyID string, key
if _, ok := kmsCtx[bucket]; !ok {
kmsCtx[bucket] = path.Join(bucket, object)
}
key, err := GlobalKMS.GenerateKey(ctx, keyID, kmsCtx)
key, err := GlobalKMS.GenerateKey(ctx, &kms.GenerateKeyRequest{
Name: keyID,
AssociatedData: kmsCtx,
})
if err != nil {
if errors.Is(err, kes.ErrKeyNotFound) {
return crypto.ObjectKey{}, errKMSKeyNotFound
@ -475,11 +490,10 @@ func EncryptRequest(content io.Reader, r *http.Request, bucket, object string, m
func decryptObjectMeta(key []byte, bucket, object string, metadata map[string]string) ([]byte, error) {
switch kind, _ := crypto.IsEncrypted(metadata); kind {
case crypto.S3:
KMS := GlobalKMS
if KMS == nil {
if GlobalKMS == nil {
return nil, errKMSNotConfigured
}
objectKey, err := crypto.S3.UnsealObjectKey(KMS, metadata, bucket, object)
objectKey, err := crypto.S3.UnsealObjectKey(GlobalKMS, metadata, bucket, object)
if err != nil {
return nil, err
}
@ -1089,7 +1103,7 @@ func (o *ObjectInfo) decryptPartsChecksums() {
if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
decrypted, err := o.metadataDecrypter()("object-checksum", data)
if err != nil {
logger.LogIf(GlobalContext, err)
encLogIf(GlobalContext, err)
return
}
data = decrypted
@ -1151,7 +1165,7 @@ func (o *ObjectInfo) decryptChecksums(part int) map[string]string {
if _, encrypted := crypto.IsEncrypted(o.UserDefined); encrypted {
decrypted, err := o.metadataDecrypter()("object-checksum", data)
if err != nil {
logger.LogIf(GlobalContext, err)
encLogIf(GlobalContext, err)
return nil
}
data = decrypted

Some files were not shown because too many files have changed in this diff.