diff --git a/CREDITS b/CREDITS index 7d82c8a43..b0ce89bfc 100644 --- a/CREDITS +++ b/CREDITS @@ -18359,6 +18359,214 @@ https://github.com/minio/minio-go/v7 ================================================================ +github.com/minio/minio-go/v7 +https://github.com/minio/minio-go/v7 +---------------------------------------------------------------- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +================================================================ + github.com/minio/pkg https://github.com/minio/pkg ---------------------------------------------------------------- diff --git a/cmd/admin-bucket-handlers.go b/cmd/admin-bucket-handlers.go index 6228c9ece..6c320a792 100644 --- a/cmd/admin-bucket-handlers.go +++ b/cmd/admin-bucket-handlers.go @@ -160,11 +160,6 @@ func (a adminAPIHandlers) SetRemoteTargetHandler(w http.ResponseWriter, r *http. bucket := pathClean(vars["bucket"]) update := r.Form.Get("update") == "true" - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - // Get current object layer instance. objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketTargetAction) if objectAPI == nil { @@ -289,10 +284,6 @@ func (a adminAPIHandlers) ListRemoteTargetsHandler(w http.ResponseWriter, r *htt bucket := pathClean(vars["bucket"]) arnType := vars["type"] - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } // Get current object layer instance. objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.GetBucketTargetAction) if objectAPI == nil { @@ -328,10 +319,6 @@ func (a adminAPIHandlers) RemoveRemoteTargetHandler(w http.ResponseWriter, r *ht bucket := pathClean(vars["bucket"]) arn := vars["arn"] - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } // Get current object layer instance. objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetBucketTargetAction) if objectAPI == nil { @@ -373,10 +360,6 @@ func (a adminAPIHandlers) ExportBucketMetadataHandler(w http.ResponseWriter, r * defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) bucket := pathClean(r.Form.Get("bucket")) - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } // Get current object layer instance. objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ExportBucketMetadataAction) if objectAPI == nil { @@ -661,10 +644,6 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r * defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } // Get current object layer instance. objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ImportBucketMetadataAction) if objectAPI == nil { @@ -1091,11 +1070,6 @@ func (a adminAPIHandlers) ReplicationDiffHandler(w http.ResponseWriter, r *http. 
vars := mux.Vars(r) bucket := vars["bucket"] - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ReplicationDiff) if objectAPI == nil { return diff --git a/cmd/admin-handlers-users.go b/cmd/admin-handlers-users.go index 42d5f4304..df6d2243e 100644 --- a/cmd/admin-handlers-users.go +++ b/cmd/admin-handlers-users.go @@ -1149,15 +1149,12 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ return rd, wr } - var dataUsageInfo DataUsageInfo - var err error - if !globalIsGateway { - // Load the latest calculated data usage - dataUsageInfo, _ = loadDataUsageFromBackend(ctx, objectAPI) - } + // Load the latest calculated data usage + dataUsageInfo, _ := loadDataUsageFromBackend(ctx, objectAPI) // If etcd, dns federation configured list buckets from etcd. var buckets []BucketInfo + var err error if globalDNSConfig != nil && globalBucketFederation { dnsBuckets, err := globalDNSConfig.List() if err != nil && !IsErrIgnored(err, diff --git a/cmd/admin-handlers.go b/cmd/admin-handlers.go index 831ba5a6d..a10e1b559 100644 --- a/cmd/admin-handlers.go +++ b/cmd/admin-handlers.go @@ -944,11 +944,6 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) { return } - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL) - return - } - hip, errCode := extractHealInitParams(mux.Vars(r), r.Form, r.Body) if errCode != ErrNone { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(errCode), r.URL) @@ -1135,12 +1130,6 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r * return } - // Check if this setup has an erasure coded backend. - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrHealNotImplemented), r.URL) - return - } - aggregateHealStateResult, err := getAggregatedBackgroundHealState(r.Context(), objectAPI) if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) @@ -1216,11 +1205,6 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http. return } - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - sizeStr := r.Form.Get(peerRESTSize) durationStr := r.Form.Get(peerRESTDuration) concurrentStr := r.Form.Get(peerRESTConcurrent) @@ -1389,11 +1373,6 @@ func (a adminAPIHandlers) DriveSpeedtestHandler(w http.ResponseWriter, r *http.R return } - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - // Freeze all incoming S3 API calls before running speedtest. 
 	globalNotificationSys.ServiceFreeze(ctx, true)
@@ -1910,7 +1889,7 @@ func getKubernetesInfo(dctx context.Context) madmin.KubernetesInfo {
 	}
 
 	client := &http.Client{
-		Transport: NewGatewayHTTPTransport(),
+		Transport: NewHTTPTransport(),
 		Timeout:   10 * time.Second,
 	}
 
diff --git a/cmd/admin-router.go b/cmd/admin-router.go
index c40ccb755..ad28454cc 100644
--- a/cmd/admin-router.go
+++ b/cmd/admin-router.go
@@ -274,16 +274,14 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		adminRouter.Methods(http.MethodPost).Path(adminVersion+"/kms/key/create").HandlerFunc(gz(httpTraceAll(adminAPI.KMSCreateKeyHandler))).Queries("key-id", "{key-id:.*}")
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/kms/key/status").HandlerFunc(gz(httpTraceAll(adminAPI.KMSKeyStatusHandler)))
 
-		if !globalIsGateway {
-			// Keep obdinfo for backward compatibility with mc
-			adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
-				HandlerFunc(gz(httpTraceHdrs(adminAPI.HealthInfoHandler)))
-			// -- Health API --
-			adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
-				HandlerFunc(gz(httpTraceHdrs(adminAPI.HealthInfoHandler)))
-			adminRouter.Methods(http.MethodGet).Path(adminVersion + "/bandwidth").
-				HandlerFunc(gz(httpTraceHdrs(adminAPI.BandwidthMonitorHandler)))
-		}
+		// Keep obdinfo for backward compatibility with mc
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/obdinfo").
+			HandlerFunc(gz(httpTraceHdrs(adminAPI.HealthInfoHandler)))
+		// -- Health API --
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/healthinfo").
+			HandlerFunc(gz(httpTraceHdrs(adminAPI.HealthInfoHandler)))
+		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/bandwidth").
+			HandlerFunc(gz(httpTraceHdrs(adminAPI.BandwidthMonitorHandler)))
 	}
 
 	// If none of the routes match add default error handler routes
diff --git a/cmd/admin-server-info.go b/cmd/admin-server-info.go
index 7b41b5333..bb907f81e 100644
--- a/cmd/admin-server-info.go
+++ b/cmd/admin-server-info.go
@@ -142,7 +142,7 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
 	}
 
 	objLayer := newObjectLayerFn()
-	if objLayer != nil && !globalIsGateway {
+	if objLayer != nil {
 		// only need Disks information in server mode.
 		storageInfo, _ := objLayer.LocalStorageInfo(GlobalContext)
 		props.State = string(madmin.ItemOnline)
diff --git a/cmd/api-errors.go b/cmd/api-errors.go
index bd9dc2cb1..f7557e981 100644
--- a/cmd/api-errors.go
+++ b/cmd/api-errors.go
@@ -2238,8 +2238,7 @@ func toAPIError(ctx context.Context, err error) APIError {
 	if apiErr.Code == "InternalError" {
 		// If we see an internal error try to interpret
 		// any underlying errors if possible depending on
-		// their internal error types. This code is only
-		// useful with gateway implementations.
+		// their internal error types.
 		switch e := err.(type) {
 		case batchReplicationJobError:
 			apiErr = APIError{
@@ -2309,7 +2308,7 @@ func toAPIError(ctx context.Context, err error) APIError {
 				Description:    e.Message,
 				HTTPStatusCode: e.StatusCode,
 			}
-			if globalIsGateway && strings.Contains(e.Message, "KMS is not configured") {
+			if strings.Contains(e.Message, "KMS is not configured") {
 				apiErr = APIError{
 					Code:           "NotImplemented",
 					Description:    e.Message,
@@ -2333,7 +2332,7 @@ func toAPIError(ctx context.Context, err error) APIError {
 				Description:    e.Error(),
 				HTTPStatusCode: e.Response().StatusCode,
 			}
-		// Add more Gateway SDKs here if any in future.
+		// Add other SDK-related errors here, if any, in the future.
default: //nolint:gocritic if errors.Is(err, errMalformedEncoding) { diff --git a/cmd/api-response.go b/cmd/api-response.go index 7d44717c1..485c466be 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -636,7 +636,7 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, case crypto.SSEC: content.UserMetadata.Set(xhttp.AmzServerSideEncryptionCustomerAlgorithm, xhttp.AmzEncryptionAES) } - for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) { + for k, v := range cleanMinioInternalMetadataKeys(object.UserDefined) { if strings.HasPrefix(strings.ToLower(k), ReservedMetadataPrefixLower) { // Do not need to send any internal metadata // values to client. diff --git a/cmd/bucket-encryption.go b/cmd/bucket-encryption.go index f31ebdde9..c74fbae37 100644 --- a/cmd/bucket-encryption.go +++ b/cmd/bucket-encryption.go @@ -34,15 +34,6 @@ func NewBucketSSEConfigSys() *BucketSSEConfigSys { // Get - gets bucket encryption config for the given bucket. func (sys *BucketSSEConfigSys) Get(bucket string) (*sse.BucketSSEConfig, error) { - if globalIsGateway { - objAPI := newObjectLayerFn() - if objAPI == nil { - return nil, errServerNotInitialized - } - - return nil, BucketSSEConfigNotFound{Bucket: bucket} - } - sseCfg, _, err := globalBucketMetadataSys.GetSSEConfig(bucket) return sseCfg, err } diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 18ff802d9..d6e2cbb1c 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -1065,10 +1065,8 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h sseConfig, _ := globalBucketSSEConfigSys.Get(bucket) sseConfig.Apply(r.Header, sse.ApplyOptions{ AutoEncrypt: globalAutoEncryption, - Passthrough: globalIsGateway && globalGatewayName == S3BackendGateway, }) - // get gateway encryption options var opts ObjectOptions opts, err = putOpts(ctx, r, bucket, object, metadata) if err != nil { @@ -1398,10 +1396,6 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) return } - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) return diff --git a/cmd/bucket-lifecycle.go b/cmd/bucket-lifecycle.go index 829b0113d..e28c15d33 100644 --- a/cmd/bucket-lifecycle.go +++ b/cmd/bucket-lifecycle.go @@ -58,15 +58,6 @@ type LifecycleSys struct{} // Get - gets lifecycle config associated to a given bucket name. func (sys *LifecycleSys) Get(bucketName string) (lc *lifecycle.Lifecycle, err error) { - if globalIsGateway { - objAPI := newObjectLayerFn() - if objAPI == nil { - return nil, errServerNotInitialized - } - - return nil, BucketLifecycleNotFound{Bucket: bucketName} - } - return globalBucketMetadataSys.GetLifecycleConfig(bucketName) } diff --git a/cmd/bucket-metadata-sys.go b/cmd/bucket-metadata-sys.go index b7bc78eed..767c14027 100644 --- a/cmd/bucket-metadata-sys.go +++ b/cmd/bucket-metadata-sys.go @@ -18,7 +18,6 @@ package cmd import ( - "bytes" "context" "errors" "fmt" @@ -55,9 +54,6 @@ func (sys *BucketMetadataSys) Count() int { // Remove bucket metadata from memory. 
func (sys *BucketMetadataSys) Remove(bucket string) { - if globalIsGateway { - return - } sys.Lock() delete(sys.metadataMap, bucket) globalBucketMonitor.DeleteBucket(bucket) @@ -70,10 +66,6 @@ func (sys *BucketMetadataSys) Remove(bucket string) { // so they should be replaced atomically and not appended to, etc. // Data is not persisted to disk. func (sys *BucketMetadataSys) Set(bucket string, meta BucketMetadata) { - if globalIsGateway { - return - } - if bucket != minioMetaBucket { sys.Lock() sys.metadataMap[bucket] = meta @@ -87,20 +79,6 @@ func (sys *BucketMetadataSys) updateAndParse(ctx context.Context, bucket string, return updatedAt, errServerNotInitialized } - if globalIsGateway && globalGatewayName != NASBackendGateway { - if configFile == bucketPolicyConfig { - if configData == nil { - return updatedAt, objAPI.DeleteBucketPolicy(ctx, bucket) - } - config, err := policy.ParseConfig(bytes.NewReader(configData), bucket) - if err != nil { - return updatedAt, err - } - return updatedAt, objAPI.SetBucketPolicy(ctx, bucket, config) - } - return updatedAt, NotImplemented{} - } - if bucket == minioMetaBucket { return updatedAt, errInvalidArgument } @@ -186,7 +164,7 @@ func (sys *BucketMetadataSys) Update(ctx context.Context, bucket string, configF // For all other bucket specific metadata, use the relevant // calls implemented specifically for each of those features. func (sys *BucketMetadataSys) Get(bucket string) (BucketMetadata, error) { - if globalIsGateway || bucket == minioMetaBucket { + if bucket == minioMetaBucket { return newBucketMetadata(bucket), errConfigNotFound } @@ -249,22 +227,6 @@ func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Co // GetLifecycleConfig returns configured lifecycle config // The returned object may not be modified. func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Lifecycle, error) { - if globalIsGateway && globalGatewayName == NASBackendGateway { - // Only needed in case of NAS gateway. - objAPI := newObjectLayerFn() - if objAPI == nil { - return nil, errServerNotInitialized - } - meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket) - if err != nil { - return nil, err - } - if meta.lifecycleConfig == nil { - return nil, BucketLifecycleNotFound{Bucket: bucket} - } - return meta.lifecycleConfig, nil - } - meta, err := sys.GetConfig(GlobalContext, bucket) if err != nil { if errors.Is(err, errConfigNotFound) { @@ -281,19 +243,6 @@ func (sys *BucketMetadataSys) GetLifecycleConfig(bucket string) (*lifecycle.Life // GetNotificationConfig returns configured notification config // The returned object may not be modified. func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Config, error) { - if globalIsGateway && globalGatewayName == NASBackendGateway { - // Only needed in case of NAS gateway. - objAPI := newObjectLayerFn() - if objAPI == nil { - return nil, errServerNotInitialized - } - meta, err := loadBucketMetadata(GlobalContext, objAPI, bucket) - if err != nil { - return nil, err - } - return meta.notificationConfig, nil - } - meta, err := sys.GetConfig(GlobalContext, bucket) if err != nil { return nil, err @@ -329,15 +278,6 @@ func (sys *BucketMetadataSys) CreatedAt(bucket string) (time.Time, error) { // GetPolicyConfig returns configured bucket policy // The returned object may not be modified. 
func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, time.Time, error) { - if globalIsGateway { - objAPI := newObjectLayerFn() - if objAPI == nil { - return nil, time.Time{}, errServerNotInitialized - } - p, err := objAPI.GetBucketPolicy(GlobalContext, bucket) - return p, UTCNow(), err - } - meta, err := sys.GetConfig(GlobalContext, bucket) if err != nil { if errors.Is(err, errConfigNotFound) { @@ -405,10 +345,6 @@ func (sys *BucketMetadataSys) GetConfig(ctx context.Context, bucket string) (Buc return newBucketMetadata(bucket), errServerNotInitialized } - if globalIsGateway { - return newBucketMetadata(bucket), NotImplemented{} - } - if bucket == minioMetaBucket { return newBucketMetadata(bucket), errInvalidArgument } @@ -436,12 +372,6 @@ func (sys *BucketMetadataSys) Init(ctx context.Context, buckets []BucketInfo, ob return errServerNotInitialized } - // In gateway mode, we don't need to load bucket metadata except - // NAS gateway backend. - if globalIsGateway && !objAPI.IsNotificationSupported() { - return nil - } - // Load bucket metadata sys in background go sys.load(ctx, buckets, objAPI) return nil diff --git a/cmd/bucket-object-lock.go b/cmd/bucket-object-lock.go index eb8b0daff..1ffcb8cc7 100644 --- a/cmd/bucket-object-lock.go +++ b/cmd/bucket-object-lock.go @@ -36,15 +36,6 @@ type BucketObjectLockSys struct{} // Get - Get retention configuration. func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention, err error) { - if globalIsGateway { - objAPI := newObjectLayerFn() - if objAPI == nil { - return r, errServerNotInitialized - } - - return r, nil - } - config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucketName) if err != nil { if errors.Is(err, BucketObjectLockConfigNotFound{Bucket: bucketName}) { diff --git a/cmd/bucket-quota.go b/cmd/bucket-quota.go index 3771532e3..cd2f66c8d 100644 --- a/cmd/bucket-quota.go +++ b/cmd/bucket-quota.go @@ -35,13 +35,6 @@ type BucketQuotaSys struct { // Get - Get quota configuration. func (sys *BucketQuotaSys) Get(ctx context.Context, bucketName string) (*madmin.BucketQuota, error) { - if globalIsGateway { - objAPI := newObjectLayerFn() - if objAPI == nil { - return nil, errServerNotInitialized - } - return &madmin.BucketQuota{}, nil - } qCfg, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucketName) return qCfg, err } diff --git a/cmd/bucket-replication-handlers.go b/cmd/bucket-replication-handlers.go index bb0e8b332..8777d89f4 100644 --- a/cmd/bucket-replication-handlers.go +++ b/cmd/bucket-replication-handlers.go @@ -46,10 +46,6 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) return } - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL) return diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go index 0e6e44183..2eab692d6 100644 --- a/cmd/bucket-replication.go +++ b/cmd/bucket-replication.go @@ -74,15 +74,6 @@ const ( // gets replication config associated to a given bucket name. 
func getReplicationConfig(ctx context.Context, bucketName string) (rc *replication.Config, err error) { - if globalIsGateway { - objAPI := newObjectLayerFn() - if objAPI == nil { - return rc, errServerNotInitialized - } - - return rc, BucketReplicationConfigNotFound{Bucket: bucketName} - } - rCfg, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName) if err != nil { if errors.Is(err, BucketReplicationConfigNotFound{Bucket: bucketName}) || errors.Is(err, errInvalidArgument) { @@ -201,10 +192,6 @@ func getMustReplicateOptions(o ObjectInfo, op replication.Type, opts ObjectOptio // mustReplicate returns 2 booleans - true if object meets replication criteria and true if replication is to be done in // a synchronous manner. func mustReplicate(ctx context.Context, bucket, object string, mopts mustReplicateOptions) (dsc ReplicateDecision) { - if globalIsGateway { - return - } - // object layer not initialized we return with no decision. if newObjectLayerFn() == nil { return diff --git a/cmd/bucket-targets.go b/cmd/bucket-targets.go index 178f2f333..72dd1346d 100644 --- a/cmd/bucket-targets.go +++ b/cmd/bucket-targets.go @@ -208,9 +208,6 @@ func (sys *BucketTargetSys) Delete(bucket string) { // SetTarget - sets a new minio-go client target for this bucket. func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *madmin.BucketTarget, update bool) error { - if globalIsGateway { - return nil - } if !tgt.Type.IsValid() && !update { return BucketRemoteArnTypeInvalid{Bucket: bucket} } @@ -271,9 +268,6 @@ func (sys *BucketTargetSys) SetTarget(ctx context.Context, bucket string, tgt *m } func (sys *BucketTargetSys) updateBandwidthLimit(bucket string, limit int64) { - if globalIsGateway { - return - } if limit == 0 { globalBucketMonitor.DeleteBucket(bucket) return @@ -285,10 +279,6 @@ func (sys *BucketTargetSys) updateBandwidthLimit(bucket string, limit int64) { // RemoveTarget - removes a remote bucket target for this source bucket. func (sys *BucketTargetSys) RemoveTarget(ctx context.Context, bucket, arnStr string) error { - if globalIsGateway { - return nil - } - if arnStr == "" { return BucketRemoteArnInvalid{Bucket: bucket} } diff --git a/cmd/bucket-versioning.go b/cmd/bucket-versioning.go index 00a2c69ff..5bfd44a4a 100644 --- a/cmd/bucket-versioning.go +++ b/cmd/bucket-versioning.go @@ -69,11 +69,6 @@ func (sys *BucketVersioningSys) PrefixSuspended(bucket, prefix string) bool { // Get returns stored bucket policy func (sys *BucketVersioningSys) Get(bucket string) (*versioning.Versioning, error) { - if globalIsGateway { - // Gateway does not implement versioning. 
- return &versioning.Versioning{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/"}, nil - } - if bucket == minioMetaBucket || strings.HasPrefix(bucket, minioMetaBucket) { return &versioning.Versioning{XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/"}, nil } diff --git a/cmd/common-main.go b/cmd/common-main.go index 417b88032..8cd4cfabb 100644 --- a/cmd/common-main.go +++ b/cmd/common-main.go @@ -134,7 +134,7 @@ func init() { globalForwarder = handlers.NewForwarder(&handlers.Forwarder{ PassHost: true, - RoundTripper: newGatewayHTTPTransport(1 * time.Hour), + RoundTripper: newHTTPTransport(1 * time.Hour), Logger: func(err error) { if err != nil && !errors.Is(err, context.Canceled) { logger.LogIf(GlobalContext, err) @@ -151,7 +151,7 @@ func init() { defaultAWSCredProvider = []credentials.Provider{ &credentials.IAM{ Client: &http.Client{ - Transport: NewGatewayHTTPTransport(), + Transport: NewHTTPTransport(), }, }, } @@ -308,22 +308,6 @@ func initConsoleServer() (*restapi.Server, error) { return server, nil } -func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) { - if strings.HasPrefix(name, "gateway") { - if GlobalGatewaySSE.IsSet() && GlobalKMS == nil { - uiErr := config.ErrInvalidGWSSEEnvValue(nil).Msg("MINIO_GATEWAY_SSE set but KMS is not configured") - logger.Fatal(uiErr, "Unable to start gateway with SSE") - } - } - - globalCompressConfigMu.Lock() - if globalCompressConfig.Enabled && !objAPI.IsCompressionSupported() { - logger.Fatal(errInvalidArgument, - "Compression support is requested but '%s' does not support compression", name) - } - globalCompressConfigMu.Unlock() -} - // Check for updates and print a notification message func checkUpdate(mode string) { updateURL := minioReleaseInfoURL diff --git a/cmd/config-current.go b/cmd/config-current.go index 0414ce3b7..ebd1f2964 100644 --- a/cmd/config-current.go +++ b/cmd/config-current.go @@ -331,7 +331,7 @@ func validateSubSysConfig(s config.Config, subSys string, objAPI ObjectLayer) er } case config.IdentityOpenIDSubSys: if _, err := openid.LookupConfig(s, - NewGatewayHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil { + NewHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil { return err } case config.IdentityLDAPSubSys: @@ -352,7 +352,7 @@ func validateSubSysConfig(s config.Config, subSys string, objAPI ObjectLayer) er } case config.IdentityPluginSubSys: if _, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default], - NewGatewayHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil { + NewHTTPTransport(), xhttp.DrainBody, globalSite.Region); err != nil { return err } case config.SubnetSubSys: @@ -370,12 +370,12 @@ func validateSubSysConfig(s config.Config, subSys string, objAPI ObjectLayer) er fallthrough case config.PolicyPluginSubSys: if ppargs, err := polplugin.LookupConfig(s[config.PolicyPluginSubSys][config.Default], - NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil { + NewHTTPTransport(), xhttp.DrainBody); err != nil { return err } else if ppargs.URL == nil { // Check if legacy opa is configured. 
if _, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default], - NewGatewayHTTPTransport(), xhttp.DrainBody); err != nil { + NewHTTPTransport(), xhttp.DrainBody); err != nil { return err } } @@ -388,7 +388,7 @@ func validateSubSysConfig(s config.Config, subSys string, objAPI ObjectLayer) er } if config.NotifySubSystems.Contains(subSys) { - if err := notify.TestSubSysNotificationTargets(GlobalContext, s, subSys, NewGatewayHTTPTransport()); err != nil { + if err := notify.TestSubSysNotificationTargets(GlobalContext, s, subSys, NewHTTPTransport()); err != nil { return err } } @@ -434,42 +434,26 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { dnsURL, dnsUser, dnsPass, err := env.LookupEnv(config.EnvDNSWebhook) if err != nil { - if globalIsGateway { - logger.FatalIf(err, "Unable to initialize remote webhook DNS config") - } else { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) - } + logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) } if err == nil && dnsURL != "" { globalDNSConfig, err = dns.NewOperatorDNS(dnsURL, dns.Authentication(dnsUser, dnsPass), dns.RootCAs(globalRootCAs)) if err != nil { - if globalIsGateway { - logger.FatalIf(err, "Unable to initialize remote webhook DNS config") - } else { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) - } + logger.LogIf(ctx, fmt.Errorf("Unable to initialize remote webhook DNS config %w", err)) } } etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs) if err != nil { - if globalIsGateway { - logger.FatalIf(err, "Unable to initialize etcd config") - } else { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) - } + logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) } if etcdCfg.Enabled { globalEtcdClient, err = etcd.New(etcdCfg) if err != nil { - if globalIsGateway { - logger.FatalIf(err, "Unable to initialize etcd config") - } else { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) - } + logger.LogIf(ctx, fmt.Errorf("Unable to initialize etcd config: %w", err)) } if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil { @@ -485,12 +469,8 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { dns.CoreDNSPath(etcdCfg.CoreDNSPath), ) if err != nil { - if globalIsGateway { - logger.FatalIf(err, "Unable to initialize DNS config") - } else { - logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w", - globalDomainNames, err)) - } + logger.LogIf(ctx, fmt.Errorf("Unable to initialize DNS config for %s: %w", + globalDomainNames, err)) } } } @@ -510,11 +490,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { globalCacheConfig, err = cache.LookupConfig(s[config.CacheSubSys][config.Default]) if err != nil { - if globalIsGateway { - logger.FatalIf(err, "Unable to setup cache") - } else { - logger.LogIf(ctx, fmt.Errorf("Unable to setup cache: %w", err)) - } + logger.LogIf(ctx, fmt.Errorf("Unable to setup cache: %w", err)) } if globalCacheConfig.Enabled { @@ -545,7 +521,7 @@ func lookupConfigs(s config.Config, objAPI ObjectLayer) { logger.LogIf(ctx, fmt.Errorf("Unable to parse subnet configuration: %w", err)) } - transport := NewGatewayHTTPTransport() + transport := NewHTTPTransport() globalConfigTargetList, err = notify.FetchEnabledTargets(GlobalContext, s, transport) if err != nil { @@ -554,11 +530,7 @@ func lookupConfigs(s config.Config, objAPI 
ObjectLayer) { // Apply dynamic config values if err := applyDynamicConfig(ctx, objAPI, s); err != nil { - if globalIsGateway { - logger.FatalIf(err, "Unable to initialize dynamic configuration") - } else { - logger.LogIf(ctx, err) - } + logger.LogIf(ctx, err) } } @@ -579,7 +551,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf // Initialize remote instance transport once. getRemoteInstanceTransportOnce.Do(func() { - getRemoteInstanceTransport = newGatewayHTTPTransport(apiConfig.RemoteTransportDeadline) + getRemoteInstanceTransport = newHTTPTransport(apiConfig.RemoteTransportDeadline) }) case config.CompressionSubSys: cmpCfg, err := compress.LookupConfig(s[config.CompressionSubSys][config.Default]) @@ -617,7 +589,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf if l.Enabled { l.LogOnce = logger.LogOnceConsoleIf l.UserAgent = userAgent - l.Transport = NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey) + l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey) loggerCfg.HTTP[n] = l } } @@ -634,7 +606,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf if l.Enabled { l.LogOnce = logger.LogOnceConsoleIf l.UserAgent = userAgent - l.Transport = NewGatewayHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey) + l.Transport = NewHTTPTransportWithClientCerts(l.ClientCert, l.ClientKey) loggerCfg.AuditWebhook[n] = l } } diff --git a/cmd/data-scanner-metric.go b/cmd/data-scanner-metric.go index 53a6d5326..0c43f5acc 100644 --- a/cmd/data-scanner-metric.go +++ b/cmd/data-scanner-metric.go @@ -119,13 +119,6 @@ func (p *scannerMetrics) incTime(s scannerMetric, d time.Duration) { } } -func (p *scannerMetrics) incNoTime(s scannerMetric) { - atomic.AddUint64(&p.operations[s], 1) - if s < scannerMetricLastRealtime { - p.latency[s].add(0) - } -} - // timeILM times an ILM action. // lifecycle.NoneAction is ignored. // Use for s < scannerMetricLastRealtime diff --git a/cmd/data-usage-cache.go b/cmd/data-usage-cache.go index 67334bb4e..bed28eaa9 100644 --- a/cmd/data-usage-cache.go +++ b/cmd/data-usage-cache.go @@ -392,13 +392,6 @@ func (e *dataUsageEntry) addChild(hash dataUsageHash) { e.Children[hash.Key()] = struct{}{} } -// removeChild will remove a child based on its hash. -func (e *dataUsageEntry) removeChild(hash dataUsageHash) { - if len(e.Children) > 0 { - delete(e.Children, hash.Key()) - } -} - // Create a clone of the entry. func (e dataUsageEntry) clone() dataUsageEntry { // We operate on a copy from the receiver. @@ -488,43 +481,6 @@ func (d *dataUsageCache) deleteRecursive(h dataUsageHash) { } } -// keepBuckets will keep only the buckets specified specified by delete all others. -func (d *dataUsageCache) keepBuckets(b []BucketInfo) { - lu := make(map[dataUsageHash]struct{}) - for _, v := range b { - lu[hashPath(v.Name)] = struct{}{} - } - d.keepRootChildren(lu) -} - -// keepRootChildren will keep the root children specified by delete all others. -func (d *dataUsageCache) keepRootChildren(list map[dataUsageHash]struct{}) { - root := d.root() - if root == nil { - return - } - rh := d.rootHash() - for k := range d.Cache { - h := dataUsageHash(k) - if h == rh { - continue - } - if _, ok := list[h]; !ok { - delete(d.Cache, k) - d.deleteRecursive(h) - root.removeChild(h) - } - } - // Clean up abandoned children. 
- for k := range root.Children { - h := dataUsageHash(k) - if _, ok := list[h]; !ok { - delete(root.Children, k) - } - } - d.Cache[rh.Key()] = *root -} - // dui converts the flattened version of the path to madmin.DataUsageInfo. // As a side effect d will be flattened, use a clone if this is not ok. func (d *dataUsageCache) dui(path string, buckets []BucketInfo) DataUsageInfo { diff --git a/cmd/disk-cache-backend.go b/cmd/disk-cache-backend.go index 0ee2bf586..b9ff603ea 100644 --- a/cmd/disk-cache-backend.go +++ b/cmd/disk-cache-backend.go @@ -953,7 +953,7 @@ func (c *diskCache) putRange(ctx context.Context, bucket, object string, data io objSize, _ = sio.EncryptedSize(uint64(size)) } - cacheFile := MustGetUUID() + cacheFile := mustGetUUID() n, _, err := c.bitrotWriteToCache(cachePath, cacheFile, reader, actualSize) if IsErr(err, baseErrs...) { // take the cache drive offline diff --git a/cmd/encryption-v1.go b/cmd/encryption-v1.go index c46468006..3d3f672f3 100644 --- a/cmd/encryption-v1.go +++ b/cmd/encryption-v1.go @@ -1043,18 +1043,6 @@ func DecryptObjectInfo(info *ObjectInfo, r *http.Request) (encrypted bool, err e return encrypted, nil } -// The customer key in the header is used by the gateway for encryption in the case of -// s3 gateway double encryption. A new client key is derived from the customer provided -// key to be sent to the s3 backend for encryption at the backend. -func deriveClientKey(clientKey [32]byte, bucket, object string) [32]byte { - var key [32]byte - mac := hmac.New(sha256.New, clientKey[:]) - mac.Write([]byte(crypto.SSEC.String())) - mac.Write([]byte(path.Join(bucket, object))) - mac.Sum(key[:0]) - return key -} - type ( objectMetaEncryptFn func(baseKey string, data []byte) []byte objectMetaDecryptFn func(baseKey string, data []byte) ([]byte, error) diff --git a/cmd/erasure-server-pool.go b/cmd/erasure-server-pool.go index 1e545269e..f57bda199 100644 --- a/cmd/erasure-server-pool.go +++ b/cmd/erasure-server-pool.go @@ -42,8 +42,6 @@ import ( ) type erasureServerPools struct { - GatewayUnsupported - poolMetaMutex sync.RWMutex poolMeta poolMeta serverPools []*erasureSets @@ -2132,12 +2130,6 @@ func (z *erasureServerPools) HealObject(ctx context.Context, bucket, object, ver } } -// GetMetrics - returns metrics of local disks -func (z *erasureServerPools) GetMetrics(ctx context.Context) (*BackendMetrics, error) { - logger.LogIf(ctx, NotImplemented{}) - return &BackendMetrics{}, NotImplemented{} -} - func (z *erasureServerPools) getPoolAndSet(id string) (poolIdx, setIdx, diskIdx int, err error) { for poolIdx := range z.serverPools { format := z.serverPools[poolIdx].format diff --git a/cmd/erasure-sets.go b/cmd/erasure-sets.go index c7ba013bf..bb22fd957 100644 --- a/cmd/erasure-sets.go +++ b/cmd/erasure-sets.go @@ -51,8 +51,6 @@ type setsDsyncLockers [][]dsync.NetLocker // object sets. NOTE: There is no dynamic scaling allowed or intended in // current design. type erasureSets struct { - GatewayUnsupported - sets []*erasureObjects // Reference format. 
diff --git a/cmd/erasure-single-drive.go b/cmd/erasure-single-drive.go
index 8d0076790..c597749d3 100644
--- a/cmd/erasure-single-drive.go
+++ b/cmd/erasure-single-drive.go
@@ -55,8 +55,6 @@ import (
 
 // erasureSingle - Implements single drive XL layer
 type erasureSingle struct {
-	GatewayUnsupported
-
 	disk StorageAPI
 
 	endpoint Endpoint
 
@@ -2484,6 +2482,22 @@ func (es *erasureSingle) PutObjectPart(ctx context.Context, bucket, object, uplo
 	}, nil
 }
 
+func (es *erasureSingle) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
+	return madmin.HealResultItem{}, NotImplemented{}
+}
+
+func (es *erasureSingle) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
+	return madmin.HealResultItem{}, NotImplemented{}
+}
+
+func (es *erasureSingle) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) error {
+	return NotImplemented{}
+}
+
+func (es *erasureSingle) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
+	return madmin.HealResultItem{}, NotImplemented{}
+}
+
 // GetMultipartInfo returns multipart metadata uploaded during newMultipartUpload, used
 // by callers to verify object states
 // - encrypted
diff --git a/cmd/erasure.go b/cmd/erasure.go
index f24fa12e0..5d431e62b 100644
--- a/cmd/erasure.go
+++ b/cmd/erasure.go
@@ -41,8 +41,6 @@ var OfflineDisk StorageAPI // zero value is nil
 
 // erasureObjects - Implements ER object layer.
 type erasureObjects struct {
-	GatewayUnsupported
-
 	setDriveCount int
 
 	defaultParityCount int
 
diff --git a/cmd/event-notification.go b/cmd/event-notification.go
index 267309d9f..2b7bfbbe8 100644
--- a/cmd/event-notification.go
+++ b/cmd/event-notification.go
@@ -100,11 +100,6 @@ func (evnot *EventNotifier) InitBucketTargets(ctx context.Context, objAPI Object
 		return errServerNotInitialized
 	}
 
-	// In gateway mode, notifications are not supported - except NAS gateway.
-	if globalIsGateway && !objAPI.IsNotificationSupported() {
-		return nil
-	}
-
 	if err := evnot.targetList.Add(globalConfigTargetList.Targets()...); err != nil {
 		return err
 	}
@@ -326,10 +321,6 @@ func sendEvent(args eventArgs) {
 	crypto.RemoveSensitiveEntries(args.Object.UserDefined)
 	crypto.RemoveInternalEntries(args.Object.UserDefined)
 
-	// globalNotificationSys is not initialized in gateway mode.
-	if globalNotificationSys == nil {
-		return
-	}
 	if globalHTTPListen.NumSubscribers(args.EventName) > 0 {
 		globalHTTPListen.Publish(args.ToEvent(false))
 	}
diff --git a/cmd/format-fs.go b/cmd/format-fs.go
deleted file mode 100644
index 5b08f401b..000000000
--- a/cmd/format-fs.go
+++ /dev/null
@@ -1,387 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
- -package cmd - -import ( - "context" - "fmt" - "io" - "math/rand" - "os" - "path" - "time" - - "github.com/minio/minio/internal/config" - "github.com/minio/minio/internal/lock" - "github.com/minio/minio/internal/logger" -) - -// FS format version strings. -const ( - formatBackendFS = "fs" - formatFSVersionV1 = "1" - formatFSVersionV2 = "2" -) - -// formatFSV1 - structure holds format version '1'. -type formatFSV1 struct { - formatMetaV1 - FS struct { - Version string `json:"version"` - } `json:"fs"` -} - -// formatFSV2 - structure is same as formatFSV1. But the multipart backend -// structure is flat instead of hierarchy now. -// In .minio.sys/multipart we have: -// sha256(bucket/object)/uploadID/[fs.json, 1.etag, 2.etag ....] -type formatFSV2 = formatFSV1 - -// Used to detect the version of "fs" format. -type formatFSVersionDetect struct { - FS struct { - Version string `json:"version"` - } `json:"fs"` -} - -// Generic structure to manage both v1 and v2 structures -type formatFS struct { - formatMetaV1 - FS interface{} `json:"fs"` -} - -// Returns the latest "fs" format V1 -func newFormatFSV1() (format *formatFSV1) { - f := &formatFSV1{} - f.Version = formatMetaVersionV1 - f.Format = formatBackendFS - f.ID = mustGetUUID() - f.FS.Version = formatFSVersionV1 - return f -} - -// Returns the field formatMetaV1.Format i.e the string "fs" which is never likely to change. -// We do not use this function in Erasure to get the format as the file is not fcntl-locked on Erasure. -func formatMetaGetFormatBackendFS(r io.ReadSeeker) (string, error) { - format := &formatMetaV1{} - if err := jsonLoad(r, format); err != nil { - return "", err - } - if format.Version == formatMetaVersionV1 { - return format.Format, nil - } - return "", fmt.Errorf(`format.Version expected: %s, got: %s`, formatMetaVersionV1, format.Version) -} - -// Returns formatFS.FS.Version -func formatFSGetVersion(r io.ReadSeeker) (string, error) { - format := &formatFSVersionDetect{} - if err := jsonLoad(r, format); err != nil { - return "", err - } - return format.FS.Version, nil -} - -// Migrate from V1 to V2. V2 implements new backend format for multipart -// uploads. Delete the previous multipart directory. -func formatFSMigrateV1ToV2(ctx context.Context, wlk *lock.LockedFile, fsPath string) error { - version, err := formatFSGetVersion(wlk) - if err != nil { - return err - } - - if version != formatFSVersionV1 { - return fmt.Errorf(`format.json version expected %s, found %s`, formatFSVersionV1, version) - } - - if err = fsRemoveAll(ctx, path.Join(fsPath, minioMetaMultipartBucket)); err != nil { - return err - } - - if err = os.MkdirAll(path.Join(fsPath, minioMetaMultipartBucket), 0o755); err != nil { - return err - } - - formatV1 := formatFSV1{} - if err = jsonLoad(wlk, &formatV1); err != nil { - return err - } - - formatV2 := formatFSV2{} - formatV2.formatMetaV1 = formatV1.formatMetaV1 - formatV2.FS.Version = formatFSVersionV2 - - return jsonSave(wlk.File, formatV2) -} - -// Migrate the "fs" backend. -// Migration should happen when formatFSV1.FS.Version changes. This version -// can change when there is a change to the struct formatFSV1.FS or if there -// is any change in the backend file system tree structure. 
-func formatFSMigrate(ctx context.Context, wlk *lock.LockedFile, fsPath string) error { - // Add any migration code here in case we bump format.FS.Version - version, err := formatFSGetVersion(wlk) - if err != nil { - return err - } - - switch version { - case formatFSVersionV1: - if err = formatFSMigrateV1ToV2(ctx, wlk, fsPath); err != nil { - return err - } - fallthrough - case formatFSVersionV2: - // We are at the latest version. - } - - // Make sure that the version is what we expect after the migration. - version, err = formatFSGetVersion(wlk) - if err != nil { - return err - } - if version != formatFSVersionV2 { - return config.ErrUnexpectedBackendVersion(fmt.Errorf(`%s file: expected FS version: %s, found FS version: %s`, formatConfigFile, formatFSVersionV2, version)) - } - return nil -} - -// Creates a new format.json if unformatted. -func createFormatFS(fsFormatPath string) error { - // Attempt a write lock on formatConfigFile `format.json` - // file stored in minioMetaBucket(.minio.sys) directory. - lk, err := lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR|os.O_CREATE, 0o666) - if err != nil { - return err - } - // Close the locked file upon return. - defer lk.Close() - - fi, err := lk.Stat() - if err != nil { - return err - } - if fi.Size() != 0 { - // format.json already got created because of another minio process's createFormatFS() - return nil - } - - return jsonSave(lk.File, newFormatFSV1()) -} - -// This function returns a read-locked format.json reference to the caller. -// The file descriptor should be kept open throughout the life -// of the process so that another minio process does not try to -// migrate the backend when we are actively working on the backend. -func initFormatFS(ctx context.Context, fsPath string) (rlk *lock.RLockedFile, err error) { - fsFormatPath := pathJoin(fsPath, minioMetaBucket, formatConfigFile) - - // Add a deployment ID, if it does not exist. - if err := formatFSFixDeploymentID(ctx, fsFormatPath); err != nil { - return nil, err - } - - // Any read on format.json should be done with read-lock. - // Any write on format.json should be done with write-lock. - for { - isEmpty := false - rlk, err := lock.RLockedOpenFile(fsFormatPath) - if err == nil { - // format.json can be empty in a rare condition when another - // minio process just created the file but could not hold lock - // and write to it. - var fi os.FileInfo - fi, err = rlk.Stat() - if err != nil { - return nil, err - } - isEmpty = fi.Size() == 0 - } - if osIsNotExist(err) || isEmpty { - if err == nil { - rlk.Close() - } - // Fresh disk - create format.json - err = createFormatFS(fsFormatPath) - if err == lock.ErrAlreadyLocked { - // Lock already present, sleep and attempt again. - // Can happen in a rare situation when a parallel minio process - // holds the lock and creates format.json - time.Sleep(100 * time.Millisecond) - continue - } - if err != nil { - return nil, err - } - // After successfully creating format.json try to hold a read-lock on - // the file. 
- continue - } - if err != nil { - return nil, err - } - - formatBackend, err := formatMetaGetFormatBackendFS(rlk) - if err != nil { - return nil, err - } - if formatBackend == formatBackendErasureSingle { - return nil, errFreshDisk - } - if formatBackend != formatBackendFS { - return nil, fmt.Errorf(`%s file: expected format-type: %s, found: %s`, formatConfigFile, formatBackendFS, formatBackend) - } - version, err := formatFSGetVersion(rlk) - if err != nil { - return nil, err - } - if version != formatFSVersionV2 { - // Format needs migration - rlk.Close() - // Hold write lock during migration so that we do not disturb any - // minio processes running in parallel. - var wlk *lock.LockedFile - wlk, err = lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR, 0o666) - if err == lock.ErrAlreadyLocked { - // Lock already present, sleep and attempt again. - time.Sleep(100 * time.Millisecond) - continue - } - if err != nil { - return nil, err - } - err = formatFSMigrate(ctx, wlk, fsPath) - wlk.Close() - if err != nil { - // Migration failed, bail out so that the user can observe what happened. - return nil, err - } - // Successfully migrated, now try to hold a read-lock on format.json - continue - } - var id string - if id, err = formatFSGetDeploymentID(rlk); err != nil { - rlk.Close() - return nil, err - } - globalDeploymentID = id - return rlk, nil - } -} - -func formatFSGetDeploymentID(rlk *lock.RLockedFile) (id string, err error) { - format := &formatFS{} - if err := jsonLoad(rlk, format); err != nil { - return "", err - } - return format.ID, nil -} - -// Generate a deployment ID if one does not exist already. -func formatFSFixDeploymentID(ctx context.Context, fsFormatPath string) error { - rlk, err := lock.RLockedOpenFile(fsFormatPath) - if err == nil { - // format.json can be empty in a rare condition when another - // minio process just created the file but could not hold lock - // and write to it. - var fi os.FileInfo - fi, err = rlk.Stat() - if err != nil { - rlk.Close() - return err - } - if fi.Size() == 0 { - rlk.Close() - return nil - } - } - if osIsNotExist(err) { - return nil - } - if err != nil { - return err - } - - formatBackend, err := formatMetaGetFormatBackendFS(rlk) - if err != nil { - rlk.Close() - return err - } - if formatBackend == formatBackendErasureSingle { - rlk.Close() - return errFreshDisk - } - if formatBackend != formatBackendFS { - rlk.Close() - return fmt.Errorf(`%s file: expected format-type: %s, found: %s`, formatConfigFile, formatBackendFS, formatBackend) - } - - format := &formatFS{} - err = jsonLoad(rlk, format) - rlk.Close() - if err != nil { - return err - } - - // Check if it needs to be updated - if format.ID != "" { - return nil - } - - formatStartTime := time.Now().Round(time.Second) - getElapsedTime := func() string { - return time.Now().Round(time.Second).Sub(formatStartTime).String() - } - - r := rand.New(rand.NewSource(time.Now().UnixNano())) - - var wlk *lock.LockedFile - var stop bool - for !stop { - select { - case <-ctx.Done(): - return fmt.Errorf("Initializing FS format stopped gracefully") - default: - wlk, err = lock.TryLockedOpenFile(fsFormatPath, os.O_RDWR, 0o666) - if err == lock.ErrAlreadyLocked { - // Lock already present, sleep and attempt again - logger.Info("Another minio process(es) might be holding a lock to the file %s. 
-				logger.Info("Another minio process(es) might be holding a lock to the file %s. Please kill that minio process(es) (elapsed %s)\n", fsFormatPath, getElapsedTime())
-				time.Sleep(time.Duration(r.Float64() * float64(5*time.Second)))
-				continue
-			}
-			if err != nil {
-				return err
-			}
-		}
-		stop = true
-	}
-	defer wlk.Close()
-
-	if err = jsonLoad(wlk, format); err != nil {
-		return err
-	}
-
-	// Check if format needs to be updated
-	if format.ID != "" {
-		return nil
-	}
-
-	// Set new UUID to the format and save it
-	format.ID = mustGetUUID()
-	return jsonSave(wlk, format)
-}
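
Both initFormatFS and formatFSFixDeploymentID above serialize access to format.json with the same polling idiom: try the write lock, and if another minio process already holds it, sleep briefly and retry. Condensed into one helper, the idiom looks roughly like the following sketch; it assumes the internal lock package's TryLockedOpenFile and ErrAlreadyLocked exactly as used above, and lockWithRetry itself is not part of the deleted code:

	// lockWithRetry polls for the write lock on path, backing off while
	// another process holds it and giving up once the context is done.
	func lockWithRetry(ctx context.Context, path string) (*lock.LockedFile, error) {
		for {
			wlk, err := lock.TryLockedOpenFile(path, os.O_RDWR, 0o666)
			if err == nil {
				return wlk, nil // Acquired the lock.
			}
			if err != lock.ErrAlreadyLocked {
				return nil, err // A real failure, not lock contention.
			}
			select {
			case <-ctx.Done():
				return nil, ctx.Err()
			case <-time.After(100 * time.Millisecond):
				// Another process holds the lock; retry.
			}
		}
	}
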
diff --git a/cmd/format-fs_test.go b/cmd/format-fs_test.go
deleted file mode 100644
index b6e7b0b4d..000000000
--- a/cmd/format-fs_test.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"context"
-	"os"
-	"path/filepath"
-	"testing"
-)
-
-// TestFSFormatFS - tests initFormatFS, formatMetaGetFormatBackendFS, formatFSGetVersion.
-func TestFSFormatFS(t *testing.T) {
-	// Prepare for testing
-	disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
-	defer os.RemoveAll(disk)
-
-	fsFormatPath := pathJoin(disk, minioMetaBucket, formatConfigFile)
-
-	// Assign a new UUID.
-	uuid := mustGetUUID()
-
-	// Initialize meta volume, if volume already exists ignores it.
-	if err := initMetaVolumeFS(disk, uuid); err != nil {
-		t.Fatal(err)
-	}
-
-	rlk, err := initFormatFS(context.Background(), disk)
-	if err != nil {
-		t.Fatal(err)
-	}
-	rlk.Close()
-
-	// Do the basic sanity checks to check if initFormatFS() did its job.
-	f, err := os.OpenFile(fsFormatPath, os.O_RDWR|os.O_SYNC, 0)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer f.Close()
-
-	format, err := formatMetaGetFormatBackendFS(f)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if format != formatBackendFS {
-		t.Fatalf(`expected: %s, got: %s`, formatBackendFS, format)
-	}
-	version, err := formatFSGetVersion(f)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if version != formatFSVersionV2 {
-		t.Fatalf(`expected: %s, got: %s`, formatFSVersionV2, version)
-	}
-
-	// Corrupt the format.json file and test the functions.
-	// formatMetaGetFormatBackendFS, formatFSGetVersion, initFormatFS should return errors.
-	if err = f.Truncate(0); err != nil {
-		t.Fatal(err)
-	}
-	if _, err = f.WriteString("b"); err != nil {
-		t.Fatal(err)
-	}
-
-	if _, err = formatMetaGetFormatBackendFS(f); err == nil {
-		t.Fatal("expected to fail")
-	}
-	if _, err = formatFSGetVersion(rlk); err == nil {
-		t.Fatal("expected to fail")
-	}
-	if _, err = initFormatFS(context.Background(), disk); err == nil {
-		t.Fatal("expected to fail")
-	}
-
-	// With unknown formatMetaV1.Version formatMetaGetFormatBackendFS, initFormatFS should return error.
-	if err = f.Truncate(0); err != nil {
-		t.Fatal(err)
-	}
-	// Here we set formatMetaV1.Version to "2"
-	if _, err = f.WriteString(`{"version":"2","format":"fs","fs":{"version":"1"}}`); err != nil {
-		t.Fatal(err)
-	}
-	if _, err = formatMetaGetFormatBackendFS(f); err == nil {
-		t.Fatal("expected to fail")
-	}
-	if _, err = initFormatFS(context.Background(), disk); err == nil {
-		t.Fatal("expected to fail")
-	}
-}
diff --git a/cmd/fs-tree-walk-pool.go b/cmd/fs-tree-walk-pool.go
deleted file mode 100644
index 030803852..000000000
--- a/cmd/fs-tree-walk-pool.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"errors"
-	"reflect"
-	"sync"
-	"time"
-)
-
-// Global lookup timeout.
-const (
-	globalLookupTimeout    = time.Minute * 30 // 30minutes.
-	treeWalkEntryLimit     = 50
-	treeWalkSameEntryLimit = 4
-)
-
-// listParams - list object params used for list object map
-type listParams struct {
-	bucket    string
-	recursive bool
-	marker    string
-	prefix    string
-}
-
-// errWalkAbort - returned by doTreeWalk() if it returns prematurely.
-// doTreeWalk() can return prematurely if
-// 1) treeWalk is timed out by the timer go-routine.
-// 2) there is an error during tree walk.
-var errWalkAbort = errors.New("treeWalk abort")
-
-// treeWalk - represents the go routine that does the file tree walk.
-type treeWalk struct {
-	added      time.Time
-	resultCh   chan TreeWalkResult
-	endWalkCh  chan struct{}   // To signal when treeWalk go-routine should end.
-	endTimerCh chan<- struct{} // To signal when timer go-routine should end.
-}
-
-// TreeWalkPool - pool of treeWalk go routines.
-// A treeWalk is added to the pool by Set() and removed either by
-// doing a Release() or if the concerned timer goes off.
-// treeWalkPool's purpose is to maintain active treeWalk go-routines in a map so that
-// it can be looked up across related list calls.
-type TreeWalkPool struct {
-	mu      sync.Mutex
-	pool    map[listParams][]treeWalk
-	timeOut time.Duration
-}
-
-// NewTreeWalkPool - initialize new tree walk pool.
-func NewTreeWalkPool(timeout time.Duration) *TreeWalkPool {
-	tPool := &TreeWalkPool{
-		pool:    make(map[listParams][]treeWalk),
-		timeOut: timeout,
-	}
-	return tPool
-}
-
-// Release - selects a treeWalk from the pool based on the input
-// listParams, removes it from the pool, and returns the TreeWalkResult
-// channel.
-// Returns nil if listParams does not have an associated treeWalk.
-func (t *TreeWalkPool) Release(params listParams) (resultCh chan TreeWalkResult, endWalkCh chan struct{}) {
-	t.mu.Lock()
-	defer t.mu.Unlock()
-	walks, ok := t.pool[params] // Pick the valid walks.
-	if !ok || len(walks) == 0 {
-		// Release return nil if params not found.
-		return nil, nil
-	}
-
-	// Pop out the first valid walk entry.
-	walk := walks[0]
-	walks = walks[1:]
-	if len(walks) > 0 {
-		t.pool[params] = walks
-	} else {
-		delete(t.pool, params)
-	}
-	walk.endTimerCh <- struct{}{}
-	return walk.resultCh, walk.endWalkCh
-}
-
-// Set - adds a treeWalk to the treeWalkPool.
-// Also starts a timer go-routine that ends when:
-// 1. time.After() expires after t.timeOut seconds.
-//    The expiration is needed so that the treeWalk go-routine resources are freed after a timeout
-//    if the S3 client does only partial listing of objects.
-// 2. Release() signals the timer go-routine to end on endTimerCh.
-//    During listing the timer should not timeout and end the treeWalk go-routine, hence the
-//    timer go-routine should be ended.
-func (t *TreeWalkPool) Set(params listParams, resultCh chan TreeWalkResult, endWalkCh chan struct{}) {
-	t.mu.Lock()
-	defer t.mu.Unlock()
-	// If we are above the limit delete at least one entry from the pool.
-	if len(t.pool) > treeWalkEntryLimit {
-		age := time.Now()
-		var oldest listParams
-		for k, v := range t.pool {
-			if len(v) == 0 {
-				delete(t.pool, k)
-				continue
-			}
-			// The first element is the oldest, so we only check that.
-			e := v[0]
-			if e.added.Before(age) {
-				oldest = k
-				age = e.added
-			}
-		}
-		// Invalidate and delete oldest.
-		if walks, ok := t.pool[oldest]; ok && len(walks) > 0 {
-			endCh := walks[0].endTimerCh
-			endWalkCh := walks[0].endWalkCh
-			if len(walks) > 1 {
-				// Move walks forward
-				copy(walks, walks[1:])
-				walks = walks[:len(walks)-1]
-				t.pool[oldest] = walks
-			} else {
-				// Only entry, just delete.
-				delete(t.pool, oldest)
-			}
-			select {
-			case endCh <- struct{}{}:
-				close(endWalkCh)
-			default:
-			}
-		} else {
-			// Shouldn't happen, but just in case.
-			delete(t.pool, oldest)
-		}
-	}
-
-	// Should be a buffered channel so that Release() never blocks.
-	endTimerCh := make(chan struct{}, 1)
-	walkInfo := treeWalk{
-		added:      UTCNow(),
-		resultCh:   resultCh,
-		endWalkCh:  endWalkCh,
-		endTimerCh: endTimerCh,
-	}
-
-	// Append new walk info.
-	walks := t.pool[params]
-	if len(walks) < treeWalkSameEntryLimit {
-		t.pool[params] = append(walks, walkInfo)
-	} else {
-		// We are at limit, invalidate oldest, move list down and add new as last.
-		select {
-		case walks[0].endTimerCh <- struct{}{}:
-			close(walks[0].endWalkCh)
-		default:
-		}
-		copy(walks, walks[1:])
-		walks[len(walks)-1] = walkInfo
-	}
-
-	// Timer go-routine which times out after t.timeOut seconds.
-	go func(endTimerCh <-chan struct{}, walkInfo treeWalk) {
-		select {
-		// Wait until timeOut
-		case <-time.After(t.timeOut):
-			// Timeout has expired. Remove the treeWalk from treeWalkPool and
-			// end the treeWalk go-routine.
-			t.mu.Lock()
-			defer t.mu.Unlock()
-			walks, ok := t.pool[params]
-			if ok {
-				// Trick of filtering without allocating
-				// https://github.com/golang/go/wiki/SliceTricks#filtering-without-allocating
-				nwalks := walks[:0]
-				// Look for walkInfo, remove it from the walks list.
-				for _, walk := range walks {
-					if !reflect.DeepEqual(walk, walkInfo) {
-						nwalks = append(nwalks, walk)
-					}
-				}
-				if len(nwalks) == 0 {
-					// No more treeWalk go-routines associated with listParams
-					// hence remove map entry.
-					delete(t.pool, params)
-				} else {
-					// There are more treeWalk go-routines associated with listParams
-					// hence save the list in the map.
-					t.pool[params] = nwalks
-				}
-			}
-			// Signal the treeWalk go-routine to die.
-			close(endWalkCh)
-		case <-endTimerCh:
-			return
-		}
-	}(endTimerCh, walkInfo)
-}
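
The pool above exists so that paginated list calls can resume a walk instead of restarting it: a walker parked with Set under its listParams can be reclaimed by a later continuation call with the same parameters via Release. A minimal usage sketch in the same package (resumeOrStart is illustrative and not part of the deleted code; starting the actual walker goroutine is elided):

	// resumeOrStart reclaims a parked walker for params, or registers a new one.
	func resumeOrStart(pool *TreeWalkPool, params listParams) (chan TreeWalkResult, chan struct{}) {
		if resultCh, endWalkCh := pool.Release(params); resultCh != nil {
			return resultCh, endWalkCh // Resume the parked walk.
		}
		resultCh := make(chan TreeWalkResult)
		endWalkCh := make(chan struct{})
		// ... start the walker goroutine feeding resultCh here ...
		pool.Set(params, resultCh, endWalkCh) // Park it for the next continuation call.
		return resultCh, endWalkCh
	}
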
diff --git a/cmd/fs-tree-walk-pool_test.go b/cmd/fs-tree-walk-pool_test.go
deleted file mode 100644
index 3ff24cfb5..000000000
--- a/cmd/fs-tree-walk-pool_test.go
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"testing"
-	"time"
-)
-
-// Test if tree walker go-routine is removed from the pool after timeout
-// and that is available in the pool before the timeout.
-func TestTreeWalkPoolBasic(t *testing.T) {
-	// Create a treeWalkPool
-	tw := NewTreeWalkPool(1 * time.Second)
-
-	// Create sample params
-	params := listParams{
-		bucket: "test-bucket",
-	}
-
-	// Add a treeWalk to the pool
-	resultCh := make(chan TreeWalkResult)
-	endWalkCh := make(chan struct{})
-	tw.Set(params, resultCh, endWalkCh)
-
-	// Wait for treeWalkPool timeout to happen
-	<-time.After(2 * time.Second)
-	if c1, _ := tw.Release(params); c1 != nil {
-		t.Error("treeWalk go-routine must have been freed")
-	}
-
-	// Add the treeWalk back to the pool
-	tw.Set(params, resultCh, endWalkCh)
-
-	// Release the treeWalk before timeout
-	select {
-	case <-time.After(1 * time.Second):
-		break
-	default:
-		if c1, _ := tw.Release(params); c1 == nil {
-			t.Error("treeWalk go-routine got freed before timeout")
-		}
-	}
-}
-
-// Test if multiple tree walkers for the same listParams are managed as expected by the pool.
-func TestManyWalksSameParam(t *testing.T) {
-	// Create a treeWalkPool.
-	tw := NewTreeWalkPool(5 * time.Second)
-
-	// Create sample params.
-	params := listParams{
-		bucket: "test-bucket",
-	}
-
-	select {
-	// This timeout is an upper-bound. This is started
-	// before the first treeWalk go-routine's timeout period starts.
-	case <-time.After(5 * time.Second):
-		break
-	default:
-		// Create many treeWalk go-routines for the same params.
-		for i := 0; i < treeWalkSameEntryLimit; i++ {
-			resultCh := make(chan TreeWalkResult)
-			endWalkCh := make(chan struct{})
-			tw.Set(params, resultCh, endWalkCh)
-		}
-
-		tw.mu.Lock()
-		if walks, ok := tw.pool[params]; ok {
-			if len(walks) != treeWalkSameEntryLimit {
-				t.Error("There aren't as many walks as were Set")
-			}
-		}
-		tw.mu.Unlock()
-		for i := 0; i < treeWalkSameEntryLimit; i++ {
-			tw.mu.Lock()
-			if walks, ok := tw.pool[params]; ok {
-				// Before ith Release we should have n-i treeWalk go-routines.
-				if treeWalkSameEntryLimit-i != len(walks) {
-					t.Error("There aren't as many walks as were Set")
-				}
-			}
-			tw.mu.Unlock()
-			tw.Release(params)
-		}
-	}
-}
-
-// Test if multiple tree walkers for the same listParams are managed as expected by the pool
-// but that treeWalkSameEntryLimit is respected.
-func TestManyWalksSameParamPrune(t *testing.T) {
-	// Create a treeWalkPool.
-	tw := NewTreeWalkPool(5 * time.Second)
-
-	// Create sample params.
-	params := listParams{
-		bucket: "test-bucket",
-	}
-
-	select {
-	// This timeout is an upper-bound. This is started
-	// before the first treeWalk go-routine's timeout period starts.
-	case <-time.After(5 * time.Second):
-		break
-	default:
-		// Create many treeWalk go-routines for the same params.
-		for i := 0; i < treeWalkSameEntryLimit*4; i++ {
-			resultCh := make(chan TreeWalkResult)
-			endWalkCh := make(chan struct{})
-			tw.Set(params, resultCh, endWalkCh)
-		}
-
-		tw.mu.Lock()
-		if walks, ok := tw.pool[params]; ok {
-			if len(walks) != treeWalkSameEntryLimit {
-				t.Error("There aren't as many walks as were Set")
-			}
-		}
-		tw.mu.Unlock()
-		for i := 0; i < treeWalkSameEntryLimit; i++ {
-			tw.mu.Lock()
-			if walks, ok := tw.pool[params]; ok {
-				// Before ith Release we should have n-i treeWalk go-routines.
-				if treeWalkSameEntryLimit-i != len(walks) {
-					t.Error("There aren't as many walks as were Set")
-				}
-			}
-			tw.mu.Unlock()
-			tw.Release(params)
-		}
-	}
-}
diff --git a/cmd/fs-v1-helpers.go b/cmd/fs-v1-helpers.go
deleted file mode 100644
index 34dddffd3..000000000
--- a/cmd/fs-v1-helpers.go
+++ /dev/null
@@ -1,481 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"context"
-	"io"
-	"os"
-	pathutil "path"
-	"runtime"
-	"strings"
-	"time"
-
-	xioutil "github.com/minio/minio/internal/ioutil"
-	"github.com/minio/minio/internal/lock"
-	"github.com/minio/minio/internal/logger"
-)
-
-// Removes only the file at given path does not remove
-// any parent directories, handles long paths for
-// windows automatically.
-func fsRemoveFile(ctx context.Context, filePath string) (err error) {
-	if filePath == "" {
-		logger.LogIf(ctx, errInvalidArgument)
-		return errInvalidArgument
-	}
-
-	if err = checkPathLength(filePath); err != nil {
-		logger.LogIf(ctx, err)
-		return err
-	}
-
-	if err = os.Remove(filePath); err != nil {
-		if err = osErrToFileErr(err); err != errFileNotFound {
-			logger.LogIf(ctx, err)
-		}
-	}
-
-	return err
-}
-
-// Removes all files and folders at a given path, handles
-// long paths for windows automatically.
-func fsRemoveAll(ctx context.Context, dirPath string) (err error) {
-	if dirPath == "" {
-		logger.LogIf(ctx, errInvalidArgument)
-		return errInvalidArgument
-	}
-
-	if err = checkPathLength(dirPath); err != nil {
-		logger.LogIf(ctx, err)
-		return err
-	}
-
-	if err = removeAll(dirPath); err != nil {
-		if osIsPermission(err) {
-			logger.LogIf(ctx, errVolumeAccessDenied)
-			return errVolumeAccessDenied
-		} else if isSysErrNotEmpty(err) {
-			logger.LogIf(ctx, errVolumeNotEmpty)
-			return errVolumeNotEmpty
-		}
-		logger.LogIf(ctx, err)
-		return err
-	}
-
-	return nil
-}
-
-// Removes a directory only if its empty, handles long
-// paths for windows automatically.
-func fsRemoveDir(ctx context.Context, dirPath string) (err error) {
-	if dirPath == "" {
-		logger.LogIf(ctx, errInvalidArgument)
-		return errInvalidArgument
-	}
-
-	if err = checkPathLength(dirPath); err != nil {
-		logger.LogIf(ctx, err)
-		return err
-	}
-
-	if err = os.Remove((dirPath)); err != nil {
-		if osIsNotExist(err) {
-			return errVolumeNotFound
-		} else if isSysErrNotEmpty(err) {
-			return errVolumeNotEmpty
-		}
-		logger.LogIf(ctx, err)
-		return err
-	}
-
-	return nil
-}
-
-// Creates a new directory, parent dir should exist
-// otherwise returns an error. If directory already
-// exists returns an error. Windows long paths
-// are handled automatically.
-func fsMkdir(ctx context.Context, dirPath string) (err error) {
-	if dirPath == "" {
-		logger.LogIf(ctx, errInvalidArgument)
-		return errInvalidArgument
-	}
-
-	if err = checkPathLength(dirPath); err != nil {
-		logger.LogIf(ctx, err)
-		return err
-	}
-
-	if err = os.Mkdir((dirPath), 0o777); err != nil {
-		switch {
-		case osIsExist(err):
-			return errVolumeExists
-		case osIsPermission(err):
-			logger.LogIf(ctx, errDiskAccessDenied)
-			return errDiskAccessDenied
-		case isSysErrNotDir(err):
-			// File path cannot be verified since
-			// one of the parents is a file.
-			logger.LogIf(ctx, errDiskAccessDenied)
-			return errDiskAccessDenied
-		case isSysErrPathNotFound(err):
-			// Add specific case for windows.
-			logger.LogIf(ctx, errDiskAccessDenied)
-			return errDiskAccessDenied
-		default:
-			logger.LogIf(ctx, err)
-			return err
-		}
-	}
-
-	return nil
-}
-
-// fsStat is a low level call which validates input arguments
-// and checks input length upto supported maximum. Does
-// not perform any higher layer interpretation of files v/s
-// directories. For higher level interpretation look at
-// fsStatFileDir, fsStatFile, fsStatDir.
-func fsStat(ctx context.Context, statLoc string) (os.FileInfo, error) {
-	if statLoc == "" {
-		logger.LogIf(ctx, errInvalidArgument)
-		return nil, errInvalidArgument
-	}
-	if err := checkPathLength(statLoc); err != nil {
-		logger.LogIf(ctx, err)
-		return nil, err
-	}
-	fi, err := os.Stat(statLoc)
-	if err != nil {
-		return nil, err
-	}
-
-	return fi, nil
-}
-
-// fsTouch updates a file access & modtime with current time
-func fsTouch(ctx context.Context, statLoc string) error {
-	if statLoc == "" {
-		logger.LogIf(ctx, errInvalidArgument)
-		return errInvalidArgument
-	}
-	if err := checkPathLength(statLoc); err != nil {
-		logger.LogIf(ctx, err)
-		return err
-	}
-	now := time.Now()
-	if err := os.Chtimes(statLoc, now, now); err != nil {
-		logger.LogIf(ctx, err)
-		return err
-	}
-
-	return nil
-}
-
-// Lookup if volume exists, returns volume attributes upon success.
-func fsStatVolume(ctx context.Context, volume string) (os.FileInfo, error) {
-	fi, err := fsStat(ctx, volume)
-	if err != nil {
-		if osIsNotExist(err) {
-			return nil, errVolumeNotFound
-		} else if osIsPermission(err) {
-			return nil, errVolumeAccessDenied
-		}
-		return nil, err
-	}
-
-	if !fi.IsDir() {
-		return nil, errVolumeAccessDenied
-	}
-
-	return fi, nil
-}
-
-// Lookup if directory exists, returns directory attributes upon success.
-func fsStatDir(ctx context.Context, statDir string) (os.FileInfo, error) {
-	fi, err := fsStat(ctx, statDir)
-	if err != nil {
-		err = osErrToFileErr(err)
-		if err != errFileNotFound {
-			logger.LogIf(ctx, err)
-		}
-		return nil, err
-	}
-	if !fi.IsDir() {
-		return nil, errFileNotFound
-	}
-	return fi, nil
-}
-
-// Lookup if file exists, returns file attributes upon success.
-func fsStatFile(ctx context.Context, statFile string) (os.FileInfo, error) {
-	fi, err := fsStat(ctx, statFile)
-	if err != nil {
-		err = osErrToFileErr(err)
-		if err != errFileNotFound {
-			logger.LogIf(ctx, err)
-		}
-		return nil, err
-	}
-	if fi.IsDir() {
-		return nil, errFileNotFound
-	}
-	return fi, nil
-}
-
-// Returns if the filePath is a regular file.
-func fsIsFile(ctx context.Context, filePath string) bool {
-	fi, err := fsStat(ctx, filePath)
-	if err != nil {
-		return false
-	}
-	return fi.Mode().IsRegular()
-}
-
-// Opens the file at given path, optionally from an offset. Upon success returns
-// a readable stream and the size of the readable stream.
-func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadCloser, int64, error) {
-	if readPath == "" || offset < 0 {
-		logger.LogIf(ctx, errInvalidArgument)
-		return nil, 0, errInvalidArgument
-	}
-	if err := checkPathLength(readPath); err != nil {
-		logger.LogIf(ctx, err)
-		return nil, 0, err
-	}
-
-	fr, err := os.Open(readPath)
-	if err != nil {
-		return nil, 0, osErrToFileErr(err)
-	}
-
-	// Stat to get the size of the file at path.
-	st, err := fr.Stat()
-	if err != nil {
-		fr.Close()
-		err = osErrToFileErr(err)
-		if err != errFileNotFound {
-			logger.LogIf(ctx, err)
-		}
-		return nil, 0, err
-	}
-
-	// Verify if its not a regular file, since subsequent Seek is undefined.
-	if !st.Mode().IsRegular() {
-		fr.Close()
-		return nil, 0, errIsNotRegular
-	}
-
-	// Seek to the requested offset.
-	if offset > 0 {
-		_, err = fr.Seek(offset, io.SeekStart)
-		if err != nil {
-			fr.Close()
-			logger.LogIf(ctx, err)
-			return nil, 0, err
-		}
-	}
-
-	// Success.
-	return fr, st.Size(), nil
-}
-
-// Creates a file and copies data from incoming reader.
-func fsCreateFile(ctx context.Context, filePath string, reader io.Reader, fallocSize int64) (int64, error) {
-	if filePath == "" || reader == nil {
-		logger.LogIf(ctx, errInvalidArgument)
-		return 0, errInvalidArgument
-	}
-
-	if err := checkPathLength(filePath); err != nil {
-		logger.LogIf(ctx, err)
-		return 0, err
-	}
-
-	if err := mkdirAll(pathutil.Dir(filePath), 0o777); err != nil {
-		switch {
-		case osIsPermission(err):
-			return 0, errFileAccessDenied
-		case osIsExist(err):
-			return 0, errFileAccessDenied
-		case isSysErrIO(err):
-			return 0, errFaultyDisk
-		case isSysErrInvalidArg(err):
-			return 0, errUnsupportedDisk
-		case isSysErrNoSpace(err):
-			return 0, errDiskFull
-		}
-		return 0, err
-	}
-
-	flags := os.O_CREATE | os.O_WRONLY
-	if globalFSOSync {
-		flags |= os.O_SYNC
-	}
-	writer, err := lock.Open(filePath, flags, 0o666)
-	if err != nil {
-		return 0, osErrToFileErr(err)
-	}
-	defer writer.Close()
-
-	bytesWritten, err := xioutil.Copy(writer, reader)
-	if err != nil {
-		logger.LogIf(ctx, err)
-		return 0, err
-	}
-
-	return bytesWritten, nil
-}
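
Stripped of MinIO's error classification, the create path above reduces to: ensure the parent directories exist, open the file for write (with O_SYNC when globalFSOSync is set), and copy the reader in. A stdlib-only reduction (createFile is illustrative, not part of the deleted code, which additionally routes through the internal lock and ioutil packages):

	// createFile mirrors the core of fsCreateFile using only the standard library.
	func createFile(filePath string, reader io.Reader, sync bool) (int64, error) {
		if err := os.MkdirAll(pathutil.Dir(filePath), 0o777); err != nil {
			return 0, err
		}
		flags := os.O_CREATE | os.O_WRONLY
		if sync {
			flags |= os.O_SYNC // Same effect as the globalFSOSync toggle above.
		}
		w, err := os.OpenFile(filePath, flags, 0o666)
		if err != nil {
			return 0, err
		}
		defer w.Close()
		return io.Copy(w, reader)
	}
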
-
-// Renames source path to destination path, creates all the
-// missing parents if they don't exist.
-func fsRenameFile(ctx context.Context, sourcePath, destPath string) error {
-	if err := checkPathLength(sourcePath); err != nil {
-		logger.LogIf(ctx, err)
-		return err
-	}
-	if err := checkPathLength(destPath); err != nil {
-		logger.LogIf(ctx, err)
-		return err
-	}
-
-	if err := renameAll(sourcePath, destPath); err != nil {
-		logger.LogIf(ctx, err)
-		return err
-	}
-
-	return nil
-}
-
-func deleteFile(basePath, deletePath string, recursive bool) error {
-	if basePath == "" || deletePath == "" {
-		return nil
-	}
-	isObjectDir := HasSuffix(deletePath, SlashSeparator)
-	basePath = pathutil.Clean(basePath)
-	deletePath = pathutil.Clean(deletePath)
-	if !strings.HasPrefix(deletePath, basePath) || deletePath == basePath {
-		return nil
-	}
-
-	var err error
-	if recursive {
-		os.RemoveAll(deletePath)
-	} else {
-		err = os.Remove(deletePath)
-	}
-	if err != nil {
-		switch {
-		case isSysErrNotEmpty(err):
-			// if object is a directory, but if its not empty
-			// return FileNotFound to indicate its an empty prefix.
-			if isObjectDir {
-				return errFileNotFound
-			}
-			// Ignore errors if the directory is not empty. The server relies on
-			// this functionality, and sometimes uses recursion that should not
-			// error on parent directories.
-			return nil
-		case osIsNotExist(err):
-			return errFileNotFound
-		case osIsPermission(err):
-			return errFileAccessDenied
-		case isSysErrIO(err):
-			return errFaultyDisk
-		default:
-			return err
-		}
-	}
-
-	deletePath = pathutil.Dir(deletePath)
-
-	// Delete parent directory obviously not recursively. Errors for
-	// parent directories shouldn't trickle down.
-	deleteFile(basePath, deletePath, false)
-
-	return nil
-}
-
-// fsDeleteFile is a wrapper for deleteFile(), after checking the path length.
-func fsDeleteFile(ctx context.Context, basePath, deletePath string) error {
-	if err := checkPathLength(basePath); err != nil {
-		logger.LogIf(ctx, err)
-		return err
-	}
-
-	if err := checkPathLength(deletePath); err != nil {
-		logger.LogIf(ctx, err)
-		return err
-	}
-
-	if err := deleteFile(basePath, deletePath, false); err != nil {
-		if err != errFileNotFound {
-			logger.LogIf(ctx, err)
-		}
-		return err
-	}
-	return nil
-}
-
-// fsRemoveMeta safely removes a locked file and takes care of Windows special case
-func fsRemoveMeta(ctx context.Context, basePath, deletePath, tmpDir string) error {
-	// Special case for windows please read through.
-	if runtime.GOOS == globalWindowsOSName {
-		// Ordinarily windows does not permit deletion or renaming of files still
-		// in use, but if all open handles to that file were opened with FILE_SHARE_DELETE
-		// then it can permit renames and deletions of open files.
-		//
-		// There are however some gotchas with this, and it is worth listing them here.
-		// Firstly, Windows never allows you to really delete an open file, rather it is
-		// flagged as delete pending and its entry in its directory remains visible
-		// (though no new file handles may be opened to it) and when the very last
-		// open handle to the file in the system is closed, only then is it truly
-		// deleted. Well, actually only sort of truly deleted, because Windows only
-		// appears to remove the file entry from the directory, but in fact that
-		// entry is merely hidden and actually still exists and attempting to create
-		// a file with the same name will return an access denied error. How long it
-		// silently exists for depends on a range of factors, but put it this way:
-		// if your code loops creating and deleting the same file name as you might
-		// when operating a lock file, you're going to see lots of random spurious
-		// access denied errors and truly dismal lock file performance compared to POSIX.
-		//
-		// We work-around these un-POSIX file semantics by taking a dual step to
-		// deleting files. Firstly, it renames the file to tmp location into multipartTmpBucket
-		// We always open files with FILE_SHARE_DELETE permission enabled, with that
-		// flag Windows permits renaming and deletion, and because the name was changed
-		// to a very random name somewhere not in its origin directory before deletion,
-		// you don't see those unexpected random errors when creating files with the
-		// same name as a recently deleted file as you do anywhere else on Windows.
-		// Because the file is probably not in its original containing directory any more,
-		// deletions of that directory will not fail with "directory not empty" as they
-		// otherwise normally would either.
-
-		tmpPath := pathJoin(tmpDir, mustGetUUID())
-
-		fsRenameFile(ctx, deletePath, tmpPath)
-
-		// Proceed to deleting the directory if empty
-		fsDeleteFile(ctx, basePath, pathutil.Dir(deletePath))
-
-		// Finally delete the renamed file.
-		return fsDeleteFile(ctx, tmpDir, tmpPath)
-	}
-	return fsDeleteFile(ctx, basePath, deletePath)
-}
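
The same rename-before-delete trick works with the helpers already visible in this file: move the doomed file under a random name into a scratch directory, then remove it there, so its original name is immediately reusable. A condensed sketch (renameThenRemove is illustrative, not part of the deleted code):

	// renameThenRemove deletes filePath by first renaming it into tmpDir,
	// sidestepping Windows' delete-pending semantics described above.
	func renameThenRemove(filePath, tmpDir string) error {
		tmpPath := pathJoin(tmpDir, mustGetUUID()) // Random name outside the origin directory.
		if err := os.Rename(filePath, tmpPath); err != nil {
			return err
		}
		return os.Remove(tmpPath)
	}
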
diff --git a/cmd/fs-v1-helpers_test.go b/cmd/fs-v1-helpers_test.go
deleted file mode 100644
index 26ba8456e..000000000
--- a/cmd/fs-v1-helpers_test.go
+++ /dev/null
@@ -1,549 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"bytes"
-	"io"
-	"os"
-	"path"
-	"testing"
-
-	"github.com/minio/minio/internal/lock"
-)
-
-func TestFSRenameFile(t *testing.T) {
-	// create xlStorage test setup
-	_, path, err := newXLStorageTestSetup(t)
-	if err != nil {
-		t.Fatalf("Unable to create xlStorage test setup, %s", err)
-	}
-
-	if err = fsMkdir(GlobalContext, pathJoin(path, "testvolume1")); err != nil {
-		t.Fatal(err)
-	}
-	if err = fsRenameFile(GlobalContext, pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); err != nil {
-		t.Fatal(err)
-	}
-	if err = fsRenameFile(GlobalContext, pathJoin(path, "testvolume1"), pathJoin(path, "testvolume2")); err != errFileNotFound {
-		t.Fatal(err)
-	}
-	if err = fsRenameFile(GlobalContext, pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"), pathJoin(path, "testvolume2")); err != errFileNameTooLong {
-		t.Fatal("Unexpected error", err)
-	}
-	if err = fsRenameFile(GlobalContext, pathJoin(path, "testvolume1"), pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); err != errFileNameTooLong {
-		t.Fatal("Unexpected error", err)
-	}
-}
-
-func TestFSStats(t *testing.T) {
-	// create xlStorage test setup
-	_, path, err := newXLStorageTestSetup(t)
-	if err != nil {
-		t.Fatalf("Unable to create xlStorage test setup, %s", err)
-	}
-
-	// Setup test environment.
-
-	if err = fsMkdir(GlobalContext, ""); err != errInvalidArgument {
-		t.Fatal("Unexpected error", err)
-	}
-
-	if err = fsMkdir(GlobalContext, pathJoin(path, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001")); err != errFileNameTooLong {
-		t.Fatal("Unexpected error", err)
-	}
-
-	if err = fsMkdir(GlobalContext, pathJoin(path, "success-vol")); err != nil {
-		t.Fatalf("Unable to create volume, %s", err)
-	}
-
-	reader := bytes.NewReader([]byte("Hello, world"))
-	if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, 0); err != nil {
-		t.Fatalf("Unable to create file, %s", err)
-	}
-	// Seek back.
-	reader.Seek(0, 0)
-
-	if err = fsMkdir(GlobalContext, pathJoin(path, "success-vol", "success-file")); err != errVolumeExists {
-		t.Fatal("Unexpected error", err)
-	}
-
-	if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "path/to/success-file"), reader, 0); err != nil {
-		t.Fatalf("Unable to create file, %s", err)
-	}
-	// Seek back.
-	reader.Seek(0, 0)
-
-	testCases := []struct {
-		srcFSPath   string
-		srcVol      string
-		srcPath     string
-		expectedErr error
-	}{
-		// Test case - 1.
-		// Test case with valid inputs, expected to pass.
-		{
-			srcFSPath:   path,
-			srcVol:      "success-vol",
-			srcPath:     "success-file",
-			expectedErr: nil,
-		},
-		// Test case - 2.
-		// Test case with valid inputs, expected to pass.
-		{
-			srcFSPath:   path,
-			srcVol:      "success-vol",
-			srcPath:     "path/to/success-file",
-			expectedErr: nil,
-		},
-		// Test case - 3.
-		// Test case with non-existent file.
-		{
-			srcFSPath:   path,
-			srcVol:      "success-vol",
-			srcPath:     "nonexistent-file",
-			expectedErr: errFileNotFound,
-		},
-		// Test case - 4.
-		// Test case with non-existent file path.
-		{
-			srcFSPath:   path,
-			srcVol:      "success-vol",
-			srcPath:     "path/2/success-file",
-			expectedErr: errFileNotFound,
-		},
-		// Test case - 5.
-		// Test case with path being a directory.
-		{
-			srcFSPath:   path,
-			srcVol:      "success-vol",
-			srcPath:     "path",
-			expectedErr: errFileNotFound,
-		},
-		// Test case - 6.
-		// Test case with src path segment > 255.
-		{
-			srcFSPath:   path,
-			srcVol:      "success-vol",
-			srcPath:     "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
-			expectedErr: errFileNameTooLong,
-		},
-		// Test case - 7.
-		// Test case validate only srcVol exists.
-		{
-			srcFSPath:   path,
-			srcVol:      "success-vol",
-			expectedErr: nil,
-		},
-		// Test case - 8.
-		// Test case validate only srcVol doesn't exist.
-		{
-			srcFSPath:   path,
-			srcVol:      "success-vol-non-existent",
-			expectedErr: errVolumeNotFound,
-		},
-		// Test case - 9.
-		// Test case validate invalid argument.
-		{
-			expectedErr: errInvalidArgument,
-		},
-	}
-
-	for i, testCase := range testCases {
-		if testCase.srcPath != "" {
-			if _, err := fsStatFile(GlobalContext, pathJoin(testCase.srcFSPath, testCase.srcVol,
-				testCase.srcPath)); err != testCase.expectedErr {
-				t.Fatalf("TestErasureStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
-			}
-		} else {
-			if _, err := fsStatVolume(GlobalContext, pathJoin(testCase.srcFSPath, testCase.srcVol)); err != testCase.expectedErr {
-				t.Fatalf("TestFS case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
-			}
-		}
-	}
-}
-
-func TestFSCreateAndOpen(t *testing.T) {
-	// Setup test environment.
-	_, path, err := newXLStorageTestSetup(t)
-	if err != nil {
-		t.Fatalf("Unable to create xlStorage test setup, %s", err)
-	}
-
-	if err = fsMkdir(GlobalContext, pathJoin(path, "success-vol")); err != nil {
-		t.Fatalf("Unable to create directory, %s", err)
-	}
-
-	if _, err = fsCreateFile(GlobalContext, "", nil, 0); err != errInvalidArgument {
-		t.Fatal("Unexpected error", err)
-	}
-
-	if _, _, err = fsOpenFile(GlobalContext, "", -1); err != errInvalidArgument {
-		t.Fatal("Unexpected error", err)
-	}
-
-	reader := bytes.NewReader([]byte("Hello, world"))
-	if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, 0); err != nil {
-		t.Fatalf("Unable to create file, %s", err)
-	}
-	// Seek back.
-	reader.Seek(0, 0)
-
-	testCases := []struct {
-		srcVol      string
-		srcPath     string
-		expectedErr error
-	}{
-		// Test case - 1.
-		// Test case with segment of the volume name > 255.
-		{
-			srcVol:      "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
-			srcPath:     "success-file",
-			expectedErr: errFileNameTooLong,
-		},
-		// Test case - 2.
-		// Test case with src path segment > 255.
-		{
-			srcVol:      "success-vol",
-			srcPath:     "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
-			expectedErr: errFileNameTooLong,
-		},
-	}
-
-	for i, testCase := range testCases {
-		_, err = fsCreateFile(GlobalContext, pathJoin(path, testCase.srcVol, testCase.srcPath), reader, 0)
-		if err != testCase.expectedErr {
-			t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
-		}
-		_, _, err = fsOpenFile(GlobalContext, pathJoin(path, testCase.srcVol, testCase.srcPath), 0)
-		if err != testCase.expectedErr {
-			t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
-		}
-	}
-
-	// Attempt to open a directory.
-	if _, _, err = fsOpenFile(GlobalContext, pathJoin(path), 0); err != errIsNotRegular {
-		t.Fatal("Unexpected error", err)
-	}
-}
-
-func TestFSDeletes(t *testing.T) {
-	// create xlStorage test setup
-	_, path, err := newXLStorageTestSetup(t)
-	if err != nil {
-		t.Fatalf("Unable to create xlStorage test setup, %s", err)
-	}
-
-	// Setup test environment.
-	if err = fsMkdir(GlobalContext, pathJoin(path, "success-vol")); err != nil {
-		t.Fatalf("Unable to create directory, %s", err)
-	}
-
-	reader := bytes.NewReader([]byte("Hello, world"))
-	if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, reader.Size()); err != nil {
-		t.Fatalf("Unable to create file, %s", err)
-	}
-	// Seek back.
-	reader.Seek(0, io.SeekStart)
-
-	// folder is not empty
-	err = fsMkdir(GlobalContext, pathJoin(path, "success-vol", "not-empty"))
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = os.WriteFile(pathJoin(path, "success-vol", "not-empty", "file"), []byte("data"), 0o777)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// recursive
-	if err = fsMkdir(GlobalContext, pathJoin(path, "success-vol", "parent")); err != nil {
-		t.Fatal(err)
-	}
-	if err = fsMkdir(GlobalContext, pathJoin(path, "success-vol", "parent", "dir")); err != nil {
-		t.Fatal(err)
-	}
-
-	testCases := []struct {
-		basePath    string
-		srcVol      string
-		srcPath     string
-		expectedErr error
-	}{
-		// valid case with existing volume and file to delete.
-		{
-			basePath:    path,
-			srcVol:      "success-vol",
-			srcPath:     "success-file",
-			expectedErr: nil,
-		},
-		// The file was deleted in the last case, so Delete should fail.
-		{
-			basePath:    path,
-			srcVol:      "success-vol",
-			srcPath:     "success-file",
-			expectedErr: errFileNotFound,
-		},
-		// Test case with segment of the volume name > 255.
-		{
-			basePath:    path,
-			srcVol:      "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
-			srcPath:     "success-file",
-			expectedErr: errFileNameTooLong,
-		},
-		// Test case with src path segment > 255.
-		{
-			basePath:    path,
-			srcVol:      "success-vol",
-			srcPath:     "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
-			expectedErr: errFileNameTooLong,
-		},
-		// Base path is way too long.
-		{
-			basePath:    "path03333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333",
-			srcVol:      "success-vol",
-			srcPath:     "object",
-			expectedErr: errFileNameTooLong,
-		},
-		// Directory is not empty. Should give nil, but won't delete.
-		{
-			basePath:    path,
-			srcVol:      "success-vol",
-			srcPath:     "not-empty",
-			expectedErr: nil,
-		},
-		// Should delete recursively.
-		{
-			basePath:    path,
-			srcVol:      "success-vol",
-			srcPath:     pathJoin("parent", "dir"),
-			expectedErr: nil,
-		},
-	}
-
-	for i, testCase := range testCases {
-		if err = fsDeleteFile(GlobalContext, testCase.basePath, pathJoin(testCase.basePath, testCase.srcVol, testCase.srcPath)); err != testCase.expectedErr {
-			t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
-		}
-	}
-}
-
-func BenchmarkFSDeleteFile(b *testing.B) {
-	// create xlStorage test setup
-	_, path, err := newXLStorageTestSetup(b)
-	if err != nil {
-		b.Fatalf("Unable to create xlStorage test setup, %s", err)
-	}
-
-	// Setup test environment.
-	if err = fsMkdir(GlobalContext, pathJoin(path, "benchmark")); err != nil {
-		b.Fatalf("Unable to create directory, %s", err)
-	}
-
-	benchDir := pathJoin(path, "benchmark")
-	filename := pathJoin(benchDir, "file.txt")
-
-	b.ResetTimer()
-	// We need to create and delete the file sequentially inside the benchmark.
-	for i := 0; i < b.N; i++ {
-		b.StopTimer()
-		err = os.WriteFile(filename, []byte("data"), 0o777)
-		if err != nil {
-			b.Fatal(err)
-		}
-		b.StartTimer()
-
-		err = fsDeleteFile(GlobalContext, benchDir, filename)
-		if err != nil {
-			b.Fatal(err)
-		}
-	}
-}
-
-// Tests fs removes.
-func TestFSRemoves(t *testing.T) {
-	// create xlStorage test setup
-	_, path, err := newXLStorageTestSetup(t)
-	if err != nil {
-		t.Fatalf("Unable to create xlStorage test setup, %s", err)
-	}
-
-	// Setup test environment.
-	if err = fsMkdir(GlobalContext, pathJoin(path, "success-vol")); err != nil {
-		t.Fatalf("Unable to create directory, %s", err)
-	}
-
-	reader := bytes.NewReader([]byte("Hello, world"))
-	if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file"), reader, 0); err != nil {
-		t.Fatalf("Unable to create file, %s", err)
-	}
-	// Seek back.
-	reader.Seek(0, 0)
-
-	if _, err = fsCreateFile(GlobalContext, pathJoin(path, "success-vol", "success-file-new"), reader, 0); err != nil {
-		t.Fatalf("Unable to create file, %s", err)
-	}
-	// Seek back.
-	reader.Seek(0, 0)
-
-	testCases := []struct {
-		srcFSPath   string
-		srcVol      string
-		srcPath     string
-		expectedErr error
-	}{
-		// Test case - 1.
-		// valid case with existing volume and file to delete.
-		{
-			srcFSPath:   path,
-			srcVol:      "success-vol",
-			srcPath:     "success-file",
-			expectedErr: nil,
-		},
-		// Test case - 2.
-		// The file was deleted in the last case, so Delete should fail.
-		{
-			srcFSPath:   path,
-			srcVol:      "success-vol",
-			srcPath:     "success-file",
-			expectedErr: errFileNotFound,
-		},
-		// Test case - 3.
-		// Test case with segment of the volume name > 255.
-		{
-			srcFSPath:   path,
-			srcVol:      "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
-			srcPath:     "success-file",
-			expectedErr: errFileNameTooLong,
-		},
-		// Test case - 4.
-		// Test case with src path segment > 255.
-		{
-			srcFSPath:   path,
-			srcVol:      "success-vol",
-			srcPath:     "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
-			expectedErr: errFileNameTooLong,
-		},
-		// Test case - 5.
-		// Test case with src path empty.
-		{
-			srcFSPath:   path,
-			srcVol:      "success-vol",
-			expectedErr: errVolumeNotEmpty,
-		},
-		// Test case - 6.
-		// Test case with src path empty.
-		{
-			srcFSPath:   path,
-			srcVol:      "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001",
-			expectedErr: errFileNameTooLong,
-		},
-		// Test case - 7.
-		// Test case with src path empty.
-		{
-			srcFSPath:   path,
-			srcVol:      "non-existent",
-			expectedErr: errVolumeNotFound,
-		},
-		// Test case - 8.
-		// Test case with src and volume path empty.
-		{
-			expectedErr: errInvalidArgument,
-		},
-	}
-
-	for i, testCase := range testCases {
-		if testCase.srcPath != "" {
-			if err = fsRemoveFile(GlobalContext, pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); err != testCase.expectedErr {
-				t.Errorf("Test case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
-			}
-		} else {
-			if err = fsRemoveDir(GlobalContext, pathJoin(testCase.srcFSPath, testCase.srcVol, testCase.srcPath)); err != testCase.expectedErr {
-				t.Error(err)
-			}
-		}
-	}
-
-	if err = fsRemoveAll(GlobalContext, pathJoin(path, "success-vol")); err != nil {
-		t.Fatal(err)
-	}
-
-	if err = fsRemoveAll(GlobalContext, ""); err != errInvalidArgument {
-		t.Fatal(err)
-	}
-
-	if err = fsRemoveAll(GlobalContext, "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001"); err != errFileNameTooLong {
-		t.Fatal(err)
-	}
-}
-
-func TestFSRemoveMeta(t *testing.T) {
-	// create xlStorage test setup
-	_, fsPath, err := newXLStorageTestSetup(t)
-	if err != nil {
-		t.Fatalf("Unable to create xlStorage test setup, %s", err)
-	}
-
-	// Setup test environment.
-	if err = fsMkdir(GlobalContext, pathJoin(fsPath, "success-vol")); err != nil {
-		t.Fatalf("Unable to create directory, %s", err)
-	}
-
-	filePath := pathJoin(fsPath, "success-vol", "success-file")
-
-	reader := bytes.NewReader([]byte("Hello, world"))
-	if _, err = fsCreateFile(GlobalContext, filePath, reader, 0); err != nil {
-		t.Fatalf("Unable to create file, %s", err)
-	}
-
-	rwPool := &fsIOPool{
-		readersMap: make(map[string]*lock.RLockedFile),
-	}
-
-	if _, err := rwPool.Open(filePath); err != nil {
-		t.Fatalf("Unable to lock file %s", filePath)
-	}
-
-	defer rwPool.Close(filePath)
-
-	tmpDir := t.TempDir()
-
-	if err := fsRemoveMeta(GlobalContext, fsPath, filePath, tmpDir); err != nil {
-		t.Fatalf("Unable to remove file, %s", err)
-	}
-
-	if _, err := os.Stat((filePath)); !osIsNotExist(err) {
-		t.Fatalf("`%s` file found though it should have been deleted.", filePath)
-	}
-
-	if _, err := os.Stat((path.Dir(filePath))); !osIsNotExist(err) {
-		t.Fatalf("`%s` parent directory found though it should have been deleted.", filePath)
-	}
-}
-
-func TestFSIsFile(t *testing.T) {
-	filePath := pathJoin(t.TempDir(), "tmpfile")
-
-	if err := os.WriteFile(filePath, nil, 0o777); err != nil {
-		t.Fatalf("Unable to create file %s", filePath)
-	}
-
-	if !fsIsFile(GlobalContext, filePath) {
-		t.Fatalf("Expected %s to be a file", filePath)
-	}
-}
diff --git a/cmd/fs-v1-metadata.go b/cmd/fs-v1-metadata.go
deleted file mode 100644
index dee0a3353..000000000
--- a/cmd/fs-v1-metadata.go
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"context"
-	"encoding/hex"
-	"encoding/json"
-	"io"
-	"os"
-	pathutil "path"
-
-	jsoniter "github.com/json-iterator/go"
-	"github.com/minio/minio/internal/amztime"
-	xhttp "github.com/minio/minio/internal/http"
-	"github.com/minio/minio/internal/lock"
-	"github.com/minio/minio/internal/logger"
-	"github.com/minio/pkg/mimedb"
-)
-
-// FS format, and object metadata.
-const (
-	// fs.json object metadata.
-	fsMetaJSONFile = "fs.json"
-)
-
-// FS metadata constants.
-const (
-	// FS backend meta 1.0.0 version.
-	fsMetaVersion100 = "1.0.0"
-
-	// FS backend meta 1.0.1 version.
-	fsMetaVersion101 = "1.0.1"
-
-	// FS backend meta 1.0.2
-	// Removed the fields "Format" and "MinIO" from fsMetaV1 as they were unused. Added "Checksum" field - to be used in future for bit-rot protection.
-	fsMetaVersion = "1.0.2"
-
-	// Add more constants here.
-)
-
-// FSChecksumInfoV1 - carries checksums of individual blocks on disk.
-type FSChecksumInfoV1 struct {
-	Algorithm string
-	Blocksize int64
-	Hashes    [][]byte
-}
-
-// MarshalJSON marshals the FSChecksumInfoV1 struct
-func (c FSChecksumInfoV1) MarshalJSON() ([]byte, error) {
-	type checksuminfo struct {
-		Algorithm string   `json:"algorithm"`
-		Blocksize int64    `json:"blocksize"`
-		Hashes    []string `json:"hashes"`
-	}
-	var hashes []string
-	for _, h := range c.Hashes {
-		hashes = append(hashes, hex.EncodeToString(h))
-	}
-	info := checksuminfo{
-		Algorithm: c.Algorithm,
-		Hashes:    hashes,
-		Blocksize: c.Blocksize,
-	}
-	return json.Marshal(info)
-}
-
-// UnmarshalJSON unmarshals the given data into the FSChecksumInfoV1 struct
-func (c *FSChecksumInfoV1) UnmarshalJSON(data []byte) error {
-	type checksuminfo struct {
-		Algorithm string   `json:"algorithm"`
-		Blocksize int64    `json:"blocksize"`
-		Hashes    []string `json:"hashes"`
-	}
-
-	var info checksuminfo
-	json := jsoniter.ConfigCompatibleWithStandardLibrary
-	err := json.Unmarshal(data, &info)
-	if err != nil {
-		return err
-	}
-	c.Algorithm = info.Algorithm
-	c.Blocksize = info.Blocksize
-	var hashes [][]byte
-	for _, hashStr := range info.Hashes {
-		h, err := hex.DecodeString(hashStr)
-		if err != nil {
-			return err
-		}
-		hashes = append(hashes, h)
-	}
-	c.Hashes = hashes
-	return nil
-}
-
-// A fsMetaV1 represents a metadata header mapping keys to sets of values.
-type fsMetaV1 struct {
-	Version string `json:"version"`
-	// checksums of blocks on disk.
-	Checksum FSChecksumInfoV1 `json:"checksum,omitempty"`
-	// Metadata map for current object.
-	Meta map[string]string `json:"meta,omitempty"`
-	// parts info for current object - used in encryption.
-	Parts []ObjectPartInfo `json:"parts,omitempty"`
-}
-
-// IsValid - tells if the format is sane by validating the version
-// string and format style.
-func (m fsMetaV1) IsValid() bool {
-	return isFSMetaValid(m.Version)
-}
-
-// Verifies if the backend format metadata is same by validating
-// the version string.
-func isFSMetaValid(version string) bool {
-	return (version == fsMetaVersion || version == fsMetaVersion100 || version == fsMetaVersion101)
-}
-
-// Converts metadata to object info.
-func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo {
-	if len(m.Meta) == 0 {
-		m.Meta = make(map[string]string)
-	}
-
-	// Guess content-type from the extension if possible.
-	if m.Meta["content-type"] == "" {
-		m.Meta["content-type"] = mimedb.TypeByExtension(pathutil.Ext(object))
-	}
-
-	if HasSuffix(object, SlashSeparator) {
-		m.Meta["etag"] = emptyETag // For directories etag is d41d8cd98f00b204e9800998ecf8427e
-		m.Meta["content-type"] = "application/octet-stream"
-	}
-
-	objInfo := ObjectInfo{
-		Bucket: bucket,
-		Name:   object,
-	}
-
-	// We set file info only if its valid.
-	objInfo.ModTime = timeSentinel
-	if fi != nil {
-		objInfo.ModTime = fi.ModTime()
-		objInfo.Size = fi.Size()
-		if fi.IsDir() {
-			// Directory is always 0 bytes in S3 API, treat it as such.
-			objInfo.Size = 0
-			objInfo.IsDir = fi.IsDir()
-		}
-	}
-
-	objInfo.ETag = extractETag(m.Meta)
-
-	objInfo.ContentType = m.Meta["content-type"]
-	objInfo.ContentEncoding = m.Meta["content-encoding"]
-	if storageClass, ok := m.Meta[xhttp.AmzStorageClass]; ok {
-		objInfo.StorageClass = storageClass
-	} else {
-		objInfo.StorageClass = globalMinioDefaultStorageClass
-	}
-
-	if exp, ok := m.Meta["expires"]; ok {
-		if t, e := amztime.ParseHeader(exp); e == nil {
-			objInfo.Expires = t.UTC()
-		}
-	}
-
-	// Add user tags to the object info
-	objInfo.UserTags = m.Meta[xhttp.AmzObjectTagging]
-
-	// etag/md5Sum has already been extracted. We need to
-	// remove to avoid it from appearing as part of
-	// response headers. e.g, X-Minio-* or X-Amz-*.
-	// Tags have also been extracted, we remove that as well.
-	objInfo.UserDefined = cleanMetadata(m.Meta)
-
-	// All the parts per object.
-	objInfo.Parts = m.Parts
-
-	// Success..
-	return objInfo
-}
-
-func (m *fsMetaV1) WriteTo(lk *lock.LockedFile) (n int64, err error) {
-	if err = jsonSave(lk, m); err != nil {
-		return 0, err
-	}
-	fi, err := lk.Stat()
-	if err != nil {
-		return 0, err
-	}
-	return fi.Size(), nil
-}
-
-func (m *fsMetaV1) ReadFrom(ctx context.Context, lk *lock.LockedFile) (n int64, err error) {
-	var fsMetaBuf []byte
-	fi, err := lk.Stat()
-	if err != nil {
-		logger.LogIf(ctx, err)
-		return 0, err
-	}
-
-	fsMetaBuf, err = io.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
-	if err != nil {
-		logger.LogIf(ctx, err)
-		return 0, err
-	}
-
-	if len(fsMetaBuf) == 0 {
-		return 0, io.EOF
-	}
-
-	json := jsoniter.ConfigCompatibleWithStandardLibrary
-	if err = json.Unmarshal(fsMetaBuf, m); err != nil {
-		return 0, err
-	}
-
-	// Verify if the format is valid, return corrupted format
-	// for unrecognized formats.
-	if !isFSMetaValid(m.Version) {
-		logger.GetReqInfo(ctx).AppendTags("file", lk.Name())
-		logger.LogIf(ctx, errCorruptedFormat)
-		return 0, errCorruptedFormat
-	}
-
-	// Success.
-	return int64(len(fsMetaBuf)), nil
-}
-
-// newFSMetaV1 - initializes new fsMetaV1.
-func newFSMetaV1() (fsMeta fsMetaV1) {
-	fsMeta = fsMetaV1{}
-	fsMeta.Version = fsMetaVersion
-	return fsMeta
-}
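
Given the struct tags above, a freshly initialized fsMetaV1 round-trips through WriteTo as a small fs.json document: version is always fsMetaVersion, the zero-valued checksum is serialized by the custom MarshalJSON, and a nil Parts slice is dropped by omitempty. A sketch of the resulting bytes (exampleFSMetaJSON is illustrative, not part of the deleted code):

	// exampleFSMetaJSON shows roughly what WriteTo persists for a fresh object.
	func exampleFSMetaJSON() ([]byte, error) {
		m := newFSMetaV1() // Version is fsMetaVersion ("1.0.2").
		m.Meta = map[string]string{"content-type": "application/octet-stream"}
		// Produces: {"version":"1.0.2",
		//            "checksum":{"algorithm":"","blocksize":0,"hashes":null},
		//            "meta":{"content-type":"application/octet-stream"}}
		return json.Marshal(m)
	}
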
diff --git a/cmd/fs-v1-metadata_test.go b/cmd/fs-v1-metadata_test.go
deleted file mode 100644
index 6a7c4a42c..000000000
--- a/cmd/fs-v1-metadata_test.go
+++ /dev/null
@@ -1,164 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"bytes"
-	"os"
-	"path/filepath"
-	"reflect"
-	"testing"
-)
-
-// Tests ToObjectInfo function.
-func TestFSV1MetadataObjInfo(t *testing.T) {
-	fsMeta := newFSMetaV1()
-	objInfo := fsMeta.ToObjectInfo("testbucket", "testobject", nil)
-	if objInfo.Size != 0 {
-		t.Fatal("Unexpected object info value for Size", objInfo.Size)
-	}
-	if !objInfo.ModTime.Equal(timeSentinel) {
-		t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime)
-	}
-	if objInfo.IsDir {
-		t.Fatal("Unexpected object info value for IsDir", objInfo.IsDir)
-	}
-	if !objInfo.Expires.IsZero() {
-		t.Fatal("Unexpected object info value for Expires ", objInfo.Expires)
-	}
-}
-
-// TestReadFSMetadata - readFSMetadata testing with a healthy and faulty disk
-func TestReadFSMetadata(t *testing.T) {
-	t.Skip()
-
-	disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
-	defer os.RemoveAll(disk)
-
-	obj := initFSObjects(disk, t)
-	fs := obj.(*FSObjects)
-
-	bucketName := "bucket"
-	objectName := "object"
-
-	if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil {
-		t.Fatal("Unexpected err: ", err)
-	}
-	if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil {
-		t.Fatal("Unexpected err: ", err)
-	}
-
-	// Construct the full path of fs.json
-	fsPath := pathJoin(bucketMetaPrefix, bucketName, objectName, "fs.json")
-	fsPath = pathJoin(fs.fsPath, minioMetaBucket, fsPath)
-
-	rlk, err := fs.rwPool.Open(fsPath)
-	if err != nil {
-		t.Fatal("Unexpected error ", err)
-	}
-	defer rlk.Close()
-
-	// Regular fs metadata reading, no errors expected
-	fsMeta := fsMetaV1{}
-	if _, err = fsMeta.ReadFrom(GlobalContext, rlk.LockedFile); err != nil {
-		t.Fatal("Unexpected error ", err)
-	}
-}
-
-// TestWriteFSMetadata - tests of writeFSMetadata with healthy disk.
-func TestWriteFSMetadata(t *testing.T) { - t.Skip() - disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - defer os.RemoveAll(disk) - - obj := initFSObjects(disk, t) - fs := obj.(*FSObjects) - - bucketName := "bucket" - objectName := "object" - - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil { - t.Fatal("Unexpected err: ", err) - } - if _, err := obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{}); err != nil { - t.Fatal("Unexpected err: ", err) - } - - // Construct the full path of fs.json - fsPath := pathJoin(bucketMetaPrefix, bucketName, objectName, "fs.json") - fsPath = pathJoin(fs.fsPath, minioMetaBucket, fsPath) - - rlk, err := fs.rwPool.Open(fsPath) - if err != nil { - t.Fatal("Unexpected error ", err) - } - defer rlk.Close() - - // FS metadata reading, no errors expected (healthy disk) - fsMeta := fsMetaV1{} - _, err = fsMeta.ReadFrom(GlobalContext, rlk.LockedFile) - if err != nil { - t.Fatal("Unexpected error ", err) - } - if fsMeta.Version != fsMetaVersion { - t.Fatalf("Unexpected version %s", fsMeta.Version) - } -} - -func TestFSChecksumV1MarshalJSON(t *testing.T) { - var cs FSChecksumInfoV1 - - testCases := []struct { - checksum FSChecksumInfoV1 - expectedResult string - }{ - {cs, `{"algorithm":"","blocksize":0,"hashes":null}`}, - {FSChecksumInfoV1{Algorithm: "highwayhash", Blocksize: 500}, `{"algorithm":"highwayhash","blocksize":500,"hashes":null}`}, - {FSChecksumInfoV1{Algorithm: "highwayhash", Blocksize: 10, Hashes: [][]byte{[]byte("hello")}}, `{"algorithm":"highwayhash","blocksize":10,"hashes":["68656c6c6f"]}`}, - } - - for _, testCase := range testCases { - data, _ := testCase.checksum.MarshalJSON() - if testCase.expectedResult != string(data) { - t.Fatalf("expected: %v, got: %v", testCase.expectedResult, string(data)) - } - } -} - -func TestFSChecksumV1UnMarshalJSON(t *testing.T) { - var cs FSChecksumInfoV1 - - testCases := []struct { - data []byte - expectedResult FSChecksumInfoV1 - }{ - {[]byte(`{"algorithm":"","blocksize":0,"hashes":null}`), cs}, - {[]byte(`{"algorithm":"highwayhash","blocksize":500,"hashes":null}`), FSChecksumInfoV1{Algorithm: "highwayhash", Blocksize: 500}}, - {[]byte(`{"algorithm":"highwayhash","blocksize":10,"hashes":["68656c6c6f"]}`), FSChecksumInfoV1{Algorithm: "highwayhash", Blocksize: 10, Hashes: [][]byte{[]byte("hello")}}}, - } - - for _, testCase := range testCases { - err := (&cs).UnmarshalJSON(testCase.data) - if err != nil { - t.Fatal("Unexpected error during checksum unmarshalling ", err) - } - if !reflect.DeepEqual(testCase.expectedResult, cs) { - t.Fatalf("expected: %v, got: %v", testCase.expectedResult, cs) - } - } -} diff --git a/cmd/fs-v1-multipart.go b/cmd/fs-v1-multipart.go deleted file mode 100644 index e7cc264eb..000000000 --- a/cmd/fs-v1-multipart.go +++ /dev/null @@ -1,947 +0,0 @@ -// Copyright (c) 2015-2021 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cmd - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - jsoniter "github.com/json-iterator/go" - xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/trie" -) - -const ( - bgAppendsDirName = "bg-appends" - bgAppendsCleanupInterval = 10 * time.Minute -) - -// Returns EXPORT/.minio.sys/multipart/SHA256/UPLOADID -func (fs *FSObjects) getUploadIDDir(bucket, object, uploadID string) string { - return pathJoin(fs.fsPath, minioMetaMultipartBucket, getSHA256Hash([]byte(pathJoin(bucket, object))), uploadID) -} - -// Returns EXPORT/.minio.sys/multipart/SHA256 -func (fs *FSObjects) getMultipartSHADir(bucket, object string) string { - return pathJoin(fs.fsPath, minioMetaMultipartBucket, getSHA256Hash([]byte(pathJoin(bucket, object)))) -} - -// Returns partNumber.etag -func (fs *FSObjects) encodePartFile(partNumber int, etag string, actualSize int64) string { - return fmt.Sprintf("%.5d.%s.%d", partNumber, etag, actualSize) -} - -// Returns partNumber and etag -func (fs *FSObjects) decodePartFile(name string) (partNumber int, etag string, actualSize int64, err error) { - result := strings.Split(name, ".") - if len(result) != 3 { - return 0, "", 0, errUnexpected - } - partNumber, err = strconv.Atoi(result[0]) - if err != nil { - return 0, "", 0, errUnexpected - } - actualSize, err = strconv.ParseInt(result[2], 10, 64) - if err != nil { - return 0, "", 0, errUnexpected - } - return partNumber, result[1], actualSize, nil -} - -// Appends parts to an appendFile sequentially. -func (fs *FSObjects) backgroundAppend(ctx context.Context, bucket, object, uploadID string) { - fs.appendFileMapMu.Lock() - logger.GetReqInfo(ctx).AppendTags("uploadID", uploadID) - file := fs.appendFileMap[uploadID] - if file == nil { - file = &fsAppendFile{ - filePath: pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, bgAppendsDirName, fmt.Sprintf("%s.%s", uploadID, mustGetUUID())), - } - fs.appendFileMap[uploadID] = file - } - fs.appendFileMapMu.Unlock() - - file.Lock() - defer file.Unlock() - - // Since we append sequentially nextPartNumber will always be len(file.parts)+1 - nextPartNumber := len(file.parts) + 1 - uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) - - entries, err := readDir(uploadIDDir) - if err != nil { - logger.GetReqInfo(ctx).AppendTags("uploadIDDir", uploadIDDir) - logger.LogIf(ctx, err) - return - } - sort.Strings(entries) - - for _, entry := range entries { - if entry == fs.metaJSONFile { - continue - } - partNumber, etag, actualSize, err := fs.decodePartFile(entry) - if err != nil { - // Skip part files whose name don't match expected format. These could be backend filesystem specific files. - continue - } - if partNumber < nextPartNumber { - // Part already appended. - continue - } - if partNumber > nextPartNumber { - // Required part number is not yet uploaded. 
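The part files that backgroundAppend walks are named by encodePartFile above as partNumber.etag.actualSize, with the part number zero-padded to five digits so that a lexical directory sort is also a numeric sort. A standalone round-trip of that naming scheme, distilled from the deleted helpers:

package main

import (
    "fmt"
    "strconv"
    "strings"
)

// encodePartFile builds "NNNNN.etag.size"; %.5d zero-pads the part number.
func encodePartFile(partNumber int, etag string, actualSize int64) string {
    return fmt.Sprintf("%.5d.%s.%d", partNumber, etag, actualSize)
}

// decodePartFile reverses encodePartFile; names with a different shape
// are rejected so unrelated backend files can be skipped.
func decodePartFile(name string) (int, string, int64, error) {
    parts := strings.Split(name, ".")
    if len(parts) != 3 {
        return 0, "", 0, fmt.Errorf("unexpected part file name %q", name)
    }
    n, err := strconv.Atoi(parts[0])
    if err != nil {
        return 0, "", 0, err
    }
    size, err := strconv.ParseInt(parts[2], 10, 64)
    if err != nil {
        return 0, "", 0, err
    }
    return n, parts[1], size, nil
}

func main() {
    name := encodePartFile(7, "9e107d9d372bb6826bd81d3542a419d6", 5242880)
    fmt.Println(name) // 00007.9e107d9d372bb6826bd81d3542a419d6.5242880
    fmt.Println(decodePartFile(name))
}

backgroundAppend continues below: when the next sequential part has not been uploaded yet, the walk simply returns and retries on a later call: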
- return - } - - partPath := pathJoin(uploadIDDir, entry) - err = xioutil.AppendFile(file.filePath, partPath, globalFSOSync) - if err != nil { - reqInfo := logger.GetReqInfo(ctx).AppendTags("partPath", partPath) - reqInfo.AppendTags("filepath", file.filePath) - logger.LogIf(ctx, err) - return - } - - file.parts = append(file.parts, PartInfo{PartNumber: partNumber, ETag: etag, ActualSize: actualSize}) - nextPartNumber++ - } -} - -// ListMultipartUploads - lists all the uploadIDs for the specified object. -// We do not support prefix based listing. -func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (result ListMultipartsInfo, e error) { - if err := checkListMultipartArgs(ctx, bucket, object, keyMarker, uploadIDMarker, delimiter, fs); err != nil { - return result, toObjectErr(err) - } - - if _, err := fs.statBucketDir(ctx, bucket); err != nil { - return result, toObjectErr(err, bucket) - } - - result.MaxUploads = maxUploads - result.KeyMarker = keyMarker - result.Prefix = object - result.Delimiter = delimiter - result.NextKeyMarker = object - result.UploadIDMarker = uploadIDMarker - - uploadIDs, err := readDir(fs.getMultipartSHADir(bucket, object)) - if err != nil { - if err == errFileNotFound { - result.IsTruncated = false - return result, nil - } - logger.LogIf(ctx, err) - return result, toObjectErr(err) - } - - // S3 spec says uploadIDs should be sorted based on initiated time. ModTime of fs.json - // is the creation time of the uploadID, hence we will use that. - var uploads []MultipartInfo - for _, uploadID := range uploadIDs { - metaFilePath := pathJoin(fs.getMultipartSHADir(bucket, object), uploadID, fs.metaJSONFile) - fi, err := fsStatFile(ctx, metaFilePath) - if err != nil { - return result, toObjectErr(err, bucket, object) - } - uploads = append(uploads, MultipartInfo{ - Object: object, - UploadID: strings.TrimSuffix(uploadID, SlashSeparator), - Initiated: fi.ModTime(), - }) - } - sort.Slice(uploads, func(i int, j int) bool { - return uploads[i].Initiated.Before(uploads[j].Initiated) - }) - - uploadIndex := 0 - if uploadIDMarker != "" { - for uploadIndex < len(uploads) { - if uploads[uploadIndex].UploadID != uploadIDMarker { - uploadIndex++ - continue - } - if uploads[uploadIndex].UploadID == uploadIDMarker { - uploadIndex++ - break - } - uploadIndex++ - } - } - for uploadIndex < len(uploads) { - result.Uploads = append(result.Uploads, uploads[uploadIndex]) - result.NextUploadIDMarker = uploads[uploadIndex].UploadID - uploadIndex++ - if len(result.Uploads) == maxUploads { - break - } - } - - result.IsTruncated = uploadIndex < len(uploads) - - if !result.IsTruncated { - result.NextKeyMarker = "" - result.NextUploadIDMarker = "" - } - - return result, nil -} - -// NewMultipartUpload - initialize a new multipart upload, returns a -// unique id. The unique id returned here is of UUID form, for each -// subsequent request each UUID is unique. -// -// Implements S3 compatible initiate multipart API. 
-func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object string, opts ObjectOptions) (*NewMultipartUploadResult, error) { - if err := checkNewMultipartArgs(ctx, bucket, object, fs); err != nil { - return nil, toObjectErr(err, bucket) - } - - if _, err := fs.statBucketDir(ctx, bucket); err != nil { - return nil, toObjectErr(err, bucket) - } - - uploadID := mustGetUUID() - uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) - - err := mkdirAll(uploadIDDir, 0o755) - if err != nil { - logger.LogIf(ctx, err) - return nil, err - } - - // Initialize fs.json values. - fsMeta := newFSMetaV1() - fsMeta.Meta = opts.UserDefined - - fsMetaBytes, err := json.Marshal(fsMeta) - if err != nil { - logger.LogIf(ctx, err) - return nil, err - } - - if err = os.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0o666); err != nil { - logger.LogIf(ctx, err) - return nil, err - } - return &NewMultipartUploadResult{UploadID: uploadID}, nil -} - -// CopyObjectPart - similar to PutObjectPart but reads data from an existing -// object. Internally incoming data is written to '.minio.sys/tmp' location -// and safely renamed to '.minio.sys/multipart' for reach parts. -func (fs *FSObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, - startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error, -) { - if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID { - return pi, VersionNotFound{ - Bucket: srcBucket, - Object: srcObject, - VersionID: srcOpts.VersionID, - } - } - - if err := checkNewMultipartArgs(ctx, srcBucket, srcObject, fs); err != nil { - return pi, toObjectErr(err) - } - - partInfo, err := fs.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, srcInfo.PutObjReader, dstOpts) - if err != nil { - return pi, toObjectErr(err, dstBucket, dstObject) - } - - return partInfo, nil -} - -// PutObjectPart - reads incoming data until EOF for the part file on -// an ongoing multipart transaction. Internally incoming data is -// written to '.minio.sys/tmp' location and safely renamed to -// '.minio.sys/multipart' for reach parts. -func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, e error) { - if opts.VersionID != "" && opts.VersionID != nullVersionID { - return pi, VersionNotFound{ - Bucket: bucket, - Object: object, - VersionID: opts.VersionID, - } - } - - data := r.Reader - if err := checkPutObjectPartArgs(ctx, bucket, object, fs); err != nil { - return pi, toObjectErr(err, bucket) - } - - if _, err := fs.statBucketDir(ctx, bucket); err != nil { - return pi, toObjectErr(err, bucket) - } - - // Validate input data size and it can never be less than -1. - if data.Size() < -1 { - logger.LogIf(ctx, errInvalidArgument, logger.Application) - return pi, toObjectErr(errInvalidArgument) - } - - uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) - - // Just check if the uploadID exists to avoid copy if it doesn't. 
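PutObjectPart, continuing below, stages incoming data under '.minio.sys/tmp' and only renames the finished file into the multipart directory, so a crashed upload never leaves a partially written part visible. A generic sketch of that stage-then-rename pattern; the paths and helper name are illustrative, not the deleted code's API:

package main

import (
    "fmt"
    "io"
    "os"
    "path/filepath"
    "strings"
)

// writeAtomically stages r under a unique temp name in dir and renames it
// to name only after the write fully succeeds.
func writeAtomically(dir, name string, r io.Reader) error {
    tmp, err := os.CreateTemp(dir, name+".*.tmp")
    if err != nil {
        return err
    }
    defer os.Remove(tmp.Name()) // harmless no-op after a successful rename
    if _, err := io.Copy(tmp, r); err != nil {
        tmp.Close()
        return err
    }
    if err := tmp.Close(); err != nil {
        return err
    }
    // Rename is atomic on POSIX filesystems within the same mount.
    return os.Rename(tmp.Name(), filepath.Join(dir, name))
}

func main() {
    dir, _ := os.MkdirTemp("", "parts")
    defer os.RemoveAll(dir)
    fmt.Println(writeAtomically(dir, "00001.etag.5", strings.NewReader("hello")))
}

The deleted code resumes by statting the upload's fs.json to validate the uploadID: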
- _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile)) - if err != nil { - if err == errFileNotFound || err == errFileAccessDenied { - return pi, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID} - } - return pi, toObjectErr(err, bucket, object) - } - - tmpPartPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, uploadID+"."+mustGetUUID()+"."+strconv.Itoa(partID)) - bytesWritten, err := fsCreateFile(ctx, tmpPartPath, data, data.Size()) - - // Delete temporary part in case of failure. If - // PutObjectPart succeeds then there would be nothing to - // delete in which case we just ignore the error. - defer fsRemoveFile(ctx, tmpPartPath) - - if err != nil { - return pi, toObjectErr(err, minioMetaTmpBucket, tmpPartPath) - } - - // Should return IncompleteBody{} error when reader has fewer - // bytes than specified in request header. - if bytesWritten < data.Size() { - return pi, IncompleteBody{Bucket: bucket, Object: object} - } - - etag := r.MD5CurrentHexString() - - if etag == "" { - etag = GenETag() - } - - partPath := pathJoin(uploadIDDir, fs.encodePartFile(partID, etag, data.ActualSize())) - - // Make sure not to create parent directories if they don't exist - the upload might have been aborted. - if err = Rename(tmpPartPath, partPath); err != nil { - if err == errFileNotFound || err == errFileAccessDenied { - return pi, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID} - } - return pi, toObjectErr(err, minioMetaMultipartBucket, partPath) - } - - go fs.backgroundAppend(ctx, bucket, object, uploadID) - - fi, err := fsStatFile(ctx, partPath) - if err != nil { - return pi, toObjectErr(err, minioMetaMultipartBucket, partPath) - } - return PartInfo{ - PartNumber: partID, - LastModified: fi.ModTime(), - ETag: etag, - Size: fi.Size(), - ActualSize: data.ActualSize(), - }, nil -} - -// GetMultipartInfo returns multipart metadata uploaded during newMultipartUpload, used -// by callers to verify object states -// - encrypted -// - compressed -func (fs *FSObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) (MultipartInfo, error) { - minfo := MultipartInfo{ - Bucket: bucket, - Object: object, - UploadID: uploadID, - } - - if err := checkListPartsArgs(ctx, bucket, object, fs); err != nil { - return minfo, toObjectErr(err) - } - - // Check if bucket exists - if _, err := fs.statBucketDir(ctx, bucket); err != nil { - return minfo, toObjectErr(err, bucket) - } - - uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) - if _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile)); err != nil { - if err == errFileNotFound || err == errFileAccessDenied { - return minfo, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID} - } - return minfo, toObjectErr(err, bucket, object) - } - - fsMetaBytes, err := xioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile)) - if err != nil { - logger.LogIf(ctx, err) - return minfo, toObjectErr(err, bucket, object) - } - - var fsMeta fsMetaV1 - json := jsoniter.ConfigCompatibleWithStandardLibrary - if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil { - return minfo, toObjectErr(err, bucket, object) - } - - minfo.UserDefined = fsMeta.Meta - return minfo, nil -} - -// ListObjectParts - lists all previously uploaded parts for a given -// object and uploadID. Takes additional input of part-number-marker -// to indicate where the listing should begin from. -// -// Implements S3 compatible ListObjectParts API. 
The resulting -// ListPartsInfo structure is unmarshalled directly into XML and -// replied back to the client. -func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, uploadID string, partNumberMarker, maxParts int, opts ObjectOptions) (result ListPartsInfo, e error) { - if err := checkListPartsArgs(ctx, bucket, object, fs); err != nil { - return result, toObjectErr(err) - } - result.Bucket = bucket - result.Object = object - result.UploadID = uploadID - result.MaxParts = maxParts - result.PartNumberMarker = partNumberMarker - - // Check if bucket exists - if _, err := fs.statBucketDir(ctx, bucket); err != nil { - return result, toObjectErr(err, bucket) - } - - uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) - if _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile)); err != nil { - if err == errFileNotFound || err == errFileAccessDenied { - return result, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID} - } - return result, toObjectErr(err, bucket, object) - } - - entries, err := readDir(uploadIDDir) - if err != nil { - logger.LogIf(ctx, err) - return result, toObjectErr(err, bucket) - } - - partsMap := make(map[int]PartInfo) - for _, entry := range entries { - if entry == fs.metaJSONFile { - continue - } - - partNumber, currentEtag, actualSize, derr := fs.decodePartFile(entry) - if derr != nil { - // Skip part files whose name don't match expected format. These could be backend filesystem specific files. - continue - } - - entryStat, err := fsStatFile(ctx, pathJoin(uploadIDDir, entry)) - if err != nil { - continue - } - - currentMeta := PartInfo{ - PartNumber: partNumber, - ETag: currentEtag, - ActualSize: actualSize, - Size: entryStat.Size(), - LastModified: entryStat.ModTime(), - } - - cachedMeta, ok := partsMap[partNumber] - if !ok { - partsMap[partNumber] = currentMeta - continue - } - - if currentMeta.LastModified.After(cachedMeta.LastModified) { - partsMap[partNumber] = currentMeta - } - } - - var parts []PartInfo - for _, partInfo := range partsMap { - parts = append(parts, partInfo) - } - - sort.Slice(parts, func(i int, j int) bool { - return parts[i].PartNumber < parts[j].PartNumber - }) - - i := 0 - if partNumberMarker != 0 { - // If the marker was set, skip the entries till the marker. 
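Earlier in ListObjectParts, duplicate on-disk entries for the same part number (a part that was re-uploaded) are resolved by keeping the entry with the newest modification time. A minimal sketch of that de-duplication step with simplified types:

package main

import (
    "fmt"
    "time"
)

type part struct {
    Number       int
    ETag         string
    LastModified time.Time
}

// dedupe keeps, for each part number, only the most recently modified entry.
func dedupe(entries []part) map[int]part {
    latest := make(map[int]part)
    for _, p := range entries {
        if cur, ok := latest[p.Number]; !ok || p.LastModified.After(cur.LastModified) {
            latest[p.Number] = p
        }
    }
    return latest
}

func main() {
    t0 := time.Now()
    got := dedupe([]part{
        {1, "old", t0},
        {1, "new", t0.Add(time.Second)},
    })
    fmt.Println(got[1].ETag) // new
}

The marker handling continues below, advancing the index just past partNumberMarker: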
- for _, part := range parts { - i++ - if part.PartNumber == partNumberMarker { - break - } - } - } - - partsCount := 0 - for partsCount < maxParts && i < len(parts) { - result.Parts = append(result.Parts, parts[i]) - i++ - partsCount++ - } - if i < len(parts) { - result.IsTruncated = true - if partsCount != 0 { - result.NextPartNumberMarker = result.Parts[partsCount-1].PartNumber - } - } - - rc, _, err := fsOpenFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile), 0) - if err != nil { - if err == errFileNotFound || err == errFileAccessDenied { - return result, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID} - } - return result, toObjectErr(err, bucket, object) - } - defer rc.Close() - - fsMetaBytes, err := io.ReadAll(rc) - if err != nil { - return result, toObjectErr(err, bucket, object) - } - - var fsMeta fsMetaV1 - json := jsoniter.ConfigCompatibleWithStandardLibrary - if err = json.Unmarshal(fsMetaBytes, &fsMeta); err != nil { - return result, toObjectErr(fmt.Errorf("unable to parse %s: error %w", pathJoin(uploadIDDir, fs.metaJSONFile), err), bucket, object) - } - - result.UserDefined = fsMeta.Meta - return result, nil -} - -// CompleteMultipartUpload - completes an ongoing multipart -// transaction after receiving all the parts indicated by the client. -// Returns an md5sum calculated by concatenating all the individual -// md5sums of all the parts. -// -// Implements S3 compatible Complete multipart API. -func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, parts []CompletePart, opts ObjectOptions) (oi ObjectInfo, e error) { - var actualSize int64 - - if err := checkCompleteMultipartArgs(ctx, bucket, object, fs); err != nil { - return oi, toObjectErr(err) - } - - if _, err := fs.statBucketDir(ctx, bucket); err != nil { - return oi, toObjectErr(err, bucket) - } - defer NSUpdated(bucket, object) - - uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) - // Just check if the uploadID exists to avoid copy if it doesn't. - _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile)) - if err != nil { - if err == errFileNotFound || err == errFileAccessDenied { - return oi, InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID} - } - return oi, toObjectErr(err, bucket, object) - } - - // ensure that part ETag is canonicalized to strip off extraneous quotes - for i := range parts { - parts[i].ETag = canonicalizeETag(parts[i].ETag) - } - - fsMeta := fsMetaV1{} - - // Allocate parts similar to incoming slice. - fsMeta.Parts = make([]ObjectPartInfo, len(parts)) - - entries, err := readDir(uploadIDDir) - if err != nil { - logger.GetReqInfo(ctx).AppendTags("uploadIDDir", uploadIDDir) - logger.LogIf(ctx, err) - return oi, err - } - - // Create entries trie structure for prefix match - entriesTrie := trie.NewTrie() - for _, entry := range entries { - entriesTrie.Insert(entry) - } - - // Save consolidated actual size. - var objectActualSize int64 - // Validate all parts and then commit to disk. - for i, part := range parts { - partFile := getPartFile(entriesTrie, part.PartNumber, part.ETag) - if partFile == "" { - return oi, InvalidPart{ - PartNumber: part.PartNumber, - GotETag: part.ETag, - } - } - - // Read the actualSize from the pathFileName. 
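getPartFile, used above, resolves a client-supplied (partNumber, ETag) pair to the on-disk entry whose name starts with "NNNNN.etag.". The deleted code performs this lookup with a trie from github.com/minio/pkg/trie; a linear prefix scan, shown here, is behaviorally equivalent for small listings:

package main

import (
    "fmt"
    "strings"
)

// getPartFile returns the first directory entry matching the encoded
// "partNumber.etag." prefix, or "" if the part was never uploaded.
func getPartFile(entries []string, partNumber int, etag string) string {
    prefix := fmt.Sprintf("%.5d.%s.", partNumber, etag)
    for _, e := range entries {
        if strings.HasPrefix(e, prefix) {
            return e
        }
    }
    return ""
}

func main() {
    entries := []string{"fs.json", "00001.abc.5", "00002.def.5242880"}
    fmt.Println(getPartFile(entries, 2, "def")) // 00002.def.5242880
}

Decoding resumes below: actualSize is the last dot-separated token of the part file name: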
- subParts := strings.Split(partFile, ".") - actualSize, err = strconv.ParseInt(subParts[len(subParts)-1], 10, 64) - if err != nil { - return oi, InvalidPart{ - PartNumber: part.PartNumber, - GotETag: part.ETag, - } - } - - partPath := pathJoin(uploadIDDir, partFile) - - var fi os.FileInfo - fi, err = fsStatFile(ctx, partPath) - if err != nil { - if err == errFileNotFound || err == errFileAccessDenied { - return oi, InvalidPart{} - } - return oi, err - } - - fsMeta.Parts[i] = ObjectPartInfo{ - Number: part.PartNumber, - Size: fi.Size(), - ActualSize: actualSize, - } - - // Consolidate the actual size. - objectActualSize += actualSize - - if i == len(parts)-1 { - break - } - - // All parts except the last part has to be atleast 5MB. - if !isMinAllowedPartSize(actualSize) { - return oi, PartTooSmall{ - PartNumber: part.PartNumber, - PartSize: actualSize, - PartETag: part.ETag, - } - } - } - - appendFallback := true // In case background-append did not append the required parts. - appendFilePath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, "bg-appends", fmt.Sprintf("%s.%s", uploadID, mustGetUUID())) - - // Most of the times appendFile would already be fully appended by now. We call fs.backgroundAppend() - // to take care of the following corner case: - // 1. The last PutObjectPart triggers go-routine fs.backgroundAppend, this go-routine has not started yet. - // 2. Now CompleteMultipartUpload gets called which sees that lastPart is not appended and starts appending - // from the beginning - fs.backgroundAppend(ctx, bucket, object, uploadID) - - fs.appendFileMapMu.Lock() - file := fs.appendFileMap[uploadID] - delete(fs.appendFileMap, uploadID) - fs.appendFileMapMu.Unlock() - - if file != nil { - file.Lock() - defer file.Unlock() - // Verify that appendFile has all the parts. - if len(file.parts) == len(parts) { - for i := range parts { - if parts[i].ETag != file.parts[i].ETag { - break - } - if parts[i].PartNumber != file.parts[i].PartNumber { - break - } - if i == len(parts)-1 { - appendFilePath = file.filePath - appendFallback = false - } - } - } - } - - if appendFallback { - if file != nil { - fsRemoveFile(ctx, file.filePath) - } - for _, part := range parts { - partFile := getPartFile(entriesTrie, part.PartNumber, part.ETag) - if partFile == "" { - logger.LogIf(ctx, fmt.Errorf("%.5d.%s missing will not proceed", - part.PartNumber, part.ETag)) - return oi, InvalidPart{ - PartNumber: part.PartNumber, - GotETag: part.ETag, - } - } - if err = xioutil.AppendFile(appendFilePath, pathJoin(uploadIDDir, partFile), globalFSOSync); err != nil { - logger.LogIf(ctx, err) - return oi, toObjectErr(err) - } - } - } - - // Hold write lock on the object. - destLock := fs.NewNSLock(bucket, object) - lkctx, err := destLock.GetLock(ctx, globalOperationTimeout) - if err != nil { - return oi, err - } - ctx = lkctx.Context() - defer destLock.Unlock(lkctx.Cancel) - - bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix) - fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fs.metaJSONFile) - metaFile, err := fs.rwPool.Write(fsMetaPath) - var freshFile bool - if err != nil { - if !errors.Is(err, errFileNotFound) { - logger.LogIf(ctx, err) - return oi, toObjectErr(err, bucket, object) - } - metaFile, err = fs.rwPool.Create(fsMetaPath) - if err != nil { - logger.LogIf(ctx, err) - return oi, toObjectErr(err, bucket, object) - } - freshFile = true - } - defer metaFile.Close() - defer func() { - // Remove meta file when CompleteMultipart encounters - // any error and it is a fresh file. 
- // - // We should preserve the `fs.json` of any - // existing object - if e != nil && freshFile { - tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID) - fsRemoveMeta(ctx, bucketMetaDir, fsMetaPath, tmpDir) - } - }() - - // Read saved fs metadata for ongoing multipart. - fsMetaBuf, err := xioutil.ReadFile(pathJoin(uploadIDDir, fs.metaJSONFile)) - if err != nil { - logger.LogIf(ctx, err) - return oi, toObjectErr(err, bucket, object) - } - err = json.Unmarshal(fsMetaBuf, &fsMeta) - if err != nil { - logger.LogIf(ctx, err) - return oi, toObjectErr(err, bucket, object) - } - // Save additional metadata. - if fsMeta.Meta == nil { - fsMeta.Meta = make(map[string]string) - } - - fsMeta.Meta["etag"] = opts.UserDefined["etag"] - if fsMeta.Meta["etag"] == "" { - fsMeta.Meta["etag"] = getCompleteMultipartMD5(parts) - } - - // Save consolidated actual size. - fsMeta.Meta[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(objectActualSize, 10) - if _, err = fsMeta.WriteTo(metaFile); err != nil { - logger.LogIf(ctx, err) - return oi, toObjectErr(err, bucket, object) - } - - err = fsRenameFile(ctx, appendFilePath, pathJoin(fs.fsPath, bucket, object)) - if err != nil { - logger.LogIf(ctx, err) - return oi, toObjectErr(err, bucket, object) - } - - // Purge multipart folders - { - fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, mustGetUUID()) - defer fsRemoveAll(ctx, fsTmpObjPath) // remove multipart temporary files in background. - - Rename(uploadIDDir, fsTmpObjPath) - - // It is safe to ignore any directory not empty error (in case there were multiple uploadIDs on the same object) - fsRemoveDir(ctx, fs.getMultipartSHADir(bucket, object)) - } - - fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object)) - if err != nil { - return oi, toObjectErr(err, bucket, object) - } - - return fsMeta.ToObjectInfo(bucket, object, fi), nil -} - -// AbortMultipartUpload - aborts an ongoing multipart operation -// signified by the input uploadID. This is an atomic operation -// doesn't require clients to initiate multiple such requests. -// -// All parts are purged from all disks and reference to the uploadID -// would be removed from the system, rollback is not possible on this -// operation. -// -// Implements S3 compatible Abort multipart API, slight difference is -// that this is an atomic idempotent operation. Subsequent calls have -// no affect and further requests to the same uploadID would not be -// honored. -func (fs *FSObjects) AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error { - if err := checkAbortMultipartArgs(ctx, bucket, object, fs); err != nil { - return err - } - - if _, err := fs.statBucketDir(ctx, bucket); err != nil { - return toObjectErr(err, bucket) - } - - fs.appendFileMapMu.Lock() - // Remove file in tmp folder - file := fs.appendFileMap[uploadID] - if file != nil { - fsRemoveFile(ctx, file.filePath) - } - delete(fs.appendFileMap, uploadID) - fs.appendFileMapMu.Unlock() - - uploadIDDir := fs.getUploadIDDir(bucket, object, uploadID) - // Just check if the uploadID exists to avoid copy if it doesn't. 
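The fallback ETag above, getCompleteMultipartMD5, follows the usual S3 multipart convention: concatenate the parts' binary MD5 digests, MD5 the concatenation, and append "-<partCount>". A standalone sketch of that convention, assuming every part ETag is a plain hex MD5:

package main

import (
    "crypto/md5"
    "encoding/hex"
    "fmt"
)

// completeMultipartMD5 computes the S3-style multipart ETag: the MD5 of
// the concatenated binary part digests, suffixed with the part count.
func completeMultipartMD5(partETags []string) string {
    var all []byte
    for _, etag := range partETags {
        b, err := hex.DecodeString(etag)
        if err != nil {
            continue // tolerate non-MD5 ETags in this sketch
        }
        all = append(all, b...)
    }
    sum := md5.Sum(all)
    return fmt.Sprintf("%s-%d", hex.EncodeToString(sum[:]), len(partETags))
}

func main() {
    fmt.Println(completeMultipartMD5([]string{
        "5d41402abc4b2a76b9719d911017c592", // md5("hello")
        "7d793037a0760186574b0282f2f435e7", // md5("world")
    }))
}

AbortMultipartUpload continues below by statting fs.json to validate the uploadID: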
- _, err := fsStatFile(ctx, pathJoin(uploadIDDir, fs.metaJSONFile)) - if err != nil { - if err == errFileNotFound || err == errFileAccessDenied { - return InvalidUploadID{Bucket: bucket, Object: object, UploadID: uploadID} - } - return toObjectErr(err, bucket, object) - } - - // Purge multipart folders - { - fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, mustGetUUID()) - defer fsRemoveAll(ctx, fsTmpObjPath) // remove multipart temporary files in background. - - Rename(uploadIDDir, fsTmpObjPath) - - // It is safe to ignore any directory not empty error (in case there were multiple uploadIDs on the same object) - fsRemoveDir(ctx, fs.getMultipartSHADir(bucket, object)) - } - - return nil -} - -// Return all uploads IDs with full path of each upload-id directory. -// Do not return an error as this is a lazy operation -func (fs *FSObjects) getAllUploadIDs(ctx context.Context) (result map[string]string) { - result = make(map[string]string) - - entries, err := readDir(pathJoin(fs.fsPath, minioMetaMultipartBucket)) - if err != nil { - return - } - for _, entry := range entries { - uploadIDs, err := readDir(pathJoin(fs.fsPath, minioMetaMultipartBucket, entry)) - if err != nil { - continue - } - // Remove the trailing slash separator - for i := range uploadIDs { - uploadID := strings.TrimSuffix(uploadIDs[i], SlashSeparator) - result[uploadID] = pathJoin(fs.fsPath, minioMetaMultipartBucket, entry, uploadID) - } - } - return -} - -// Removes multipart uploads if any older than `expiry` duration -// on all buckets for every `cleanupInterval`, this function is -// blocking and should be run in a go-routine. -func (fs *FSObjects) cleanupStaleUploads(ctx context.Context) { - expiryUploadsTimer := time.NewTimer(globalAPIConfig.getStaleUploadsCleanupInterval()) - defer expiryUploadsTimer.Stop() - - bgAppendTmpCleaner := time.NewTimer(bgAppendsCleanupInterval) - defer bgAppendTmpCleaner.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-bgAppendTmpCleaner.C: - foundUploadIDs := fs.getAllUploadIDs(ctx) - - // Remove background append map from the memory - fs.appendFileMapMu.Lock() - for uploadID := range fs.appendFileMap { - _, ok := foundUploadIDs[uploadID] - if !ok { - delete(fs.appendFileMap, uploadID) - } - } - fs.appendFileMapMu.Unlock() - - // Remove background appends file from the disk - bgAppendsDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, bgAppendsDirName) - entries, err := readDir(bgAppendsDir) - if err != nil { - break - } - for _, entry := range entries { - uploadID := strings.Split(entry, ".")[0] - _, ok := foundUploadIDs[uploadID] - if !ok { - fsRemoveFile(ctx, pathJoin(bgAppendsDir, entry)) - } - } - - bgAppendTmpCleaner.Reset(bgAppendsCleanupInterval) - case <-expiryUploadsTimer.C: - expiry := globalAPIConfig.getStaleUploadsExpiry() - now := time.Now() - - uploadIDs := fs.getAllUploadIDs(ctx) - - for uploadID, path := range uploadIDs { - fi, err := fsStatDir(ctx, path) - if err != nil { - continue - } - if now.Sub(fi.ModTime()) > expiry { - fsRemoveAll(ctx, path) - // Remove upload ID parent directory if empty - fsRemoveDir(ctx, filepath.Base(path)) - - // Remove uploadID from the append file map and its corresponding temporary file - fs.appendFileMapMu.Lock() - bgAppend, ok := fs.appendFileMap[uploadID] - if ok { - _ = fsRemoveFile(ctx, bgAppend.filePath) - delete(fs.appendFileMap, uploadID) - } - fs.appendFileMapMu.Unlock() - } - } - - // Reset for the next interval - expiryUploadsTimer.Reset(globalAPIConfig.getStaleUploadsCleanupInterval()) 
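cleanupStaleUploads above is the standard context-aware timer loop: block on select, do one pass of expiry work, reset the timer, and exit when the context is canceled. A compact sketch of the pattern, with illustrative intervals and an in-memory map standing in for the upload directories:

package main

import (
    "context"
    "fmt"
    "time"
)

// cleanupStale wakes every interval, removes entries older than expiry,
// and returns when ctx is canceled. It blocks, like the deleted function.
func cleanupStale(ctx context.Context, interval, expiry time.Duration, modTimes map[string]time.Time) {
    t := time.NewTimer(interval)
    defer t.Stop()
    for {
        select {
        case <-ctx.Done():
            return
        case <-t.C:
            now := time.Now()
            for id, mod := range modTimes {
                if now.Sub(mod) > expiry {
                    fmt.Println("removing stale upload", id)
                    delete(modTimes, id)
                }
            }
            t.Reset(interval) // arm the next pass only after this one finishes
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
    defer cancel()
    cleanupStale(ctx, 10*time.Millisecond, 0, map[string]time.Time{"u1": time.Now().Add(-time.Hour)})
}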
- } - } -} diff --git a/cmd/fs-v1-multipart_test.go b/cmd/fs-v1-multipart_test.go deleted file mode 100644 index 22003f502..000000000 --- a/cmd/fs-v1-multipart_test.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright (c) 2015-2021 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cmd - -import ( - "bytes" - "context" - "os" - "path/filepath" - "runtime" - "sync" - "testing" - "time" - - "github.com/minio/minio/internal/config/api" -) - -// Tests cleanup multipart uploads for filesystem backend. -func TestFSCleanupMultipartUploadsInRoutine(t *testing.T) { - t.Skip() - // Prepare for tests - disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - defer os.RemoveAll(disk) - - obj := initFSObjects(disk, t) - fs := obj.(*FSObjects) - - bucketName := "bucket" - objectName := "object" - - // Create a context we can cancel. - ctx, cancel := context.WithCancel(GlobalContext) - obj.MakeBucketWithLocation(ctx, bucketName, MakeBucketOptions{}) - - res, err := obj.NewMultipartUpload(ctx, bucketName, objectName, ObjectOptions{}) - if err != nil { - t.Fatal("Unexpected err: ", err) - } - - globalAPIConfig.init(api.Config{ - ListQuorum: "optimal", - StaleUploadsExpiry: time.Millisecond, - StaleUploadsCleanupInterval: time.Millisecond, - }, obj.SetDriveCounts()) - - defer func() { - globalAPIConfig.init(api.Config{ - ListQuorum: "optimal", - }, obj.SetDriveCounts()) - }() - - var cleanupWg sync.WaitGroup - cleanupWg.Add(1) - go func() { - defer cleanupWg.Done() - fs.cleanupStaleUploads(ctx) - }() - - // Wait for 100ms such that - we have given enough time for - // cleanup routine to kick in. Flaky on slow systems... - time.Sleep(100 * time.Millisecond) - cancel() - cleanupWg.Wait() - - // Check if upload id was already purged. - if err = obj.AbortMultipartUpload(GlobalContext, bucketName, objectName, res.UploadID, ObjectOptions{}); err != nil { - if _, ok := err.(InvalidUploadID); !ok { - t.Fatal("Unexpected err: ", err) - } - } else { - t.Error("Item was not cleaned up.") - } -} - -// TestNewMultipartUploadFaultyDisk - test NewMultipartUpload with faulty disks -func TestNewMultipartUploadFaultyDisk(t *testing.T) { - t.Skip() - // Prepare for tests - disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - defer os.RemoveAll(disk) - obj := initFSObjects(disk, t) - - fs := obj.(*FSObjects) - bucketName := "bucket" - objectName := "object" - - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil { - t.Fatal("Cannot create bucket, err: ", err) - } - - // Test with disk removed. 
- os.RemoveAll(disk) - if _, err := fs.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}}); err != nil { - if !isSameType(err, BucketNotFound{}) { - t.Fatal("Unexpected error ", err) - } - } -} - -// TestPutObjectPartFaultyDisk - test PutObjectPart with faulty disks -func TestPutObjectPartFaultyDisk(t *testing.T) { - // Prepare for tests - disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - defer os.RemoveAll(disk) - obj := initFSObjects(disk, t) - - bucketName := "bucket" - objectName := "object" - data := []byte("12345") - dataLen := int64(len(data)) - - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil { - t.Fatal("Cannot create bucket, err: ", err) - } - - res, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}}) - if err != nil { - t.Fatal("Unexpected error ", err) - } - - md5Hex := getMD5Hash(data) - sha256sum := "" - - newDisk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - defer os.RemoveAll(newDisk) - obj = initFSObjects(newDisk, t) - if _, err = obj.PutObjectPart(GlobalContext, bucketName, objectName, res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), dataLen, md5Hex, sha256sum), ObjectOptions{}); err != nil { - if !isSameType(err, BucketNotFound{}) { - t.Fatal("Unexpected error ", err) - } - } -} - -// TestCompleteMultipartUploadFaultyDisk - test CompleteMultipartUpload with faulty disks -func TestCompleteMultipartUploadFaultyDisk(t *testing.T) { - // Prepare for tests - disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - defer os.RemoveAll(disk) - obj := initFSObjects(disk, t) - - bucketName := "bucket" - objectName := "object" - data := []byte("12345") - - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil { - t.Fatal("Cannot create bucket, err: ", err) - } - - res, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}}) - if err != nil { - t.Fatal("Unexpected error ", err) - } - - md5Hex := getMD5Hash(data) - - parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}} - newDisk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - defer os.RemoveAll(newDisk) - obj = initFSObjects(newDisk, t) - if _, err := obj.CompleteMultipartUpload(GlobalContext, bucketName, objectName, res.UploadID, parts, ObjectOptions{}); err != nil { - if !isSameType(err, BucketNotFound{}) { - t.Fatal("Unexpected error ", err) - } - } -} - -// TestCompleteMultipartUpload - test CompleteMultipartUpload -func TestCompleteMultipartUpload(t *testing.T) { - // Prepare for tests - disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - defer os.RemoveAll(disk) - obj := initFSObjects(disk, t) - - bucketName := "bucket" - objectName := "object" - data := []byte("12345") - - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil { - t.Fatal("Cannot create bucket, err: ", err) - } - - res, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}}) - if err != nil { - t.Fatal("Unexpected error ", err) - } - - md5Hex := getMD5Hash(data) - - if _, err := obj.PutObjectPart(GlobalContext, bucketName, objectName, res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), 5, md5Hex, ""), 
ObjectOptions{}); err != nil { - t.Fatal("Unexpected error ", err) - } - - parts := []CompletePart{{PartNumber: 1, ETag: md5Hex}} - if _, err := obj.CompleteMultipartUpload(GlobalContext, bucketName, objectName, res.UploadID, parts, ObjectOptions{}); err != nil { - t.Fatal("Unexpected error ", err) - } -} - -// TestCompleteMultipartUpload - test CompleteMultipartUpload -func TestAbortMultipartUpload(t *testing.T) { - if runtime.GOOS == globalWindowsOSName { - // Concurrent AbortMultipartUpload() fails on windows - t.Skip() - } - - // Prepare for tests - disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - defer os.RemoveAll(disk) - obj := initFSObjects(disk, t) - - bucketName := "bucket" - objectName := "object" - data := []byte("12345") - - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil { - t.Fatal("Cannot create bucket, err: ", err) - } - - res, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}}) - if err != nil { - t.Fatal("Unexpected error ", err) - } - - md5Hex := getMD5Hash(data) - - opts := ObjectOptions{} - if _, err := obj.PutObjectPart(GlobalContext, bucketName, objectName, res.UploadID, 1, mustGetPutObjReader(t, bytes.NewReader(data), 5, md5Hex, ""), opts); err != nil { - t.Fatal("Unexpected error ", err) - } - if err := obj.AbortMultipartUpload(GlobalContext, bucketName, objectName, res.UploadID, opts); err != nil { - t.Fatal("Unexpected error ", err) - } -} - -// TestListMultipartUploadsFaultyDisk - test ListMultipartUploads with faulty disks -func TestListMultipartUploadsFaultyDisk(t *testing.T) { - // Prepare for tests - disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - defer os.RemoveAll(disk) - - obj := initFSObjects(disk, t) - - bucketName := "bucket" - objectName := "object" - - if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil { - t.Fatal("Cannot create bucket, err: ", err) - } - - _, err := obj.NewMultipartUpload(GlobalContext, bucketName, objectName, ObjectOptions{UserDefined: map[string]string{"X-Amz-Meta-xid": "3f"}}) - if err != nil { - t.Fatal("Unexpected error ", err) - } - - newDisk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix()) - defer os.RemoveAll(newDisk) - obj = initFSObjects(newDisk, t) - if _, err := obj.ListMultipartUploads(GlobalContext, bucketName, objectName, "", "", "", 1000); err != nil { - if !isSameType(err, BucketNotFound{}) { - t.Fatal("Unexpected error ", err) - } - } -} diff --git a/cmd/fs-v1-rwpool.go b/cmd/fs-v1-rwpool.go deleted file mode 100644 index ae714f24a..000000000 --- a/cmd/fs-v1-rwpool.go +++ /dev/null @@ -1,231 +0,0 @@ -// Copyright (c) 2015-2021 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -package cmd - -import ( - "os" - pathutil "path" - "sync" - - "github.com/minio/minio/internal/lock" - "github.com/minio/minio/internal/logger" -) - -// fsIOPool represents a protected list to keep track of all -// the concurrent readers at a given path. -type fsIOPool struct { - sync.Mutex - readersMap map[string]*lock.RLockedFile -} - -// lookupToRead - looks up an fd from the readers map and -// returns a read-locked fd for the caller to read from; if the -// fd is found, its reference count is incremented. If the fd -// is found to be closed, it is purged from the -// readersMap and nil is returned instead. -// -// NOTE: this function is not protected and it is the caller's -// responsibility to lock this call to make it thread safe. For -// implementation ideas look at the usage inside the Open() call. -func (fsi *fsIOPool) lookupToRead(path string) (*lock.RLockedFile, bool) { - rlkFile, ok := fsi.readersMap[path] - // File reference exists on the map, validate whether it is - // really closed and we are safe to purge it. - if ok && rlkFile != nil { - // A file that is closed but not removed from the map is a bug. - if rlkFile.IsClosed() { - // Log this as an error. - reqInfo := (&logger.ReqInfo{}).AppendTags("path", path) - ctx := logger.SetReqInfo(GlobalContext, reqInfo) - logger.LogIf(ctx, errUnexpected) - - // Purge the cached lock path from the map. - delete(fsi.readersMap, path) - - // Indicate that we can populate the new fd. - ok = false - } else { - // Increment the lock ref, since the file is not closed yet - // and the caller requested to read the file again. - rlkFile.IncLockRef() - } - } - return rlkFile, ok -} - -// Open is a wrapper call to a read-locked file which -// returns a ReadAtCloser. -// -// ReaderAt is provided so that the fd is non-seekable; since -// we are sharing fds with concurrent threads, we don't want -// all readers to change offsets on each other during such -// concurrent operations. Using ReadAt allows us to read from -// any offset. -// -// Closer is implemented to track total readers and to close -// only when there are no more readers; the fd is purged once the lock -// count reaches zero. -func (fsi *fsIOPool) Open(path string) (*lock.RLockedFile, error) { - if err := checkPathLength(path); err != nil { - return nil, err - } - - fsi.Lock() - rlkFile, ok := fsi.lookupToRead(path) - fsi.Unlock() - // Locked path reference doesn't exist, acquire a read lock again on the file. - if !ok { - // Open file for reading with a read lock. - newRlkFile, err := lock.RLockedOpenFile(path) - if err != nil { - switch { - case osIsNotExist(err): - return nil, errFileNotFound - case osIsPermission(err): - return nil, errFileAccessDenied - case isSysErrIsDir(err): - return nil, errIsNotRegular - case isSysErrNotDir(err): - return nil, errFileAccessDenied - case isSysErrPathNotFound(err): - return nil, errFileNotFound - default: - return nil, err - } - } - - // Save the new reader on the map. - - // It is possible that by this time, due to concurrent - // i/o, another lock is already present. Look up - // again to check for such a possibility. If no such - // file exists, save the newly opened fd; otherwise, - // reuse the existing fd and close the newly opened - // file. - fsi.Lock() - rlkFile, ok = fsi.lookupToRead(path) - if ok { - // Close the new fd, since we already seem to have - // an active reference. - newRlkFile.Close() - } else { - // Save the new rlk file. - rlkFile = newRlkFile - } - - // Save the new fd on the map. - fsi.readersMap[path] = rlkFile - fsi.Unlock() - - } - - // Success.
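Open above is a textbook double-checked publication: look up under the mutex, open the file outside the lock (opening can block), then re-check under the mutex and discard the freshly opened fd if another goroutine won the race. A reference-counted sketch of the same shape, with rcFile as an illustrative stand-in for lock.RLockedFile:

package main

import (
    "fmt"
    "sync"
)

type rcFile struct {
    refs int
    path string
}

type pool struct {
    mu      sync.Mutex
    readers map[string]*rcFile
}

func (p *pool) open(path string) *rcFile {
    p.mu.Lock()
    if f, ok := p.readers[path]; ok {
        f.refs++ // reuse the shared fd
        p.mu.Unlock()
        return f
    }
    p.mu.Unlock()

    nf := &rcFile{refs: 1, path: path} // the real "open" happens outside the lock

    p.mu.Lock()
    defer p.mu.Unlock()
    if f, ok := p.readers[path]; ok {
        f.refs++ // lost the race: discard nf, reuse the published fd
        return f
    }
    p.readers[path] = nf
    return nf
}

func main() {
    p := &pool{readers: make(map[string]*rcFile)}
    a, b := p.open("fs.json"), p.open("fs.json")
    fmt.Println(a == b, a.refs) // true 2
}

Open then returns the shared, read-locked fd: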
- return rlkFile, nil -} - -// Write - attempts to lock the file if it exists: we try to get -// a write lock, which will block if we can't get the lock because -// another write or read is in progress. Concurrent calls are protected -// by the global namespace lock within the same process. -func (fsi *fsIOPool) Write(path string) (wlk *lock.LockedFile, err error) { - if err = checkPathLength(path); err != nil { - return nil, err - } - - wlk, err = lock.LockedOpenFile(path, os.O_RDWR, 0o666) - if err != nil { - switch { - case osIsNotExist(err): - return nil, errFileNotFound - case osIsPermission(err): - return nil, errFileAccessDenied - case isSysErrIsDir(err): - return nil, errIsNotRegular - default: - if isSysErrPathNotFound(err) { - return nil, errFileNotFound - } - return nil, err - } - } - return wlk, nil -} - -// Create - creates a new write-locked file instance: if the file -// doesn't exist, we create the file and hold the lock. -func (fsi *fsIOPool) Create(path string) (wlk *lock.LockedFile, err error) { - if err = checkPathLength(path); err != nil { - return nil, err - } - - // Create the parent directory if missing. - if err = mkdirAll(pathutil.Dir(path), 0o777); err != nil { - return nil, err - } - - // Attempt to create the file. - wlk, err = lock.LockedOpenFile(path, os.O_RDWR|os.O_CREATE, 0o666) - if err != nil { - switch { - case osIsPermission(err): - return nil, errFileAccessDenied - case isSysErrIsDir(err): - return nil, errIsNotRegular - case isSysErrNotDir(err): - return nil, errFileAccessDenied - case isSysErrPathNotFound(err): - return nil, errFileAccessDenied - default: - return nil, err - } - } - - // Success. - return wlk, nil -} - -// Close implements closing the path referenced by the reader in such -// a way that it makes sure to remove the entry from the map immediately -// if no active readers are present. -func (fsi *fsIOPool) Close(path string) error { - fsi.Lock() - defer fsi.Unlock() - - if err := checkPathLength(path); err != nil { - return err - } - - // Pop the readers for this path. - rlkFile, ok := fsi.readersMap[path] - if !ok { - return nil - } - - // Close the reader. - rlkFile.Close() - - // If the file is closed, remove it from the reader pool map. - if rlkFile.IsClosed() { - // Purge the cached lock path from the map. - delete(fsi.readersMap, path) - } - - // Success. - return nil -} diff --git a/cmd/fs-v1-rwpool_test.go b/cmd/fs-v1-rwpool_test.go deleted file mode 100644 index 81f638826..000000000 --- a/cmd/fs-v1-rwpool_test.go +++ /dev/null @@ -1,111 +0,0 @@ -// Copyright (c) 2015-2021 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cmd - -import ( - "runtime" - "testing" - - "github.com/minio/minio/internal/lock" -) - -// Tests long path calls.
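Write and Create above normalize platform-level failures into the backend's small canonical error set (errFileNotFound, errFileAccessDenied, errIsNotRegular). A condensed sketch of that classification using standard-library predicates; the deleted code's osIsNotExist and related helpers are, to the best of my reading, thin wrappers with the same intent:

package main

import (
    "errors"
    "fmt"
    "io/fs"
    "os"
)

var (
    errFileNotFound     = errors.New("file not found")
    errFileAccessDenied = errors.New("file access denied")
)

// classify maps OS-level errors onto the canonical error set, so callers
// can compare against sentinel values instead of platform details.
func classify(err error) error {
    switch {
    case errors.Is(err, fs.ErrNotExist):
        return errFileNotFound
    case errors.Is(err, fs.ErrPermission):
        return errFileAccessDenied
    default:
        return err
    }
}

func main() {
    _, err := os.Open("/definitely/missing/path")
    fmt.Println(classify(err)) // file not found
}

TestRWPoolLongPath, below, checks that over-long paths are rejected with errFileNameTooLong before any locking happens: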
-func TestRWPoolLongPath(t *testing.T) { - rwPool := &fsIOPool{ - readersMap: make(map[string]*lock.RLockedFile), - } - - longPath := "my-obj-del-0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001" - if _, err := rwPool.Create(longPath); err != errFileNameTooLong { - t.Fatal(err) - } - - if _, err := rwPool.Write(longPath); err != errFileNameTooLong { - t.Fatal(err) - } - - if _, err := rwPool.Open(longPath); err != errFileNameTooLong { - t.Fatal(err) - } -} - -// Tests all RWPool methods. -func TestRWPool(t *testing.T) { - // create xlStorage test setup - _, path, err := newXLStorageTestSetup(t) - if err != nil { - t.Fatalf("Unable to create xlStorage test setup, %s", err) - } - - rwPool := &fsIOPool{ - readersMap: make(map[string]*lock.RLockedFile), - } - wlk, err := rwPool.Create(pathJoin(path, "success-vol", "file/path/1.txt")) - if err != nil { - t.Fatal(err) - } - wlk.Close() - - // Fails to create a parent directory if there is a file. - _, err = rwPool.Create(pathJoin(path, "success-vol", "file/path/1.txt/test")) - if err != errFileAccessDenied { - t.Fatal("Unexpected error", err) - } - - // Fails to create a file if there is a directory. - _, err = rwPool.Create(pathJoin(path, "success-vol", "file")) - if runtime.GOOS == globalWindowsOSName { - if err != errFileAccessDenied { - t.Fatal("Unexpected error", err) - } - } else { - if err != errIsNotRegular { - t.Fatal("Unexpected error", err) - } - } - - rlk, err := rwPool.Open(pathJoin(path, "success-vol", "file/path/1.txt")) - if err != nil { - t.Fatal("Unexpected error", err) - } - rlk.Close() - - // Fails to read a directory. - _, err = rwPool.Open(pathJoin(path, "success-vol", "file")) - if runtime.GOOS == globalWindowsOSName { - if err != errFileAccessDenied { - t.Fatal("Unexpected error", err) - } - } else { - if err != errIsNotRegular { - t.Fatal("Unexpected error", err) - } - } - - // Fails to open a file which has a parent as file. - _, err = rwPool.Open(pathJoin(path, "success-vol", "file/path/1.txt/test")) - if runtime.GOOS != globalWindowsOSName { - if err != errFileAccessDenied { - t.Fatal("Unexpected error", err) - } - } else { - if err != errFileNotFound { - t.Fatal("Unexpected error", err) - } - } -} diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go deleted file mode 100644 index 33bee6fbe..000000000 --- a/cmd/fs-v1.go +++ /dev/null @@ -1,1493 +0,0 @@ -// Copyright (c) 2015-2021 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . 
- -package cmd - -import ( - "bytes" - "context" - "fmt" - "io" - "net/http" - "os" - "os/user" - "path" - "sort" - "strings" - "sync" - "time" - - jsoniter "github.com/json-iterator/go" - "github.com/minio/madmin-go" - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/tags" - "github.com/minio/minio/internal/color" - "github.com/minio/minio/internal/config" - xhttp "github.com/minio/minio/internal/http" - xioutil "github.com/minio/minio/internal/ioutil" - "github.com/minio/minio/internal/lock" - "github.com/minio/minio/internal/logger" - "github.com/minio/minio/internal/mountinfo" - "github.com/minio/pkg/bucket/policy" - "github.com/minio/pkg/mimedb" -) - -// Default etag is used for pre-existing objects. -var defaultEtag = "00000000000000000000000000000000-1" - -// FSObjects - Implements fs object layer. -type FSObjects struct { - GatewayUnsupported - - // Path to be exported over S3 API. - fsPath string - // meta json filename, varies by fs / cache backend. - metaJSONFile string - // Unique value to be used for all - // temporary transactions. - fsUUID string - - // This value shouldn't be touched, once initialized. - fsFormatRlk *lock.RLockedFile // Is a read lock on `format.json`. - - // FS rw pool. - rwPool *fsIOPool - - // ListObjects pool management. - listPool *TreeWalkPool - - diskMount bool - - appendFileMap map[string]*fsAppendFile - appendFileMapMu sync.Mutex - - // To manage the appendRoutine go-routines - nsMutex *nsLockMap -} - -// Represents the background append file. -type fsAppendFile struct { - sync.Mutex - parts []PartInfo // List of parts appended. - filePath string // Absolute path of the file in the temp location. -} - -// Initializes meta volume on all the fs path. -func initMetaVolumeFS(fsPath, fsUUID string) error { - // This happens for the first time, but keep this here since this - // is the only place where it can be made less expensive - // optimizing all other calls. Create minio meta volume, - // if it doesn't exist yet. - metaBucketPath := pathJoin(fsPath, minioMetaBucket) - - if err := os.MkdirAll(metaBucketPath, 0o777); err != nil { - return err - } - - metaTmpPath := pathJoin(fsPath, minioMetaTmpBucket, fsUUID) - if err := os.MkdirAll(metaTmpPath, 0o777); err != nil { - return err - } - - if err := os.MkdirAll(pathJoin(metaTmpPath, bgAppendsDirName), 0o777); err != nil { - return err - } - - if err := os.MkdirAll(pathJoin(fsPath, dataUsageBucket), 0o777); err != nil { - return err - } - - metaMultipartPath := pathJoin(fsPath, minioMetaMultipartBucket) - return os.MkdirAll(metaMultipartPath, 0o777) -} - -// NewFSObjectLayer - initialize new fs object layer. -func NewFSObjectLayer(ctx context.Context, fsPath string) (ObjectLayer, error) { - if fsPath == "" { - return nil, errInvalidArgument - } - - var err error - if fsPath, err = getValidPath(fsPath); err != nil { - if err == errMinDiskSize { - return nil, config.ErrUnableToWriteInBackend(err).Hint(err.Error()) - } - - // Show a descriptive error with a hint about how to fix it. 
- var username string - if u, err := user.Current(); err == nil { - username = u.Username - } else { - username = "" - } - hint := fmt.Sprintf("Use 'sudo chown -R %s %s && sudo chmod u+rxw %s' to provide sufficient permissions.", username, fsPath, fsPath) - return nil, config.ErrUnableToWriteInBackend(err).Hint(hint) - } - - fsFormatPath := pathJoin(fsPath, minioMetaBucket, formatConfigFile) - if _, err = fsStat(ctx, fsFormatPath); err != nil && os.IsNotExist(err) { - return nil, errFreshDisk - } - - // Assign a new UUID for FS minio mode. Each server instance - // gets its own UUID for temporary file transaction. - fsUUID := mustGetUUID() - - // Initialize meta volume, if volume already exists ignores it. - if err = initMetaVolumeFS(fsPath, fsUUID); err != nil { - return nil, err - } - - // Initialize `format.json`, this function also returns. - rlk, err := initFormatFS(ctx, fsPath) - if err != nil { - return nil, err - } - - // Initialize fs objects. - fs := &FSObjects{ - fsPath: fsPath, - metaJSONFile: fsMetaJSONFile, - fsUUID: fsUUID, - rwPool: &fsIOPool{ - readersMap: make(map[string]*lock.RLockedFile), - }, - nsMutex: newNSLock(false), - listPool: NewTreeWalkPool(globalLookupTimeout), - appendFileMap: make(map[string]*fsAppendFile), - diskMount: mountinfo.IsLikelyMountPoint(fsPath), - } - - // Once the filesystem has initialized hold the read lock for - // the life time of the server. This is done to ensure that under - // shared backend mode for FS, remote servers do not migrate - // or cause changes on backend format. - fs.fsFormatRlk = rlk - - go fs.cleanupStaleUploads(ctx) - go intDataUpdateTracker.start(ctx, fsPath) - - // Return successfully initialized object layer. - return fs, nil -} - -// NewNSLock - initialize a new namespace RWLocker instance. -func (fs *FSObjects) NewNSLock(bucket string, objects ...string) RWLocker { - // lockers are explicitly 'nil' for FS mode since there are only local lockers - return fs.nsMutex.NewNSLock(nil, bucket, objects...) -} - -// SetDriveCounts no-op -func (fs *FSObjects) SetDriveCounts() []int { - return nil -} - -// Shutdown - should be called when process shuts down. -func (fs *FSObjects) Shutdown(ctx context.Context) error { - fs.fsFormatRlk.Close() - - // Cleanup and delete tmp uuid. - return fsRemoveAll(ctx, pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)) -} - -// BackendInfo - returns backend information -func (fs *FSObjects) BackendInfo() madmin.BackendInfo { - return madmin.BackendInfo{ - Type: madmin.FS, - StandardSCData: []int{1}, - StandardSCParity: 0, - RRSCData: []int{1}, - RRSCParity: 0, - } -} - -// LocalStorageInfo - returns underlying storage statistics. -func (fs *FSObjects) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) { - return fs.StorageInfo(ctx) -} - -// StorageInfo - returns underlying storage statistics. 
-func (fs *FSObjects) StorageInfo(ctx context.Context) (StorageInfo, []error) {
- di, err := getDiskInfo(fs.fsPath)
- if err != nil {
- return StorageInfo{}, []error{err}
- }
- storageInfo := StorageInfo{
- Disks: []madmin.Disk{
- {
- State: madmin.DriveStateOk,
- TotalSpace: di.Total,
- UsedSpace: di.Used,
- AvailableSpace: di.Free,
- DrivePath: fs.fsPath,
- },
- },
- }
- storageInfo.Backend = fs.BackendInfo()
- return storageInfo, nil
-}
-
-// NSScanner returns data usage stats of the current FS deployment.
-func (fs *FSObjects) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo, wantCycle uint32, _ madmin.HealScanMode) error {
- defer close(updates)
- // Load bucket totals
- var totalCache dataUsageCache
- err := totalCache.load(ctx, fs, dataUsageCacheName)
- if err != nil {
- return err
- }
- totalCache.Info.Name = dataUsageRoot
- buckets, err := fs.ListBuckets(ctx, BucketOptions{})
- if err != nil {
- return err
- }
- if len(buckets) == 0 {
- totalCache.keepBuckets(buckets)
- updates <- totalCache.dui(dataUsageRoot, buckets)
- return nil
- }
- // Drop reserved and invalid buckets from the listing.
- // (Filtering in place avoids skipping entries while mutating the slice.)
- validBuckets := buckets[:0]
- for _, b := range buckets {
- if !isReservedOrInvalidBucket(b.Name, false) {
- validBuckets = append(validBuckets, b)
- }
- }
- buckets = validBuckets
-
- totalCache.Info.BloomFilter = bf.bytes()
-
- // Clear totals.
- var root dataUsageEntry
- if r := totalCache.root(); r != nil {
- root.Children = r.Children
- }
- totalCache.replace(dataUsageRoot, "", root)
-
- // Drop all buckets that no longer exist.
- totalCache.keepBuckets(buckets)
-
- for _, b := range buckets {
- // Load bucket cache.
- var bCache dataUsageCache
- err := bCache.load(ctx, fs, path.Join(b.Name, dataUsageCacheName))
- if err != nil {
- return err
- }
- if bCache.Info.Name == "" {
- bCache.Info.Name = b.Name
- }
- bCache.Info.BloomFilter = totalCache.Info.BloomFilter
- bCache.Info.NextCycle = wantCycle
- upds := make(chan dataUsageEntry, 1)
- var wg sync.WaitGroup
- wg.Add(1)
- go func() {
- defer wg.Done()
- for update := range upds {
- totalCache.replace(b.Name, dataUsageRoot, update)
- if intDataUpdateTracker.debug {
- logger.Info(color.Green("NSScanner:")+" Got update: %v", len(totalCache.Cache))
- }
- cloned := totalCache.clone()
- updates <- cloned.dui(dataUsageRoot, buckets)
- }
- }()
- bCache.Info.updates = upds
- cache, err := fs.scanBucket(ctx, b.Name, bCache)
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
- logger.LogIf(ctx, err)
- cache.Info.BloomFilter = nil
- wg.Wait()
-
- if cache.root() == nil {
- if intDataUpdateTracker.debug {
- logger.Info(color.Green("NSScanner:") + " No root added. Adding empty")
- }
- cache.replace(cache.Info.Name, dataUsageRoot, dataUsageEntry{})
- }
- if cache.Info.LastUpdate.After(bCache.Info.LastUpdate) {
- if intDataUpdateTracker.debug {
- logger.Info(color.Green("NSScanner:")+" Saving bucket %q cache with %d entries", b.Name, len(cache.Cache))
- }
- logger.LogIf(ctx, cache.save(ctx, fs, path.Join(b.Name, dataUsageCacheName)))
- }
- // Merge, save and send update.
- // We do it even if unchanged.
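- // (The clone below lets the merged totals be published on the
- // updates channel without racing subsequent merges.)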
- cl := cache.clone()
- entry := cl.flatten(*cl.root())
- totalCache.replace(cl.Info.Name, dataUsageRoot, entry)
- if intDataUpdateTracker.debug {
- logger.Info(color.Green("NSScanner:")+" Saving totals cache with %d entries", len(totalCache.Cache))
- }
- totalCache.Info.LastUpdate = time.Now()
- logger.LogIf(ctx, totalCache.save(ctx, fs, dataUsageCacheName))
- cloned := totalCache.clone()
- updates <- cloned.dui(dataUsageRoot, buckets)
-
- }
-
- return nil
-}
-
-// scanBucket scans a single bucket in FS mode.
-// The updated cache for the bucket is returned.
-// A partially updated bucket may be returned.
-func (fs *FSObjects) scanBucket(ctx context.Context, bucket string, cache dataUsageCache) (dataUsageCache, error) {
- defer close(cache.Info.updates)
- defer globalScannerMetrics.log(scannerMetricScanBucketDisk, fs.fsPath, bucket)()
-
- // Check if the current bucket has a configured lifecycle policy.
- lc, err := globalLifecycleSys.Get(bucket)
- if err == nil && lc.HasActiveRules("", true) {
- if intDataUpdateTracker.debug {
- logger.Info(color.Green("scanBucket:") + " lifecycle: Active rules found")
- }
- cache.Info.lifeCycle = lc
- }
-
- // Scan the bucket's data folder.
- cache, err = scanDataFolder(ctx, -1, -1, fs.fsPath, cache, func(item scannerItem) (sizeSummary, error) {
- bucket, object := item.bucket, item.objectPath()
- stopFn := globalScannerMetrics.log(scannerMetricScanObject, fs.fsPath, PathJoin(item.bucket, item.objectPath()))
- defer stopFn()
-
- var fsMetaBytes []byte
- done := globalScannerMetrics.timeSize(scannerMetricReadMetadata)
- defer func() {
- if done != nil {
- done(len(fsMetaBytes))
- }
- }()
- fsMetaBytes, err := xioutil.ReadFile(pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile))
- if err != nil && !osIsNotExist(err) {
- if intDataUpdateTracker.debug {
- logger.Info(color.Green("scanBucket:")+" object returned unexpected error: %v/%v: %v", item.bucket, item.objectPath(), err)
- }
- return sizeSummary{}, errSkipFile
- }
-
- fsMeta := newFSMetaV1()
- metaOk := false
- if len(fsMetaBytes) > 0 {
- json := jsoniter.ConfigCompatibleWithStandardLibrary
- if err = json.Unmarshal(fsMetaBytes, &fsMeta); err == nil {
- metaOk = true
- }
- }
- if !metaOk {
- fsMeta = fs.defaultFsJSON(object)
- }
-
- // Stat the file.
- fi, fiErr := os.Stat(item.Path)
- if fiErr != nil {
- if intDataUpdateTracker.debug {
- logger.Info(color.Green("scanBucket:")+" object path missing: %v: %v", item.Path, fiErr)
- }
- return sizeSummary{}, errSkipFile
- }
- done(len(fsMetaBytes))
- done = nil
-
- // FS has no "all versions". Increment the counter, though.
- globalScannerMetrics.incNoTime(scannerMetricApplyAll)
-
- oi := fsMeta.ToObjectInfo(bucket, object, fi)
- doneVer := globalScannerMetrics.time(scannerMetricApplyVersion)
- sz := item.applyActions(ctx, fs, oi, &sizeSummary{})
- doneVer()
- if sz >= 0 {
- return sizeSummary{totalSize: sz, versions: 1}, nil
- }
-
- return sizeSummary{totalSize: fi.Size(), versions: 1}, nil
- }, 0)
-
- return cache, err
-}
-
-// Bucket operations
-
-// getBucketDir - converts incoming bucket names to
-// corresponding valid bucket names on the backend in a
-// platform-compatible way for all operating systems.
-func (fs *FSObjects) getBucketDir(ctx context.Context, bucket string) (string, error) {
- if bucket == "" || bucket == "." || bucket == ".."
{ - return "", errVolumeNotFound - } - bucketDir := pathJoin(fs.fsPath, bucket) - return bucketDir, nil -} - -func (fs *FSObjects) statBucketDir(ctx context.Context, bucket string) (os.FileInfo, error) { - bucketDir, err := fs.getBucketDir(ctx, bucket) - if err != nil { - return nil, err - } - st, err := fsStatVolume(ctx, bucketDir) - if err != nil { - return nil, err - } - return st, nil -} - -// MakeBucketWithLocation - create a new bucket, returns if it already exists. -func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket string, opts MakeBucketOptions) error { - if opts.LockEnabled || opts.VersioningEnabled { - return NotImplemented{} - } - - // Verify if bucket is valid. - if s3utils.CheckValidBucketNameStrict(bucket) != nil { - return BucketNameInvalid{Bucket: bucket} - } - - defer NSUpdated(bucket, slashSeparator) - - bucketDir, err := fs.getBucketDir(ctx, bucket) - if err != nil { - return toObjectErr(err, bucket) - } - - if err = fsMkdir(ctx, bucketDir); err != nil { - return toObjectErr(err, bucket) - } - - meta := newBucketMetadata(bucket) - meta.SetCreatedAt(opts.CreatedAt) - if err := meta.Save(ctx, fs); err != nil { - return toObjectErr(err, bucket) - } - - globalBucketMetadataSys.Set(bucket, meta) - - return nil -} - -// GetBucketPolicy - only needed for FS in NAS mode -func (fs *FSObjects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) { - meta, err := loadBucketMetadata(ctx, fs, bucket) - if err != nil { - return nil, BucketPolicyNotFound{Bucket: bucket} - } - if meta.policyConfig == nil { - return nil, BucketPolicyNotFound{Bucket: bucket} - } - return meta.policyConfig, nil -} - -// SetBucketPolicy - only needed for FS in NAS mode -func (fs *FSObjects) SetBucketPolicy(ctx context.Context, bucket string, p *policy.Policy) error { - meta, err := loadBucketMetadata(ctx, fs, bucket) - if err != nil { - return err - } - - json := jsoniter.ConfigCompatibleWithStandardLibrary - configData, err := json.Marshal(p) - if err != nil { - return err - } - meta.PolicyConfigJSON = configData - - return meta.Save(ctx, fs) -} - -// DeleteBucketPolicy - only needed for FS in NAS mode -func (fs *FSObjects) DeleteBucketPolicy(ctx context.Context, bucket string) error { - meta, err := loadBucketMetadata(ctx, fs, bucket) - if err != nil { - return err - } - meta.PolicyConfigJSON = nil - return meta.Save(ctx, fs) -} - -// GetBucketInfo - fetch bucket metadata info. -func (fs *FSObjects) GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (bi BucketInfo, e error) { - st, err := fs.statBucketDir(ctx, bucket) - if err != nil { - return bi, toObjectErr(err, bucket) - } - - createdTime := st.ModTime() - meta, err := globalBucketMetadataSys.Get(bucket) - if err == nil { - createdTime = meta.Created - } - - return BucketInfo{ - Name: bucket, - Created: createdTime, - }, nil -} - -// ListBuckets - list all s3 compatible buckets (directories) at fsPath. -func (fs *FSObjects) ListBuckets(ctx context.Context, opts BucketOptions) ([]BucketInfo, error) { - if err := checkPathLength(fs.fsPath); err != nil { - logger.LogIf(ctx, err) - return nil, err - } - - entries, err := readDirWithOpts(fs.fsPath, readDirOpts{count: -1, followDirSymlink: true}) - if err != nil { - logger.LogIf(ctx, errDiskNotFound) - return nil, toObjectErr(errDiskNotFound) - } - - bucketInfos := make([]BucketInfo, 0, len(entries)) - for _, entry := range entries { - // Ignore all reserved bucket names and invalid bucket names. 
- if isReservedOrInvalidBucket(entry, false) {
- continue
- }
- var fi os.FileInfo
- fi, err = fsStatVolume(ctx, pathJoin(fs.fsPath, entry))
- // There seems to be no practical reason to check for errors
- // at this point; if Stat does fail we simply skip such
- // buckets and list only those which return proper Stat
- // information instead.
- if err != nil {
- // Ignore any errors returned here.
- continue
- }
- created := fi.ModTime()
- meta, err := globalBucketMetadataSys.Get(fi.Name())
- if err == nil {
- created = meta.Created
- }
-
- bucketInfos = append(bucketInfos, BucketInfo{
- Name: fi.Name(),
- Created: created,
- })
- }
-
- // Sort bucket infos by bucket name.
- sort.Slice(bucketInfos, func(i, j int) bool {
- return bucketInfos[i].Name < bucketInfos[j].Name
- })
-
- // Success.
- return bucketInfos, nil
-}
-
-// DeleteBucket - delete a bucket and all the metadata associated
-// with the bucket, including pending multipart and object metadata.
-func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
- defer NSUpdated(bucket, slashSeparator)
-
- bucketDir, err := fs.getBucketDir(ctx, bucket)
- if err != nil {
- return toObjectErr(err, bucket)
- }
-
- if !opts.Force {
- // Attempt to delete regular bucket.
- if err = fsRemoveDir(ctx, bucketDir); err != nil {
- return toObjectErr(err, bucket)
- }
- } else {
- tmpBucketPath := pathJoin(fs.fsPath, minioMetaTmpBucket, bucket+"."+mustGetUUID())
- if err = Rename(bucketDir, tmpBucketPath); err != nil {
- return toObjectErr(err, bucket)
- }
-
- go func() {
- fsRemoveAll(ctx, tmpBucketPath) // ignore returned error if any.
- }()
- }
-
- // Cleanup all the on-disk bucket metadata.
- minioMetadataBucketDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket)
- if err = fsRemoveAll(ctx, minioMetadataBucketDir); err != nil {
- return toObjectErr(err, bucket)
- }
-
- // Purge bucket metadata from the metadata subsystem.
- deleteBucketMetadata(ctx, fs, bucket)
-
- return nil
-}
-
-// Object Operations
-
-// CopyObject - copies the source object to the destination object.
-// If the source and destination objects are the same, only the
-// metadata is updated.
-func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, err error) {
- if srcOpts.VersionID != "" && srcOpts.VersionID != nullVersionID {
- return oi, VersionNotFound{
- Bucket: srcBucket,
- Object: srcObject,
- VersionID: srcOpts.VersionID,
- }
- }
-
- cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
- defer NSUpdated(dstBucket, dstObject)
-
- if !cpSrcDstSame {
- objectDWLock := fs.NewNSLock(dstBucket, dstObject)
- lkctx, err := objectDWLock.GetLock(ctx, globalOperationTimeout)
- if err != nil {
- return oi, err
- }
- ctx = lkctx.Context()
- defer objectDWLock.Unlock(lkctx.Cancel)
- }
-
- if _, err := fs.statBucketDir(ctx, srcBucket); err != nil {
- return oi, toObjectErr(err, srcBucket)
- }
-
- if cpSrcDstSame && srcInfo.metadataOnly {
- fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, srcBucket, srcObject, fs.metaJSONFile)
- wlk, err := fs.rwPool.Write(fsMetaPath)
- if err != nil {
- wlk, err = fs.rwPool.Create(fsMetaPath)
- if err != nil {
- logger.LogIf(ctx, err)
- return oi, toObjectErr(err, srcBucket, srcObject)
- }
- }
- // This close will allow for locks to be synchronized on `fs.json`.
- defer wlk.Close()
-
- // Save the object's metadata in `fs.json`.
- fsMeta := newFSMetaV1() - if _, err = fsMeta.ReadFrom(ctx, wlk); err != nil { - // For any error to read fsMeta, set default ETag and proceed. - fsMeta = fs.defaultFsJSON(srcObject) - } - - fsMeta.Meta = cloneMSS(srcInfo.UserDefined) - fsMeta.Meta["etag"] = srcInfo.ETag - if _, err = fsMeta.WriteTo(wlk); err != nil { - return oi, toObjectErr(err, srcBucket, srcObject) - } - - fsObjectPath := pathJoin(fs.fsPath, srcBucket, srcObject) - - // Update object modtime - err = fsTouch(ctx, fsObjectPath) - if err != nil { - return oi, toObjectErr(err, srcBucket, srcObject) - } - // Stat the file to get object info - fi, err := fsStatFile(ctx, fsObjectPath) - if err != nil { - return oi, toObjectErr(err, srcBucket, srcObject) - } - - // Return the new object info. - return fsMeta.ToObjectInfo(srcBucket, srcObject, fi), nil - } - - if err := checkPutObjectArgs(ctx, dstBucket, dstObject, fs); err != nil { - return ObjectInfo{}, err - } - - objInfo, err := fs.putObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, ObjectOptions{ServerSideEncryption: dstOpts.ServerSideEncryption, UserDefined: srcInfo.UserDefined}) - if err != nil { - return oi, toObjectErr(err, dstBucket, dstObject) - } - - return objInfo, nil -} - -// GetObjectNInfo - returns object info and a reader for object -// content. -func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error) { - if opts.VersionID != "" && opts.VersionID != nullVersionID { - return nil, VersionNotFound{ - Bucket: bucket, - Object: object, - VersionID: opts.VersionID, - } - } - if err = checkGetObjArgs(ctx, bucket, object); err != nil { - return nil, err - } - - if _, err = fs.statBucketDir(ctx, bucket); err != nil { - return nil, toObjectErr(err, bucket) - } - - nsUnlocker := func() {} - - if lockType != noLock { - // Lock the object before reading. - lock := fs.NewNSLock(bucket, object) - switch lockType { - case writeLock: - lkctx, err := lock.GetLock(ctx, globalOperationTimeout) - if err != nil { - return nil, err - } - ctx = lkctx.Context() - nsUnlocker = func() { lock.Unlock(lkctx.Cancel) } - case readLock: - lkctx, err := lock.GetRLock(ctx, globalOperationTimeout) - if err != nil { - return nil, err - } - ctx = lkctx.Context() - nsUnlocker = func() { lock.RUnlock(lkctx.Cancel) } - } - } - - // Otherwise we get the object info - var objInfo ObjectInfo - if objInfo, err = fs.getObjectInfo(ctx, bucket, object); err != nil { - nsUnlocker() - return nil, toObjectErr(err, bucket, object) - } - // For a directory, we need to return a reader that returns no bytes. - if HasSuffix(object, SlashSeparator) { - // The lock taken above is released when - // objReader.Close() is called by the caller. - return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts, nsUnlocker) - } - // Take a rwPool lock for NFS gateway type deployment - rwPoolUnlocker := func() {} - if bucket != minioMetaBucket && lockType != noLock { - fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile) - _, err = fs.rwPool.Open(fsMetaPath) - if err != nil && err != errFileNotFound { - logger.LogIf(ctx, err) - nsUnlocker() - return nil, toObjectErr(err, bucket, object) - } - // Need to clean up lock after getObject is - // completed. 
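- // (The unlocker below is handed to the GetObjectReader and runs
- // when the caller closes the returned reader.)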
- rwPoolUnlocker = func() { fs.rwPool.Close(fsMetaPath) }
- }
-
- objReaderFn, off, length, err := NewGetObjectReader(rs, objInfo, opts)
- if err != nil {
- rwPoolUnlocker()
- nsUnlocker()
- return nil, err
- }
-
- // Read the object; if it doesn't exist, return an S3-compatible error.
- fsObjPath := pathJoin(fs.fsPath, bucket, object)
- readCloser, size, err := fsOpenFile(ctx, fsObjPath, off)
- if err != nil {
- rwPoolUnlocker()
- nsUnlocker()
- return nil, toObjectErr(err, bucket, object)
- }
-
- closeFn := func() {
- readCloser.Close()
- }
- reader := io.LimitReader(readCloser, length)
-
- // Check if the requested range is valid.
- if off > size || off+length > size {
- err = InvalidRange{off, length, size}
- logger.LogIf(ctx, err, logger.Application)
- closeFn()
- rwPoolUnlocker()
- nsUnlocker()
- return nil, err
- }
-
- return objReaderFn(reader, h, closeFn, rwPoolUnlocker, nsUnlocker)
-}
-
-// Create a new fs.json file, if the existing one is corrupt. Should happen very rarely.
-func (fs *FSObjects) createFsJSON(object, fsMetaPath string) error {
- fsMeta := newFSMetaV1()
- fsMeta.Meta = map[string]string{
- "etag": GenETag(),
- "content-type": mimedb.TypeByExtension(path.Ext(object)),
- }
- wlk, werr := fs.rwPool.Create(fsMetaPath)
- if werr == nil {
- _, err := fsMeta.WriteTo(wlk)
- wlk.Close()
- return err
- }
- return werr
-}
-
-// Used to return default etag values when a pre-existing object's metadata is queried.
-func (fs *FSObjects) defaultFsJSON(object string) fsMetaV1 {
- fsMeta := newFSMetaV1()
- fsMeta.Meta = map[string]string{
- "etag": defaultEtag,
- "content-type": mimedb.TypeByExtension(path.Ext(object)),
- }
- return fsMeta
-}
-
-func (fs *FSObjects) getObjectInfoNoFSLock(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) {
- fsMeta := fsMetaV1{}
- if HasSuffix(object, SlashSeparator) {
- fi, err := fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object))
- if err != nil {
- return oi, err
- }
- return fsMeta.ToObjectInfo(bucket, object, fi), nil
- }
-
- if !globalCLIContext.StrictS3Compat {
- // Stat the file to get file size.
- fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
- if err != nil {
- return oi, err
- }
- return fsMeta.ToObjectInfo(bucket, object, fi), nil
- }
-
- fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
- // Read `fs.json` to perhaps contend with
- // parallel Put() operations.
-
- rc, _, err := fsOpenFile(ctx, fsMetaPath, 0)
- if err == nil {
- fsMetaBuf, rerr := io.ReadAll(rc)
- rc.Close()
- if rerr == nil {
- json := jsoniter.ConfigCompatibleWithStandardLibrary
- if rerr = json.Unmarshal(fsMetaBuf, &fsMeta); rerr != nil {
- // On any error reading fsMeta, fall back to the default ETag and proceed.
- fsMeta = fs.defaultFsJSON(object)
- }
- } else {
- // On any error reading fsMeta, fall back to the default ETag and proceed.
- fsMeta = fs.defaultFsJSON(object)
- }
- }
-
- // Return a default etag and content-type based on the object's extension.
- if err == errFileNotFound {
- fsMeta = fs.defaultFsJSON(object)
- }
-
- // Ignore a missing `fs.json`; this is expected for pre-existing data.
- if err != nil && err != errFileNotFound {
- logger.LogIf(ctx, err)
- return oi, err
- }
-
- // Stat the file to get file size.
- fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
- if err != nil {
- return oi, err
- }
-
- return fsMeta.ToObjectInfo(bucket, object, fi), nil
-}
-
-// getObjectInfo - wrapper for reading object metadata and constructing ObjectInfo.
-func (fs *FSObjects) getObjectInfo(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) { - if strings.HasSuffix(object, SlashSeparator) && !fs.isObjectDir(bucket, object) { - return oi, errFileNotFound - } - - fsMeta := fsMetaV1{} - if HasSuffix(object, SlashSeparator) { - fi, err := fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object)) - if err != nil { - return oi, err - } - return fsMeta.ToObjectInfo(bucket, object, fi), nil - } - - fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile) - // Read `fs.json` to perhaps contend with - // parallel Put() operations. - - rlk, err := fs.rwPool.Open(fsMetaPath) - if err == nil { - // Read from fs metadata only if it exists. - _, rerr := fsMeta.ReadFrom(ctx, rlk.LockedFile) - fs.rwPool.Close(fsMetaPath) - if rerr != nil { - // For any error to read fsMeta, set default ETag and proceed. - fsMeta = fs.defaultFsJSON(object) - } - } - - // Return a default etag and content-type based on the object's extension. - if err == errFileNotFound { - fsMeta = fs.defaultFsJSON(object) - } - - // Ignore if `fs.json` is not available, this is true for pre-existing data. - if err != nil && err != errFileNotFound { - logger.LogIf(ctx, err) - return oi, err - } - - // Stat the file to get file size. - fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object)) - if err != nil { - return oi, err - } - - return fsMeta.ToObjectInfo(bucket, object, fi), nil -} - -// getObjectInfoWithLock - reads object metadata and replies back ObjectInfo. -func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object string) (oi ObjectInfo, err error) { - // Lock the object before reading. - lk := fs.NewNSLock(bucket, object) - lkctx, err := lk.GetRLock(ctx, globalOperationTimeout) - if err != nil { - return oi, err - } - ctx = lkctx.Context() - defer lk.RUnlock(lkctx.Cancel) - - if err := checkGetObjArgs(ctx, bucket, object); err != nil { - return oi, err - } - - if _, err := fs.statBucketDir(ctx, bucket); err != nil { - return oi, err - } - - if strings.HasSuffix(object, SlashSeparator) && !fs.isObjectDir(bucket, object) { - return oi, errFileNotFound - } - - return fs.getObjectInfo(ctx, bucket, object) -} - -// GetObjectInfo - reads object metadata and replies back ObjectInfo. -func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) { - if opts.VersionID != "" && opts.VersionID != nullVersionID { - return oi, VersionNotFound{ - Bucket: bucket, - Object: object, - VersionID: opts.VersionID, - } - } - - oi, err := fs.getObjectInfoWithLock(ctx, bucket, object) - if err == errCorruptedFormat || err == io.EOF { - lk := fs.NewNSLock(bucket, object) - lkctx, err := lk.GetLock(ctx, globalOperationTimeout) - if err != nil { - return oi, toObjectErr(err, bucket, object) - } - - fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile) - err = fs.createFsJSON(object, fsMetaPath) - lk.Unlock(lkctx.Cancel) - if err != nil { - return oi, toObjectErr(err, bucket, object) - } - - oi, err = fs.getObjectInfoWithLock(ctx, bucket, object) - return oi, toObjectErr(err, bucket, object) - } - return oi, toObjectErr(err, bucket, object) -} - -// PutObject - creates an object upon reading from the input stream -// until EOF, writes data directly to configured filesystem path. -// Additionally writes `fs.json` which carries the necessary metadata -// for future object operations. 
-func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, err error) {
- if opts.Versioned {
- return objInfo, NotImplemented{}
- }
-
- if err := checkPutObjectArgs(ctx, bucket, object, fs); err != nil {
- return ObjectInfo{}, err
- }
-
- defer NSUpdated(bucket, object)
-
- // Lock the object.
- lk := fs.NewNSLock(bucket, object)
- lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
- if err != nil {
- logger.LogIf(ctx, err)
- return objInfo, err
- }
- ctx = lkctx.Context()
- defer lk.Unlock(lkctx.Cancel)
-
- return fs.putObject(ctx, bucket, object, r, opts)
-}
-
-// putObject - core implementation backing PutObject.
-func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string, r *PutObjReader, opts ObjectOptions) (objInfo ObjectInfo, retErr error) {
- data := r.Reader
-
- // Clone the user-defined metadata so later mutations do not
- // affect the caller's map.
- meta := cloneMSS(opts.UserDefined)
- var err error
-
- // Validate that the bucket name is valid and the bucket exists.
- if _, err = fs.statBucketDir(ctx, bucket); err != nil {
- return ObjectInfo{}, toObjectErr(err, bucket)
- }
-
- fsMeta := newFSMetaV1()
- fsMeta.Meta = meta
-
- // Special case: a size of '0' with an object name ending in a
- // slash separator is treated as a valid directory creation and
- // returns success.
- if isObjectDir(object, data.Size()) {
- if err = mkdirAll(pathJoin(fs.fsPath, bucket, object), 0o777); err != nil {
- logger.LogIf(ctx, err)
- return ObjectInfo{}, toObjectErr(err, bucket, object)
- }
- var fi os.FileInfo
- if fi, err = fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object)); err != nil {
- return ObjectInfo{}, toObjectErr(err, bucket, object)
- }
- return fsMeta.ToObjectInfo(bucket, object, fi), nil
- }
-
- // Validate the input data size; it can never be less than -1
- // (-1 denotes an unknown stream size).
- if data.Size() < -1 {
- logger.LogIf(ctx, errInvalidArgument, logger.Application)
- return ObjectInfo{}, errInvalidArgument
- }
-
- var wlk *lock.LockedFile
- if bucket != minioMetaBucket {
- bucketMetaDir := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix)
- fsMetaPath := pathJoin(bucketMetaDir, bucket, object, fs.metaJSONFile)
- wlk, err = fs.rwPool.Write(fsMetaPath)
- var freshFile bool
- if err != nil {
- wlk, err = fs.rwPool.Create(fsMetaPath)
- if err != nil {
- logger.LogIf(ctx, err)
- return ObjectInfo{}, toObjectErr(err, bucket, object)
- }
- freshFile = true
- }
- // This close will allow for locks to be synchronized on `fs.json`.
- defer wlk.Close()
- defer func() {
- // Remove the meta file when PutObject encounters
- // any error and it is a fresh file.
- //
- // We should preserve the `fs.json` of any
- // existing object.
- if retErr != nil && freshFile {
- tmpDir := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID)
- fsRemoveMeta(ctx, bucketMetaDir, fsMetaPath, tmpDir)
- }
- }()
- }
-
- // The uploaded object is first written to a temporary location and
- // eventually renamed to the actual location. Writing to the temporary
- // location first makes cleanup easy if the server goes down.
- tempObj := mustGetUUID()
-
- fsTmpObjPath := pathJoin(fs.fsPath, minioMetaTmpBucket, fs.fsUUID, tempObj)
- bytesWritten, err := fsCreateFile(ctx, fsTmpObjPath, data, data.Size())
-
- // Delete the temporary object in the case of a
- // failure. If PutObject succeeds, then there would be
- // nothing to delete.
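- // (Once the rename into place succeeds, the temporary path no
- // longer exists and the deferred removal is a harmless no-op.)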
- defer fsRemoveFile(ctx, fsTmpObjPath)
-
- if err != nil {
- return ObjectInfo{}, toObjectErr(err, bucket, object)
- }
- fsMeta.Meta["etag"] = r.MD5CurrentHexString()
-
- // Return an IncompleteBody{} error when the reader supplies fewer
- // bytes than specified in the request header.
- if bytesWritten < data.Size() {
- return ObjectInfo{}, IncompleteBody{Bucket: bucket, Object: object}
- }
-
- // The entire object was written to the temp location; now it's safe to rename it to the actual location.
- fsNSObjPath := pathJoin(fs.fsPath, bucket, object)
- if err = fsRenameFile(ctx, fsTmpObjPath, fsNSObjPath); err != nil {
- return ObjectInfo{}, toObjectErr(err, bucket, object)
- }
-
- if bucket != minioMetaBucket {
- // Write FS metadata after a successful namespace operation.
- if _, err = fsMeta.WriteTo(wlk); err != nil {
- return ObjectInfo{}, toObjectErr(err, bucket, object)
- }
- }
-
- // Stat the file to fetch timestamp, size.
- fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object))
- if err != nil {
- return ObjectInfo{}, toObjectErr(err, bucket, object)
- }
-
- // Success.
- return fsMeta.ToObjectInfo(bucket, object, fi), nil
-}
-
-// DeleteObjects - deletes a batch of objects from a bucket. This operation
-// is destructive; no rollback is supported.
-func (fs *FSObjects) DeleteObjects(ctx context.Context, bucket string, objects []ObjectToDelete, opts ObjectOptions) ([]DeletedObject, []error) {
- errs := make([]error, len(objects))
- dobjects := make([]DeletedObject, len(objects))
- for idx, object := range objects {
- if object.VersionID != "" {
- errs[idx] = VersionNotFound{
- Bucket: bucket,
- Object: object.ObjectName,
- VersionID: object.VersionID,
- }
- continue
- }
- _, errs[idx] = fs.DeleteObject(ctx, bucket, object.ObjectName, opts)
- if errs[idx] == nil || isErrObjectNotFound(errs[idx]) {
- dobjects[idx] = DeletedObject{
- ObjectName: object.ObjectName,
- }
- errs[idx] = nil
- }
- }
- return dobjects, errs
-}
-
-// DeleteObject - deletes an object from a bucket. This operation is
-// destructive; no rollback is supported.
-func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error) {
- if opts.VersionID != "" && opts.VersionID != nullVersionID {
- return objInfo, VersionNotFound{
- Bucket: bucket,
- Object: object,
- VersionID: opts.VersionID,
- }
- }
-
- defer NSUpdated(bucket, object)
-
- // Acquire a write lock before deleting the object.
- lk := fs.NewNSLock(bucket, object)
- lkctx, err := lk.GetLock(ctx, globalOperationTimeout)
- if err != nil {
- return objInfo, err
- }
- ctx = lkctx.Context()
- defer lk.Unlock(lkctx.Cancel)
-
- if err = checkDelObjArgs(ctx, bucket, object); err != nil {
- return objInfo, err
- }
-
- if _, err = fs.statBucketDir(ctx, bucket); err != nil {
- return objInfo, toObjectErr(err, bucket)
- }
-
- var rwlk *lock.LockedFile
-
- minioMetaBucketDir := pathJoin(fs.fsPath, minioMetaBucket)
- fsMetaPath := pathJoin(minioMetaBucketDir, bucketMetaPrefix, bucket, object, fs.metaJSONFile)
- if bucket != minioMetaBucket {
- rwlk, err = fs.rwPool.Write(fsMetaPath)
- if err != nil && err != errFileNotFound {
- logger.LogIf(ctx, err)
- return objInfo, toObjectErr(err, bucket, object)
- }
- }
-
- // Delete the object.
- if err = fsDeleteFile(ctx, pathJoin(fs.fsPath, bucket), pathJoin(fs.fsPath, bucket, object)); err != nil {
- if rwlk != nil {
- rwlk.Close()
- }
- return objInfo, toObjectErr(err, bucket, object)
- }
-
- // Close fsMetaPath before the metadata file is deleted.
- if rwlk != nil {
- rwlk.Close()
- }
-
- if bucket != minioMetaBucket {
- // Delete the metadata object.
- err = fsDeleteFile(ctx, minioMetaBucketDir, fsMetaPath)
- if err != nil && err != errFileNotFound {
- return objInfo, toObjectErr(err, bucket, object)
- }
- }
- return ObjectInfo{Bucket: bucket, Name: object}, nil
-}
-
-func (fs *FSObjects) isLeafDir(bucket string, leafPath string) bool {
- return fs.isObjectDir(bucket, leafPath)
-}
-
-func (fs *FSObjects) isLeaf(bucket string, leafPath string) bool {
- return !strings.HasSuffix(leafPath, slashSeparator)
-}
-
-// listDirFactory returns a function "listDir" of type ListDirFunc.
-// isLeaf - is used by the listDir function to check if an entry
-// is a leaf or non-leaf entry.
-func (fs *FSObjects) listDirFactory() ListDirFunc {
- // listDir - lists all the entries at a given prefix and given entry in the prefix.
- listDir := func(bucket, prefixDir, prefixEntry string) (emptyDir bool, entries []string, delayIsLeaf bool) {
- var err error
- entries, err = readDir(pathJoin(fs.fsPath, bucket, prefixDir))
- if err != nil && err != errFileNotFound {
- logger.LogIf(GlobalContext, err)
- return false, nil, false
- }
- if len(entries) == 0 {
- return true, nil, false
- }
- entries, delayIsLeaf = filterListEntries(bucket, prefixDir, entries, prefixEntry, fs.isLeaf)
- return false, entries, delayIsLeaf
- }
-
- // Return list factory instance.
- return listDir
-}
-
-// isObjectDir returns true if the specified bucket & prefix exist
-// and the prefix represents an empty directory. An S3 empty directory
-// is also an empty directory in the FS backend.
-func (fs *FSObjects) isObjectDir(bucket, prefix string) bool {
- entries, err := readDirN(pathJoin(fs.fsPath, bucket, prefix), 1)
- if err != nil {
- return false
- }
- return len(entries) == 0
-}
-
-// ListObjectVersions not implemented for FS mode.
-func (fs *FSObjects) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (loi ListObjectVersionsInfo, e error) {
- return loi, NotImplemented{}
-}
-
-// ListObjects - lists all objects at a prefix, up to maxKeys, optionally delimited by '/'. Maintains the list pool
-// state for future re-entrant list requests.
-func (fs *FSObjects) ListObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, err error) {
- // listObjects may in rare cases not be able to find any valid results.
- // Therefore, it cannot set a NextMarker.
- // In that case we retry the operation, but we add a
- // max limit, so we never end up in an infinite loop.
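- // (The bound is the tries counter initialized just below.)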
- tries := 50 - for { - loi, err = listObjects(ctx, fs, bucket, prefix, marker, delimiter, maxKeys, fs.listPool, - fs.listDirFactory(), fs.isLeaf, fs.isLeafDir, fs.getObjectInfoNoFSLock, fs.getObjectInfoNoFSLock) - if err != nil { - return loi, err - } - if !loi.IsTruncated || loi.NextMarker != "" || tries == 0 { - return loi, nil - } - tries-- - } -} - -// GetObjectTags - get object tags from an existing object -func (fs *FSObjects) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) { - if opts.VersionID != "" && opts.VersionID != nullVersionID { - return nil, VersionNotFound{ - Bucket: bucket, - Object: object, - VersionID: opts.VersionID, - } - } - oi, err := fs.GetObjectInfo(ctx, bucket, object, ObjectOptions{}) - if err != nil { - return nil, err - } - - return tags.ParseObjectTags(oi.UserTags) -} - -// PutObjectTags - replace or add tags to an existing object -func (fs *FSObjects) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) { - if opts.VersionID != "" && opts.VersionID != nullVersionID { - return ObjectInfo{}, VersionNotFound{ - Bucket: bucket, - Object: object, - VersionID: opts.VersionID, - } - } - - fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fs.metaJSONFile) - fsMeta := fsMetaV1{} - wlk, err := fs.rwPool.Write(fsMetaPath) - if err != nil { - wlk, err = fs.rwPool.Create(fsMetaPath) - if err != nil { - logger.LogIf(ctx, err) - return ObjectInfo{}, toObjectErr(err, bucket, object) - } - } - // This close will allow for locks to be synchronized on `fs.json`. - defer wlk.Close() - - // Read objects' metadata in `fs.json`. - if _, err = fsMeta.ReadFrom(ctx, wlk); err != nil { - // For any error to read fsMeta, set default ETag and proceed. - fsMeta = fs.defaultFsJSON(object) - } - - // clean fsMeta.Meta of tag key, before updating the new tags - delete(fsMeta.Meta, xhttp.AmzObjectTagging) - - // Do not update for empty tags - if tags != "" { - fsMeta.Meta[xhttp.AmzObjectTagging] = tags - } - - if _, err = fsMeta.WriteTo(wlk); err != nil { - return ObjectInfo{}, toObjectErr(err, bucket, object) - } - - // Stat the file to get file size. - fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object)) - if err != nil { - return ObjectInfo{}, err - } - - return fsMeta.ToObjectInfo(bucket, object, fi), nil -} - -// DeleteObjectTags - delete object tags from an existing object -func (fs *FSObjects) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) { - return fs.PutObjectTags(ctx, bucket, object, "", opts) -} - -// HealFormat - no-op for fs, Valid only for Erasure. -func (fs *FSObjects) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) { - return madmin.HealResultItem{}, NotImplemented{} -} - -// HealObject - no-op for fs. Valid only for Erasure. -func (fs *FSObjects) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) ( - res madmin.HealResultItem, err error, -) { - return res, NotImplemented{} -} - -// HealBucket - no-op for fs, Valid only for Erasure. 
-func (fs *FSObjects) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem,
- error,
-) {
- return madmin.HealResultItem{}, NotImplemented{}
-}
-
-// Walk a bucket, and optionally a prefix, recursively, until all content
-// has been returned on the objectInfo channel. It is the caller's
-// responsibility to allocate a receive channel for ObjectInfo; on any
-// unhandled error the walker returns that error. If context.Done() is
-// received, Walk() stops the walker.
-func (fs *FSObjects) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts ObjectOptions) error {
- return fsWalk(ctx, fs, bucket, prefix, fs.listDirFactory(), fs.isLeaf, fs.isLeafDir, results, fs.getObjectInfoNoFSLock, fs.getObjectInfoNoFSLock)
-}
-
-// HealObjects - no-op for fs. Valid only for Erasure.
-func (fs *FSObjects) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) (e error) {
- logger.LogIf(ctx, NotImplemented{})
- return NotImplemented{}
-}
-
-// GetMetrics - no-op for fs.
-func (fs *FSObjects) GetMetrics(ctx context.Context) (*BackendMetrics, error) {
- logger.LogIf(ctx, NotImplemented{})
- return &BackendMetrics{}, NotImplemented{}
-}
-
-// ListObjectsV2 lists all blobs in a bucket filtered by prefix.
-func (fs *FSObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
- marker := continuationToken
- if marker == "" {
- marker = startAfter
- }
-
- loi, err := fs.ListObjects(ctx, bucket, prefix, marker, delimiter, maxKeys)
- if err != nil {
- return result, err
- }
-
- listObjectsV2Info := ListObjectsV2Info{
- IsTruncated: loi.IsTruncated,
- ContinuationToken: continuationToken,
- NextContinuationToken: loi.NextMarker,
- Objects: loi.Objects,
- Prefixes: loi.Prefixes,
- }
- return listObjectsV2Info, err
-}
-
-// IsNotificationSupported returns whether bucket notification is applicable for this layer.
-func (fs *FSObjects) IsNotificationSupported() bool {
- return true
-}
-
-// IsListenSupported returns whether listen bucket notification is applicable for this layer.
-func (fs *FSObjects) IsListenSupported() bool {
- return true
-}
-
-// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
-func (fs *FSObjects) IsEncryptionSupported() bool {
- return true
-}
-
-// IsCompressionSupported returns whether compression is applicable for this layer.
-func (fs *FSObjects) IsCompressionSupported() bool {
- return true
-}
-
-// IsTaggingSupported returns true; object tagging is supported in the fs object layer.
-func (fs *FSObjects) IsTaggingSupported() bool {
- return true
-}
-
-// Health returns the health of the object layer.
-func (fs *FSObjects) Health(ctx context.Context, opts HealthOptions) HealthResult {
- if _, err := os.Stat(fs.fsPath); err != nil {
- return HealthResult{}
- }
- return HealthResult{
- Healthy: newObjectLayerFn() != nil,
- }
-}
-
-// ReadHealth returns "read" health of the object layer.
-func (fs *FSObjects) ReadHealth(ctx context.Context) bool {
- _, err := os.Stat(fs.fsPath)
- return err == nil
-}
-
-// TransitionObject - transition object content to target tier.
-func (fs *FSObjects) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
- return NotImplemented{}
-}
-
-// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
-func (fs *FSObjects) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
- return NotImplemented{}
-}
diff --git a/cmd/fs-v1_test.go b/cmd/fs-v1_test.go
deleted file mode 100644
index 3420cfaea..000000000
--- a/cmd/fs-v1_test.go
+++ /dev/null
@@ -1,324 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
- "bytes"
- "context"
- "os"
- "path/filepath"
- "testing"
-
- "github.com/minio/madmin-go"
-)
-
-// TestNewFS - tests initialization of all input disks
-// and constructs a valid `FS` object layer.
-func TestNewFS(t *testing.T) {
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
- // Do not attempt to create this path; the test validates
- // that NewFSObjectLayer initializes non-existing paths
- // and successfully returns an initialized object layer.
- disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- defer os.RemoveAll(disk)
-
- _, err := NewFSObjectLayer(ctx, "")
- if err != errInvalidArgument {
- t.Errorf("Expecting error invalid argument, got %s", err)
- }
- _, err = NewFSObjectLayer(ctx, disk)
- if err != nil {
- errMsg := "Unable to recognize backend format, Drive is not in FS format."
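- // NewFSObjectLayer on a fresh path is expected to fail with the
- // fresh-disk error, not the unrecognized-format message; only the
- // latter is treated as a test failure below.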
- if err.Error() == errMsg {
- t.Errorf("Expecting %s, got %s", errMsg, err)
- }
- }
-}
-
-// TestFSShutdown - initializes a new FS object layer, then calls
-// Shutdown to check the returned results.
-func TestFSShutdown(t *testing.T) {
- t.Skip()
-
- bucketName := "testbucket"
- objectName := "object"
- // Create and return an FSObjects layer along with its disk path.
- prepareTest := func() (*FSObjects, string) {
- disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- obj := initFSObjects(disk, t)
- fs := obj.(*FSObjects)
-
- objectContent := "12345"
- obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{})
- obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte(objectContent)), int64(len(objectContent)), "", ""), ObjectOptions{})
- return fs, disk
- }
-
- // Test Shutdown with regular conditions
- fs, disk := prepareTest()
- if err := fs.Shutdown(GlobalContext); err != nil {
- t.Fatal("Cannot shutdown the FS object: ", err)
- }
- os.RemoveAll(disk)
-
- // Test Shutdown with faulty disk
- fs, disk = prepareTest()
- fs.DeleteObject(GlobalContext, bucketName, objectName, ObjectOptions{})
- os.RemoveAll(disk)
- if err := fs.Shutdown(GlobalContext); err != nil {
- t.Fatal("Got unexpected fs shutdown error: ", err)
- }
-}
-
-// TestFSGetBucketInfo - test GetBucketInfo with healthy and faulty disks
-func TestFSGetBucketInfo(t *testing.T) {
- t.Skip()
-
- // Prepare for testing
- disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- defer os.RemoveAll(disk)
-
- obj := initFSObjects(disk, t)
- fs := obj.(*FSObjects)
- bucketName := "bucket"
-
- err := obj.MakeBucketWithLocation(GlobalContext, "a", MakeBucketOptions{})
- if !isSameType(err, BucketNameInvalid{}) {
- t.Fatal("BucketNameInvalid error not returned")
- }
-
- err = obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{})
- if err != nil {
- t.Fatal(err)
- }
-
- // Test with valid parameters
- info, err := fs.GetBucketInfo(GlobalContext, bucketName, BucketOptions{})
- if err != nil {
- t.Fatal(err)
- }
- if info.Name != bucketName {
- t.Fatalf("wrong bucket name, expected: %s, found: %s", bucketName, info.Name)
- }
-
- // Test with non-existent bucket
- _, err = fs.GetBucketInfo(GlobalContext, "a", BucketOptions{})
- if !isSameType(err, BucketNotFound{}) {
- t.Fatal("BucketNotFound error not returned")
- }
-
- // Remove the disk; bucket lookups should now fail with BucketNotFound.
- os.RemoveAll(disk)
-
- if _, err = fs.GetBucketInfo(GlobalContext, bucketName, BucketOptions{}); err != nil {
- if !isSameType(err, BucketNotFound{}) {
- t.Fatal("BucketNotFound error not returned")
- }
- }
-}
-
-func TestFSPutObject(t *testing.T) {
- // Prepare for tests
- disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- defer os.RemoveAll(disk)
-
- obj := initFSObjects(disk, t)
- bucketName := "bucket"
- objectName := "1/2/3/4/object"
-
- if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil {
- t.Fatal(err)
- }
-
- // With a regular object.
- _, err := obj.PutObject(GlobalContext, bucketName+"non-existent", objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
- if err == nil {
- t.Fatal("PutObject should fail here, the bucket doesn't exist")
- }
- if _, ok := err.(BucketNotFound); !ok {
- t.Fatalf("Expected error type BucketNotFound, got %#v", err)
- }
-
- // With a directory object.
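- // (A zero-size body with a trailing slash exercises the
- // directory-object special case in putObject.)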
- _, err = obj.PutObject(GlobalContext, bucketName+"non-existent", objectName+SlashSeparator, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), ObjectOptions{})
- if err == nil {
- t.Fatal("PutObject should fail here, the bucket doesn't exist")
- }
- if _, ok := err.(BucketNotFound); !ok {
- t.Fatalf("Expected error type BucketNotFound, got %#v", err)
- }
-
- _, err = obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
- if err != nil {
- t.Fatal(err)
- }
-}
-
-// TestFSDeleteObject - test fs.DeleteObject() with healthy and corrupted disks
-func TestFSDeleteObject(t *testing.T) {
- t.Skip()
- // Prepare for tests
- disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- defer os.RemoveAll(disk)
-
- obj := initFSObjects(disk, t)
- fs := obj.(*FSObjects)
- bucketName := "bucket"
- objectName := "object"
-
- obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{})
- obj.PutObject(GlobalContext, bucketName, objectName, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), int64(len("abcd")), "", ""), ObjectOptions{})
-
- // Test with an invalid bucket name
- if _, err := fs.DeleteObject(GlobalContext, "fo", objectName, ObjectOptions{}); !isSameType(err, BucketNameInvalid{}) {
- t.Fatal("Unexpected error: ", err)
- }
- // Test with a bucket that does not exist
- if _, err := fs.DeleteObject(GlobalContext, "foobucket", "fooobject", ObjectOptions{}); !isSameType(err, BucketNotFound{}) {
- t.Fatal("Unexpected error: ", err)
- }
- // Test with an invalid object name
- if _, err := fs.DeleteObject(GlobalContext, bucketName, "\\", ObjectOptions{}); !(isSameType(err, ObjectNotFound{}) || isSameType(err, ObjectNameInvalid{})) {
- t.Fatal("Unexpected error: ", err)
- }
- // Test with an object that does not exist.
- if _, err := fs.DeleteObject(GlobalContext, bucketName, "foooobject", ObjectOptions{}); !isSameType(err, ObjectNotFound{}) {
- t.Fatal("Unexpected error: ", err)
- }
- // Test with a valid condition
- if _, err := fs.DeleteObject(GlobalContext, bucketName, objectName, ObjectOptions{}); err != nil {
- t.Fatal("Unexpected error: ", err)
- }
-
- // After the disk is removed, DeleteObject should report the bucket as missing.
- os.RemoveAll(disk)
- if _, err := fs.DeleteObject(GlobalContext, bucketName, objectName, ObjectOptions{}); err != nil {
- if !isSameType(err, BucketNotFound{}) {
- t.Fatal("Unexpected error: ", err)
- }
- }
-}
-
-// TestFSDeleteBucket - tests for fs DeleteBucket
-func TestFSDeleteBucket(t *testing.T) {
- t.Skip()
- // Prepare for testing
- disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- defer os.RemoveAll(disk)
-
- obj := initFSObjects(disk, t)
- fs := obj.(*FSObjects)
- bucketName := "bucket"
-
- err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{})
- if err != nil {
- t.Fatal("Unexpected error: ", err)
- }
-
- // Test with an invalid bucket name
- if err = fs.DeleteBucket(GlobalContext, "fo", DeleteBucketOptions{}); !isSameType(err, BucketNotFound{}) {
- t.Fatal("Unexpected error: ", err)
- }
-
- // Test with a nonexistent bucket
- if err = fs.DeleteBucket(GlobalContext, "foobucket", DeleteBucketOptions{}); !isSameType(err, BucketNotFound{}) {
- t.Fatal("Unexpected error: ", err)
- }
- // Test with a valid case
- if err = fs.DeleteBucket(GlobalContext, bucketName, DeleteBucketOptions{}); err != nil {
- t.Fatal("Unexpected error: ", err)
- }
-
- obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{})
-
- // After the disk is removed, DeleteBucket should report BucketNotFound.
- os.RemoveAll(disk)
- if err = fs.DeleteBucket(GlobalContext, bucketName, DeleteBucketOptions{}); err != nil {
- if !isSameType(err, BucketNotFound{}) {
- t.Fatal("Unexpected error: ", err)
- }
- }
-}
-
-// TestFSListBuckets - tests for fs ListBuckets
-func TestFSListBuckets(t *testing.T) {
- t.Skip()
- // Prepare for tests
- disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- defer os.RemoveAll(disk)
-
- obj := initFSObjects(disk, t)
- fs := obj.(*FSObjects)
-
- bucketName := "bucket"
- if err := obj.MakeBucketWithLocation(GlobalContext, bucketName, MakeBucketOptions{}); err != nil {
- t.Fatal("Unexpected error: ", err)
- }
-
- // Create a bucket with an invalid name
- if err := os.MkdirAll(pathJoin(fs.fsPath, "vo^"), 0o777); err != nil {
- t.Fatal("Unexpected error: ", err)
- }
- f, err := os.Create(pathJoin(fs.fsPath, "test"))
- if err != nil {
- t.Fatal("Unexpected error: ", err)
- }
- f.Close()
-
- // Test that the bucket listing has only one entry.
- buckets, err := fs.ListBuckets(GlobalContext, BucketOptions{})
- if err != nil {
- t.Fatal("Unexpected error: ", err)
- }
- if len(buckets) != 1 {
- t.Fatal("ListBuckets not working properly", buckets)
- }
-
- // Test ListBuckets with disk not found.
- os.RemoveAll(disk)
- if _, err := fs.ListBuckets(GlobalContext, BucketOptions{}); err != nil {
- if err != errDiskNotFound {
- t.Fatal("Unexpected error: ", err)
- }
- }
-}
-
-// TestFSHealObject - tests for fs HealObject
-func TestFSHealObject(t *testing.T) {
- disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- defer os.RemoveAll(disk)
-
- obj := initFSObjects(disk, t)
- _, err := obj.HealObject(GlobalContext, "bucket", "object", "", madmin.HealOpts{})
- if err == nil || !isSameType(err, NotImplemented{}) {
- t.Fatalf("HealObject should return a NotImplemented error")
- }
-}
-
-// TestFSHealObjects - tests for fs HealObjects to return not implemented.
-func TestFSHealObjects(t *testing.T) {
- disk := filepath.Join(globalTestTmpDir, "minio-"+nextSuffix())
- defer os.RemoveAll(disk)
-
- obj := initFSObjects(disk, t)
- err := obj.HealObjects(GlobalContext, "bucket", "prefix", madmin.HealOpts{}, nil)
- if err == nil || !isSameType(err, NotImplemented{}) {
- t.Fatalf("HealObjects should return a NotImplemented error")
- }
-}
diff --git a/cmd/gateway-common.go b/cmd/gateway-common.go
deleted file mode 100644
index b61b6de08..000000000
--- a/cmd/gateway-common.go
+++ /dev/null
@@ -1,426 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
- "context"
- "net"
- "net/http"
- "strings"
- "time"
-
- "github.com/minio/minio/internal/config"
- "github.com/minio/minio/internal/hash"
- xhttp "github.com/minio/minio/internal/http"
- "github.com/minio/minio/internal/logger"
- "github.com/minio/pkg/env"
- xnet "github.com/minio/pkg/net"
-
- minio "github.com/minio/minio-go/v7"
-)
-
-var (
- // CanonicalizeETag provides canonicalizeETag function alias.
- CanonicalizeETag = canonicalizeETag
-
- // MustGetUUID provides mustGetUUID function alias.
- MustGetUUID = mustGetUUID
-
- // CleanMetadataKeys provides cleanMetadataKeys function alias.
- CleanMetadataKeys = cleanMetadataKeys
-
- // PathJoin provides pathJoin function alias.
- PathJoin = pathJoin
-
- // ListObjects provides listObjects function alias.
- ListObjects = listObjects
-
- // FilterListEntries provides filterListEntries function alias.
- FilterListEntries = filterListEntries
-
- // IsStringEqual provides isStringEqual function alias.
- IsStringEqual = isStringEqual -) - -// FromMinioClientMetadata converts minio metadata to map[string]string -func FromMinioClientMetadata(metadata map[string][]string) map[string]string { - mm := make(map[string]string, len(metadata)) - for k, v := range metadata { - mm[http.CanonicalHeaderKey(k)] = v[0] - } - return mm -} - -// FromMinioClientObjectPart converts minio ObjectPart to PartInfo -func FromMinioClientObjectPart(op minio.ObjectPart) PartInfo { - return PartInfo{ - Size: op.Size, - ETag: canonicalizeETag(op.ETag), - LastModified: op.LastModified, - PartNumber: op.PartNumber, - } -} - -// FromMinioClientListPartsInfo converts minio ListObjectPartsResult to ListPartsInfo -func FromMinioClientListPartsInfo(lopr minio.ListObjectPartsResult) ListPartsInfo { - // Convert minio ObjectPart to PartInfo - fromMinioClientObjectParts := func(parts []minio.ObjectPart) []PartInfo { - toParts := make([]PartInfo, len(parts)) - for i, part := range parts { - toParts[i] = FromMinioClientObjectPart(part) - } - return toParts - } - - return ListPartsInfo{ - UploadID: lopr.UploadID, - Bucket: lopr.Bucket, - Object: lopr.Key, - StorageClass: "", - PartNumberMarker: lopr.PartNumberMarker, - NextPartNumberMarker: lopr.NextPartNumberMarker, - MaxParts: lopr.MaxParts, - IsTruncated: lopr.IsTruncated, - Parts: fromMinioClientObjectParts(lopr.ObjectParts), - } -} - -// FromMinioClientListMultipartsInfo converts minio ListMultipartUploadsResult to ListMultipartsInfo -func FromMinioClientListMultipartsInfo(lmur minio.ListMultipartUploadsResult) ListMultipartsInfo { - uploads := make([]MultipartInfo, len(lmur.Uploads)) - - for i, um := range lmur.Uploads { - uploads[i] = MultipartInfo{ - Object: um.Key, - UploadID: um.UploadID, - Initiated: um.Initiated, - } - } - - commonPrefixes := make([]string, len(lmur.CommonPrefixes)) - for i, cp := range lmur.CommonPrefixes { - commonPrefixes[i] = cp.Prefix - } - - return ListMultipartsInfo{ - KeyMarker: lmur.KeyMarker, - UploadIDMarker: lmur.UploadIDMarker, - NextKeyMarker: lmur.NextKeyMarker, - NextUploadIDMarker: lmur.NextUploadIDMarker, - MaxUploads: int(lmur.MaxUploads), - IsTruncated: lmur.IsTruncated, - Uploads: uploads, - Prefix: lmur.Prefix, - Delimiter: lmur.Delimiter, - CommonPrefixes: commonPrefixes, - EncodingType: lmur.EncodingType, - } -} - -// FromMinioClientObjectInfo converts minio ObjectInfo to gateway ObjectInfo -func FromMinioClientObjectInfo(bucket string, oi minio.ObjectInfo) ObjectInfo { - userDefined := FromMinioClientMetadata(oi.Metadata) - userDefined[xhttp.ContentType] = oi.ContentType - - return ObjectInfo{ - Bucket: bucket, - Name: oi.Key, - ModTime: oi.LastModified, - Size: oi.Size, - ETag: canonicalizeETag(oi.ETag), - UserDefined: userDefined, - ContentType: oi.ContentType, - ContentEncoding: oi.Metadata.Get(xhttp.ContentEncoding), - StorageClass: oi.StorageClass, - Expires: oi.Expires, - } -} - -// FromMinioClientListBucketV2Result converts minio ListBucketResult to ListObjectsInfo -func FromMinioClientListBucketV2Result(bucket string, result minio.ListBucketV2Result) ListObjectsV2Info { - objects := make([]ObjectInfo, len(result.Contents)) - - for i, oi := range result.Contents { - objects[i] = FromMinioClientObjectInfo(bucket, oi) - } - - prefixes := make([]string, len(result.CommonPrefixes)) - for i, p := range result.CommonPrefixes { - prefixes[i] = p.Prefix - } - - return ListObjectsV2Info{ - IsTruncated: result.IsTruncated, - Prefixes: prefixes, - Objects: objects, - - ContinuationToken: result.ContinuationToken, - 
NextContinuationToken: result.NextContinuationToken,
- }
-}
-
-// FromMinioClientListBucketResult converts minio ListBucketResult to ListObjectsInfo
-func FromMinioClientListBucketResult(bucket string, result minio.ListBucketResult) ListObjectsInfo {
- objects := make([]ObjectInfo, len(result.Contents))
-
- for i, oi := range result.Contents {
- objects[i] = FromMinioClientObjectInfo(bucket, oi)
- }
-
- prefixes := make([]string, len(result.CommonPrefixes))
- for i, p := range result.CommonPrefixes {
- prefixes[i] = p.Prefix
- }
-
- return ListObjectsInfo{
- IsTruncated: result.IsTruncated,
- NextMarker: result.NextMarker,
- Prefixes: prefixes,
- Objects: objects,
- }
-}
-
-// FromMinioClientListBucketResultToV2Info converts minio ListBucketResult to ListObjectsV2Info
-func FromMinioClientListBucketResultToV2Info(bucket string, result minio.ListBucketResult) ListObjectsV2Info {
- objects := make([]ObjectInfo, len(result.Contents))
-
- for i, oi := range result.Contents {
- objects[i] = FromMinioClientObjectInfo(bucket, oi)
- }
-
- prefixes := make([]string, len(result.CommonPrefixes))
- for i, p := range result.CommonPrefixes {
- prefixes[i] = p.Prefix
- }
-
- return ListObjectsV2Info{
- IsTruncated: result.IsTruncated,
- Prefixes: prefixes,
- Objects: objects,
- ContinuationToken: result.Marker,
- NextContinuationToken: result.NextMarker,
- }
-}
-
-// ToMinioClientObjectInfoMetadata converts metadata to map[string][]string
-func ToMinioClientObjectInfoMetadata(metadata map[string]string) map[string][]string {
- mm := make(map[string][]string, len(metadata))
- for k, v := range metadata {
- mm[http.CanonicalHeaderKey(k)] = []string{v}
- }
- return mm
-}
-
-// ToMinioClientMetadata converts metadata to map[string]string
-func ToMinioClientMetadata(metadata map[string]string) map[string]string {
- mm := make(map[string]string, len(metadata))
- for k, v := range metadata {
- mm[http.CanonicalHeaderKey(k)] = v
- }
- return mm
-}
-
-// ToMinioClientCompletePart converts CompletePart to minio CompletePart
-func ToMinioClientCompletePart(part CompletePart) minio.CompletePart {
- return minio.CompletePart{
- ETag: part.ETag,
- PartNumber: part.PartNumber,
- }
-}
-
-// ToMinioClientCompleteParts converts []CompletePart to minio []CompletePart
-func ToMinioClientCompleteParts(parts []CompletePart) []minio.CompletePart {
- mparts := make([]minio.CompletePart, len(parts))
- for i, part := range parts {
- mparts[i] = ToMinioClientCompletePart(part)
- }
- return mparts
-}
-
-// IsBackendOnline - verifies that the backend is reachable
-// by attempting a TCP connection to the host; returns 'true'
-// if the backend is reachable.
-func IsBackendOnline(ctx context.Context, host string) bool {
- var d net.Dialer
-
- ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
- defer cancel()
-
- conn, err := d.DialContext(ctx, "tcp", host)
- if err != nil {
- return false
- }
-
- conn.Close()
- return true
-}
-
-// ErrorRespToObjectError converts MinIO errors to minio object layer errors.
-func ErrorRespToObjectError(err error, params ...string) error {
- if err == nil {
- return nil
- }
-
- bucket := ""
- object := ""
- if len(params) >= 1 {
- bucket = params[0]
- }
- if len(params) == 2 {
- object = params[1]
- }
-
- if xnet.IsNetworkOrHostDown(err, false) {
- return BackendDown{Err: err.Error()}
- }
-
- minioErr, ok := err.(minio.ErrorResponse)
- if !ok {
- // We don't interpret non-MinIO errors; MinIO errors carry a
- // StatusCode that helps convert them to object errors.
-
-// ErrorRespToObjectError converts MinIO errors to minio object layer errors.
-func ErrorRespToObjectError(err error, params ...string) error {
-	if err == nil {
-		return nil
-	}
-
-	bucket := ""
-	object := ""
-	if len(params) >= 1 {
-		bucket = params[0]
-	}
-	if len(params) == 2 {
-		object = params[1]
-	}
-
-	if xnet.IsNetworkOrHostDown(err, false) {
-		return BackendDown{Err: err.Error()}
-	}
-
-	minioErr, ok := err.(minio.ErrorResponse)
-	if !ok {
-		// We don't interpret non-MinIO errors, as MinIO errors carry a
-		// StatusCode that helps convert them to object errors.
-		return err
-	}
-
-	switch minioErr.Code {
-	case "PreconditionFailed":
-		err = PreConditionFailed{}
-	case "InvalidRange":
-		err = InvalidRange{}
-	case "BucketAlreadyOwnedByYou":
-		err = BucketAlreadyOwnedByYou{}
-	case "BucketNotEmpty":
-		err = BucketNotEmpty{}
-	case "NoSuchBucketPolicy":
-		err = BucketPolicyNotFound{}
-	case "NoSuchLifecycleConfiguration":
-		err = BucketLifecycleNotFound{}
-	case "InvalidBucketName":
-		err = BucketNameInvalid{Bucket: bucket}
-	case "InvalidPart":
-		err = InvalidPart{}
-	case "NoSuchBucket":
-		err = BucketNotFound{Bucket: bucket}
-	case "NoSuchKey":
-		if object != "" {
-			err = ObjectNotFound{Bucket: bucket, Object: object}
-		} else {
-			err = BucketNotFound{Bucket: bucket}
-		}
-	case "XMinioInvalidObjectName":
-		err = ObjectNameInvalid{}
-	case "AccessDenied":
-		err = PrefixAccessDenied{
-			Bucket: bucket,
-			Object: object,
-		}
-	case "XAmzContentSHA256Mismatch":
-		err = hash.SHA256Mismatch{}
-	case "NoSuchUpload":
-		err = InvalidUploadID{}
-	case "EntityTooSmall":
-		err = PartTooSmall{}
-	}
-
-	switch minioErr.StatusCode {
-	case http.StatusMethodNotAllowed:
-		err = toObjectErr(errMethodNotAllowed, bucket, object)
-	case http.StatusBadGateway:
-		return BackendDown{Err: err.Error()}
-	}
-	return err
-}
-
-// ComputeCompleteMultipartMD5 calculates MD5 ETag for complete multipart responses
-func ComputeCompleteMultipartMD5(parts []CompletePart) string {
-	return getCompleteMultipartMD5(parts)
-}
-
-// parse gateway sse env variable
-func parseGatewaySSE(s string) (gatewaySSE, error) {
-	l := strings.Split(s, ";")
-	var gwSlice gatewaySSE
-	for _, val := range l {
-		v := strings.ToUpper(val)
-		switch v {
-		case "":
-			continue
-		case gatewaySSES3:
-			fallthrough
-		case gatewaySSEC:
-			gwSlice = append(gwSlice, v)
-			continue
-		default:
-			return nil, config.ErrInvalidGWSSEValue(nil).Msg("gateway SSE cannot be (%s) ", v)
-		}
-	}
-	return gwSlice, nil
-}
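parseGatewaySSE splits on ';', upper-cases each token, and accepts only "S3" and "C". A self-contained sketch with the same accept/reject behavior (parseSSE is a hypothetical stand-in, without MinIO's config error type):

package main

import (
	"fmt"
	"strings"
)

// parseSSE mirrors the parseGatewaySSE logic above.
func parseSSE(s string) ([]string, error) {
	var out []string
	for _, val := range strings.Split(s, ";") {
		switch v := strings.ToUpper(val); v {
		case "":
			continue
		case "S3", "C":
			out = append(out, v)
		default:
			return nil, fmt.Errorf("gateway SSE cannot be (%s)", v)
		}
	}
	return out, nil
}

func main() {
	fmt.Println(parseSSE("c;S3")) // [C S3] <nil>
	fmt.Println(parseSSE("c,S3")) // error: "C,S3" is not a valid token
}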
-
-// handle gateway env vars
-func gatewayHandleEnvVars() {
-	// Handle common env vars.
-	handleCommonEnvVars()
-
-	if !globalActiveCred.IsValid() {
-		logger.Fatal(config.ErrInvalidCredentials(nil),
-			"Unable to validate credentials inherited from the shell environment")
-	}
-
-	gwsseVal := env.Get("MINIO_GATEWAY_SSE", "")
-	if gwsseVal != "" {
-		var err error
-		GlobalGatewaySSE, err = parseGatewaySSE(gwsseVal)
-		if err != nil {
-			logger.Fatal(err, "Unable to parse MINIO_GATEWAY_SSE value (`%s`)", gwsseVal)
-		}
-	}
-}
-
-// shouldMeterRequest checks whether incoming request should be added to prometheus gateway metrics
-func shouldMeterRequest(req *http.Request) bool {
-	return req.URL != nil && !strings.HasPrefix(req.URL.Path, minioReservedBucketPath+slashSeparator)
-}
-
-// MetricsTransport is a custom wrapper around Transport to track metrics
-type MetricsTransport struct {
-	Transport *http.Transport
-	Metrics *BackendMetrics
-}
-
-// RoundTrip implements the RoundTrip method for MetricsTransport
-func (m MetricsTransport) RoundTrip(r *http.Request) (*http.Response, error) {
-	metered := shouldMeterRequest(r)
-	if metered && (r.Method == http.MethodPost || r.Method == http.MethodPut) {
-		m.Metrics.IncRequests(r.Method)
-		if r.ContentLength > 0 {
-			m.Metrics.IncBytesSent(uint64(r.ContentLength))
-		}
-	}
-	// Make the request to the server.
-	resp, err := m.Transport.RoundTrip(r)
-	if err != nil {
-		return nil, err
-	}
-	if metered && (r.Method == http.MethodGet || r.Method == http.MethodHead) {
-		m.Metrics.IncRequests(r.Method)
-		if resp.ContentLength > 0 {
-			m.Metrics.IncBytesReceived(uint64(resp.ContentLength))
-		}
-	}
-	return resp, nil
-}
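MetricsTransport illustrates the standard pattern of wrapping an http.RoundTripper to observe traffic. A minimal sketch of the same pattern with a single atomic counter (countingTransport is hypothetical, not MinIO's type):

package main

import (
	"fmt"
	"net/http"
	"sync/atomic"
)

// countingTransport wraps a base RoundTripper and counts requests.
type countingTransport struct {
	base     http.RoundTripper
	requests uint64
}

func (t *countingTransport) RoundTrip(r *http.Request) (*http.Response, error) {
	atomic.AddUint64(&t.requests, 1)
	return t.base.RoundTrip(r)
}

func main() {
	ct := &countingTransport{base: http.DefaultTransport}
	client := &http.Client{Transport: ct}
	if resp, err := client.Get("http://example.com/"); err == nil {
		resp.Body.Close()
	}
	fmt.Println("requests:", atomic.LoadUint64(&ct.requests))
}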
diff --git a/cmd/gateway-common_test.go b/cmd/gateway-common_test.go
deleted file mode 100644
index 4e6b821cf..000000000
--- a/cmd/gateway-common_test.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"reflect"
-	"testing"
-)
-
-// Tests gateway SSE configuration parsing.
-func TestParseGatewaySSE(t *testing.T) {
-	testCases := []struct {
-		gwSSEStr string
-		expected gatewaySSE
-		success  bool
-	}{
-		// valid input
-		{"c;S3", []string{"C", "S3"}, true},
-		{"S3", []string{"S3"}, true},
-		{"c,S3", []string{}, false},
-		{"c;S3;KMS", []string{}, false},
-		{"C;s3", []string{"C", "S3"}, true},
-	}
-
-	for i, testCase := range testCases {
-		gwSSE, err := parseGatewaySSE(testCase.gwSSEStr)
-		if err != nil && testCase.success {
-			t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
-		}
-		if err == nil && !testCase.success {
-			t.Errorf("Test %d: Expected failure but passed instead", i+1)
-		}
-		if err == nil {
-			if !reflect.DeepEqual(gwSSE, testCase.expected) {
-				t.Errorf("Test %d: Expected %v, got %v", i+1, testCase.expected, gwSSE)
-			}
-		}
-	}
-}
diff --git a/cmd/gateway-env.go b/cmd/gateway-env.go
deleted file mode 100644
index 02e964515..000000000
--- a/cmd/gateway-env.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-type gatewaySSE []string
-
-const (
-	// gatewaySSES3 is set when SSE-S3 encryption is needed on both gateway and backend
-	gatewaySSES3 = "S3"
-	// gatewaySSEC is set when SSE-C encryption is needed on both gateway and backend
-	gatewaySSEC = "C"
-)
-
-func (sse gatewaySSE) SSES3() bool {
-	for _, v := range sse {
-		if v == gatewaySSES3 {
-			return true
-		}
-	}
-	return false
-}
-
-func (sse gatewaySSE) SSEC() bool {
-	for _, v := range sse {
-		if v == gatewaySSEC {
-			return true
-		}
-	}
-	return false
-}
-
-func (sse gatewaySSE) IsSet() bool {
-	return sse.SSES3() || sse.SSEC()
-}
diff --git a/cmd/gateway-interface.go b/cmd/gateway-interface.go
deleted file mode 100644
index 621c3fa1b..000000000
--- a/cmd/gateway-interface.go
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import "github.com/minio/madmin-go"
-
-// Gateway name backends
-const (
-	NASBackendGateway = "nas"
-	S3BackendGateway = "s3"
-)
-
-// Gateway represents a gateway backend.
-type Gateway interface {
-	// Name returns the unique name of the gateway.
-	Name() string
-
-	// NewGatewayLayer returns a new ObjectLayer.
-	NewGatewayLayer(creds madmin.Credentials) (ObjectLayer, error)
-}
diff --git a/cmd/gateway-main.go b/cmd/gateway-main.go
deleted file mode 100644
index 01767e1f5..000000000
--- a/cmd/gateway-main.go
+++ /dev/null
@@ -1,369 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"context"
-	"errors"
-	"fmt"
-	"io"
-	"log"
-	"net/url"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-
-	"github.com/gorilla/mux"
-	"github.com/minio/cli"
-	"github.com/minio/madmin-go"
-	"github.com/minio/minio/internal/config"
-	xhttp "github.com/minio/minio/internal/http"
-	"github.com/minio/minio/internal/logger"
-	"github.com/minio/pkg/certs"
-	"github.com/minio/pkg/env"
-)
-
-var gatewayCmd = cli.Command{
-	Name: "gateway",
-	Usage: "start object storage gateway",
-	Flags: append(ServerFlags, GlobalFlags...),
-	HideHelpCommand: true,
-}
-
-// GatewayLocker implements custom NewNSLock implementation
-type GatewayLocker struct {
-	ObjectLayer
-	nsMutex *nsLockMap
-}
-
-// NewNSLock - implements gateway level locker
-func (l *GatewayLocker) NewNSLock(bucket string, objects ...string) RWLocker {
-	return l.nsMutex.NewNSLock(nil, bucket, objects...)
-}
-
-// Walk - implements common gateway level Walker, to walk on all objects recursively at a prefix
-func (l *GatewayLocker) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts ObjectOptions) error {
-	walk := func(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo) error {
-		go func() {
-			// Make sure the results channel is ready to be read when we're done.
-			defer close(results)
-
-			var marker string
-
-			for {
-				// set maxKeys to '0' to list maximum possible objects in single call.
-				loi, err := l.ObjectLayer.ListObjects(ctx, bucket, prefix, marker, "", 0)
-				if err != nil {
-					logger.LogIf(ctx, err)
-					return
-				}
-				marker = loi.NextMarker
-				for _, obj := range loi.Objects {
-					select {
-					case results <- obj:
-					case <-ctx.Done():
-						return
-					}
-				}
-				if !loi.IsTruncated {
-					break
-				}
-			}
-		}()
-		return nil
-	}
-
-	if err := l.ObjectLayer.Walk(ctx, bucket, prefix, results, opts); err != nil {
-		if _, ok := err.(NotImplemented); ok {
-			return walk(ctx, bucket, prefix, results)
-		}
-		return err
-	}
-
-	return nil
-}
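The Walk fallback above pages through ListObjects with a marker until the listing is no longer truncated, streaming results over a channel. A self-contained sketch of that marker loop (the page/lister types here are hypothetical, not MinIO's API):

package main

import "fmt"

// page is one page of a paginated listing; the lister callback returns
// the page that starts at the given marker.
type page struct {
	items      []string
	nextMarker string
	truncated  bool
}

// walk drains all pages into results, closing the channel when done,
// mirroring the goroutine-plus-marker shape of GatewayLocker.Walk.
func walk(list func(marker string) page, results chan<- string) {
	go func() {
		defer close(results)
		var marker string
		for {
			p := list(marker)
			for _, it := range p.items {
				results <- it
			}
			if !p.truncated {
				return
			}
			marker = p.nextMarker
		}
	}()
}

func main() {
	pages := []page{
		{items: []string{"a", "b"}, nextMarker: "b", truncated: true},
		{items: []string{"c"}},
	}
	i := 0
	results := make(chan string)
	walk(func(marker string) page { p := pages[i]; i++; return p }, results)
	for obj := range results {
		fmt.Println(obj)
	}
}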
-
-// NewGatewayLayerWithLocker - initialize gateway with locker.
-func NewGatewayLayerWithLocker(gwLayer ObjectLayer) ObjectLayer {
-	return &GatewayLocker{ObjectLayer: gwLayer, nsMutex: newNSLock(false)}
-}
-
-// RegisterGatewayCommand registers a new command for gateway.
-func RegisterGatewayCommand(cmd cli.Command) error {
-	cmd.Flags = append(append(cmd.Flags, ServerFlags...), GlobalFlags...)
-	gatewayCmd.Subcommands = append(gatewayCmd.Subcommands, cmd)
-	return nil
-}
-
-// ParseGatewayEndpoint - Return endpoint.
-func ParseGatewayEndpoint(arg string) (endPoint string, secure bool, err error) {
-	schemeSpecified := len(strings.Split(arg, "://")) > 1
-	if !schemeSpecified {
-		// Default connection will be "secure".
-		arg = "https://" + arg
-	}
-
-	u, err := url.Parse(arg)
-	if err != nil {
-		return "", false, err
-	}
-
-	switch u.Scheme {
-	case "http":
-		return u.Host, false, nil
-	case "https":
-		return u.Host, true, nil
-	default:
-		return "", false, fmt.Errorf("Unrecognized scheme %s", u.Scheme)
-	}
-}
-
-// ValidateGatewayArguments - Validate gateway arguments.
-func ValidateGatewayArguments(serverAddr, endpointAddr string) error {
-	if err := CheckLocalServerAddr(serverAddr); err != nil {
-		return err
-	}
-
-	if endpointAddr != "" {
-		// Reject the endpoint if it points to the gateway handler itself.
-		sameTarget, err := sameLocalAddrs(endpointAddr, serverAddr)
-		if err != nil {
-			return err
-		}
-		if sameTarget {
-			return fmt.Errorf("endpoint points to the local gateway")
-		}
-	}
-	return nil
-}
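ParseGatewayEndpoint defaults a scheme-less argument to https and accepts only http/https. A runnable sketch of that logic (parseEndpoint is a hypothetical stand-in):

package main

import (
	"fmt"
	"net/url"
	"strings"
)

// parseEndpoint mirrors ParseGatewayEndpoint above: missing scheme
// defaults to https; anything but http/https is rejected.
func parseEndpoint(arg string) (endpoint string, secure bool, err error) {
	if !strings.Contains(arg, "://") {
		arg = "https://" + arg
	}
	u, err := url.Parse(arg)
	if err != nil {
		return "", false, err
	}
	switch u.Scheme {
	case "http":
		return u.Host, false, nil
	case "https":
		return u.Host, true, nil
	default:
		return "", false, fmt.Errorf("Unrecognized scheme %s", u.Scheme)
	}
}

func main() {
	fmt.Println(parseEndpoint("play.min.io:9000"))      // play.min.io:9000 true <nil>
	fmt.Println(parseEndpoint("http://127.0.0.1:9000")) // 127.0.0.1:9000 false <nil>
	fmt.Println(parseEndpoint("ftp://127.0.0.1:9000"))  // error
}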
-
-// StartGateway - handler for 'minio gateway <name>'.
-func StartGateway(ctx *cli.Context, gw Gateway) {
-	signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)
-
-	go handleSignals()
-
-	// This is only to uniquely identify each gateway deployment.
-	globalDeploymentID = env.Get("MINIO_GATEWAY_DEPLOYMENT_ID", mustGetUUID())
-	xhttp.SetDeploymentID(globalDeploymentID)
-
-	if gw == nil {
-		logger.FatalIf(errUnexpected, "Gateway implementation not initialized")
-	}
-
-	// Validate if we have access, secret set through environment.
-	globalGatewayName = gw.Name()
-	gatewayName := gw.Name()
-	if ctx.Args().First() == "help" {
-		cli.ShowCommandHelpAndExit(ctx, gatewayName, 1)
-	}
-
-	// Initialize globalConsoleSys system
-	globalConsoleSys = NewConsoleLogger(GlobalContext)
-	logger.AddSystemTarget(globalConsoleSys)
-
-	// Handle common command args.
-	handleCommonCmdArgs(ctx)
-
-	// Check and load TLS certificates.
-	var err error
-	globalPublicCerts, globalTLSCerts, globalIsTLS, err = getTLSConfig()
-	logger.FatalIf(err, "Invalid TLS certificate file")
-
-	// Check and load Root CAs.
-	globalRootCAs, err = certs.GetRootCAs(globalCertsCADir.Get())
-	logger.FatalIf(err, "Failed to read root CAs (%v)", err)
-
-	// Add the global public crts as part of global root CAs
-	for _, publicCrt := range globalPublicCerts {
-		globalRootCAs.AddCert(publicCrt)
-	}
-
-	// Register root CAs for remote ENVs
-	env.RegisterGlobalCAs(globalRootCAs)
-
-	// Initialize all help
-	initHelp()
-
-	// On macOS, if a process already listens on LOCALIPADDR:PORT, net.Listen() falls back
-	// to IPv6 address ie minio will start listening on IPv6 address whereas another
-	// (non-)minio process is listening on IPv4 of given port.
-	// To avoid this error situation we check for port availability.
-	logger.FatalIf(checkPortAvailability(globalMinioHost, globalMinioPort), "Unable to start the gateway")
-
-	// Handle gateway specific env
-	gatewayHandleEnvVars()
-
-	// Initialize KMS configuration
-	handleKMSConfig()
-
-	// Set system resources to maximum.
-	setMaxResources()
-
-	// Set when gateway is enabled
-	globalIsGateway = true
-
-	// Initialize router. `SkipClean(true)` stops gorilla/mux from
-	// normalizing URL path minio/minio#3256
-	// avoid URL path encoding minio/minio#8950
-	router := mux.NewRouter().SkipClean(true).UseEncodedPath()
-
-	// Enable STS router if etcd is enabled.
-	registerSTSRouter(router)
-
-	// Enable IAM admin APIs if etcd is enabled, if not just enable basic
-	// operations such as profiling, server info etc.
-	registerAdminRouter(router, false)
-
-	// Add healthcheck router
-	registerHealthCheckRouter(router)
-
-	// Add server metrics router
-	registerMetricsRouter(router)
-
-	// Add API router.
-	registerAPIRouter(router)
-
-	// Use all the middlewares
-	router.Use(globalHandlers...)
-
-	var getCert certs.GetCertificateFunc
-	if globalTLSCerts != nil {
-		getCert = globalTLSCerts.GetCertificate
-	}
-
-	httpServer := xhttp.NewServer(getServerListenAddrs()).
-		UseHandler(setCriticalErrorHandler(corsHandler(router))).
-		UseTLSConfig(newTLSConfig(getCert)).
-		UseShutdownTimeout(ctx.Duration("shutdown-timeout")).
-		UseBaseContext(GlobalContext).
-		UseCustomLogger(log.New(io.Discard, "", 0)) // Turn-off random logging by Go stdlib
-
-	go func() {
-		globalHTTPServerErrorCh <- httpServer.Start(GlobalContext)
-	}()
-
-	setHTTPServer(httpServer)
-
-	newObject, err := gw.NewGatewayLayer(madmin.Credentials{
-		AccessKey: globalActiveCred.AccessKey,
-		SecretKey: globalActiveCred.SecretKey,
-	})
-	if err != nil {
-		if errors.Is(err, errFreshDisk) {
-			err = config.ErrInvalidFSValue(err)
-		}
-		logger.FatalIf(err, "Unable to initialize gateway backend")
-	}
-	newObject = NewGatewayLayerWithLocker(newObject)
-
-	// Calls all New() for all sub-systems.
-	initAllSubsystems(GlobalContext)
-
-	// Once endpoints are finalized, initialize the new object api in safe mode.
-	globalObjLayerMutex.Lock()
-	globalObjectAPI = newObject
-	globalObjLayerMutex.Unlock()
-
-	// Initialize server config.
-	srvCfg := newServerConfig()
-
-	// Override any values from ENVs.
-	lookupConfigs(srvCfg, newObject)
-
-	// hold the mutex lock before a new config is assigned.
-	globalServerConfigMu.Lock()
-	globalServerConfig = srvCfg
-	globalServerConfigMu.Unlock()
-
-	go globalIAMSys.Init(GlobalContext, newObject, globalEtcdClient, globalRefreshIAMInterval)
-
-	if gatewayName == NASBackendGateway {
-		buckets, err := newObject.ListBuckets(GlobalContext, BucketOptions{})
-		if err != nil {
-			logger.Fatal(err, "Unable to list buckets")
-		}
-		logger.FatalIf(globalBucketMetadataSys.Init(GlobalContext, buckets, newObject), "Unable to initialize bucket metadata")
-
-		logger.FatalIf(globalEventNotifier.InitBucketTargets(GlobalContext, newObject), "Unable to initialize bucket targets for notification system")
-	}
-
-	if globalCacheConfig.Enabled {
-		// initialize the new disk cache objects.
-		var cacheAPI CacheObjectLayer
-		cacheAPI, err = newServerCacheObjects(GlobalContext, globalCacheConfig)
-		logger.FatalIf(err, "Unable to initialize drive caching")
-
-		globalObjLayerMutex.Lock()
-		globalCacheObjectAPI = cacheAPI
-		globalObjLayerMutex.Unlock()
-	}
-
-	// Populate existing buckets to the etcd backend
-	if globalDNSConfig != nil {
-		buckets, err := newObject.ListBuckets(GlobalContext, BucketOptions{})
-		if err != nil {
-			logger.Fatal(err, "Unable to list buckets")
-		}
-		initFederatorBackend(buckets, newObject)
-	}
-
-	// Verify if object layer supports
-	// - encryption
-	// - compression
-	verifyObjectLayerFeatures("gateway "+gatewayName, newObject)
-
-	// Check for updates in non-blocking manner.
-	go func() {
-		if !globalCLIContext.Quiet && !globalInplaceUpdateDisabled {
-			// Check for new updates from dl.min.io.
-			checkUpdate(getMinioMode())
-		}
-	}()
-
-	if !globalCLIContext.Quiet {
-		// Print gateway startup message.
-		printGatewayStartupMessage(getAPIEndpoints(), gatewayName)
-	}
-
-	if globalBrowserEnabled {
-		srv, err := initConsoleServer()
-		if err != nil {
-			logger.FatalIf(err, "Unable to initialize console service")
-		}
-
-		setConsoleSrv(srv)
-
-		go func() {
-			logger.FatalIf(newConsoleServerFn().Serve(), "Unable to initialize console server")
-		}()
-	}
-
-	if serverDebugLog {
-		logger.Info("== DEBUG Mode enabled ==")
-		logger.Info("Currently set environment settings:")
-		for _, v := range os.Environ() {
-			logger.Info(v)
-		}
-		logger.Info("======")
-	}
-
-	<-globalOSSignalCh
-}
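StartGateway serves HTTP in the background and blocks on a signal channel until interrupted. A minimal sketch of that startup shape (a generic Go server, not MinIO's xhttp server):

package main

import (
	"context"
	"errors"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sigCh := make(chan os.Signal, 1)
	signal.Notify(sigCh, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)

	srv := &http.Server{Addr: ":9000"}
	go func() {
		if err := srv.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			log.Fatal(err)
		}
	}()

	<-sigCh // block, as StartGateway blocks on globalOSSignalCh
	srv.Shutdown(context.Background())
}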
diff --git a/cmd/gateway-main_test.go b/cmd/gateway-main_test.go
deleted file mode 100644
index cf524aab5..000000000
--- a/cmd/gateway-main_test.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"fmt"
-	"strings"
-	"testing"
-
-	"github.com/minio/cli"
-)
-
-// Test RegisterGatewayCommand
-func TestRegisterGatewayCommand(t *testing.T) {
-	var err error
-
-	cmd := cli.Command{Name: "test"}
-	err = RegisterGatewayCommand(cmd)
-	if err != nil {
-		t.Errorf("RegisterGatewayCommand got unexpected error: %s", err)
-	}
-}
-
-// Test running a registered gateway command with a flag
-func TestRunRegisteredGatewayCommand(t *testing.T) {
-	var err error
-
-	flagName := "test-flag"
-	flagValue := "foo"
-
-	cmd := cli.Command{
-		Name: "test-run-with-flag",
-		Flags: []cli.Flag{
-			cli.StringFlag{Name: flagName},
-		},
-		Action: func(ctx *cli.Context) {
-			if actual := ctx.String(flagName); actual != flagValue {
-				t.Errorf("value of %s expects %s, but got %s", flagName, flagValue, actual)
-			}
-		},
-	}
-
-	err = RegisterGatewayCommand(cmd)
-	if err != nil {
-		t.Errorf("RegisterGatewayCommand got unexpected error: %s", err)
-	}
-
-	if err = newApp("minio").Run(
-		[]string{"minio", "gateway", cmd.Name, fmt.Sprintf("--%s", flagName), flagValue}); err != nil {
-		t.Errorf("running registered gateway command got unexpected error: %s", err)
-	}
-}
-
-// Test parseGatewayEndpoint
-func TestParseGatewayEndpoint(t *testing.T) {
-	testCases := []struct {
-		arg         string
-		endPoint    string
-		secure      bool
-		errReturned bool
-	}{
-		{"http://127.0.0.1:9000", "127.0.0.1:9000", false, false},
-		{"https://127.0.0.1:9000", "127.0.0.1:9000", true, false},
-		{"http://play.min.io:9000", "play.min.io:9000", false, false},
-		{"https://play.min.io:9000", "play.min.io:9000", true, false},
-		{"ftp://127.0.0.1:9000", "", false, true},
-		{"ftp://play.min.io:9000", "", false, true},
-		{"play.min.io:9000", "play.min.io:9000", true, false},
-	}
-
-	for i, test := range testCases {
-		endPoint, secure, err := ParseGatewayEndpoint(test.arg)
-		errReturned := err != nil
-
-		if endPoint != test.endPoint ||
-			secure != test.secure ||
-			errReturned != test.errReturned {
-			t.Errorf("Test %d: expected %s,%t,%t got %s,%t,%t",
-				i+1, test.endPoint, test.secure, test.errReturned,
-				endPoint, secure, errReturned)
-		}
-	}
-}
-
-// Test validateGatewayArguments
-func TestValidateGatewayArguments(t *testing.T) {
-	nonLoopBackIPs := localIP4.FuncMatch(func(ip string, matchString string) bool {
-		return !strings.HasPrefix(ip, "127.")
-	}, "")
-	if len(nonLoopBackIPs) == 0 {
-		t.Fatalf("No non-loop back IP address found for this host")
-	}
-	nonLoopBackIP := nonLoopBackIPs.ToSlice()[0]
-
-	testCases := []struct {
-		serverAddr   string
-		endpointAddr string
-		valid        bool
-	}{
-		{":9000", "http://localhost:9001", true},
-		{":9000", "http://google.com", true},
-		{"123.123.123.123:9000", "http://localhost:9000", false},
-		{":9000", "http://localhost:9000", false},
-		{":9000", nonLoopBackIP + ":9000", false},
-	}
-	for i, test := range testCases {
-		err := ValidateGatewayArguments(test.serverAddr, test.endpointAddr)
-		if test.valid && err != nil {
-			t.Errorf("Test %d expected not to return error but got %s", i+1, err)
-		}
-		if !test.valid && err == nil {
-			t.Errorf("Test %d expected to fail but it did not", i+1)
-		}
-	}
-}
diff --git a/cmd/gateway-metrics.go b/cmd/gateway-metrics.go
deleted file mode 100644
index bea531e53..000000000
--- a/cmd/gateway-metrics.go
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"net/http"
-	"sync/atomic"
-)
-
-// RequestStats - counts for Get, Head, Put and Post requests
-type RequestStats struct {
-	Get uint64 `json:"Get"`
-	Head uint64 `json:"Head"`
-	Put uint64 `json:"Put"`
-	Post uint64 `json:"Post"`
-}
-
-// IncBytesReceived - Increase total bytes received from gateway backend
-func (s *BackendMetrics) IncBytesReceived(n uint64) {
-	atomic.AddUint64(&s.bytesReceived, n)
-}
-
-// GetBytesReceived - Get total bytes received from gateway backend
-func (s *BackendMetrics) GetBytesReceived() uint64 {
-	return atomic.LoadUint64(&s.bytesReceived)
-}
-
-// IncBytesSent - Increase total bytes sent to gateway backend
-func (s *BackendMetrics) IncBytesSent(n uint64) {
-	atomic.AddUint64(&s.bytesSent, n)
-}
-
-// GetBytesSent - Get total bytes sent to gateway backend
-func (s *BackendMetrics) GetBytesSent() uint64 {
-	return atomic.LoadUint64(&s.bytesSent)
-}
-
-// IncRequests - Increase request count sent to gateway backend by 1
-func (s *BackendMetrics) IncRequests(method string) {
-	// Only increment for Get, Head, Put & Post requests, else no-op
-	if method == http.MethodGet {
-		atomic.AddUint64(&s.requestStats.Get, 1)
-	} else if method == http.MethodHead {
-		atomic.AddUint64(&s.requestStats.Head, 1)
-	} else if method == http.MethodPut {
-		atomic.AddUint64(&s.requestStats.Put, 1)
-	} else if method == http.MethodPost {
-		atomic.AddUint64(&s.requestStats.Post, 1)
-	}
-}
-
-// GetRequests - Get total number of Get, Head, Put & Post requests sent to gateway backend
-func (s *BackendMetrics) GetRequests() RequestStats {
-	return s.requestStats
-}
-
-// NewMetrics - Prepare new BackendMetrics structure
-func NewMetrics() *BackendMetrics {
-	return &BackendMetrics{}
-}
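BackendMetrics relies on sync/atomic so counters can be bumped from many request goroutines without a mutex. A small self-contained demonstration of that pattern (counters is a hypothetical miniature of BackendMetrics):

package main

import (
	"fmt"
	"net/http"
	"sync"
	"sync/atomic"
)

// counters holds lock-free request counters, like BackendMetrics above.
type counters struct {
	get, put uint64
}

func (c *counters) inc(method string) {
	switch method {
	case http.MethodGet:
		atomic.AddUint64(&c.get, 1)
	case http.MethodPut:
		atomic.AddUint64(&c.put, 1)
	}
}

func main() {
	var c counters
	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		wg.Add(1)
		go func() { defer wg.Done(); c.inc(http.MethodGet) }()
	}
	wg.Wait()
	fmt.Println("GETs:", atomic.LoadUint64(&c.get)) // 100
}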
diff --git a/cmd/gateway-startup-msg.go b/cmd/gateway-startup-msg.go
deleted file mode 100644
index 8075a0841..000000000
--- a/cmd/gateway-startup-msg.go
+++ /dev/null
@@ -1,76 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/minio/minio/internal/color"
-	"github.com/minio/minio/internal/logger"
-)
-
-// Prints the formatted startup message.
-func printGatewayStartupMessage(apiEndPoints []string, backendType string) {
-	if len(globalSubnetConfig.APIKey) == 0 {
-		var builder strings.Builder
-		startupBanner(&builder)
-		logger.Info("\n" + builder.String())
-	}
-
-	strippedAPIEndpoints := stripStandardPorts(apiEndPoints, globalMinioHost)
-	// If cache layer is enabled, print cache capacity.
-	cacheAPI := newCachedObjectLayerFn()
-	if cacheAPI != nil {
-		printCacheStorageInfo(cacheAPI.StorageInfo(GlobalContext))
-	}
-	// Prints credential.
-	printGatewayCommonMsg(strippedAPIEndpoints)
-
-	// Prints `mc` CLI configuration message, choosing the
-	// first endpoint as default.
-	printCLIAccessMsg(strippedAPIEndpoints[0], fmt.Sprintf("my%s", backendType))
-
-	// Prints documentation message.
-	printObjectAPIMsg()
-}
-
-// Prints common server startup message. Prints credential, region and browser access.
-func printGatewayCommonMsg(apiEndpoints []string) {
-	// Get saved credentials.
-	cred := globalActiveCred
-
-	apiEndpointStr := strings.Join(apiEndpoints, " ")
-
-	// Colorize the message and print.
-	logger.Info(color.Blue("API: ") + color.Bold(fmt.Sprintf("%s ", apiEndpointStr)))
-	if color.IsTerminal() && !globalCLIContext.Anonymous {
-		logger.Info(color.Blue("RootUser: ") + color.Bold(fmt.Sprintf("%s ", cred.AccessKey)))
-		logger.Info(color.Blue("RootPass: ") + color.Bold(fmt.Sprintf("%s ", cred.SecretKey)))
-	}
-	printEventNotifiers()
-
-	if globalBrowserEnabled {
-		consoleEndpointStr := strings.Join(stripStandardPorts(getConsoleEndpoints(), globalMinioConsoleHost), " ")
-		logger.Info(color.Blue("\nConsole: ") + color.Bold(fmt.Sprintf("%s ", consoleEndpointStr)))
-		if color.IsTerminal() && !globalCLIContext.Anonymous {
-			logger.Info(color.Blue("RootUser: ") + color.Bold(fmt.Sprintf("%s ", cred.AccessKey)))
-			logger.Info(color.Blue("RootPass: ") + color.Bold(fmt.Sprintf("%s ", cred.SecretKey)))
-		}
-	}
-}
diff --git a/cmd/gateway-startup-msg_test.go b/cmd/gateway-startup-msg_test.go
deleted file mode 100644
index 7e0bde1eb..000000000
--- a/cmd/gateway-startup-msg_test.go
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"context"
-	"os"
-	"testing"
-)
-
-// Test printing Gateway common message.
-func TestPrintGatewayCommonMessage(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	obj, fsDir, err := prepareFS(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(fsDir)
-	if err = newTestConfig(globalMinioDefaultRegion, obj); err != nil {
-		t.Fatal(err)
-	}
-
-	apiEndpoints := []string{"http://127.0.0.1:9000"}
-	printGatewayCommonMsg(apiEndpoints)
-}
-
-// Test print gateway startup message.
-func TestPrintGatewayStartupMessage(t *testing.T) {
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
-
-	obj, fsDir, err := prepareFS(ctx)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(fsDir)
-	if err = newTestConfig(globalMinioDefaultRegion, obj); err != nil {
-		t.Fatal(err)
-	}
-
-	apiEndpoints := []string{"http://127.0.0.1:9000"}
-	printGatewayStartupMessage(apiEndpoints, "s3")
-}
diff --git a/cmd/gateway-unsupported.go b/cmd/gateway-unsupported.go
deleted file mode 100644
index 6367bbea5..000000000
--- a/cmd/gateway-unsupported.go
+++ /dev/null
@@ -1,286 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"context"
-	"errors"
-
-	"github.com/minio/minio/internal/logger"
-
-	"github.com/minio/minio-go/v7/pkg/tags"
-	bucketsse "github.com/minio/minio/internal/bucket/encryption"
-	"github.com/minio/minio/internal/bucket/lifecycle"
-	"github.com/minio/minio/internal/bucket/versioning"
-	"github.com/minio/pkg/bucket/policy"
-
-	"github.com/minio/madmin-go"
-)
-
-// GatewayUnsupported list of unsupported call stubs for gateway.
-type GatewayUnsupported struct{}
-
-// BackendInfo returns the underlying backend information
-func (a GatewayUnsupported) BackendInfo() madmin.BackendInfo {
-	return madmin.BackendInfo{Type: madmin.Gateway}
-}
-
-// LocalStorageInfo returns the local disks information, mainly used
-// in prometheus - for gateway this is just a no-op
-func (a GatewayUnsupported) LocalStorageInfo(ctx context.Context) (StorageInfo, []error) {
-	logger.CriticalIf(ctx, errors.New("not implemented"))
-	return StorageInfo{}, nil
-}
-
-// NSScanner - scanner is not implemented for gateway
-func (a GatewayUnsupported) NSScanner(ctx context.Context, bf *bloomFilter, updates chan<- DataUsageInfo, wantCycle uint32, scanMode madmin.HealScanMode) error {
-	logger.CriticalIf(ctx, errors.New("not implemented"))
-	return NotImplemented{}
-}
-
-// PutObjectMetadata - not implemented for gateway.
-func (a GatewayUnsupported) PutObjectMetadata(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
-	logger.CriticalIf(ctx, errors.New("not implemented"))
-	return ObjectInfo{}, NotImplemented{}
-}
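GatewayUnsupported exists so each backend can embed it and override only the calls it actually supports; everything else falls through to a NotImplemented stub. A self-contained sketch of that embedding pattern (the types here are illustrative, not MinIO's):

package main

import "fmt"

// NotImplemented mimics the error type used by the stubs above.
type NotImplemented struct{}

func (NotImplemented) Error() string { return "not implemented" }

// Unsupported supplies failing defaults, like GatewayUnsupported.
type Unsupported struct{}

func (Unsupported) GetObject(bucket, object string) error { return NotImplemented{} }
func (Unsupported) PutObject(bucket, object string) error { return NotImplemented{} }

// readOnlyBackend overrides GetObject but inherits the PutObject stub.
type readOnlyBackend struct {
	Unsupported
}

func (readOnlyBackend) GetObject(bucket, object string) error { return nil }

func main() {
	b := readOnlyBackend{}
	fmt.Println(b.GetObject("bucket", "obj")) // <nil>
	fmt.Println(b.PutObject("bucket", "obj")) // not implemented
}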
-
-// NewNSLock is a dummy stub for gateway.
-func (a GatewayUnsupported) NewNSLock(bucket string, objects ...string) RWLocker {
-	logger.CriticalIf(context.Background(), errors.New("not implemented"))
-	return nil
-}
-
-// SetDriveCounts no-op
-func (a GatewayUnsupported) SetDriveCounts() []int {
-	return nil
-}
-
-// ListMultipartUploads lists all multipart uploads.
-func (a GatewayUnsupported) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) {
-	return lmi, NotImplemented{}
-}
-
-// NewMultipartUpload upload object in multiple parts
-func (a GatewayUnsupported) NewMultipartUpload(ctx context.Context, bucket string, object string, opts ObjectOptions) (uploadID string, err error) {
-	return "", NotImplemented{}
-}
-
-// CopyObjectPart copy part of object to uploadID for another object
-func (a GatewayUnsupported) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, partID int, startOffset, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, err error) {
-	return pi, NotImplemented{}
-}
-
-// PutObjectPart puts a part of object in bucket
-func (a GatewayUnsupported) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (pi PartInfo, err error) {
-	logger.LogIf(ctx, NotImplemented{})
-	return pi, NotImplemented{}
-}
-
-// GetMultipartInfo returns metadata associated with the uploadId
-func (a GatewayUnsupported) GetMultipartInfo(ctx context.Context, bucket string, object string, uploadID string, opts ObjectOptions) (MultipartInfo, error) {
-	logger.LogIf(ctx, NotImplemented{})
-	return MultipartInfo{}, NotImplemented{}
-}
-
-// ListObjectVersions returns all object parts for specified object in specified bucket
-func (a GatewayUnsupported) ListObjectVersions(ctx context.Context, bucket, prefix, marker, versionMarker, delimiter string, maxKeys int) (ListObjectVersionsInfo, error) {
-	logger.LogIf(ctx, NotImplemented{})
-	return ListObjectVersionsInfo{}, NotImplemented{}
-}
-
-// ListObjectParts returns all object parts for specified object in specified bucket
-func (a GatewayUnsupported) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts ObjectOptions) (lpi ListPartsInfo, err error) {
-	logger.LogIf(ctx, NotImplemented{})
-	return lpi, NotImplemented{}
-}
-
-// AbortMultipartUpload aborts an ongoing multipart upload
-func (a GatewayUnsupported) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, opts ObjectOptions) error {
-	return NotImplemented{}
-}
-
-// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
-func (a GatewayUnsupported) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (oi ObjectInfo, err error) {
-	logger.LogIf(ctx, NotImplemented{})
-	return oi, NotImplemented{}
-}
-
-// SetBucketPolicy sets policy on bucket
-func (a GatewayUnsupported) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error {
-	logger.LogIf(ctx, NotImplemented{})
-	return NotImplemented{}
-}
-
-// GetBucketPolicy will get policy on bucket
-func (a GatewayUnsupported) GetBucketPolicy(ctx context.Context, bucket string) (bucketPolicy *policy.Policy, err error) {
-	return nil, NotImplemented{}
-}
-
-// DeleteBucketPolicy deletes all policies on bucket
-func (a GatewayUnsupported) DeleteBucketPolicy(ctx context.Context, bucket string) error {
-	return NotImplemented{}
-}
-
-// SetBucketVersioning enables versioning on a bucket.
-func (a GatewayUnsupported) SetBucketVersioning(ctx context.Context, bucket string, v *versioning.Versioning) error {
-	logger.LogIf(ctx, NotImplemented{})
-	return NotImplemented{}
-}
-
-// GetBucketVersioning retrieves versioning configuration of a bucket.
-func (a GatewayUnsupported) GetBucketVersioning(ctx context.Context, bucket string) (*versioning.Versioning, error) {
-	logger.LogIf(ctx, NotImplemented{})
-	return nil, NotImplemented{}
-}
-
-// SetBucketLifecycle enables lifecycle policies on a bucket.
-func (a GatewayUnsupported) SetBucketLifecycle(ctx context.Context, bucket string, lifecycle *lifecycle.Lifecycle) error {
-	logger.LogIf(ctx, NotImplemented{})
-	return NotImplemented{}
-}
-
-// GetBucketLifecycle retrieves lifecycle configuration of a bucket.
-func (a GatewayUnsupported) GetBucketLifecycle(ctx context.Context, bucket string) (*lifecycle.Lifecycle, error) {
-	return nil, NotImplemented{}
-}
-
-// DeleteBucketLifecycle deletes all lifecycle policies on a bucket
-func (a GatewayUnsupported) DeleteBucketLifecycle(ctx context.Context, bucket string) error {
-	return NotImplemented{}
-}
-
-// GetBucketSSEConfig returns bucket encryption config on a bucket
-func (a GatewayUnsupported) GetBucketSSEConfig(ctx context.Context, bucket string) (*bucketsse.BucketSSEConfig, error) {
-	return nil, NotImplemented{}
-}
-
-// SetBucketSSEConfig sets bucket encryption config on a bucket
-func (a GatewayUnsupported) SetBucketSSEConfig(ctx context.Context, bucket string, config *bucketsse.BucketSSEConfig) error {
-	return NotImplemented{}
-}
-
-// DeleteBucketSSEConfig deletes bucket encryption config on a bucket
-func (a GatewayUnsupported) DeleteBucketSSEConfig(ctx context.Context, bucket string) error {
-	return NotImplemented{}
-}
-
-// HealFormat - Not implemented stub
-func (a GatewayUnsupported) HealFormat(ctx context.Context, dryRun bool) (madmin.HealResultItem, error) {
-	return madmin.HealResultItem{}, NotImplemented{}
-}
-
-// HealBucket - Not implemented stub
-func (a GatewayUnsupported) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
-	return madmin.HealResultItem{}, NotImplemented{}
-}
-
-// HealObject - Not implemented stub
-func (a GatewayUnsupported) HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (h madmin.HealResultItem, e error) {
-	return h, NotImplemented{}
-}
-
-// ListObjectsV2 - Not implemented stub
-func (a GatewayUnsupported) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (result ListObjectsV2Info, err error) {
-	return result, NotImplemented{}
-}
-
-// Walk - Not implemented stub
-func (a GatewayUnsupported) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts ObjectOptions) error {
-	return NotImplemented{}
-}
-
-// HealObjects - Not implemented stub
-func (a GatewayUnsupported) HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) (e error) {
-	return NotImplemented{}
-}
-
-// CopyObject copies a blob from source container to destination container.
-func (a GatewayUnsupported) CopyObject(ctx context.Context, srcBucket string, srcObject string, destBucket string, destObject string,
-	srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions,
-) (objInfo ObjectInfo, err error) {
-	return objInfo, NotImplemented{}
-}
-
-// GetMetrics - no op
-func (a GatewayUnsupported) GetMetrics(ctx context.Context) (*BackendMetrics, error) {
-	logger.LogIf(ctx, NotImplemented{})
-	return &BackendMetrics{}, NotImplemented{}
-}
-
-// PutObjectTags - not implemented.
-func (a GatewayUnsupported) PutObjectTags(ctx context.Context, bucket, object string, tags string, opts ObjectOptions) (ObjectInfo, error) {
-	logger.LogIf(ctx, NotImplemented{})
-	return ObjectInfo{}, NotImplemented{}
-}
-
-// GetObjectTags - not implemented.
-func (a GatewayUnsupported) GetObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (*tags.Tags, error) {
-	logger.LogIf(ctx, NotImplemented{})
-	return nil, NotImplemented{}
-}
-
-// DeleteObjectTags - not implemented.
-func (a GatewayUnsupported) DeleteObjectTags(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
-	logger.LogIf(ctx, NotImplemented{})
-	return ObjectInfo{}, NotImplemented{}
-}
-
-// IsNotificationSupported returns whether bucket notification is applicable for this layer.
-func (a GatewayUnsupported) IsNotificationSupported() bool {
-	return false
-}
-
-// IsListenSupported returns whether listen bucket notification is applicable for this layer.
-func (a GatewayUnsupported) IsListenSupported() bool {
-	return false
-}
-
-// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
-func (a GatewayUnsupported) IsEncryptionSupported() bool {
-	return false
-}
-
-// IsTaggingSupported returns whether object tagging is supported or not for this layer.
-func (a GatewayUnsupported) IsTaggingSupported() bool {
-	return false
-}
-
-// IsCompressionSupported returns whether compression is applicable for this layer.
-func (a GatewayUnsupported) IsCompressionSupported() bool {
-	return false
-}
-
-// Health - No Op.
-func (a GatewayUnsupported) Health(_ context.Context, _ HealthOptions) HealthResult {
-	return HealthResult{}
-}
-
-// ReadHealth - No Op.
-func (a GatewayUnsupported) ReadHealth(_ context.Context) bool {
-	return true
-}
-
-// TransitionObject - transition object content to target tier.
-func (a GatewayUnsupported) TransitionObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
-	return NotImplemented{}
-}
-
-// RestoreTransitionedObject - restore transitioned object content locally on this cluster.
-func (a GatewayUnsupported) RestoreTransitionedObject(ctx context.Context, bucket, object string, opts ObjectOptions) error {
-	return NotImplemented{}
-}
diff --git a/cmd/gateway/gateway.go b/cmd/gateway/gateway.go
deleted file mode 100644
index 72a382dbd..000000000
--- a/cmd/gateway/gateway.go
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * MinIO Object Storage (c) 2021 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package gateway
-
-import (
-	// Import all gateways; please keep the order
-
-	// NAS
-	_ "github.com/minio/minio/cmd/gateway/nas"
-
-	// S3
-	_ "github.com/minio/minio/cmd/gateway/s3"
-	// gateway functionality is frozen, no new gateways are being implemented
-	// or considered for upstream inclusion at this point in time. if needed
-	// please keep a fork of the project.
-)
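The blank imports above work because each backend package registers itself from init(); importing it for side effects is all that is needed. A single-file sketch of that registration idea (registry names are hypothetical; in the real code registration happens across packages via RegisterGatewayCommand):

package main

import "fmt"

// gateways is a process-wide registry populated from init().
var gateways = map[string]func() string{}

func register(name string, factory func() string) {
	gateways[name] = factory
}

// In the real layout, each backend package calls register from its own
// init(), so a blank import is enough to make it available.
func init() {
	register("nas", func() string { return "nas backend" })
	register("s3", func() string { return "s3 backend" })
}

func main() {
	for name, factory := range gateways {
		fmt.Println(name, "=>", factory())
	}
}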
diff --git a/cmd/gateway/nas/gateway-nas.go b/cmd/gateway/nas/gateway-nas.go
deleted file mode 100644
index f41e09c2e..000000000
--- a/cmd/gateway/nas/gateway-nas.go
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * MinIO Object Storage (c) 2021 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package nas
-
-import (
-	"context"
-
-	"github.com/minio/cli"
-	"github.com/minio/madmin-go"
-	minio "github.com/minio/minio/cmd"
-)
-
-func init() {
-	const nasGatewayTemplate = `NAME:
-  {{.HelpName}} - {{.Usage}}
-
-USAGE:
-  {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} PATH
-{{if .VisibleFlags}}
-FLAGS:
-  {{range .VisibleFlags}}{{.}}
-  {{end}}{{end}}
-PATH:
-  path to NAS mount point
-
-EXAMPLES:
-  1. Start minio gateway server for NAS backend
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}accesskey
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}secretkey
-     {{.Prompt}} {{.HelpName}} /shared/nasvol
-
-  2. Start minio gateway server for NAS with edge caching enabled
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}accesskey
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}secretkey
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*,*.png"
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85
-     {{.Prompt}} {{.HelpName}} /shared/nasvol
-`
-
-	minio.RegisterGatewayCommand(cli.Command{
-		Name: minio.NASBackendGateway,
-		Usage: "Network-attached storage (NAS)",
-		Action: nasGatewayMain,
-		CustomHelpTemplate: nasGatewayTemplate,
-		HideHelpCommand: true,
-	})
-}
-
-// Handler for 'minio gateway nas' command line.
-func nasGatewayMain(ctx *cli.Context) {
-	// Validate gateway arguments.
-	if !ctx.Args().Present() || ctx.Args().First() == "help" {
-		cli.ShowCommandHelpAndExit(ctx, minio.NASBackendGateway, 1)
-	}
-
-	minio.StartGateway(ctx, &NAS{ctx.Args().First()})
-}
-
-// NAS implements Gateway.
-type NAS struct {
-	path string
-}
-
-// Name implements Gateway interface.
-func (g *NAS) Name() string {
-	return minio.NASBackendGateway
-}
-
-// NewGatewayLayer returns the NAS gateway layer.
-func (g *NAS) NewGatewayLayer(creds madmin.Credentials) (minio.ObjectLayer, error) {
-	var err error
-	newObject, err := minio.NewFSObjectLayer(minio.GlobalContext, g.path)
-	if err != nil {
-		return nil, err
-	}
-	return &nasObjects{newObject}, nil
-}
-
-// IsListenSupported returns whether listen bucket notification is applicable for this gateway.
-func (n *nasObjects) IsListenSupported() bool {
-	return false
-}
-
-func (n *nasObjects) StorageInfo(ctx context.Context) (si minio.StorageInfo, _ []error) {
-	si, errs := n.ObjectLayer.StorageInfo(ctx)
-	si.Backend.GatewayOnline = si.Backend.Type == madmin.FS
-	si.Backend.Type = madmin.Gateway
-	return si, errs
-}
-
-// nasObjects implements gateway for MinIO and S3 compatible object storage servers.
-type nasObjects struct {
-	minio.ObjectLayer
-}
-
-func (n *nasObjects) IsTaggingSupported() bool {
-	return true
-}
diff --git a/cmd/gateway/s3/gateway-s3-chain.go b/cmd/gateway/s3/gateway-s3-chain.go
deleted file mode 100644
index daacd349a..000000000
--- a/cmd/gateway/s3/gateway-s3-chain.go
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * MinIO Object Storage (c) 2021 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package s3
-
-import (
-	"fmt"
-	"reflect"
-
-	"github.com/minio/minio-go/v7/pkg/credentials"
-)
-
-// A Chain will search for a provider which returns credentials
-// and cache that provider until Retrieve is called again.
-//
-// The Chain provides a way of chaining multiple providers together
-// which will pick the first available using priority order of the
-// Providers in the list.
-//
-// If none of the Providers retrieve valid credentials Value, ChainProvider's
-// Retrieve() will return the no credentials value.
-//
-// If a Provider is found which returns valid credentials Value ChainProvider
-// will cache that Provider for all calls to IsExpired(), until Retrieve is
-// called again after IsExpired() is true.
-//
-//	creds := credentials.NewChainCredentials(
-//		[]credentials.Provider{
-//			&credentials.EnvAWSS3{},
-//			&credentials.EnvMinio{},
-//		})
-//
-//	// Usage of ChainCredentials.
-//	mc, err := minio.NewWithCredentials(endpoint, creds, secure, "us-east-1")
-//	if err != nil {
-//		log.Fatalln(err)
-//	}
-type Chain struct {
-	Providers []credentials.Provider
-	curr credentials.Provider
-}
-
-// NewChainCredentials returns a pointer to a new Credentials object
-// wrapping a chain of providers.
-func NewChainCredentials(providers []credentials.Provider) *credentials.Credentials {
-	for _, p := range providers {
-		if p == nil {
-			panic("providers cannot be uninitialized")
-		}
-	}
-	return credentials.New(&Chain{
-		Providers: append([]credentials.Provider{}, providers...),
-	})
-}
-
-// Retrieve returns the credentials value, returns no credentials(anonymous)
-// if no credentials provider returned any value.
-//
-// If a provider is found with credentials, it will be cached and any calls
-// to IsExpired() will return the expired state of the cached provider.
-func (c *Chain) Retrieve() (credentials.Value, error) {
-	for _, p := range c.Providers {
-		creds, _ := p.Retrieve()
-		if creds.AccessKeyID != "" && !p.IsExpired() {
-			// Only return credentials that are
-			// available and not expired.
-			c.curr = p
-			return creds, nil
-		}
-	}
-
-	providers := make([]string, 0, len(c.Providers))
-	for _, p := range c.Providers {
-		providers = append(providers, reflect.TypeOf(p).String())
-	}
-
-	return credentials.Value{}, fmt.Errorf("no credentials found in %s cannot proceed", providers)
-}
-
-// IsExpired will return the expired state of the currently cached provider
-// if there is one. If there is no current provider, true will be returned.
-func (c *Chain) IsExpired() bool {
-	if c.curr != nil {
-		return c.curr.IsExpired()
-	}
-
-	return true
-}
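Chain.Retrieve walks the provider list in priority order and returns the first usable value. A minimal self-contained sketch of that first-match chain (the provider interface here is a hypothetical stand-in for credentials.Provider):

package main

import "fmt"

// provider is a tiny stand-in for a credentials provider: it yields a
// value and reports whether one is available.
type provider interface {
	retrieve() (string, bool)
}

type static struct{ v string }

func (s static) retrieve() (string, bool) { return s.v, s.v != "" }

// chain returns the first available provider value, mirroring the
// priority-order loop in Chain.Retrieve above.
func chain(providers ...provider) (string, error) {
	for _, p := range providers {
		if v, ok := p.retrieve(); ok {
			return v, nil
		}
	}
	return "", fmt.Errorf("no credentials found")
}

func main() {
	v, err := chain(static{""}, static{"from-env"})
	fmt.Println(v, err) // from-env <nil>
}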
diff --git a/cmd/gateway/s3/gateway-s3-metadata.go b/cmd/gateway/s3/gateway-s3-metadata.go
deleted file mode 100644
index c82dd055c..000000000
--- a/cmd/gateway/s3/gateway-s3-metadata.go
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * MinIO Object Storage (c) 2021 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package s3
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"errors"
-	"net/http"
-	"time"
-
-	jsoniter "github.com/json-iterator/go"
-	minio "github.com/minio/minio/cmd"
-	"github.com/minio/minio/internal/hash"
-	"github.com/minio/minio/internal/logger"
-)
-
-var (
-	errGWMetaNotFound = errors.New("dare.meta file not found")
-	errGWMetaInvalidFormat = errors.New("dare.meta format is invalid")
-)
-
-// A gwMetaV1 represents `gw.json` metadata header.
-type gwMetaV1 struct {
-	Version string `json:"version"` // Version of the current `gw.json`.
-	Format string `json:"format"` // Format of the current `gw.json`.
-	Stat minio.StatInfo `json:"stat"` // Stat of the current object `gw.json`.
-	ETag string `json:"etag"` // ETag of the current object
-
-	// Metadata map for current object `gw.json`.
-	Meta map[string]string `json:"meta,omitempty"`
-	// Captures all the individual object `gw.json`.
-	Parts []minio.ObjectPartInfo `json:"parts,omitempty"`
-}
-
-// Gateway metadata constants.
-const (
-	// Gateway meta version.
-	gwMetaVersion = "1.0.0"
-
-	// Gateway meta version.
-	gwMetaVersion100 = "1.0.0"
-
-	// Gateway meta format string.
-	gwMetaFormat = "gw"
-
-	// Add new constants here.
-)
-
-// newGWMetaV1 - initializes new gwMetaV1, adds version.
-func newGWMetaV1() (gwMeta gwMetaV1) {
-	gwMeta = gwMetaV1{}
-	gwMeta.Version = gwMetaVersion
-	gwMeta.Format = gwMetaFormat
-	return gwMeta
-}
-
-// IsValid - tells if the format is sane by validating the version
-// string, format fields.
-func (m gwMetaV1) IsValid() bool {
-	return ((m.Version == gwMetaVersion || m.Version == gwMetaVersion100) &&
-		m.Format == gwMetaFormat)
-}
-
-// Converts metadata to object info.
-func (m gwMetaV1) ToObjectInfo(bucket, object string) minio.ObjectInfo {
-	filterKeys := append([]string{
-		"ETag",
-		"Content-Length",
-		"Last-Modified",
-		"Content-Type",
-		"Expires",
-	}, defaultFilterKeys...)
-	objInfo := minio.ObjectInfo{
-		IsDir: false,
-		Bucket: bucket,
-		Name: object,
-		Size: m.Stat.Size,
-		ModTime: m.Stat.ModTime,
-		ContentType: m.Meta["content-type"],
-		ContentEncoding: m.Meta["content-encoding"],
-		ETag: minio.CanonicalizeETag(m.ETag),
-		UserDefined: minio.CleanMinioInternalMetadataKeys(minio.CleanMetadataKeys(m.Meta, filterKeys...)),
-		Parts: m.Parts,
-	}
-
-	if sc, ok := m.Meta["x-amz-storage-class"]; ok {
-		objInfo.StorageClass = sc
-	}
-	var (
-		t time.Time
-		e error
-	)
-	if exp, ok := m.Meta["expires"]; ok {
-		if t, e = time.Parse(http.TimeFormat, exp); e == nil {
-			objInfo.Expires = t.UTC()
-		}
-	}
-	// Success.
-	return objInfo
-}
-
-// ObjectToPartOffset - translate offset of an object to offset of its individual part.
-func (m gwMetaV1) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) {
-	if offset == 0 {
-		// Special case - if offset is 0, then partIndex and partOffset are always 0.
-		return 0, 0, nil
-	}
-	partOffset = offset
-	// Seek until object offset maps to a particular part offset.
-	for i, part := range m.Parts {
-		partIndex = i
-		// Offset is smaller than size we have reached the proper part offset.
-		if partOffset < part.Size {
-			return partIndex, partOffset, nil
-		}
-		// Continue towards the next part.
-		partOffset -= part.Size
-	}
-	logger.LogIf(ctx, minio.InvalidRange{})
-	// The offset is beyond the size of the object; return InvalidRange.
-	return 0, 0, minio.InvalidRange{}
-}
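ObjectToPartOffset walks the part sizes, subtracting each part's size until the remaining offset falls inside a part. A self-contained sketch of that translation (partOffset is a hypothetical stand-in working on a plain slice of sizes):

package main

import (
	"errors"
	"fmt"
)

// partOffset translates an object-level offset into (part index, offset
// within that part), the same walk ObjectToPartOffset performs above.
func partOffset(sizes []int64, offset int64) (int, int64, error) {
	for i, size := range sizes {
		if offset < size {
			return i, offset, nil
		}
		offset -= size
	}
	return 0, 0, errors.New("offset beyond object size")
}

func main() {
	sizes := []int64{5, 5, 3} // three parts, 13 bytes total
	fmt.Println(partOffset(sizes, 0))  // part 0, offset 0
	fmt.Println(partOffset(sizes, 7))  // part 1, offset 2
	fmt.Println(partOffset(sizes, 13)) // error
}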
-func (m gwMetaV1) ToObjectInfo(bucket, object string) minio.ObjectInfo { - filterKeys := append([]string{ - "ETag", - "Content-Length", - "Last-Modified", - "Content-Type", - "Expires", - }, defaultFilterKeys...) - objInfo := minio.ObjectInfo{ - IsDir: false, - Bucket: bucket, - Name: object, - Size: m.Stat.Size, - ModTime: m.Stat.ModTime, - ContentType: m.Meta["content-type"], - ContentEncoding: m.Meta["content-encoding"], - ETag: minio.CanonicalizeETag(m.ETag), - UserDefined: minio.CleanMinioInternalMetadataKeys(minio.CleanMetadataKeys(m.Meta, filterKeys...)), - Parts: m.Parts, - } - - if sc, ok := m.Meta["x-amz-storage-class"]; ok { - objInfo.StorageClass = sc - } - var ( - t time.Time - e error - ) - if exp, ok := m.Meta["expires"]; ok { - if t, e = time.Parse(http.TimeFormat, exp); e == nil { - objInfo.Expires = t.UTC() - } - } - // Success. - return objInfo -} - -// ObjectToPartOffset - translate offset of an object to offset of its individual part. -func (m gwMetaV1) ObjectToPartOffset(ctx context.Context, offset int64) (partIndex int, partOffset int64, err error) { - if offset == 0 { - // Special case - if offset is 0, then partIndex and partOffset are always 0. - return 0, 0, nil - } - partOffset = offset - // Seek until object offset maps to a particular part offset. - for i, part := range m.Parts { - partIndex = i - // Offset is smaller than size we have reached the proper part offset. - if partOffset < part.Size { - return partIndex, partOffset, nil - } - // Continue to towards the next part. - partOffset -= part.Size - } - logger.LogIf(ctx, minio.InvalidRange{}) - // Offset beyond the size of the object return InvalidRange. - return 0, 0, minio.InvalidRange{} -} - -// Constructs GWMetaV1 using `jsoniter` lib to retrieve each field. -func gwMetaUnmarshalJSON(ctx context.Context, gwMetaBuf []byte) (gwMeta gwMetaV1, err error) { - json := jsoniter.ConfigCompatibleWithStandardLibrary - err = json.Unmarshal(gwMetaBuf, &gwMeta) - return gwMeta, err -} - -// readGWMeta reads `dare.meta` and returns back GW metadata structure. -func readGWMetadata(ctx context.Context, buf bytes.Buffer) (gwMeta gwMetaV1, err error) { - if buf.Len() == 0 { - return gwMetaV1{}, errGWMetaNotFound - } - gwMeta, err = gwMetaUnmarshalJSON(ctx, buf.Bytes()) - if err != nil { - return gwMetaV1{}, err - } - if !gwMeta.IsValid() { - return gwMetaV1{}, errGWMetaInvalidFormat - } - // Return structured `dare.meta`. - return gwMeta, nil -} - -// getGWMetadata - unmarshals dare.meta into a *minio.PutObjReader -func getGWMetadata(ctx context.Context, bucket, prefix string, gwMeta gwMetaV1) (*minio.PutObjReader, error) { - // Marshal json. - metadataBytes, err := json.Marshal(&gwMeta) - if err != nil { - logger.LogIf(ctx, err) - return nil, err - } - hashReader, err := hash.NewReader(bytes.NewReader(metadataBytes), int64(len(metadataBytes)), "", "", int64(len(metadataBytes))) - if err != nil { - return nil, err - } - return minio.NewPutObjReader(hashReader), nil -} diff --git a/cmd/gateway/s3/gateway-s3-metadata_test.go b/cmd/gateway/s3/gateway-s3-metadata_test.go deleted file mode 100644 index d4de6ecc0..000000000 --- a/cmd/gateway/s3/gateway-s3-metadata_test.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * MinIO Object Storage (c) 2021 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3 - -import ( - "bytes" - "testing" - - minio "github.com/minio/minio/cmd" -) - -// Tests for GW metadata format validity. -func TestGWMetaFormatValid(t *testing.T) { - tests := []struct { - name int - version string - format string - want bool - }{ - {1, "123", "fs", false}, - {2, "123", gwMetaFormat, false}, - {3, gwMetaVersion, "test", false}, - {4, gwMetaVersion100, "hello", false}, - {5, gwMetaVersion, gwMetaFormat, true}, - {6, gwMetaVersion100, gwMetaFormat, true}, - } - for _, tt := range tests { - m := newGWMetaV1() - m.Version = tt.version - m.Format = tt.format - if got := m.IsValid(); got != tt.want { - t.Errorf("Test %d: Expected %v but received %v", tt.name, got, tt.want) - } - } -} - -// Tests for reading GW metadata info. -func TestReadGWMetadata(t *testing.T) { - tests := []struct { - metaStr string - pass bool - }{ - {`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": 132, "modTime": "2018-08-31T22:25:39.23626461Z" }}`, true}, - {`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": 132, "modTime": "0000-00-00T00:00:00.00000000Z" }}`, false}, - {`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": 5242880, "modTime": "2018-08-31T22:25:39.23626461Z" },"meta":{"content-type":"application/octet-stream","etag":"57c743902b2fc8eea6ba3bb4fc58c8e8"},"parts":[{"number":1,"name":"part.1","etag":"","size":5242880}]}`, true}, - {`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": 68190720, "modTime": "2018-08-31T22:25:39.23626461Z" },"meta":{"X-Minio-Internal-Encrypted-Multipart":"","X-Minio-Internal-Server-Side-Encryption-Iv":"kdbOcKdXD3Sew8tOiHe5eI9xkX1oQ2W9JURz0oslCZA=","X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm":"DAREv2-HMAC-SHA256","X-Minio-Internal-Server-Side-Encryption-Sealed-Key":"IAAfAMfqKrxMXC9LuiI7ENP+p0xArepzAiIeB/MftFp7Xmq2OzDkKlmNbj5RKI89RrjiAbOVLSSEMvqQsrIrTQ==","content-type":"text/plain; charset=utf-8","etag":"2b137fa4ab80126af54623b010c98de6-2"},"parts":[{"number":1,"name":"part.1","etag":"c5cac075eefdab801a5198812f51b36e","size":67141632},{"number":2,"name":"part.2","etag":"ccdf4b774bc3be8eef9a8987309e8171","size":1049088}]}`, true}, - {`{"version": "` + gwMetaVersion + `", "format":"` + gwMetaFormat + `", "stat": {"size": "68190720", "modTime": "2018-08-31T22:25:39.23626461Z" },"meta":{"X-Minio-Internal-Encrypted-Multipart":"","X-Minio-Internal-Server-Side-Encryption-Iv":"kdbOcKdXD3Sew8tOiHe5eI9xkX1oQ2W9JURz0oslCZA=","X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm":"DAREv2-HMAC-SHA256","X-Minio-Internal-Server-Side-Encryption-Sealed-Key":"IAAfAMfqKrxMXC9LuiI7ENP+p0xArepzAiIeB/MftFp7Xmq2OzDkKlmNbj5RKI89RrjiAbOVLSSEMvqQsrIrTQ==","content-type":"text/plain; charset=utf-8","etag":"2b137fa4ab80126af54623b010c98de6-2"},"parts":"123"}`, false}, - } - - for i, tt := range tests { - buf := bytes.NewBufferString(tt.metaStr) - m, err := readGWMetadata(minio.GlobalContext, *buf) - if err != nil && tt.pass { - t.Errorf("Test %d: Expected parse gw metadata to succeed, but failed, 
%s", i+1, err) - } - if err == nil && !tt.pass { - t.Errorf("Test %d: Expected parse gw metadata to succeed, but failed", i+1) - } - if err == nil { - if m.Version != gwMetaVersion { - t.Errorf("Test %d: Expected version %s, but failed with %s", i+1, gwMetaVersion, m.Version) - } - } - } -} diff --git a/cmd/gateway/s3/gateway-s3-sse.go b/cmd/gateway/s3/gateway-s3-sse.go deleted file mode 100644 index 90158d457..000000000 --- a/cmd/gateway/s3/gateway-s3-sse.go +++ /dev/null @@ -1,819 +0,0 @@ -/* - * MinIO Object Storage (c) 2021 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3 - -import ( - "bytes" - "context" - "io" - "net/http" - "path" - "strconv" - "strings" - "time" - - "github.com/minio/minio-go/v7/pkg/encrypt" - minio "github.com/minio/minio/cmd" - - "github.com/minio/minio/internal/logger" -) - -const ( - // name of custom multipart metadata file for s3 backend. - gwdareMetaJSON string = "dare.meta" - - // name of temporary per part metadata file - gwpartMetaJSON string = "part.meta" - // custom multipart files are stored under the defaultMinioGWPrefix - defaultMinioGWPrefix = ".minio" - defaultGWContentFileName = "data" -) - -// s3EncObjects is a wrapper around s3Objects and implements gateway calls for -// custom large objects encrypted at the gateway -type s3EncObjects struct { - s3Objects -} - -/* - NOTE: - Custom gateway encrypted objects are stored on backend as follows: - obj/.minio/data <= encrypted content - obj/.minio/dare.meta <= metadata - - When a multipart upload operation is in progress, the metadata set during - NewMultipartUpload is stored in obj/.minio/uploadID/dare.meta and each - UploadPart operation saves additional state of the part's encrypted ETag and - encrypted size in obj/.minio/uploadID/part1/part.meta - - All the part metadata and temp dare.meta are cleaned up when upload completes -*/ - -// ListObjects lists all blobs in S3 bucket filtered by prefix -func (l *s3EncObjects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) { - var startAfter string - res, err := l.ListObjectsV2(ctx, bucket, prefix, marker, delimiter, maxKeys, false, startAfter) - if err != nil { - return loi, err - } - loi.IsTruncated = res.IsTruncated - loi.NextMarker = res.NextContinuationToken - loi.Objects = res.Objects - loi.Prefixes = res.Prefixes - return loi, nil -} - -// ListObjectsV2 lists all blobs in S3 bucket filtered by prefix -func (l *s3EncObjects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) { - var objects []minio.ObjectInfo - var prefixes []string - var isTruncated bool - - // filter out objects that contain a .minio prefix, but is not a dare.meta metadata file. 
- for { - loi, e = l.s3Objects.ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, 1000, fetchOwner, startAfter) - if e != nil { - return loi, minio.ErrorRespToObjectError(e, bucket) - } - - continuationToken = loi.NextContinuationToken - isTruncated = loi.IsTruncated - - for _, obj := range loi.Objects { - startAfter = obj.Name - - if !isGWObject(obj.Name) { - continue - } - // get objectname and ObjectInfo from the custom metadata file - if strings.HasSuffix(obj.Name, gwdareMetaJSON) { - objSlice := strings.Split(obj.Name, minio.SlashSeparator+defaultMinioGWPrefix) - gwMeta, e := l.getGWMetadata(ctx, bucket, getDareMetaPath(objSlice[0])) - if e != nil { - continue - } - oInfo := gwMeta.ToObjectInfo(bucket, objSlice[0]) - objects = append(objects, oInfo) - } else { - objects = append(objects, obj) - } - if maxKeys > 0 && len(objects) > maxKeys { - break - } - } - for _, p := range loi.Prefixes { - objName := strings.TrimSuffix(p, minio.SlashSeparator) - gm, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(objName)) - // if prefix is actually a custom multi-part object, append it to objects - if err == nil { - objects = append(objects, gm.ToObjectInfo(bucket, objName)) - continue - } - isPrefix := l.isPrefix(ctx, bucket, p, fetchOwner, startAfter) - if isPrefix { - prefixes = append(prefixes, p) - } - } - if (maxKeys > 0 && len(objects) > maxKeys) || !loi.IsTruncated { - break - } - } - - loi.IsTruncated = isTruncated - loi.ContinuationToken = continuationToken - loi.Objects = make([]minio.ObjectInfo, 0) - loi.Prefixes = make([]string, 0) - loi.Objects = append(loi.Objects, objects...) - - for _, pfx := range prefixes { - if pfx != prefix { - loi.Prefixes = append(loi.Prefixes, pfx) - } - } - // Set continuation token if s3 returned truncated list - if isTruncated { - if len(objects) > 0 { - loi.NextContinuationToken = objects[len(objects)-1].Name - } - } - return loi, nil -} - -// isGWObject returns true if it is a custom object -func isGWObject(objName string) bool { - isEncrypted := strings.Contains(objName, defaultMinioGWPrefix) - if !isEncrypted { - return true - } - // ignore temp part.meta files - if strings.Contains(objName, gwpartMetaJSON) { - return false - } - - pfxSlice := strings.Split(objName, minio.SlashSeparator) - var i1, i2 int - for i := len(pfxSlice) - 1; i >= 0; i-- { - p := pfxSlice[i] - if p == defaultMinioGWPrefix { - i1 = i - } - if p == gwdareMetaJSON { - i2 = i - } - if i1 > 0 && i2 > 0 { - break - } - } - // incomplete uploads would have a uploadID between defaultMinioGWPrefix and gwdareMetaJSON - return i2 > 0 && i1 > 0 && i2-i1 == 1 -} - -// isPrefix returns true if prefix exists and is not an incomplete multipart upload entry -func (l *s3EncObjects) isPrefix(ctx context.Context, bucket, prefix string, fetchOwner bool, startAfter string) bool { - var continuationToken, delimiter string - - for { - loi, e := l.s3Objects.ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, 1000, fetchOwner, startAfter) - if e != nil { - return false - } - for _, obj := range loi.Objects { - if isGWObject(obj.Name) { - return true - } - } - - continuationToken = loi.NextContinuationToken - if !loi.IsTruncated { - break - } - } - return false -} - -// GetObject reads an object from S3. Supports additional -// parameters like offset and length which are synonymous with -// HTTP Range requests. 
-func (l *s3EncObjects) GetObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
-    return l.getObject(ctx, bucket, key, startOffset, length, writer, etag, opts)
-}
-
-func (l *s3EncObjects) isGWEncrypted(ctx context.Context, bucket, object string) bool {
-    _, err := l.s3Objects.GetObjectInfo(ctx, bucket, getDareMetaPath(object), minio.ObjectOptions{})
-    return err == nil
-}
-
-// getGWMetadata fetches dare.meta from the s3 backend and unmarshals it into a structured format.
-func (l *s3EncObjects) getGWMetadata(ctx context.Context, bucket, metaFileName string) (m gwMetaV1, err error) {
-    oi, err1 := l.s3Objects.GetObjectInfo(ctx, bucket, metaFileName, minio.ObjectOptions{})
-    if err1 != nil {
-        return m, err1
-    }
-    var buffer bytes.Buffer
-    err = l.s3Objects.getObject(ctx, bucket, metaFileName, 0, oi.Size, &buffer, oi.ETag, minio.ObjectOptions{})
-    if err != nil {
-        return m, err
-    }
-    return readGWMetadata(ctx, buffer)
-}
-
-// writeGWMetadata writes dare metadata to the s3 backend
-func (l *s3EncObjects) writeGWMetadata(ctx context.Context, bucket, metaFileName string, m gwMetaV1, o minio.ObjectOptions) error {
-    reader, err := getGWMetadata(ctx, bucket, metaFileName, m)
-    if err != nil {
-        logger.LogIf(ctx, err)
-        return err
-    }
-    _, err = l.s3Objects.PutObject(ctx, bucket, metaFileName, reader, o)
-    return err
-}
-
-// returns the path of the temporary metadata json file for the upload
-func getTmpDareMetaPath(object, uploadID string) string {
-    return path.Join(getGWMetaPath(object), uploadID, gwdareMetaJSON)
-}
-
-// returns the path of the metadata json file for encrypted objects
-func getDareMetaPath(object string) string {
-    return path.Join(getGWMetaPath(object), gwdareMetaJSON)
-}
-
-// returns the path of the temporary part metadata file for multipart uploads
-func getPartMetaPath(object, uploadID string, partID int) string {
-    return path.Join(object, defaultMinioGWPrefix, uploadID, strconv.Itoa(partID), gwpartMetaJSON)
-}
-
-// deletes the custom dare metadata file saved at the backend
-func (l *s3EncObjects) deleteGWMetadata(ctx context.Context, bucket, metaFileName string) (minio.ObjectInfo, error) {
-    return l.s3Objects.DeleteObject(ctx, bucket, metaFileName, minio.ObjectOptions{})
-}
-
-func (l *s3EncObjects) getObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, opts minio.ObjectOptions) error {
-    var o minio.ObjectOptions
-    if minio.GlobalGatewaySSE.SSEC() {
-        o = opts
-    }
-    dmeta, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(key))
-    if err != nil {
-        // unencrypted content
-        return l.s3Objects.getObject(ctx, bucket, key, startOffset, length, writer, etag, o)
-    }
-    if startOffset < 0 {
-        logger.LogIf(ctx, minio.InvalidRange{})
-    }
-
-    // For negative length read everything.
-    if length < 0 {
-        length = dmeta.Stat.Size - startOffset
-    }
-    // Reply back invalid range if the input offset and length fall out of range.
-    if startOffset > dmeta.Stat.Size || startOffset+length > dmeta.Stat.Size {
-        logger.LogIf(ctx, minio.InvalidRange{OffsetBegin: startOffset, OffsetEnd: length, ResourceSize: dmeta.Stat.Size})
-        return minio.InvalidRange{OffsetBegin: startOffset, OffsetEnd: length, ResourceSize: dmeta.Stat.Size}
-    }
-    // Get start part index and offset.
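// Editor's note (worked example, not part of the original change): with parts of
// sizes [5 MiB, 5 MiB, 1 MiB], ObjectToPartOffset for startOffset = 6 MiB keeps
// subtracting part sizes until the remainder fits inside a part: 6 MiB - 5 MiB =
// 1 MiB, and 1 MiB < 5 MiB, so it yields partIndex 1 and partOffset 1 MiB.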
-    _, partOffset, err := dmeta.ObjectToPartOffset(ctx, startOffset)
-    if err != nil {
-        return minio.InvalidRange{OffsetBegin: startOffset, OffsetEnd: length, ResourceSize: dmeta.Stat.Size}
-    }
-
-    // Calculate endOffset according to length
-    endOffset := startOffset
-    if length > 0 {
-        endOffset += length - 1
-    }
-
-    // Get the last part index to read the given length.
-    if _, _, err := dmeta.ObjectToPartOffset(ctx, endOffset); err != nil {
-        return minio.InvalidRange{OffsetBegin: startOffset, OffsetEnd: length, ResourceSize: dmeta.Stat.Size}
-    }
-    return l.s3Objects.getObject(ctx, bucket, key, partOffset, endOffset, writer, dmeta.ETag, o)
-}
-
-// GetObjectNInfo - returns object info and a locked object ReadCloser
-func (l *s3EncObjects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, o minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {
-    var opts minio.ObjectOptions
-    if minio.GlobalGatewaySSE.SSEC() {
-        opts = o
-    }
-    objInfo, err := l.GetObjectInfo(ctx, bucket, object, opts)
-    if err != nil {
-        return l.s3Objects.GetObjectNInfo(ctx, bucket, object, rs, h, lockType, opts)
-    }
-    fn, off, length, err := minio.NewGetObjectReader(rs, objInfo, opts)
-    if err != nil {
-        return nil, minio.ErrorRespToObjectError(err, bucket, object)
-    }
-    if l.isGWEncrypted(ctx, bucket, object) {
-        object = getGWContentPath(object)
-    }
-    pr, pw := io.Pipe()
-    go func() {
-        // Do not set an `If-Match` header for the ETag when
-        // the ETag is encrypted. The ETag at the backend never
-        // matches an encrypted ETag and there is in any case
-        // no way to make two consecutive S3 calls safe for concurrent
-        // access.
-        // However, if the encrypted object changes concurrently, the
-        // gateway will not be able to decrypt it, since the key (obtained
-        // from dare.meta) will not work for any newly created object. Therefore,
-        // we will in any case not return invalid data to the client.
-        etag := objInfo.ETag
-        if len(etag) > 32 && strings.Count(etag, "-") == 0 {
-            etag = ""
-        }
-        err := l.getObject(ctx, bucket, object, off, length, pw, etag, opts)
-        pw.CloseWithError(err)
-    }()
-
-    // Setup a cleanup function to cause the above go-routine to
-    // exit in case of a partial read
-    pipeCloser := func() { pr.Close() }
-    return fn(pr, h, pipeCloser)
-}
-
-// GetObjectInfo reads object info and replies back ObjectInfo
-// For custom gateway encrypted large objects, the ObjectInfo is retrieved from the dare.meta file.
-func (l *s3EncObjects) GetObjectInfo(ctx context.Context, bucket string, object string, o minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) {
-    var opts minio.ObjectOptions
-    if minio.GlobalGatewaySSE.SSEC() {
-        opts = o
-    }
-
-    gwMeta, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(object))
-    if err != nil {
-        return l.s3Objects.GetObjectInfo(ctx, bucket, object, opts)
-    }
-    return gwMeta.ToObjectInfo(bucket, object), nil
-}
-
-// CopyObject copies an object from a source bucket to a destination bucket.
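// Editor's note (sketch, not part of the original change): in the
// same-source/destination branch below, only dare.meta is rewritten. The SSE
// options are folded into the stored metadata roughly as
//
//     header := make(http.Header)
//     d.ServerSideEncryption.Marshal(header) // emits the X-Amz-Server-Side-Encryption* headers
//     for k, v := range header {
//         userDefined[k] = v[0]
//     }
//
// so a metadata-only copy never has to re-upload the encrypted content.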
-func (l *s3EncObjects) CopyObject(ctx context.Context, srcBucket string, srcObject string, dstBucket string, dstObject string, srcInfo minio.ObjectInfo, s, d minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - cpSrcDstSame := path.Join(srcBucket, srcObject) == path.Join(dstBucket, dstObject) - userDefined := minio.CloneMSS(srcInfo.UserDefined) - if cpSrcDstSame { - var gwMeta gwMetaV1 - if s.ServerSideEncryption != nil && d.ServerSideEncryption != nil && - ((s.ServerSideEncryption.Type() == encrypt.SSEC && d.ServerSideEncryption.Type() == encrypt.SSEC) || - (s.ServerSideEncryption.Type() == encrypt.S3 && d.ServerSideEncryption.Type() == encrypt.S3)) { - gwMeta, err = l.getGWMetadata(ctx, srcBucket, getDareMetaPath(srcObject)) - if err != nil { - return - } - header := make(http.Header) - if d.ServerSideEncryption != nil { - d.ServerSideEncryption.Marshal(header) - } - for k, v := range header { - userDefined[k] = v[0] - } - gwMeta.Meta = userDefined - if err = l.writeGWMetadata(ctx, dstBucket, getDareMetaPath(dstObject), gwMeta, minio.ObjectOptions{}); err != nil { - return objInfo, minio.ErrorRespToObjectError(err) - } - return gwMeta.ToObjectInfo(dstBucket, dstObject), nil - } - } - dstOpts := minio.ObjectOptions{ServerSideEncryption: d.ServerSideEncryption, UserDefined: userDefined} - return l.PutObject(ctx, dstBucket, dstObject, srcInfo.PutObjReader, dstOpts) -} - -// DeleteObject deletes a blob in bucket -// For custom gateway encrypted large objects, cleans up encrypted content and metadata files -// from the backend. -func (l *s3EncObjects) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { - // Get dare meta json - if _, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(object)); err != nil { - logger.LogIf(minio.GlobalContext, err) - return l.s3Objects.DeleteObject(ctx, bucket, object, opts) - } - // delete encrypted object - l.s3Objects.DeleteObject(ctx, bucket, getGWContentPath(object), opts) - return l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object)) -} - -func (l *s3EncObjects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) { - errs := make([]error, len(objects)) - dobjects := make([]minio.DeletedObject, len(objects)) - for idx, object := range objects { - _, errs[idx] = l.DeleteObject(ctx, bucket, object.ObjectName, opts) - if errs[idx] == nil { - dobjects[idx] = minio.DeletedObject{ - ObjectName: object.ObjectName, - } - } - } - return dobjects, errs -} - -// ListMultipartUploads lists all multipart uploads. 
-func (l *s3EncObjects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, e error) { - lmi, e = l.s3Objects.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) - if e != nil { - return - } - lmi.KeyMarker = strings.TrimSuffix(lmi.KeyMarker, getGWContentPath(minio.SlashSeparator)) - lmi.NextKeyMarker = strings.TrimSuffix(lmi.NextKeyMarker, getGWContentPath(minio.SlashSeparator)) - for i := range lmi.Uploads { - lmi.Uploads[i].Object = strings.TrimSuffix(lmi.Uploads[i].Object, getGWContentPath(minio.SlashSeparator)) - } - return -} - -// NewMultipartUpload uploads object in multiple parts -func (l *s3EncObjects) NewMultipartUpload(ctx context.Context, bucket, object string, o minio.ObjectOptions) (result *minio.NewMultipartUploadResult, err error) { - var sseOpts encrypt.ServerSide - if o.ServerSideEncryption == nil { - return l.s3Objects.NewMultipartUpload(ctx, bucket, object, minio.ObjectOptions{UserDefined: o.UserDefined}) - } - // Decide if sse options needed to be passed to backend - if (minio.GlobalGatewaySSE.SSEC() && o.ServerSideEncryption.Type() == encrypt.SSEC) || - (minio.GlobalGatewaySSE.SSES3() && o.ServerSideEncryption.Type() == encrypt.S3) { - sseOpts = o.ServerSideEncryption - } - - result, err = l.s3Objects.NewMultipartUpload(ctx, bucket, getGWContentPath(object), minio.ObjectOptions{ServerSideEncryption: sseOpts}) - if err != nil { - return - } - // Create uploadID and write a temporary dare.meta object under object/uploadID prefix - gwmeta := newGWMetaV1() - gwmeta.Meta = o.UserDefined - gwmeta.Stat.ModTime = time.Now().UTC() - err = l.writeGWMetadata(ctx, bucket, getTmpDareMetaPath(object, result.UploadID), gwmeta, minio.ObjectOptions{}) - if err != nil { - return nil, minio.ErrorRespToObjectError(err) - } - return result, nil -} - -// PutObject creates a new object with the incoming data, -func (l *s3EncObjects) PutObject(ctx context.Context, bucket string, object string, data *minio.PutObjReader, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - var sseOpts encrypt.ServerSide - // Decide if sse options needed to be passed to backend - if opts.ServerSideEncryption != nil && - ((minio.GlobalGatewaySSE.SSEC() && opts.ServerSideEncryption.Type() == encrypt.SSEC) || - (minio.GlobalGatewaySSE.SSES3() && opts.ServerSideEncryption.Type() == encrypt.S3) || - opts.ServerSideEncryption.Type() == encrypt.KMS) { - sseOpts = opts.ServerSideEncryption - } - if opts.ServerSideEncryption == nil { - defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object)) - defer l.DeleteObject(ctx, bucket, getGWContentPath(object), opts) - return l.s3Objects.PutObject(ctx, bucket, object, data, minio.ObjectOptions{UserDefined: opts.UserDefined}) - } - - oi, err := l.s3Objects.PutObject(ctx, bucket, getGWContentPath(object), data, minio.ObjectOptions{ServerSideEncryption: sseOpts}) - if err != nil { - return objInfo, minio.ErrorRespToObjectError(err) - } - - gwMeta := newGWMetaV1() - gwMeta.Meta = make(map[string]string) - for k, v := range opts.UserDefined { - gwMeta.Meta[k] = v - } - encMD5 := data.MD5CurrentHexString() - - gwMeta.ETag = encMD5 - gwMeta.Stat.Size = oi.Size - gwMeta.Stat.ModTime = time.Now().UTC() - if err = l.writeGWMetadata(ctx, bucket, getDareMetaPath(object), gwMeta, minio.ObjectOptions{}); err != nil { - return objInfo, minio.ErrorRespToObjectError(err) - } - objInfo = 
gwMeta.ToObjectInfo(bucket, object) - // delete any unencrypted content of the same name created previously - l.s3Objects.DeleteObject(ctx, bucket, object, opts) - return objInfo, nil -} - -// PutObjectPart puts a part of object in bucket -func (l *s3EncObjects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, data *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, e error) { - if opts.ServerSideEncryption == nil { - return l.s3Objects.PutObjectPart(ctx, bucket, object, uploadID, partID, data, opts) - } - - var s3Opts minio.ObjectOptions - // for sse-s3 encryption options should not be passed to backend - if opts.ServerSideEncryption != nil && opts.ServerSideEncryption.Type() == encrypt.SSEC && minio.GlobalGatewaySSE.SSEC() { - s3Opts = opts - } - - uploadPath := getTmpGWMetaPath(object, uploadID) - tmpDareMeta := path.Join(uploadPath, gwdareMetaJSON) - _, err := l.s3Objects.GetObjectInfo(ctx, bucket, tmpDareMeta, minio.ObjectOptions{}) - if err != nil { - return pi, minio.InvalidUploadID{UploadID: uploadID} - } - - pi, e = l.s3Objects.PutObjectPart(ctx, bucket, getGWContentPath(object), uploadID, partID, data, s3Opts) - if e != nil { - return - } - gwMeta := newGWMetaV1() - gwMeta.Parts = make([]minio.ObjectPartInfo, 1) - // Add incoming part. - gwMeta.Parts[0] = minio.ObjectPartInfo{ - Number: partID, - ETag: pi.ETag, - Size: pi.Size, - } - gwMeta.ETag = data.MD5CurrentHexString() // encrypted ETag - gwMeta.Stat.Size = pi.Size - gwMeta.Stat.ModTime = pi.LastModified - - if err = l.writeGWMetadata(ctx, bucket, getPartMetaPath(object, uploadID, partID), gwMeta, minio.ObjectOptions{}); err != nil { - return pi, minio.ErrorRespToObjectError(err) - } - return minio.PartInfo{ - Size: gwMeta.Stat.Size, - ETag: minio.CanonicalizeETag(gwMeta.ETag), - LastModified: gwMeta.Stat.ModTime, - PartNumber: partID, - }, nil -} - -// CopyObjectPart creates a part in a multipart upload by copying -// existing object or a part of it. -func (l *s3EncObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string, - partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions, -) (p minio.PartInfo, err error) { - return l.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, srcInfo.PutObjReader, dstOpts) -} - -// GetMultipartInfo returns multipart info of the uploadId of the object -func (l *s3EncObjects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (result minio.MultipartInfo, err error) { - result.Bucket = bucket - result.Object = object - result.UploadID = uploadID - // We do not store parts uploaded so far in the dare.meta. Only CompleteMultipartUpload finalizes the parts under upload prefix.Otherwise, - // there could be situations of dare.meta getting corrupted by competing upload parts. - dm, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID)) - if err != nil { - return l.s3Objects.GetMultipartInfo(ctx, bucket, object, uploadID, opts) - } - result.UserDefined = dm.ToObjectInfo(bucket, object).UserDefined - return result, nil -} - -// ListObjectParts returns all object parts for specified object in specified bucket -func (l *s3EncObjects) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (lpi minio.ListPartsInfo, e error) { - // We do not store parts uploaded so far in the dare.meta. 
Only CompleteMultipartUpload finalizes the parts under the upload prefix. Otherwise,
-    // there could be situations of dare.meta getting corrupted by competing upload parts.
-    dm, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID))
-    if err != nil {
-        return l.s3Objects.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts, opts)
-    }
-
-    lpi, err = l.s3Objects.ListObjectParts(ctx, bucket, getGWContentPath(object), uploadID, partNumberMarker, maxParts, opts)
-    if err != nil {
-        return lpi, err
-    }
-    for i, part := range lpi.Parts {
-        partMeta, err := l.getGWMetadata(ctx, bucket, getPartMetaPath(object, uploadID, part.PartNumber))
-        if err != nil || len(partMeta.Parts) == 0 {
-            return lpi, minio.InvalidPart{}
-        }
-        lpi.Parts[i].ETag = partMeta.ETag
-    }
-    lpi.UserDefined = dm.ToObjectInfo(bucket, object).UserDefined
-    lpi.Object = object
-    return lpi, nil
-}
-
-// AbortMultipartUpload aborts an ongoing multipart upload
-func (l *s3EncObjects) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, opts minio.ObjectOptions) error {
-    if _, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID)); err != nil {
-        return l.s3Objects.AbortMultipartUpload(ctx, bucket, object, uploadID, opts)
-    }
-
-    if err := l.s3Objects.AbortMultipartUpload(ctx, bucket, getGWContentPath(object), uploadID, opts); err != nil {
-        return err
-    }
-
-    uploadPrefix := getTmpGWMetaPath(object, uploadID)
-    var continuationToken, startAfter, delimiter string
-    for {
-        loi, err := l.s3Objects.ListObjectsV2(ctx, bucket, uploadPrefix, continuationToken, delimiter, 1000, false, startAfter)
-        if err != nil {
-            return minio.InvalidUploadID{UploadID: uploadID}
-        }
-        for _, obj := range loi.Objects {
-            if _, err := l.s3Objects.DeleteObject(ctx, bucket, obj.Name, minio.ObjectOptions{}); err != nil {
-                return minio.ErrorRespToObjectError(err)
-            }
-            startAfter = obj.Name
-        }
-        continuationToken = loi.NextContinuationToken
-        if !loi.IsTruncated {
-            break
-        }
-    }
-    return nil
-}
-
-// CompleteMultipartUpload completes the ongoing multipart upload and finalizes the object
-func (l *s3EncObjects) CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, e error) {
-    tmpMeta, err := l.getGWMetadata(ctx, bucket, getTmpDareMetaPath(object, uploadID))
-    if err != nil {
-        oi, e = l.s3Objects.CompleteMultipartUpload(ctx, bucket, object, uploadID, uploadedParts, opts)
-        if e == nil {
-            // delete any encrypted version of the object that might exist
-            defer l.deleteGWMetadata(ctx, bucket, getDareMetaPath(object))
-            defer l.DeleteObject(ctx, bucket, getGWContentPath(object), opts)
-        }
-        return oi, e
-    }
-    gwMeta := newGWMetaV1()
-    gwMeta.Meta = make(map[string]string)
-    for k, v := range tmpMeta.Meta {
-        gwMeta.Meta[k] = v
-    }
-    // Allocate parts similar to the incoming slice.
-    gwMeta.Parts = make([]minio.ObjectPartInfo, len(uploadedParts))
-
-    bkUploadedParts := make([]minio.CompletePart, len(uploadedParts))
-    // Calculate the full object size.
-    var objectSize int64
-
-    // Validate each part and then commit to disk.
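// Editor's note (not part of the original change): completion below is effectively
// two-phase. First, each part's part.meta (written by PutObjectPart) is re-read and
// its backend ETag substituted into bkUploadedParts; then the backend multipart
// upload is completed against the encrypted content path; and only after that
// succeeds is the final dare.meta written, so readers, which resolve objects
// through dare.meta, should not observe a half-finished object.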
- for i, part := range uploadedParts { - partMeta, err := l.getGWMetadata(ctx, bucket, getPartMetaPath(object, uploadID, part.PartNumber)) - if err != nil || len(partMeta.Parts) == 0 { - return oi, minio.InvalidPart{} - } - bkUploadedParts[i] = minio.CompletePart{PartNumber: part.PartNumber, ETag: partMeta.Parts[0].ETag} - gwMeta.Parts[i] = partMeta.Parts[0] - objectSize += partMeta.Parts[0].Size - } - oi, e = l.s3Objects.CompleteMultipartUpload(ctx, bucket, getGWContentPath(object), uploadID, bkUploadedParts, opts) - if e != nil { - return oi, e - } - - // delete any unencrypted version of object that might be on the backend - defer l.s3Objects.DeleteObject(ctx, bucket, object, opts) - - // Save the final object size and modtime. - gwMeta.Stat.Size = objectSize - gwMeta.Stat.ModTime = time.Now().UTC() - gwMeta.ETag = oi.ETag - - if err = l.writeGWMetadata(ctx, bucket, getDareMetaPath(object), gwMeta, minio.ObjectOptions{}); err != nil { - return oi, minio.ErrorRespToObjectError(err) - } - // Clean up any uploaded parts that are not being committed by this CompleteMultipart operation - var continuationToken, startAfter, delimiter string - uploadPrefix := getTmpGWMetaPath(object, uploadID) - done := false - for { - loi, lerr := l.s3Objects.ListObjectsV2(ctx, bucket, uploadPrefix, continuationToken, delimiter, 1000, false, startAfter) - if lerr != nil { - break - } - for _, obj := range loi.Objects { - if !strings.HasPrefix(obj.Name, uploadPrefix) { - done = true - break - } - startAfter = obj.Name - l.s3Objects.DeleteObject(ctx, bucket, obj.Name, opts) - } - continuationToken = loi.NextContinuationToken - if !loi.IsTruncated || done { - break - } - } - - return gwMeta.ToObjectInfo(bucket, object), nil -} - -// getTmpGWMetaPath returns the prefix under which uploads in progress are stored on backend -func getTmpGWMetaPath(object, uploadID string) string { - return path.Join(object, defaultMinioGWPrefix, uploadID) -} - -// getGWMetaPath returns the prefix under which custom object metadata and object are stored on backend after upload completes -func getGWMetaPath(object string) string { - return path.Join(object, defaultMinioGWPrefix) -} - -// getGWContentPath returns the prefix under which custom object is stored on backend after upload completes -func getGWContentPath(object string) string { - return path.Join(object, defaultMinioGWPrefix, defaultGWContentFileName) -} - -// Clean-up the stale incomplete encrypted multipart uploads. Should be run in a Go routine. 
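// Editor's note (usage, not part of the original change): NewGatewayLayer later in
// this diff starts exactly this loop with
//
//     go encS.cleanupStaleEncMultipartUploads(minio.GlobalContext,
//         minio.GlobalStaleUploadsCleanupInterval, minio.GlobalStaleUploadsExpiry)
//
// one goroutine per gateway instance: cleanupInterval controls how often buckets
// are scanned, and expiry how old a part.meta/dare.meta must be before deletion.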
-func (l *s3EncObjects) cleanupStaleEncMultipartUploads(ctx context.Context, cleanupInterval, expiry time.Duration) { - ticker := time.NewTicker(cleanupInterval) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - l.cleanupStaleUploads(ctx, expiry) - } - } -} - -// cleanupStaleUploads removes old custom encryption multipart uploads on backend -func (l *s3EncObjects) cleanupStaleUploads(ctx context.Context, expiry time.Duration) { - buckets, err := l.s3Objects.ListBuckets(ctx, minio.BucketOptions{}) - if err != nil { - logger.LogIf(ctx, err) - return - } - for _, b := range buckets { - expParts := l.getStalePartsForBucket(ctx, b.Name, expiry) - for k := range expParts { - l.s3Objects.DeleteObject(ctx, b.Name, k, minio.ObjectOptions{}) - } - } -} - -func (l *s3EncObjects) getStalePartsForBucket(ctx context.Context, bucket string, expiry time.Duration) (expParts map[string]string) { - var prefix, continuationToken, delimiter, startAfter string - expParts = make(map[string]string) - now := time.Now() - for { - loi, err := l.s3Objects.ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, 1000, false, startAfter) - if err != nil { - logger.LogIf(ctx, err) - break - } - for _, obj := range loi.Objects { - startAfter = obj.Name - if !strings.Contains(obj.Name, defaultMinioGWPrefix) { - continue - } - - if isGWObject(obj.Name) { - continue - } - - // delete temporary part.meta or dare.meta files for incomplete uploads that are past expiry - if (strings.HasSuffix(obj.Name, gwpartMetaJSON) || strings.HasSuffix(obj.Name, gwdareMetaJSON)) && - now.Sub(obj.ModTime) > expiry { - expParts[obj.Name] = "" - } - } - continuationToken = loi.NextContinuationToken - if !loi.IsTruncated { - break - } - } - return -} - -func (l *s3EncObjects) DeleteBucket(ctx context.Context, bucket string, opts minio.DeleteBucketOptions) error { - var prefix, continuationToken, delimiter, startAfter string - expParts := make(map[string]string) - - for { - loi, err := l.s3Objects.ListObjectsV2(ctx, bucket, prefix, continuationToken, delimiter, 1000, false, startAfter) - if err != nil { - break - } - for _, obj := range loi.Objects { - startAfter = obj.Name - if !strings.Contains(obj.Name, defaultMinioGWPrefix) { - return minio.BucketNotEmpty{} - } - if isGWObject(obj.Name) { - return minio.BucketNotEmpty{} - } - // delete temporary part.meta or dare.meta files for incomplete uploads - if strings.HasSuffix(obj.Name, gwpartMetaJSON) || strings.HasSuffix(obj.Name, gwdareMetaJSON) { - expParts[obj.Name] = "" - } - } - continuationToken = loi.NextContinuationToken - if !loi.IsTruncated { - break - } - } - for k := range expParts { - l.s3Objects.DeleteObject(ctx, bucket, k, minio.ObjectOptions{}) - } - err := l.Client.RemoveBucket(ctx, bucket) - if err != nil { - return minio.ErrorRespToObjectError(err, bucket) - } - return nil -} diff --git a/cmd/gateway/s3/gateway-s3-utils.go b/cmd/gateway/s3/gateway-s3-utils.go deleted file mode 100644 index 8eb769977..000000000 --- a/cmd/gateway/s3/gateway-s3-utils.go +++ /dev/null @@ -1,49 +0,0 @@ -/* - * MinIO Object Storage (c) 2021 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3 - -import ( - minio "github.com/minio/minio/cmd" -) - -// List of header keys to be filtered, usually -// from all S3 API http responses. -var defaultFilterKeys = []string{ - "Connection", - "Transfer-Encoding", - "Accept-Ranges", - "Date", - "Server", - "Vary", - "x-amz-bucket-region", - "x-amz-request-id", - "x-amz-id-2", - "Content-Security-Policy", - "X-Xss-Protection", - - // Add new headers to be ignored. -} - -// FromGatewayObjectPart converts ObjectInfo for custom part stored as object to PartInfo -func FromGatewayObjectPart(partID int, oi minio.ObjectInfo) (pi minio.PartInfo) { - return minio.PartInfo{ - Size: oi.Size, - ETag: minio.CanonicalizeETag(oi.ETag), - LastModified: oi.ModTime, - PartNumber: partID, - } -} diff --git a/cmd/gateway/s3/gateway-s3.go b/cmd/gateway/s3/gateway-s3.go deleted file mode 100644 index 44666cb66..000000000 --- a/cmd/gateway/s3/gateway-s3.go +++ /dev/null @@ -1,822 +0,0 @@ -/* - * MinIO Object Storage (c) 2021 MinIO, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package s3 - -import ( - "context" - "encoding/json" - "io" - "math/rand" - "net/http" - "net/url" - "os" - "strings" - "time" - - "github.com/minio/cli" - "github.com/minio/madmin-go" - miniogo "github.com/minio/minio-go/v7" - "github.com/minio/minio-go/v7/pkg/credentials" - "github.com/minio/minio-go/v7/pkg/encrypt" - "github.com/minio/minio-go/v7/pkg/s3utils" - "github.com/minio/minio-go/v7/pkg/tags" - minio "github.com/minio/minio/cmd" - "github.com/minio/minio/internal/config" - xhttp "github.com/minio/minio/internal/http" - "github.com/minio/minio/internal/logger" - "github.com/minio/pkg/bucket/policy" - "github.com/minio/pkg/env" -) - -func init() { - const s3GatewayTemplate = `NAME: - {{.HelpName}} - {{.Usage}} - -USAGE: - {{.HelpName}} {{if .VisibleFlags}}[FLAGS]{{end}} [ENDPOINT] -{{if .VisibleFlags}} -FLAGS: - {{range .VisibleFlags}}{{.}} - {{end}}{{end}} -ENDPOINT: - s3 server endpoint. Default ENDPOINT is https://s3.amazonaws.com - -EXAMPLES: - 1. Start minio gateway server for AWS S3 backend - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}accesskey - {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}secretkey - {{.Prompt}} {{.HelpName}} - - 2. 
Start minio gateway server for AWS S3 backend with edge caching enabled
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_USER{{.AssignmentOperator}}accesskey
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_ROOT_PASSWORD{{.AssignmentOperator}}secretkey
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_DRIVES{{.AssignmentOperator}}"/mnt/drive1,/mnt/drive2,/mnt/drive3,/mnt/drive4"
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_EXCLUDE{{.AssignmentOperator}}"bucket1/*,*.png"
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_QUOTA{{.AssignmentOperator}}90
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_AFTER{{.AssignmentOperator}}3
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_LOW{{.AssignmentOperator}}75
-     {{.Prompt}} {{.EnvVarSetCommand}} MINIO_CACHE_WATERMARK_HIGH{{.AssignmentOperator}}85
-     {{.Prompt}} {{.HelpName}}
-`
-
-    minio.RegisterGatewayCommand(cli.Command{
-        Name:               minio.S3BackendGateway,
-        Usage:              "Amazon Simple Storage Service (S3)",
-        Action:             s3GatewayMain,
-        CustomHelpTemplate: s3GatewayTemplate,
-        HideHelpCommand:    true,
-    })
-}
-
-// Handler for 'minio gateway s3' command line.
-func s3GatewayMain(ctx *cli.Context) {
-    args := ctx.Args()
-    if !ctx.Args().Present() {
-        args = cli.Args{"https://s3.amazonaws.com"}
-    }
-
-    serverAddr := ctx.GlobalString("address")
-    if serverAddr == "" || serverAddr == ":"+minio.GlobalMinioDefaultPort {
-        serverAddr = ctx.String("address")
-    }
-    // Validate gateway arguments.
-    logger.FatalIf(minio.ValidateGatewayArguments(serverAddr, args.First()), "Invalid argument")
-
-    // Start the gateway.
-    minio.StartGateway(ctx, &S3{
-        host:  args.First(),
-        debug: env.Get("_MINIO_SERVER_DEBUG", config.EnableOff) == config.EnableOn,
-    })
-}
-
-// S3 implements Gateway.
-type S3 struct {
-    host  string
-    debug bool
-}
-
-// Name implements Gateway interface.
-func (g *S3) Name() string {
-    return minio.S3BackendGateway
-}
-
-const letterBytes = "abcdefghijklmnopqrstuvwxyz01234569"
-const (
-    letterIdxBits = 6                    // 6 bits to represent a letter index
-    letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits
-    letterIdxMax  = 63 / letterIdxBits   // # of letter indices fitting in 63 bits
-)
-
-// randString generates random names and prepends them with a known prefix.
-func randString(n int, src rand.Source, prefix string) string {
-    b := make([]byte, n)
-    // A src.Int63() generates 63 random bits, enough for letterIdxMax letters!
-    for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
-        if remain == 0 {
-            cache, remain = src.Int63(), letterIdxMax
-        }
-        if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
-            b[i] = letterBytes[idx]
-            i--
-        }
-        cache >>= letterIdxBits
-        remain--
-    }
-    return prefix + string(b[0:30-len(prefix)])
-}
-
-// Chains all credential types, in the following order:
-// - AWS env vars (i.e. AWS_ACCESS_KEY_ID)
-// - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
-// - Static credentials provided by the user (i.e. MINIO_ROOT_USER/MINIO_ACCESS_KEY)
-var defaultProviders = []credentials.Provider{
-    &credentials.EnvAWS{},
-    &credentials.FileAWSCredentials{},
-}
-
-// Chains all credential types, in the following order:
-// - AWS env vars (i.e. AWS_ACCESS_KEY_ID)
-// - AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
-// - IAM profile based credentials (performs an HTTP
-//   call to a pre-defined endpoint, only valid inside
-//   configured ec2 instances)
-// - Static credentials provided by the user (i.e. MINIO_ROOT_USER/MINIO_ACCESS_KEY)
-var defaultAWSCredProviders = []credentials.Provider{
-    &credentials.EnvAWS{},
-    &credentials.FileAWSCredentials{},
-    &credentials.IAM{
-        // you can specify a custom STS endpoint.
-        Endpoint: env.Get("MINIO_GATEWAY_S3_STS_ENDPOINT", ""),
-        Client: &http.Client{
-            Transport: minio.NewGatewayHTTPTransport(),
-        },
-    },
-}
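A rough consumption sketch for the provider chains above (editorial addition; NewChainCredentials is the gateway helper whose Retrieve/IsExpired methods appear at the top of this diff):

    chain := NewChainCredentials(defaultAWSCredProviders)
    v, err := chain.Get() // walks the providers in order; the first with a non-empty AccessKeyID wins
    if err != nil {
        // no provider produced usable keys; the gateway cannot authenticate
    }
    _, _ = v.AccessKeyID, v.SecretAccessKey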
-// new - Initializes a new client by auto probing the S3 server signature.
-func (g *S3) new(creds madmin.Credentials, transport http.RoundTripper) (*miniogo.Core, error) {
-    urlStr := g.host
-    if urlStr == "" {
-        urlStr = "https://s3.amazonaws.com"
-    }
-
-    u, err := url.Parse(urlStr)
-    if err != nil {
-        return nil, err
-    }
-
-    // Override default params if the host is provided
-    endpoint, secure, err := minio.ParseGatewayEndpoint(urlStr)
-    if err != nil {
-        return nil, err
-    }
-
-    var chainCreds *credentials.Credentials
-    if s3utils.IsAmazonEndpoint(*u) {
-        // If we see an Amazon S3 endpoint, then we use more ways to fetch backend credentials.
-        // Specifically, IAM style rotating credentials are only supported with the AWS S3 endpoint.
-        chainCreds = NewChainCredentials(defaultAWSCredProviders)
-    } else {
-        chainCreds = NewChainCredentials(defaultProviders)
-    }
-
-    optionsStaticCreds := &miniogo.Options{
-        Creds:        credentials.NewStaticV4(creds.AccessKey, creds.SecretKey, creds.SessionToken),
-        Secure:       secure,
-        Region:       s3utils.GetRegionFromURL(*u),
-        BucketLookup: miniogo.BucketLookupAuto,
-        Transport:    transport,
-    }
-
-    optionsChainCreds := &miniogo.Options{
-        Creds:        chainCreds,
-        Secure:       secure,
-        Region:       s3utils.GetRegionFromURL(*u),
-        BucketLookup: miniogo.BucketLookupAuto,
-        Transport:    transport,
-    }
-
-    clntChain, err := miniogo.New(endpoint, optionsChainCreds)
-    if err != nil {
-        return nil, err
-    }
-
-    clntStatic, err := miniogo.New(endpoint, optionsStaticCreds)
-    if err != nil {
-        return nil, err
-    }
-
-    if g.debug {
-        clntChain.TraceOn(os.Stderr)
-        clntStatic.TraceOn(os.Stderr)
-    }
-
-    probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-bucket-sign-")
-
-    if _, err = clntStatic.BucketExists(context.Background(), probeBucketName); err != nil {
-        switch miniogo.ToErrorResponse(err).Code {
-        case "InvalidAccessKeyId":
-            // Check if the provided keys are valid for the chain.
-            if _, err = clntChain.BucketExists(context.Background(), probeBucketName); err != nil {
-                if miniogo.ToErrorResponse(err).Code != "AccessDenied" {
-                    return nil, err
-                }
-            }
-            return &miniogo.Core{Client: clntChain}, nil
-        case "AccessDenied":
-            // this is a benign error: it means the backend is reachable
-            // and the credentials are valid, but they don't
-            // have access to 'probeBucketName', which is harmless.
-            return &miniogo.Core{Client: clntStatic}, nil
-        default:
-            return nil, err
-        }
-    }
-
-    // if the static keys are valid, always use the static keys.
-    return &miniogo.Core{Client: clntStatic}, nil
-}
-
-// NewGatewayLayer returns the s3 ObjectLayer.
-func (g *S3) NewGatewayLayer(creds madmin.Credentials) (minio.ObjectLayer, error) {
-    metrics := minio.NewMetrics()
-
-    t := &minio.MetricsTransport{
-        Transport: minio.NewGatewayHTTPTransport(),
-        Metrics:   metrics,
-    }
-
-    // creds are ignored here, since the S3 gateway implements chaining
-    // of all credentials.
-    clnt, err := g.new(creds, t)
-    if err != nil {
-        return nil, err
-    }
-
-    s := s3Objects{
-        Client:  clnt,
-        Metrics: metrics,
-        HTTPClient: &http.Client{
-            Transport: t,
-        },
-    }
-
-    // Enable single encryption if KMS is configured.
-    if minio.GlobalKMS != nil {
-        encS := s3EncObjects{s}
-
-        // Start the stale encrypted multipart uploads cleanup routine.
-        go encS.cleanupStaleEncMultipartUploads(minio.GlobalContext,
-            minio.GlobalStaleUploadsCleanupInterval, minio.GlobalStaleUploadsExpiry)
-
-        return &encS, nil
-    }
-    return &s, nil
-}
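The signature probe in g.new above reduces to a three-way decision; restated as a sketch (editorial addition, mirroring the deleted logic):

    _, err := clntStatic.BucketExists(context.Background(), probeBucketName)
    switch miniogo.ToErrorResponse(err).Code {
    case "InvalidAccessKeyId":
        // static keys rejected: re-probe with the chain client and use it if that passes
    case "AccessDenied", "":
        // backend reachable and keys valid (merely no access to the probe bucket): keep static keys
    default:
        // anything else (network error, bad endpoint) aborts gateway startup
    }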
-// s3Objects implements the gateway for MinIO and S3 compatible object storage servers.
-type s3Objects struct {
-    minio.GatewayUnsupported
-    Client     *miniogo.Core
-    HTTPClient *http.Client
-    Metrics    *minio.BackendMetrics
-}
-
-// GetMetrics returns this gateway's metrics
-func (l *s3Objects) GetMetrics(ctx context.Context) (*minio.BackendMetrics, error) {
-    return l.Metrics, nil
-}
-
-// Shutdown saves any gateway metadata to disk
-// if necessary and reloads it upon the next restart.
-func (l *s3Objects) Shutdown(ctx context.Context) error {
-    return nil
-}
-
-// StorageInfo is not relevant to the S3 backend.
-func (l *s3Objects) StorageInfo(ctx context.Context) (si minio.StorageInfo, _ []error) {
-    si.Backend.Type = madmin.Gateway
-    probeBucketName := randString(60, rand.NewSource(time.Now().UnixNano()), "probe-bucket-sign-")
-
-    // check if the bucket exists.
-    _, err := l.Client.BucketExists(ctx, probeBucketName)
-    switch miniogo.ToErrorResponse(err).Code {
-    case "", "AccessDenied":
-        si.Backend.GatewayOnline = true
-    default:
-        logger.LogIf(ctx, err)
-        si.Backend.GatewayOnline = false
-    }
-    return si, nil
-}
-
-// MakeBucketWithLocation creates a new bucket on the S3 backend.
-func (l *s3Objects) MakeBucketWithLocation(ctx context.Context, bucket string, opts minio.MakeBucketOptions) error {
-    if opts.LockEnabled || opts.VersioningEnabled {
-        return minio.NotImplemented{}
-    }
-
-    // Verify if the bucket name is valid.
-    // We are using a separate helper function here to validate bucket
-    // names instead of IsValidBucketName() because there is a possibility
-    // that certain users might have buckets which are non-DNS compliant
-    // in us-east-1 and we might severely restrict them by not allowing
-    // access to these buckets.
-    // Ref - http://docs.aws.amazon.com/AmazonS3/latest/dev/BucketRestrictions.html
-    if s3utils.CheckValidBucketName(bucket) != nil {
-        return minio.BucketNameInvalid{Bucket: bucket}
-    }
-    err := l.Client.MakeBucket(ctx, bucket, miniogo.MakeBucketOptions{Region: opts.Location})
-    if err != nil {
-        return minio.ErrorRespToObjectError(err, bucket)
-    }
-    return err
-}
-
-// GetBucketInfo gets bucket metadata.
-func (l *s3Objects) GetBucketInfo(ctx context.Context, bucket string, opts minio.BucketOptions) (bi minio.BucketInfo, e error) {
-    buckets, err := l.Client.ListBuckets(ctx)
-    if err != nil {
-        // ListBuckets may be disallowed, proceed to check if the
-        // bucket indeed exists; if yes, return success.
-        var ok bool
-        if ok, err = l.Client.BucketExists(ctx, bucket); err != nil {
-            return bi, minio.ErrorRespToObjectError(err, bucket)
-        }
-        if !ok {
-            return bi, minio.BucketNotFound{Bucket: bucket}
-        }
-        return minio.BucketInfo{
-            Name:    bucket,
-            Created: time.Now().UTC(),
-        }, nil
-    }
-
-    for _, bi := range buckets {
-        if bi.Name != bucket {
-            continue
-        }
-
-        return minio.BucketInfo{
-            Name:    bi.Name,
-            Created: bi.CreationDate,
-        }, nil
-    }
-
-    return bi, minio.BucketNotFound{Bucket: bucket}
-}
-
-// ListBuckets lists all S3 buckets
-func (l *s3Objects) ListBuckets(ctx context.Context, opts minio.BucketOptions) ([]minio.BucketInfo, error) {
-    buckets, err := l.Client.ListBuckets(ctx)
-    if err != nil {
-        return nil, minio.ErrorRespToObjectError(err)
-    }
-
-    b := make([]minio.BucketInfo, len(buckets))
-    for i, bi := range buckets {
-        b[i] = minio.BucketInfo{
-            Name:    bi.Name,
-            Created: bi.CreationDate,
-        }
-    }
-
-    return b, err
-}
-
-// DeleteBucket deletes a bucket on S3
-func (l *s3Objects) DeleteBucket(ctx context.Context, bucket string, opts minio.DeleteBucketOptions) error {
-    err := l.Client.RemoveBucket(ctx, bucket)
-    if err != nil {
-        return minio.ErrorRespToObjectError(err, bucket)
-    }
-    return nil
-}
-
-// ListObjects lists all blobs in an S3 bucket filtered by prefix
-func (l *s3Objects) ListObjects(ctx context.Context, bucket string, prefix string, marker string, delimiter string, maxKeys int) (loi minio.ListObjectsInfo, e error) {
-    result, err := l.Client.ListObjects(bucket, prefix, marker, delimiter, maxKeys)
-    if err != nil {
-        return loi, minio.ErrorRespToObjectError(err, bucket)
-    }
-
-    return minio.FromMinioClientListBucketResult(bucket, result), nil
-}
-
-// ListObjectsV2 lists all blobs in an S3 bucket filtered by prefix
-func (l *s3Objects) ListObjectsV2(ctx context.Context, bucket, prefix, continuationToken, delimiter string, maxKeys int, fetchOwner bool, startAfter string) (loi minio.ListObjectsV2Info, e error) {
-    result, err := l.Client.ListObjectsV2(bucket, prefix, startAfter, continuationToken, delimiter, maxKeys)
-    if err != nil {
-        return loi, minio.ErrorRespToObjectError(err, bucket)
-    }
-
-    return minio.FromMinioClientListBucketV2Result(bucket, result), nil
-}
-
-// GetObjectNInfo - returns object info and a locked object ReadCloser
-func (l *s3Objects) GetObjectNInfo(ctx context.Context, bucket, object string, rs *minio.HTTPRangeSpec, h http.Header, lockType minio.LockType, opts minio.ObjectOptions) (gr *minio.GetObjectReader, err error) {
-    var objInfo minio.ObjectInfo
-    objInfo, err = l.GetObjectInfo(ctx, bucket, object, opts)
-    if err != nil {
-        return nil, minio.ErrorRespToObjectError(err, bucket, object)
-    }
-
-    fn, off, length, err := minio.NewGetObjectReader(rs, objInfo, opts)
-    if err != nil {
-        return nil, minio.ErrorRespToObjectError(err, bucket, object)
-    }
-
-    pr, pw := io.Pipe()
-    go func() {
-        err := l.getObject(ctx, bucket, object, off, length, pw, objInfo.ETag, opts)
-        pw.CloseWithError(err)
-    }()
-
-    // Setup a cleanup function to cause the above go-routine to
-    // exit in case of a partial read
-    pipeCloser := func() { pr.Close() }
-    return fn(pr, h, pipeCloser)
-}
-
-// GetObject reads an object from S3. Supports additional
-// parameters like offset and length which are synonymous with
-// HTTP Range requests.
-//
-// startOffset indicates the starting read location of the object.
-// length indicates the total length of the object.
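// Editor's note (worked example, not part of the original change): a ranged read
// with startOffset = 100 and length = 50 becomes opts.SetRange(100, 149) below,
// i.e. an HTTP "Range: bytes=100-149" request; length = -1 is the read-to-end
// sentinel and skips SetRange entirely.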
-func (l *s3Objects) getObject(ctx context.Context, bucket string, key string, startOffset int64, length int64, writer io.Writer, etag string, o minio.ObjectOptions) error { - if length < 0 && length != -1 { - return minio.ErrorRespToObjectError(minio.InvalidRange{}, bucket, key) - } - - opts := miniogo.GetObjectOptions{} - opts.ServerSideEncryption = o.ServerSideEncryption - - if startOffset >= 0 && length >= 0 { - if err := opts.SetRange(startOffset, startOffset+length-1); err != nil { - return minio.ErrorRespToObjectError(err, bucket, key) - } - } - - if etag != "" { - opts.SetMatchETag(etag) - } - - object, _, _, err := l.Client.GetObject(ctx, bucket, key, opts) - if err != nil { - return minio.ErrorRespToObjectError(err, bucket, key) - } - defer object.Close() - if _, err := io.Copy(writer, object); err != nil { - return minio.ErrorRespToObjectError(err, bucket, key) - } - return nil -} - -// GetObjectInfo reads object info and replies back ObjectInfo -func (l *s3Objects) GetObjectInfo(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - oi, err := l.Client.StatObject(ctx, bucket, object, miniogo.StatObjectOptions{ - ServerSideEncryption: opts.ServerSideEncryption, - }) - if err != nil { - return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object) - } - - return minio.FromMinioClientObjectInfo(bucket, oi), nil -} - -// PutObject creates a new object with the incoming data, -func (l *s3Objects) PutObject(ctx context.Context, bucket string, object string, r *minio.PutObjReader, opts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - data := r.Reader - - userDefined := minio.CloneMSS(opts.UserDefined) - - var tagMap map[string]string - if tagstr, ok := userDefined[xhttp.AmzObjectTagging]; ok && tagstr != "" { - tagObj, err := tags.ParseObjectTags(tagstr) - if err != nil { - return objInfo, minio.ErrorRespToObjectError(err, bucket, object) - } - tagMap = tagObj.ToMap() - delete(userDefined, xhttp.AmzObjectTagging) - } - putOpts := miniogo.PutObjectOptions{ - UserMetadata: userDefined, - ServerSideEncryption: opts.ServerSideEncryption, - UserTags: tagMap, - // Content-Md5 is needed for buckets with object locking, - // instead of spending an extra API call to detect this - // we can set md5sum to be calculated always. - SendContentMd5: true, - } - ui, err := l.Client.PutObject(ctx, bucket, object, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), putOpts) - if err != nil { - return objInfo, minio.ErrorRespToObjectError(err, bucket, object) - } - // On success, populate the key & metadata so they are present in the notification - oi := miniogo.ObjectInfo{ - ETag: ui.ETag, - Size: ui.Size, - Key: object, - Metadata: minio.ToMinioClientObjectInfoMetadata(userDefined), - } - - return minio.FromMinioClientObjectInfo(bucket, oi), nil -} - -// CopyObject copies an object from source bucket to a destination bucket. -func (l *s3Objects) CopyObject(ctx context.Context, srcBucket string, srcObject string, dstBucket string, dstObject string, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions) (objInfo minio.ObjectInfo, err error) { - if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) { - return minio.ObjectInfo{}, minio.PreConditionFailed{} - } - // Set this header such that following CopyObject() always sets the right metadata on the destination. 
- // metadata input is already a trickled down value from interpreting x-amz-metadata-directive at - // handler layer. So what we have right now is supposed to be applied on the destination object anyways. - // So preserve it by adding "REPLACE" directive to save all the metadata set by CopyObject API. - userDefined := minio.CloneMSS(srcInfo.UserDefined) - userDefined["x-amz-metadata-directive"] = "REPLACE" - userDefined["x-amz-copy-source-if-match"] = srcInfo.ETag - header := make(http.Header) - if srcOpts.ServerSideEncryption != nil { - encrypt.SSECopy(srcOpts.ServerSideEncryption).Marshal(header) - } - - if dstOpts.ServerSideEncryption != nil { - dstOpts.ServerSideEncryption.Marshal(header) - } - - for k, v := range header { - userDefined[k] = v[0] - } - - if _, err = l.Client.CopyObject(ctx, srcBucket, srcObject, dstBucket, dstObject, userDefined, miniogo.CopySrcOptions{}, miniogo.PutObjectOptions{}); err != nil { - return objInfo, minio.ErrorRespToObjectError(err, srcBucket, srcObject) - } - return l.GetObjectInfo(ctx, dstBucket, dstObject, dstOpts) -} - -// DeleteObject deletes a blob in bucket -func (l *s3Objects) DeleteObject(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) { - err := l.Client.RemoveObject(ctx, bucket, object, miniogo.RemoveObjectOptions{}) - if err != nil { - return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object) - } - - return minio.ObjectInfo{ - Bucket: bucket, - Name: object, - }, nil -} - -func (l *s3Objects) DeleteObjects(ctx context.Context, bucket string, objects []minio.ObjectToDelete, opts minio.ObjectOptions) ([]minio.DeletedObject, []error) { - errs := make([]error, len(objects)) - dobjects := make([]minio.DeletedObject, len(objects)) - for idx, object := range objects { - _, errs[idx] = l.DeleteObject(ctx, bucket, object.ObjectName, opts) - if errs[idx] == nil { - dobjects[idx] = minio.DeletedObject{ - ObjectName: object.ObjectName, - } - } - } - return dobjects, errs -} - -// ListMultipartUploads lists all multipart uploads. 
-func (l *s3Objects) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi minio.ListMultipartsInfo, e error) { - result, err := l.Client.ListMultipartUploads(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, maxUploads) - if err != nil { - return lmi, err - } - - return minio.FromMinioClientListMultipartsInfo(result), nil -} - -// NewMultipartUpload upload object in multiple parts -func (l *s3Objects) NewMultipartUpload(ctx context.Context, bucket, object string, o minio.ObjectOptions) (result *minio.NewMultipartUploadResult, err error) { - var tagMap map[string]string - userDefined := minio.CloneMSS(o.UserDefined) - if tagStr, ok := userDefined[xhttp.AmzObjectTagging]; ok { - tagObj, err := tags.Parse(tagStr, true) - if err != nil { - return nil, minio.ErrorRespToObjectError(err, bucket, object) - } - tagMap = tagObj.ToMap() - delete(userDefined, xhttp.AmzObjectTagging) - } - // Create PutObject options - opts := miniogo.PutObjectOptions{ - UserMetadata: userDefined, - ServerSideEncryption: o.ServerSideEncryption, - UserTags: tagMap, - } - uploadID, err := l.Client.NewMultipartUpload(ctx, bucket, object, opts) - if err != nil { - return nil, minio.ErrorRespToObjectError(err, bucket, object) - } - return &minio.NewMultipartUploadResult{UploadID: uploadID}, nil -} - -// PutObjectPart puts a part of object in bucket -func (l *s3Objects) PutObjectPart(ctx context.Context, bucket string, object string, uploadID string, partID int, r *minio.PutObjReader, opts minio.ObjectOptions) (pi minio.PartInfo, e error) { - data := r.Reader - info, err := l.Client.PutObjectPart(ctx, bucket, object, uploadID, partID, data, data.Size(), data.MD5Base64String(), data.SHA256HexString(), opts.ServerSideEncryption) - if err != nil { - return pi, minio.ErrorRespToObjectError(err, bucket, object) - } - - return minio.FromMinioClientObjectPart(info), nil -} - -// CopyObjectPart creates a part in a multipart upload by copying -// existing object or a part of it. 
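// Editor's note (not part of the original change): the implementation below pins
// the copy source with "x-amz-copy-source-if-match" set to srcInfo.ETag, so if the
// source object is overwritten between stat and copy, the backend rejects the part
// copy with a precondition failure instead of silently copying the new bytes.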
-// CopyObjectPart creates a part in a multipart upload by copying
-// existing object or a part of it.
-func (l *s3Objects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject, uploadID string,
-	partID int, startOffset, length int64, srcInfo minio.ObjectInfo, srcOpts, dstOpts minio.ObjectOptions,
-) (p minio.PartInfo, err error) {
-	if srcOpts.CheckPrecondFn != nil && srcOpts.CheckPrecondFn(srcInfo) {
-		return minio.PartInfo{}, minio.PreConditionFailed{}
-	}
-	userDefined := minio.CloneMSS(srcInfo.UserDefined)
-	userDefined["x-amz-copy-source-if-match"] = srcInfo.ETag
-	header := make(http.Header)
-	if srcOpts.ServerSideEncryption != nil {
-		encrypt.SSECopy(srcOpts.ServerSideEncryption).Marshal(header)
-	}
-
-	if dstOpts.ServerSideEncryption != nil {
-		dstOpts.ServerSideEncryption.Marshal(header)
-	}
-	for k, v := range header {
-		userDefined[k] = v[0]
-	}
-
-	completePart, err := l.Client.CopyObjectPart(ctx, srcBucket, srcObject, destBucket, destObject,
-		uploadID, partID, startOffset, length, userDefined)
-	if err != nil {
-		return p, minio.ErrorRespToObjectError(err, srcBucket, srcObject)
-	}
-	p.PartNumber = completePart.PartNumber
-	p.ETag = completePart.ETag
-	return p, nil
-}
-
-// GetMultipartInfo returns multipart info of the uploadId of the object
-func (l *s3Objects) GetMultipartInfo(ctx context.Context, bucket, object, uploadID string, opts minio.ObjectOptions) (result minio.MultipartInfo, err error) {
-	result.Bucket = bucket
-	result.Object = object
-	result.UploadID = uploadID
-	return result, nil
-}
-
-// ListObjectParts returns all object parts for specified object in specified bucket
-func (l *s3Objects) ListObjectParts(ctx context.Context, bucket string, object string, uploadID string, partNumberMarker int, maxParts int, opts minio.ObjectOptions) (lpi minio.ListPartsInfo, e error) {
-	result, err := l.Client.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
-	if err != nil {
-		return lpi, err
-	}
-	lpi = minio.FromMinioClientListPartsInfo(result)
-	if lpi.IsTruncated && maxParts > len(lpi.Parts) {
-		partNumberMarker = lpi.NextPartNumberMarker
-		for {
-			result, err = l.Client.ListObjectParts(ctx, bucket, object, uploadID, partNumberMarker, maxParts)
-			if err != nil {
-				return lpi, err
-			}
-
-			nlpi := minio.FromMinioClientListPartsInfo(result)
-
-			partNumberMarker = nlpi.NextPartNumberMarker
-
-			lpi.Parts = append(lpi.Parts, nlpi.Parts...)
-			if !nlpi.IsTruncated {
-				break
-			}
-		}
-	}
-	return lpi, nil
-}
-
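ListObjectParts above drains truncated listings by re-issuing the call with the returned NextPartNumberMarker. The same loop, reduced to its shape (the page type and fetch function are hypothetical stand-ins for the minio-go calls):

package main

import "fmt"

type partPage struct {
	Parts                []int
	NextPartNumberMarker int
	IsTruncated          bool
}

// listAllParts keeps fetching pages until the backend reports the
// listing is no longer truncated, exactly as the deleted code did.
func listAllParts(fetch func(marker int) partPage) []int {
	var all []int
	marker := 0
	for {
		page := fetch(marker)
		all = append(all, page.Parts...)
		if !page.IsTruncated {
			return all
		}
		marker = page.NextPartNumberMarker
	}
}

func main() {
	pages := []partPage{
		{Parts: []int{1, 2}, NextPartNumberMarker: 2, IsTruncated: true},
		{Parts: []int{3, 4}, IsTruncated: false},
	}
	i := 0
	fmt.Println(listAllParts(func(int) partPage { p := pages[i]; i++; return p }))
}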
-// AbortMultipartUpload aborts a ongoing multipart upload
-func (l *s3Objects) AbortMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, opts minio.ObjectOptions) error {
-	err := l.Client.AbortMultipartUpload(ctx, bucket, object, uploadID)
-	return minio.ErrorRespToObjectError(err, bucket, object)
-}
-
-// CompleteMultipartUpload completes ongoing multipart upload and finalizes object
-func (l *s3Objects) CompleteMultipartUpload(ctx context.Context, bucket string, object string, uploadID string, uploadedParts []minio.CompletePart, opts minio.ObjectOptions) (oi minio.ObjectInfo, e error) {
-	etag, err := l.Client.CompleteMultipartUpload(ctx, bucket, object, uploadID, minio.ToMinioClientCompleteParts(uploadedParts), miniogo.PutObjectOptions{})
-	if err != nil {
-		return oi, minio.ErrorRespToObjectError(err, bucket, object)
-	}
-
-	return minio.ObjectInfo{Bucket: bucket, Name: object, ETag: strings.Trim(etag, "\"")}, nil
-}
-
-// SetBucketPolicy sets policy on bucket
-func (l *s3Objects) SetBucketPolicy(ctx context.Context, bucket string, bucketPolicy *policy.Policy) error {
-	data, err := json.Marshal(bucketPolicy)
-	if err != nil {
-		// This should not happen.
-		logger.LogIf(ctx, err)
-		return minio.ErrorRespToObjectError(err, bucket)
-	}
-
-	if err := l.Client.SetBucketPolicy(ctx, bucket, string(data)); err != nil {
-		return minio.ErrorRespToObjectError(err, bucket)
-	}
-
-	return nil
-}
-
-// GetBucketPolicy will get policy on bucket
-func (l *s3Objects) GetBucketPolicy(ctx context.Context, bucket string) (*policy.Policy, error) {
-	data, err := l.Client.GetBucketPolicy(ctx, bucket)
-	if err != nil {
-		return nil, minio.ErrorRespToObjectError(err, bucket)
-	}
-
-	bucketPolicy, err := policy.ParseConfig(strings.NewReader(data), bucket)
-	return bucketPolicy, minio.ErrorRespToObjectError(err, bucket)
-}
-
-// DeleteBucketPolicy deletes all policies on bucket
-func (l *s3Objects) DeleteBucketPolicy(ctx context.Context, bucket string) error {
-	if err := l.Client.SetBucketPolicy(ctx, bucket, ""); err != nil {
-		return minio.ErrorRespToObjectError(err, bucket, "")
-	}
-	return nil
-}
-
-// GetObjectTags gets the tags set on the object
-func (l *s3Objects) GetObjectTags(ctx context.Context, bucket string, object string, opts minio.ObjectOptions) (*tags.Tags, error) {
-	var err error
-	if _, err = l.GetObjectInfo(ctx, bucket, object, opts); err != nil {
-		return nil, minio.ErrorRespToObjectError(err, bucket, object)
-	}
-
-	t, err := l.Client.GetObjectTagging(ctx, bucket, object, miniogo.GetObjectTaggingOptions{})
-	if err != nil {
-		return nil, minio.ErrorRespToObjectError(err, bucket, object)
-	}
-
-	return t, nil
-}
-
-// PutObjectTags attaches the tags to the object
-func (l *s3Objects) PutObjectTags(ctx context.Context, bucket, object string, tagStr string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
-	tagObj, err := tags.Parse(tagStr, true)
-	if err != nil {
-		return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object)
-	}
-	if err = l.Client.PutObjectTagging(ctx, bucket, object, tagObj, miniogo.PutObjectTaggingOptions{VersionID: opts.VersionID}); err != nil {
-		return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object)
-	}
-
-	objInfo, err := l.GetObjectInfo(ctx, bucket, object, opts)
-	if err != nil {
-		return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object)
-	}
-
-	return objInfo, nil
-}
-
-// DeleteObjectTags removes the tags attached to the object
-func (l *s3Objects) DeleteObjectTags(ctx context.Context, bucket, object string, opts minio.ObjectOptions) (minio.ObjectInfo, error) {
-	if err := l.Client.RemoveObjectTagging(ctx, bucket, object, miniogo.RemoveObjectTaggingOptions{}); err != nil {
-		return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object)
-	}
-	objInfo, err := l.GetObjectInfo(ctx, bucket, object, opts)
-	if err != nil {
-		return minio.ObjectInfo{}, minio.ErrorRespToObjectError(err, bucket, object)
-	}
-
-	return objInfo, nil
-}
-
-// IsCompressionSupported returns whether compression is applicable for this layer.
-func (l *s3Objects) IsCompressionSupported() bool {
-	return false
-}
-
-// IsEncryptionSupported returns whether server side encryption is implemented for this layer.
-func (l *s3Objects) IsEncryptionSupported() bool {
-	return minio.GlobalKMS != nil || minio.GlobalGatewaySSE.IsSet()
-}
-
-func (l *s3Objects) IsTaggingSupported() bool {
-	return true
-}
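The policy methods above hand minio-go the policy document as a JSON string; an empty string deletes it, which is how DeleteBucketPolicy was implemented. A sketch of the same call against a live client (endpoint, credentials, and bucket name are placeholders):

package main

import (
	"context"
	"log"

	miniogo "github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Hypothetical endpoint and credentials, for illustration only.
	client, err := miniogo.New("play.min.io", &miniogo.Options{
		Creds:  credentials.NewStaticV4("ACCESS", "SECRET", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// The gateway marshaled its *policy.Policy to JSON before this call;
	// minio-go accepts the policy document as a plain string.
	policyJSON := `{"Version":"2012-10-17","Statement":[{"Effect":"Allow","Principal":{"AWS":["*"]},"Action":["s3:GetObject"],"Resource":["arn:aws:s3:::mybucket/*"]}]}`
	if err := client.SetBucketPolicy(context.Background(), "mybucket", policyJSON); err != nil {
		log.Fatal(err)
	}
}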
diff --git a/cmd/gateway/s3/gateway-s3_test.go b/cmd/gateway/s3/gateway-s3_test.go
deleted file mode 100644
index f8ceb275a..000000000
--- a/cmd/gateway/s3/gateway-s3_test.go
+++ /dev/null
@@ -1,124 +0,0 @@
-/*
- * MinIO Object Storage (c) 2021 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package s3
-
-import (
-	"fmt"
-	"testing"
-
-	miniogo "github.com/minio/minio-go/v7"
-	"github.com/minio/minio/internal/hash"
-
-	minio "github.com/minio/minio/cmd"
-)
-
-func errResponse(code string) miniogo.ErrorResponse {
-	return miniogo.ErrorResponse{
-		Code: code,
-	}
-}
-
-func TestS3ToObjectError(t *testing.T) {
-	testCases := []struct {
-		inputErr       error
-		expectedErr    error
-		bucket, object string
-	}{
-		{
-			inputErr:    errResponse("BucketAlreadyOwnedByYou"),
-			expectedErr: minio.BucketAlreadyOwnedByYou{},
-		},
-		{
-			inputErr:    errResponse("BucketNotEmpty"),
-			expectedErr: minio.BucketNotEmpty{},
-		},
-		{
-			inputErr:    errResponse("InvalidBucketName"),
-			expectedErr: minio.BucketNameInvalid{},
-		},
-		{
-			inputErr:    errResponse("InvalidPart"),
-			expectedErr: minio.InvalidPart{},
-		},
-		{
-			inputErr:    errResponse("NoSuchBucketPolicy"),
-			expectedErr: minio.BucketPolicyNotFound{},
-		},
-		{
-			inputErr:    errResponse("NoSuchBucket"),
-			expectedErr: minio.BucketNotFound{},
-		},
-		// with empty Object in miniogo.ErrorRepsonse, NoSuchKey
-		// is interpreted as BucketNotFound
-		{
-			inputErr:    errResponse("NoSuchKey"),
-			expectedErr: minio.BucketNotFound{},
-		},
-		{
-			inputErr:    errResponse("NoSuchUpload"),
-			expectedErr: minio.InvalidUploadID{},
-		},
-		{
-			inputErr:    errResponse("XMinioInvalidObjectName"),
-			expectedErr: minio.ObjectNameInvalid{},
-		},
-		{
-			inputErr:    errResponse("AccessDenied"),
-			expectedErr: minio.PrefixAccessDenied{},
-		},
-		{
-			inputErr:    errResponse("XAmzContentSHA256Mismatch"),
-			expectedErr: hash.SHA256Mismatch{},
-		},
-		{
-			inputErr:    errResponse("EntityTooSmall"),
-			expectedErr: minio.PartTooSmall{},
-		},
-		{
-			inputErr:    nil,
-			expectedErr: nil,
-		},
-		// Special test case for NoSuchKey with object name
-		{
-			inputErr: miniogo.ErrorResponse{
-				Code: "NoSuchKey",
-			},
-			expectedErr: minio.ObjectNotFound{
-				Bucket: "bucket",
-				Object: "object",
-			},
-			bucket: "bucket",
-			object: "object",
-		},
-
-		// N B error values that aren't of expected types
-		// should be left untouched.
-		// Special test case for error that is not of type
-		// miniogo.ErrorResponse
-		{
-			inputErr:    fmt.Errorf("not a ErrorResponse"),
-			expectedErr: fmt.Errorf("not a ErrorResponse"),
-		},
-	}
-
-	for i, tc := range testCases {
-		actualErr := minio.ErrorRespToObjectError(tc.inputErr, tc.bucket, tc.object)
-		if actualErr != nil && tc.expectedErr != nil && actualErr.Error() != tc.expectedErr.Error() {
-			t.Errorf("Test case %d: Expected error %v but received error %v", i+1, tc.expectedErr, actualErr)
-		}
-	}
-}
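The deleted test pins down how minio-go's typed ErrorResponse values were translated, including the quirk that NoSuchKey without an object name maps to a bucket-not-found error. A compressed sketch of that mapping shape (the error constructors here are illustrative; the real ErrorRespToObjectError returns MinIO's typed errors):

package main

import (
	"errors"
	"fmt"

	miniogo "github.com/minio/minio-go/v7"
)

// toObjectErr is an illustrative reduction: pull the typed ErrorResponse
// out of the chain, then map its S3 code. Foreign error types pass
// through untouched, as the last test case requires.
func toObjectErr(err error, bucket, object string) error {
	var resp miniogo.ErrorResponse
	if !errors.As(err, &resp) {
		return err
	}
	switch resp.Code {
	case "NoSuchBucket":
		return fmt.Errorf("bucket not found: %s", bucket)
	case "NoSuchKey":
		if object == "" {
			// The quirk the deleted test documents: with no object name,
			// the miss is reported against the bucket.
			return fmt.Errorf("bucket not found: %s", bucket)
		}
		return fmt.Errorf("object not found: %s/%s", bucket, object)
	}
	return err
}

func main() {
	err := miniogo.ErrorResponse{Code: "NoSuchKey"}
	fmt.Println(toObjectErr(err, "bucket", ""))
	fmt.Println(toObjectErr(err, "bucket", "object"))
}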
diff --git a/cmd/globals.go b/cmd/globals.go
index 7de8a0567..c43fa0f47 100644
--- a/cmd/globals.go
+++ b/cmd/globals.go
@@ -79,7 +79,6 @@ const (
 	globalMinioModeErasureSD = "mode-server-xl-single"
 	globalMinioModeErasure = "mode-server-xl"
 	globalMinioModeDistErasure = "mode-server-distributed-xl"
-	globalMinioModeGatewayPrefix = "mode-gateway-"
 	globalDirSuffix = "__XLDIR__"
 	globalDirSuffixWithSlash = globalDirSuffix + slashSeparator
@@ -147,15 +146,9 @@ var (
 	// Indicates if the running minio server is in single drive XL mode.
 	globalIsErasureSD = false
-	// Indicates if the running minio is in gateway mode.
-	globalIsGateway = false
-
 	// Indicates if server code should go through testing path.
 	globalIsTesting = false
-	// Name of gateway server, e.g S3, NAS etc
-	globalGatewayName = ""
-
 	// This flag is set to 'true' by default
 	globalBrowserEnabled = true
@@ -319,9 +312,6 @@ var (
 	// Deployment ID - unique per deployment
 	globalDeploymentID string
-	// GlobalGatewaySSE sse options
-	GlobalGatewaySSE gatewaySSE
-
 	globalAllHealState *allHealState
 	// The always present healing routine ready to heal objects
diff --git a/cmd/healthcheck-handler.go b/cmd/healthcheck-handler.go
index fc59f14dd..26e2fbd06 100644
--- a/cmd/healthcheck-handler.go
+++ b/cmd/healthcheck-handler.go
@@ -19,7 +19,6 @@ package cmd
 import (
 	"context"
-	"errors"
 	"net/http"
 	"strconv"
@@ -34,11 +33,6 @@ func shouldProxy() bool {
 // ClusterCheckHandler returns if the server is ready for requests.
 func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) {
-	if globalIsGateway {
-		writeResponse(w, http.StatusOK, nil, mimeNone)
-		return
-	}
-
 	ctx := newContext(r, w, "ClusterCheckHandler")
 	if shouldProxy() {
@@ -77,11 +71,6 @@ func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) {
 // ClusterReadCheckHandler returns if the server is ready for requests.
 func ClusterReadCheckHandler(w http.ResponseWriter, r *http.Request) {
-	if globalIsGateway {
-		writeResponse(w, http.StatusOK, nil, mimeNone)
-		return
-	}
-
 	ctx := newContext(r, w, "ClusterReadCheckHandler")
 	if shouldProxy() {
@@ -116,49 +105,22 @@ func LivenessCheckHandler(w http.ResponseWriter, r *http.Request) {
 		w.Header().Set(xhttp.MinIOServerStatus, unavailable)
 	}
-	if globalIsGateway {
-		objLayer := newObjectLayerFn()
-		if objLayer == nil {
-			apiErr := toAPIError(r.Context(), errServerNotInitialized)
+	if globalEtcdClient != nil {
+		// Borrowed from
+		// https://github.com/etcd-io/etcd/blob/main/etcdctl/ctlv3/command/ep_command.go#L118
+		ctx, cancel := context.WithTimeout(r.Context(), defaultContextTimeout)
+		defer cancel()
+		if _, err := globalEtcdClient.Get(ctx, "health"); err != nil {
+			// etcd unreachable throw an error..
 			switch r.Method {
 			case http.MethodHead:
+				apiErr := toAPIError(r.Context(), err)
 				writeResponse(w, apiErr.HTTPStatusCode, nil, mimeNone)
 			case http.MethodGet:
-				writeErrorResponse(r.Context(), w, apiErr, r.URL)
+				writeErrorResponse(r.Context(), w, toAPIError(r.Context(), err), r.URL)
 			}
 			return
 		}
-
-		storageInfo, _ := objLayer.StorageInfo(r.Context())
-		if !storageInfo.Backend.GatewayOnline {
-			err := errors.New("gateway backend is not reachable")
-			apiErr := toAPIError(r.Context(), err)
-			switch r.Method {
-			case http.MethodHead:
-				writeResponse(w, apiErr.HTTPStatusCode, nil, mimeNone)
-			case http.MethodGet:
-				writeErrorResponse(r.Context(), w, apiErr, r.URL)
-			}
-			return
-		}
-
-		if globalEtcdClient != nil {
-			// Borrowed from
-			// https://github.com/etcd-io/etcd/blob/main/etcdctl/ctlv3/command/ep_command.go#L118
-			ctx, cancel := context.WithTimeout(r.Context(), defaultContextTimeout)
-			defer cancel()
-			if _, err := globalEtcdClient.Get(ctx, "health"); err != nil {
-				// etcd unreachable throw an error..
-				switch r.Method {
-				case http.MethodHead:
-					apiErr := toAPIError(r.Context(), err)
-					writeResponse(w, apiErr.HTTPStatusCode, nil, mimeNone)
-				case http.MethodGet:
-					writeErrorResponse(r.Context(), w, toAPIError(r.Context(), err), r.URL)
-				}
-				return
-			}
-		}
 	}
 	writeResponse(w, http.StatusOK, nil, mimeNone)
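The liveness handler now probes etcd directly, using a time-bounded Get as the health check. The same probe in a standalone program, assuming a local etcd at the default port:

package main

import (
	"context"
	"fmt"
	"time"

	clientv3 "go.etcd.io/etcd/client/v3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"}, // assumed local etcd
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		fmt.Println("dial:", err)
		return
	}
	defer cli.Close()

	// Same idea as the handler above: any Get serves as a liveness check,
	// bounded by a timeout so a dead etcd cannot hang the health endpoint.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	if _, err := cli.Get(ctx, "health"); err != nil {
		fmt.Println("etcd unreachable:", err)
		return
	}
	fmt.Println("etcd healthy")
}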
diff --git a/cmd/iam-dummy-store.go b/cmd/iam-dummy-store.go
deleted file mode 100644
index 455ed17c1..000000000
--- a/cmd/iam-dummy-store.go
+++ /dev/null
@@ -1,180 +0,0 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
-//
-// This file is part of MinIO Object Storage stack
-//
-// This program is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as published by
-// the Free Software Foundation, either version 3 of the License, or
-// (at your option) any later version.
-//
-// This program is distributed in the hope that it will be useful
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-package cmd
-
-import (
-	"context"
-	"sync"
-)
-
-type iamDummyStore struct {
-	sync.RWMutex
-	*iamCache
-	usersSysType UsersSysType
-}
-
-func newIAMDummyStore(usersSysType UsersSysType) *iamDummyStore {
-	return &iamDummyStore{
-		iamCache:     newIamCache(),
-		usersSysType: usersSysType,
-	}
-}
-
-func (ids *iamDummyStore) rlock() *iamCache {
-	ids.RLock()
-	return ids.iamCache
-}
-
-func (ids *iamDummyStore) runlock() {
-	ids.RUnlock()
-}
-
-func (ids *iamDummyStore) lock() *iamCache {
-	ids.Lock()
-	return ids.iamCache
-}
-
-func (ids *iamDummyStore) unlock() {
-	ids.Unlock()
-}
-
-func (ids *iamDummyStore) getUsersSysType() UsersSysType {
-	return ids.usersSysType
-}
-
-func (ids *iamDummyStore) loadPolicyDoc(ctx context.Context, policy string, m map[string]PolicyDoc) error {
-	v, ok := ids.iamPolicyDocsMap[policy]
-	if !ok {
-		return errNoSuchPolicy
-	}
-	m[policy] = v
-	return nil
-}
-
-func (ids *iamDummyStore) loadPolicyDocs(ctx context.Context, m map[string]PolicyDoc) error {
-	for k, v := range ids.iamPolicyDocsMap {
-		m[k] = v
-	}
-	return nil
-}
-
-func (ids *iamDummyStore) loadUser(ctx context.Context, user string, userType IAMUserType, m map[string]UserIdentity) error {
-	u, ok := ids.iamUsersMap[user]
-	if !ok {
-		return errNoSuchUser
-	}
-	ids.iamUsersMap[user] = u
-	return nil
-}
-
-func (ids *iamDummyStore) loadUsers(ctx context.Context, userType IAMUserType, m map[string]UserIdentity) error {
-	for k, v := range ids.iamUsersMap {
-		m[k] = v
-	}
-	return nil
-}
-
-func (ids *iamDummyStore) loadGroup(ctx context.Context, group string, m map[string]GroupInfo) error {
-	g, ok := ids.iamGroupsMap[group]
-	if !ok {
-		return errNoSuchGroup
-	}
-	m[group] = g
-	return nil
-}
-
-func (ids *iamDummyStore) loadGroups(ctx context.Context, m map[string]GroupInfo) error {
-	for k, v := range ids.iamGroupsMap {
-		m[k] = v
-	}
-	return nil
-}
-
-func (ids *iamDummyStore) loadMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error {
-	if isGroup {
-		g, ok := ids.iamGroupPolicyMap[name]
-		if !ok {
-			return errNoSuchPolicy
-		}
-		m[name] = g
-	} else {
-		u, ok := ids.iamUserPolicyMap[name]
-		if !ok {
-			return errNoSuchPolicy
-		}
-		m[name] = u
-	}
-	return nil
-}
-
-func (ids *iamDummyStore) loadMappedPolicies(ctx context.Context, userType IAMUserType, isGroup bool, m map[string]MappedPolicy) error {
-	if !isGroup {
-		for k, v := range ids.iamUserPolicyMap {
-			m[k] = v
-		}
-	} else {
-		for k, v := range ids.iamGroupPolicyMap {
-			m[k] = v
-		}
-	}
-	return nil
-}
-
-func (ids *iamDummyStore) saveIAMConfig(ctx context.Context, item interface{}, path string, opts ...options) error {
-	return nil
-}
-
-func (ids *iamDummyStore) loadIAMConfig(ctx context.Context, item interface{}, path string) error {
-	return nil
-}
-
-func (ids *iamDummyStore) deleteIAMConfig(ctx context.Context, path string) error {
-	return nil
-}
-
-func (ids *iamDummyStore) savePolicyDoc(ctx context.Context, policyName string, p PolicyDoc) error {
-	return nil
-}
-
-func (ids *iamDummyStore) saveMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool, mp MappedPolicy, opts ...options) error {
-	return nil
-}
-
-func (ids *iamDummyStore) saveUserIdentity(ctx context.Context, name string, userType IAMUserType, u UserIdentity, opts ...options) error {
-	return nil
-}
-
-func (ids *iamDummyStore) saveGroupInfo(ctx context.Context, group string, gi GroupInfo) error {
-	return nil
-}
-
-func (ids *iamDummyStore) deletePolicyDoc(ctx context.Context, policyName string) error {
-	return nil
-}
-
-func (ids *iamDummyStore) deleteMappedPolicy(ctx context.Context, name string, userType IAMUserType, isGroup bool) error {
-	return nil
-}
-
-func (ids *iamDummyStore) deleteUserIdentity(ctx context.Context, name string, userType IAMUserType) error {
-	return nil
-}
-
-func (ids *iamDummyStore) deleteGroupInfo(ctx context.Context, name string) error {
-	return nil
-}
diff --git a/cmd/iam.go b/cmd/iam.go
index e7096f65a..ddba60d69 100644
--- a/cmd/iam.go
+++ b/cmd/iam.go
@@ -168,17 +168,7 @@ func (sys *IAMSys) initStore(objAPI ObjectLayer, etcdClient *etcd.Client) {
 	}
 	if etcdClient == nil {
-		if globalIsGateway {
-			if globalGatewayName == NASBackendGateway {
-				sys.store = &IAMStoreSys{newIAMObjectStore(objAPI, sys.usersSysType)}
-			} else {
-				sys.store = &IAMStoreSys{newIAMDummyStore(sys.usersSysType)}
-				logger.Info("WARNING: %s gateway is running in-memory IAM store, for persistence please configure etcd",
-					globalGatewayName)
-			}
-		} else {
-			sys.store = &IAMStoreSys{newIAMObjectStore(objAPI, sys.usersSysType)}
-		}
+		sys.store = &IAMStoreSys{newIAMObjectStore(objAPI, sys.usersSysType)}
 	} else {
 		sys.store = &IAMStoreSys{newIAMEtcdStore(etcdClient, sys.usersSysType)}
 	}
@@ -224,7 +214,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
 	var err error
 	globalOpenIDConfig, err = openid.LookupConfig(s,
-		NewGatewayHTTPTransport(), xhttp.DrainBody, globalSite.Region)
+		NewHTTPTransport(), xhttp.DrainBody, globalSite.Region)
 	if err != nil {
 		logger.LogIf(ctx, fmt.Errorf("Unable to initialize OpenID: %w", err))
 	}
@@ -236,7 +226,7 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
 	}
 	authNPluginCfg, err := idplugin.LookupConfig(s[config.IdentityPluginSubSys][config.Default],
-		NewGatewayHTTPTransport(), xhttp.DrainBody, globalSite.Region)
+		NewHTTPTransport(), xhttp.DrainBody, globalSite.Region)
 	if err != nil {
 		logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthNPlugin: %w", err))
 	}
@@ -244,14 +234,14 @@ func (sys *IAMSys) Init(ctx context.Context, objAPI ObjectLayer, etcdClient *etc
 	setGlobalAuthNPlugin(idplugin.New(authNPluginCfg))
 	authZPluginCfg, err := polplugin.LookupConfig(s[config.PolicyPluginSubSys][config.Default],
-		NewGatewayHTTPTransport(), xhttp.DrainBody)
+		NewHTTPTransport(), xhttp.DrainBody)
 	if err != nil {
 		logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin: %w", err))
 	}
 	if authZPluginCfg.URL == nil {
 		opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
-			NewGatewayHTTPTransport(), xhttp.DrainBody)
+			NewHTTPTransport(), xhttp.DrainBody)
 		if err != nil {
 			logger.LogIf(ctx, fmt.Errorf("Unable to initialize AuthZPlugin from legacy OPA config: %w", err))
 		} else {
diff --git a/cmd/main.go b/cmd/main.go
index e727d0914..c60912dd5 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -131,7 +131,6 @@ func newApp(name string) *cli.App {
 	// Register all commands.
 	registerCommand(serverCmd)
-	registerCommand(gatewayCmd)
 	// Set up app.
 	cli.HelpFlag = cli.BoolFlag{
diff --git a/cmd/metacache-manager.go b/cmd/metacache-manager.go
index dc967f5e7..d23a8aa14 100644
--- a/cmd/metacache-manager.go
+++ b/cmd/metacache-manager.go
@@ -56,10 +56,6 @@ func (m *metacacheManager) initManager() {
 		objAPI = newObjectLayerFn()
 	}
-	if globalIsGateway {
-		return
-	}
-
 	t := time.NewTicker(time.Minute)
 	defer t.Stop()
diff --git a/cmd/metacache-walk.go b/cmd/metacache-walk.go
index 699f73864..8b1c2a5ee 100644
--- a/cmd/metacache-walk.go
+++ b/cmd/metacache-walk.go
@@ -267,7 +267,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
 			if contextCanceled(ctx) {
 				return ctx.Err()
 			}
-			meta := metaCacheEntry{name: PathJoin(current, entry)}
+			meta := metaCacheEntry{name: pathJoin(current, entry)}
 			// If directory entry on stack before this, pop it now.
 			for len(dirStack) > 0 && dirStack[len(dirStack)-1] < meta.name {
diff --git a/cmd/metrics-v2.go b/cmd/metrics-v2.go
index 4821b0c53..c1793190a 100644
--- a/cmd/metrics-v2.go
+++ b/cmd/metrics-v2.go
@@ -1404,9 +1404,6 @@ func getMinioVersionMetrics() *MetricsGroup {
 func getNodeHealthMetrics() *MetricsGroup {
 	mg := &MetricsGroup{}
 	mg.RegisterRead(func(_ context.Context) (metrics []Metric) {
-		if globalIsGateway {
-			return
-		}
 		metrics = make([]Metric, 0, 16)
 		nodesUp, nodesDown := globalNotificationSys.GetPeerOnlineCount()
 		metrics = append(metrics, Metric{
@@ -1426,9 +1423,6 @@ func getMinioHealingMetrics() *MetricsGroup {
 	mg := &MetricsGroup{}
 	mg.RegisterRead(func(_ context.Context) (metrics []Metric) {
 		metrics = make([]Metric, 0, 5)
-		if globalIsGateway {
-			return
-		}
 		bgSeq, exists := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
 		if !exists {
 			return
@@ -1672,7 +1666,7 @@ func getBucketUsageMetrics() *MetricsGroup {
 	mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
 		objLayer := newObjectLayerFn()
 		// Service not initialized yet
-		if objLayer == nil || globalIsGateway {
+		if objLayer == nil {
 			return
 		}
@@ -1817,7 +1811,7 @@ func getClusterTierMetrics() *MetricsGroup {
 	}
 	mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
 		objLayer := newObjectLayerFn()
-		if objLayer == nil || globalIsGateway {
+		if objLayer == nil {
 			return
 		}
 		if globalTierConfigMgr.Empty() {
@@ -1845,7 +1839,7 @@ func getLocalStorageMetrics() *MetricsGroup {
 	mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
 		objLayer := newObjectLayerFn()
 		// Service not initialized yet
-		if objLayer == nil || globalIsGateway {
+		if objLayer == nil {
 			return
 		}
@@ -1889,7 +1883,7 @@ func getLocalDiskStorageMetrics() *MetricsGroup {
 	mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
 		objLayer := newObjectLayerFn()
 		// Service not initialized yet
-		if objLayer == nil || globalIsGateway {
+		if objLayer == nil {
 			return
 		}
@@ -1922,7 +1916,7 @@ func getClusterStorageMetrics() *MetricsGroup {
 	mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
 		objLayer := newObjectLayerFn()
 		// Service not initialized yet
-		if objLayer == nil || globalIsGateway {
+		if objLayer == nil {
 			return
 		}
@@ -1979,7 +1973,7 @@ func getKMSNodeMetrics() *MetricsGroup {
 	mg.RegisterRead(func(ctx context.Context) (metrics []Metric) {
 		objLayer := newObjectLayerFn()
 		// Service not initialized yet
-		if objLayer == nil || globalIsGateway || GlobalKMS == nil {
+		if objLayer == nil || GlobalKMS == nil {
 			return
 		}
@@ -2017,7 +2011,7 @@ func getKMSMetrics() *MetricsGroup {
 	mg.RegisterRead(func(ctx context.Context) []Metric {
 		objLayer := newObjectLayerFn()
 		// Service not initialized yet
-		if objLayer == nil || globalIsGateway || GlobalKMS == nil {
+		if objLayer == nil || GlobalKMS == nil {
 			return []Metric{}
 		}
diff --git a/cmd/metrics.go b/cmd/metrics.go
index 4e7423878..5d032ad4b 100644
--- a/cmd/metrics.go
+++ b/cmd/metrics.go
@@ -20,7 +20,6 @@ package cmd
 import (
 	"net/http"
 	"strings"
-	"sync/atomic"
 	"time"
 	"github.com/minio/minio/internal/logger"
@@ -55,7 +54,6 @@ var (
 const (
 	healMetricsNamespace = "self_heal"
-	gatewayNamespace = "gateway"
 	cacheNamespace = "cache"
 	s3Namespace = "s3"
 	bucketNamespace = "bucket"
@@ -101,15 +99,10 @@ func (c *minioCollector) Collect(ch chan<- prometheus.Metric) {
 	networkMetricsPrometheus(ch)
 	httpMetricsPrometheus(ch)
 	cacheMetricsPrometheus(ch)
-	gatewayMetricsPrometheus(ch)
 	healingMetricsPrometheus(ch)
 }
 func nodeHealthMetricsPrometheus(ch chan<- prometheus.Metric) {
-	if globalIsGateway {
-		return
-	}
-
 	nodesUp, nodesDown := globalNotificationSys.GetPeerOnlineCount()
 	ch <- prometheus.MustNewConstMetric(
 		prometheus.NewDesc(
@@ -132,9 +125,6 @@ func nodeHealthMetricsPrometheus(ch chan<- prometheus.Metric) {
 // collects healing specific metrics for MinIO instance in Prometheus specific format
 // and sends to given channel
 func healingMetricsPrometheus(ch chan<- prometheus.Metric) {
-	if globalIsGateway {
-		return
-	}
 	bgSeq, exists := globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
 	if !exists {
 		return
@@ -188,79 +178,6 @@
 	}
 }
-// collects gateway specific metrics for MinIO instance in Prometheus specific format
-// and sends to given channel
-func gatewayMetricsPrometheus(ch chan<- prometheus.Metric) {
-	if !globalIsGateway || (globalGatewayName != S3BackendGateway) {
-		return
-	}
-
-	objLayer := newObjectLayerFn()
-	// Service not initialized yet
-	if objLayer == nil {
-		return
-	}
-
-	m, err := objLayer.GetMetrics(GlobalContext)
-	if err != nil {
-		return
-	}
-
-	ch <- prometheus.MustNewConstMetric(
-		prometheus.NewDesc(
-			prometheus.BuildFQName(gatewayNamespace, globalGatewayName, "bytes_received"),
-			"Total number of bytes received by current MinIO Gateway "+globalGatewayName+" backend",
-			nil, nil),
-		prometheus.CounterValue,
-		float64(m.GetBytesReceived()),
-	)
-	ch <- prometheus.MustNewConstMetric(
-		prometheus.NewDesc(
-			prometheus.BuildFQName(gatewayNamespace, globalGatewayName, "bytes_sent"),
-			"Total number of bytes sent by current MinIO Gateway to "+globalGatewayName+" backend",
-			nil, nil),
-		prometheus.CounterValue,
-		float64(m.GetBytesSent()),
-	)
-	s := m.GetRequests()
-	ch <- prometheus.MustNewConstMetric(
-		prometheus.NewDesc(
-			prometheus.BuildFQName(gatewayNamespace, globalGatewayName, "requests"),
-			"Total number of requests made to "+globalGatewayName+" by current MinIO Gateway",
-			[]string{"method"}, nil),
-		prometheus.CounterValue,
-		float64(atomic.LoadUint64(&s.Get)),
-		http.MethodGet,
-	)
-	ch <- prometheus.MustNewConstMetric(
-		prometheus.NewDesc(
-			prometheus.BuildFQName(gatewayNamespace, globalGatewayName, "requests"),
-			"Total number of requests made to "+globalGatewayName+" by current MinIO Gateway",
-			[]string{"method"}, nil),
-		prometheus.CounterValue,
-		float64(atomic.LoadUint64(&s.Head)),
-		http.MethodHead,
-	)
-	ch <- prometheus.MustNewConstMetric(
-		prometheus.NewDesc(
-			prometheus.BuildFQName(gatewayNamespace, globalGatewayName, "requests"),
-			"Total number of requests made to "+globalGatewayName+" by current MinIO Gateway",
-			[]string{"method"}, nil),
-		prometheus.CounterValue,
-		float64(atomic.LoadUint64(&s.Put)),
-		http.MethodPut,
-	)
-	ch <- prometheus.MustNewConstMetric(
-		prometheus.NewDesc(
-			prometheus.BuildFQName(gatewayNamespace, globalGatewayName, "requests"),
-			"Total number of requests made to "+globalGatewayName+" by current MinIO Gateway",
-			[]string{"method"}, nil),
-		prometheus.CounterValue,
-		float64(atomic.LoadUint64(&s.Post)),
-		http.MethodPost,
-	)
-}
-
 // collects cache metrics for MinIO server in Prometheus specific format
 // and sends to given channel
 func cacheMetricsPrometheus(ch chan<- prometheus.Metric) {
@@ -444,10 +361,6 @@ func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {
 		return
 	}
-	if globalIsGateway {
-		return
-	}
-
 	dataUsageInfo, err := loadDataUsageFromBackend(GlobalContext, objLayer)
 	if err != nil {
 		return
@@ -538,10 +451,6 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
 		return
 	}
-	if globalIsGateway {
-		return
-	}
-
 	server := getLocalServerProperty(globalEndpoints, &http.Request{
 		Host: globalLocalNodeName,
 	})
diff --git a/cmd/notification-summary.go b/cmd/notification-summary.go
index 57328ee4d..e257970fc 100644
--- a/cmd/notification-summary.go
+++ b/cmd/notification-summary.go
@@ -31,9 +31,6 @@ func GetTotalCapacity(diskInfo []madmin.Disk) (capacity uint64) {
 // GetTotalUsableCapacity gets the total usable capacity in the cluster.
 func GetTotalUsableCapacity(diskInfo []madmin.Disk, s StorageInfo) (capacity uint64) {
-	if globalIsGateway {
-		return 0
-	}
 	for _, disk := range diskInfo {
 		// Ignore parity disks
 		if disk.DiskIndex < s.Backend.StandardSCData[disk.PoolIndex] {
@@ -53,10 +50,6 @@ func GetTotalCapacityFree(diskInfo []madmin.Disk) (capacity uint64) {
 // GetTotalUsableCapacityFree gets the total usable capacity free in the cluster.
 func GetTotalUsableCapacityFree(diskInfo []madmin.Disk, s StorageInfo) (capacity uint64) {
-	if globalIsGateway {
-		return 0
-	}
-
 	for _, disk := range diskInfo {
 		// Ignore parity disks
 		if disk.DiskIndex < s.Backend.StandardSCData[disk.PoolIndex] {
diff --git a/cmd/notification.go b/cmd/notification.go
index 2e34f1c7d..cfbf2dc24 100644
--- a/cmd/notification.go
+++ b/cmd/notification.go
@@ -491,10 +491,6 @@ func (sys *NotificationSys) GetLocks(ctx context.Context, r *http.Request) []*Pe
 // LoadBucketMetadata - calls LoadBucketMetadata call on all peers
 func (sys *NotificationSys) LoadBucketMetadata(ctx context.Context, bucketName string) {
-	if globalIsGateway {
-		return
-	}
-
 	ng := WithNPeers(len(sys.peerClients))
 	for idx, client := range sys.peerClients {
 		if client == nil {
diff --git a/cmd/object-api-common.go b/cmd/object-api-common.go
index 11e3eafab..fb3d387dd 100644
--- a/cmd/object-api-common.go
+++ b/cmd/object-api-common.go
@@ -18,12 +18,9 @@ package cmd
 import (
-	"context"
-	"strings"
 	"sync"
 	"github.com/dustin/go-humanize"
-	"github.com/minio/minio/internal/sync/errgroup"
 )
 const (
@@ -58,13 +55,6 @@ var globalObjectAPI ObjectLayer
 // Global cacheObjects, only accessed by newCacheObjectsFn().
 var globalCacheObjectAPI CacheObjectLayer
-// Checks if the object is a directory, this logic uses
-// if size == 0 and object ends with SlashSeparator then
-// returns true.
-func isObjectDir(object string, size int64) bool {
-	return HasSuffix(object, SlashSeparator) && size == 0
-}
-
 func newStorageAPIWithoutHealthCheck(endpoint Endpoint) (storage StorageAPI, err error) {
 	if endpoint.IsLocal {
 		storage, err := newXLStorage(endpoint)
@@ -89,326 +79,3 @@ func newStorageAPI(endpoint Endpoint) (storage StorageAPI, err error) {
 	return newStorageRESTClient(endpoint, true), nil
 }
-
-func listObjectsNonSlash(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) {
-	endWalkCh := make(chan struct{})
-	defer close(endWalkCh)
-	recursive := true
-	walkResultCh := startTreeWalk(ctx, bucket, prefix, "", recursive, listDir, isLeaf, isLeafDir, endWalkCh)
-
-	var objInfos []ObjectInfo
-	var eof bool
-	var prevPrefix string
-
-	for {
-		if len(objInfos) == maxKeys {
-			break
-		}
-		result, ok := <-walkResultCh
-		if !ok {
-			eof = true
-			break
-		}
-
-		var objInfo ObjectInfo
-		var err error
-
-		index := strings.Index(strings.TrimPrefix(result.entry, prefix), delimiter)
-		if index == -1 {
-			objInfo, err = getObjInfo(ctx, bucket, result.entry)
-			if err != nil {
-				// Ignore errFileNotFound as the object might have got
-				// deleted in the interim period of listing and getObjectInfo(),
-				// ignore quorum error as it might be an entry from an outdated disk.
-				if IsErrIgnored(err, []error{
-					errFileNotFound,
-					errErasureReadQuorum,
-				}...) {
-					continue
-				}
-				return loi, toObjectErr(err, bucket, prefix)
-			}
-		} else {
-			index = len(prefix) + index + len(delimiter)
-			currPrefix := result.entry[:index]
-			if currPrefix == prevPrefix {
-				continue
-			}
-			prevPrefix = currPrefix
-
-			objInfo = ObjectInfo{
-				Bucket: bucket,
-				Name:   currPrefix,
-				IsDir:  true,
-			}
-		}
-
-		if objInfo.Name <= marker {
-			continue
-		}
-
-		objInfos = append(objInfos, objInfo)
-		if result.end {
-			eof = true
-			break
-		}
-	}
-
-	result := ListObjectsInfo{}
-	for _, objInfo := range objInfos {
-		if objInfo.IsDir {
-			result.Prefixes = append(result.Prefixes, objInfo.Name)
-			continue
-		}
-		result.Objects = append(result.Objects, objInfo)
-	}
-
-	if !eof {
-		result.IsTruncated = true
-		if len(objInfos) > 0 {
-			result.NextMarker = objInfos[len(objInfos)-1].Name
-		} else if len(result.Prefixes) > 0 {
-			result.NextMarker = result.Prefixes[len(result.Prefixes)-1]
-		}
-	}
-
-	return result, nil
-}
-
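listObjectsNonSlash above folds keys into common prefixes whenever the delimiter appears after the requested prefix. That grouping rule in isolation:

package main

import (
	"fmt"
	"strings"
)

// commonPrefix mirrors the grouping rule from the deleted function: when
// the delimiter occurs in the key after the prefix, the entry is reported
// as a "directory" prefix rather than as an object.
func commonPrefix(key, prefix, delimiter string) (string, bool) {
	rest := strings.TrimPrefix(key, prefix)
	i := strings.Index(rest, delimiter)
	if i == -1 {
		return "", false // listed as a plain object
	}
	return key[:len(prefix)+i+len(delimiter)], true
}

func main() {
	fmt.Println(commonPrefix("photos/2021/a.jpg", "photos/", "/")) // photos/2021/ true
	fmt.Println(commonPrefix("photos/a.jpg", "photos/", "/"))      // "" false
}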
-// Walk a bucket, optionally prefix recursively, until we have returned
-// all the content to objectInfo channel, it is callers responsibility
-// to allocate a receive channel for ObjectInfo, upon any unhandled
-// error walker returns error. Optionally if context.Done() is received
-// then Walk() stops the walker.
-func fsWalk(ctx context.Context, obj ObjectLayer, bucket, prefix string, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, results chan<- ObjectInfo, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) error {
-	if err := checkListObjsArgs(ctx, bucket, prefix, "", obj); err != nil {
-		// Upon error close the channel.
-		close(results)
-		return err
-	}
-
-	walkResultCh := startTreeWalk(ctx, bucket, prefix, "", true, listDir, isLeaf, isLeafDir, ctx.Done())
-
-	go func() {
-		defer close(results)
-
-		for {
-			walkResult, ok := <-walkResultCh
-			if !ok {
-				break
-			}
-
-			var objInfo ObjectInfo
-			var err error
-			if HasSuffix(walkResult.entry, SlashSeparator) {
-				for _, getObjectInfoDir := range getObjectInfoDirs {
-					objInfo, err = getObjectInfoDir(ctx, bucket, walkResult.entry)
-					if err == nil {
-						break
-					}
-					if err == errFileNotFound {
-						err = nil
-						objInfo = ObjectInfo{
-							Bucket: bucket,
-							Name:   walkResult.entry,
-							IsDir:  true,
-						}
-					}
-				}
-			} else {
-				objInfo, err = getObjInfo(ctx, bucket, walkResult.entry)
-			}
-			if err != nil {
-				continue
-			}
-			results <- objInfo
-			if walkResult.end {
-				break
-			}
-		}
-	}()
-	return nil
-}
-
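The listObjects body that follows fans out getObjInfo calls through an internal errgroup capped at roughly maxKeys/10 workers, writing results into a slot-indexed slice so output order matches walk order. The stock golang.org/x/sync/errgroup reproduces the same shape with SetLimit (a sketch, not MinIO's internal package):

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	entries := []string{"a", "b", "c", "d"}
	results := make([]string, len(entries)) // slot-addressed, preserves order

	g, _ := errgroup.WithContext(context.Background())
	g.SetLimit(2) // bounded fan-out, like maxConcurrent below
	for i, e := range entries {
		i, e := i, e // capture loop variables (pre-Go 1.22 idiom, as in the original)
		g.Go(func() error {
			results[i] = "stat:" + e // stand-in for getObjInfo
			return nil
		})
	}
	if err := g.Wait(); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(results)
}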
-func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) {
-	if delimiter != SlashSeparator && delimiter != "" {
-		return listObjectsNonSlash(ctx, bucket, prefix, marker, delimiter, maxKeys, tpool, listDir, isLeaf, isLeafDir, getObjInfo, getObjectInfoDirs...)
-	}
-
-	if err := checkListObjsArgs(ctx, bucket, prefix, marker, obj); err != nil {
-		return loi, err
-	}
-
-	// Marker is set validate pre-condition.
-	if marker != "" {
-		// Marker not common with prefix is not implemented. Send an empty response
-		if !HasPrefix(marker, prefix) {
-			return loi, nil
-		}
-	}
-
-	// With max keys of zero we have reached eof, return right here.
-	if maxKeys == 0 {
-		return loi, nil
-	}
-
-	if len(prefix) > 0 && maxKeys == 1 && delimiter == "" && marker == "" {
-		// Optimization for certain applications like
-		// - Cohesity
-		// - Actifio, Splunk etc.
-		// which send ListObjects requests where the actual object
-		// itself is the prefix and max-keys=1 in such scenarios
-		// we can simply verify locally if such an object exists
-		// to avoid the need for ListObjects().
-		objInfo, err := obj.GetObjectInfo(ctx, bucket, prefix, ObjectOptions{NoLock: true})
-		if err == nil {
-			loi.Objects = append(loi.Objects, objInfo)
-			return loi, nil
-		}
-	}
-
-	// For delimiter and prefix as '/' we do not list anything at all
-	// since according to s3 spec we stop at the 'delimiter'
-	// along // with the prefix. On a flat namespace with 'prefix'
-	// as '/' we don't have any entries, since all the keys are
-	// of form 'keyName/...'
-	if delimiter == SlashSeparator && prefix == SlashSeparator {
-		return loi, nil
-	}
-
-	// Over flowing count - reset to maxObjectList.
-	if maxKeys < 0 || maxKeys > maxObjectList {
-		maxKeys = maxObjectList
-	}
-
-	// Default is recursive, if delimiter is set then list non recursive.
-	recursive := true
-	if delimiter == SlashSeparator {
-		recursive = false
-	}
-
-	walkResultCh, endWalkCh := tpool.Release(listParams{bucket, recursive, marker, prefix})
-	if walkResultCh == nil {
-		endWalkCh = make(chan struct{})
-		walkResultCh = startTreeWalk(ctx, bucket, prefix, marker, recursive, listDir, isLeaf, isLeafDir, endWalkCh)
-	}
-
-	var eof bool
-	var nextMarker string
-
-	maxConcurrent := maxKeys / 10
-	if maxConcurrent == 0 {
-		maxConcurrent = maxKeys
-	}
-
-	// List until maxKeys requested.
-	g := errgroup.WithNErrs(maxKeys).WithConcurrency(maxConcurrent)
-
-	objInfoFound := make([]*ObjectInfo, maxKeys)
-	var i int
-	for i = 0; i < maxKeys; i++ {
-		i := i
-		walkResult, ok := <-walkResultCh
-		if !ok {
-			if HasSuffix(prefix, SlashSeparator) {
-				objInfo, err := obj.GetObjectInfo(ctx, bucket, prefix, ObjectOptions{NoLock: true})
-				if err == nil {
-					loi.Objects = append(loi.Objects, objInfo)
-					return loi, nil
-				}
-			}
-			// Closed channel.
-			eof = true
-			break
-		}
-
-		if HasSuffix(walkResult.entry, SlashSeparator) {
-			g.Go(func() error {
-				for _, getObjectInfoDir := range getObjectInfoDirs {
-					objInfo, err := getObjectInfoDir(ctx, bucket, walkResult.entry)
-					if err == nil {
-						objInfoFound[i] = &objInfo
-						// Done...
-						return nil
-					}
-
-					// Add temp, may be overridden,
-					if err == errFileNotFound {
-						objInfoFound[i] = &ObjectInfo{
-							Bucket: bucket,
-							Name:   walkResult.entry,
-							IsDir:  true,
-						}
-						continue
-					}
-					return toObjectErr(err, bucket, prefix)
-				}
-				return nil
-			}, i)
-		} else {
-			g.Go(func() error {
-				objInfo, err := getObjInfo(ctx, bucket, walkResult.entry)
-				if err != nil {
-					// Ignore errFileNotFound as the object might have got
-					// deleted in the interim period of listing and getObjectInfo(),
-					// ignore quorum error as it might be an entry from an outdated disk.
-					if IsErrIgnored(err, []error{
-						errFileNotFound,
-						errErasureReadQuorum,
-					}...) {
-						return nil
-					}
-					return toObjectErr(err, bucket, prefix)
-				}
-				objInfoFound[i] = &objInfo
-				return nil
-			}, i)
-		}
-
-		if walkResult.end {
-			eof = true
-			break
-		}
-	}
-	for _, err := range g.Wait() {
-		if err != nil {
-			return loi, err
-		}
-	}
-	// Copy found objects
-	objInfos := make([]ObjectInfo, 0, i+1)
-	for _, objInfo := range objInfoFound {
-		if objInfo == nil {
-			continue
-		}
-		objInfos = append(objInfos, *objInfo)
-		nextMarker = objInfo.Name
-	}
-
-	// Save list routine for the next marker if we haven't reached EOF.
-	params := listParams{bucket, recursive, nextMarker, prefix}
-	if !eof {
-		tpool.Set(params, walkResultCh, endWalkCh)
-	}
-
-	result := ListObjectsInfo{}
-	for _, objInfo := range objInfos {
-		if objInfo.IsDir && delimiter == SlashSeparator {
-			result.Prefixes = append(result.Prefixes, objInfo.Name)
-			continue
-		}
-		result.Objects = append(result.Objects, objInfo)
-	}
-
-	if !eof {
-		result.IsTruncated = true
-		if len(objInfos) > 0 {
-			result.NextMarker = objInfos[len(objInfos)-1].Name
-		} else if len(result.Prefixes) > 0 {
-			result.NextMarker = result.Prefixes[len(result.Prefixes)-1]
-		}
-	}
-
-	// Success.
-	return result, nil
-}
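The ArchiveInfo hunk in the next file drops a base64 round-trip that only the FS/gateway path needed: zipindex payloads start with a version byte below 32, while base64 text is always printable, so the first byte distinguishes the two encodings. The removed decision logic, reconstructed as a standalone function:

package main

import (
	"encoding/base64"
	"fmt"
)

// decodeArchiveInfo reproduces the removed branch: only attempt a base64
// decode when the first byte could not be a raw zipindex version byte.
func decodeArchiveInfo(z string) []byte {
	if len(z) > 0 && z[0] >= 32 {
		if dec, err := base64.StdEncoding.DecodeString(z); err == nil && len(dec) != 0 {
			return dec
		}
	}
	return []byte(z)
}

func main() {
	fmt.Println(decodeArchiveInfo(base64.StdEncoding.EncodeToString([]byte{1, 2, 3}))) // [1 2 3]
	fmt.Println(decodeArchiveInfo(string([]byte{1, 2, 3})))                            // [1 2 3]
}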
diff --git a/cmd/object-api-datatypes.go b/cmd/object-api-datatypes.go
index e585c407e..d6c46585b 100644
--- a/cmd/object-api-datatypes.go
+++ b/cmd/object-api-datatypes.go
@@ -18,7 +18,6 @@ package cmd
 import (
-	"encoding/base64"
 	"io"
 	"math"
 	"time"
@@ -39,8 +38,6 @@ const (
 	BackendFS = BackendType(madmin.FS)
 	// Multi disk BackendErasure (single, distributed) backend.
 	BackendErasure = BackendType(madmin.Erasure)
-	// Gateway backend.
-	BackendGateway = BackendType(madmin.Gateway)
 	// Add your own backend.
 )
@@ -104,9 +101,6 @@ type ObjectInfo struct {
 	// Hex encoded unique entity tag of the object.
 	ETag string
-	// The ETag stored in the gateway backend
-	InnerETag string
-
 	// Version ID of this object.
 	VersionID string
@@ -196,14 +190,6 @@ func (o ObjectInfo) ArchiveInfo() []byte {
 	if !ok {
 		return nil
 	}
-	if len(z) > 0 && z[0] >= 32 {
-		// FS/gateway mode does base64 encoding on roundtrip.
-		// zipindex has version as first byte, which is below any base64 value.
-		zipInfo, _ := base64.StdEncoding.DecodeString(z)
-		if len(zipInfo) != 0 {
-			return zipInfo
-		}
-	}
 	return []byte(z)
 }
@@ -216,7 +202,6 @@ func (o ObjectInfo) Clone() (cinfo ObjectInfo) {
 		Size:      o.Size,
 		IsDir:     o.IsDir,
 		ETag:      o.ETag,
-		InnerETag: o.InnerETag,
 		VersionID: o.VersionID,
 		IsLatest:  o.IsLatest,
 		DeleteMarker: o.DeleteMarker,
diff --git a/cmd/object-api-errors.go b/cmd/object-api-errors.go
index 5f895b4ac..4f6205cd3 100644
--- a/cmd/object-api-errors.go
+++ b/cmd/object-api-errors.go
@@ -642,6 +642,15 @@ func (e InvalidETag) Error() string {
 	return "etag of the object has changed"
 }
+// BackendDown is returned for network errors
+type BackendDown struct {
+	Err string
+}
+
+func (e BackendDown) Error() string {
+	return e.Err
+}
+
 // NotImplemented If a feature is not implemented
 type NotImplemented struct {
 	Message string
@@ -658,15 +667,6 @@ func (e UnsupportedMetadata) Error() string {
 	return "Unsupported headers in Metadata"
 }
-// BackendDown is returned for network errors or if the gateway's backend is down.
-type BackendDown struct {
-	Err string
-}
-
-func (e BackendDown) Error() string {
-	return e.Err
-}
-
 // isErrBucketNotFound - Check if error type is BucketNotFound.
 func isErrBucketNotFound(err error) bool {
 	var bkNotFound BucketNotFound
diff --git a/cmd/object-api-interface.go b/cmd/object-api-interface.go
index 700bad891..9b3d6329c 100644
--- a/cmd/object-api-interface.go
+++ b/cmd/object-api-interface.go
@@ -27,7 +27,6 @@ import (
 	"github.com/minio/minio-go/v7/pkg/encrypt"
 	"github.com/minio/minio-go/v7/pkg/tags"
 	"github.com/minio/minio/internal/hash"
-	"github.com/minio/pkg/bucket/policy"
 	"github.com/minio/minio/internal/bucket/replication"
 	xioutil "github.com/minio/minio/internal/ioutil"
@@ -183,13 +182,6 @@ const (
 	writeLock
 )
-// BackendMetrics - represents bytes served from backend
-type BackendMetrics struct {
-	bytesReceived uint64
-	bytesSent     uint64
-	requestStats  RequestStats
-}
-
 // ObjectLayer implements primitives for object API layer.
 type ObjectLayer interface {
 	// Locking operations on object.
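BackendDown above is a plain string-carrying error type, merely relocated within object-api-errors.go. Because it uses a value receiver, it unwraps cleanly from wrapped chains:

package main

import (
	"errors"
	"fmt"
)

// BackendDown matches the relocated type: a simple error for network failures.
type BackendDown struct{ Err string }

func (e BackendDown) Error() string { return e.Err }

func main() {
	err := fmt.Errorf("request failed: %w", BackendDown{Err: "connection refused"})

	// errors.As finds the typed error anywhere in the wrapped chain.
	var bd BackendDown
	if errors.As(err, &bd) {
		fmt.Println("backend down:", bd.Err)
	}
}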
@@ -241,11 +233,6 @@
 	AbortMultipartUpload(ctx context.Context, bucket, object, uploadID string, opts ObjectOptions) error
 	CompleteMultipartUpload(ctx context.Context, bucket, object, uploadID string, uploadedParts []CompletePart, opts ObjectOptions) (objInfo ObjectInfo, err error)
-	// Policy operations
-	SetBucketPolicy(context.Context, string, *policy.Policy) error
-	GetBucketPolicy(context.Context, string) (*policy.Policy, error)
-	DeleteBucketPolicy(context.Context, string) error
-
 	// Supported operations check
 	IsNotificationSupported() bool
 	IsListenSupported() bool
@@ -260,9 +247,6 @@
 	HealObject(ctx context.Context, bucket, object, versionID string, opts madmin.HealOpts) (madmin.HealResultItem, error)
 	HealObjects(ctx context.Context, bucket, prefix string, opts madmin.HealOpts, fn HealObjectFn) error
-	// Backend related metrics
-	GetMetrics(ctx context.Context) (*BackendMetrics, error)
-
 	// Returns health of the backend
 	Health(ctx context.Context, opts HealthOptions) HealthResult
 	ReadHealth(ctx context.Context) bool
diff --git a/cmd/object-api-options.go b/cmd/object-api-options.go
index 43ac62cc7..bd379d476 100644
--- a/cmd/object-api-options.go
+++ b/cmd/object-api-options.go
@@ -33,7 +33,6 @@ import (
 	"github.com/minio/minio/internal/logger"
 )
-// set encryption options for pass through to backend in the case of gateway and UserDefined metadata
 func getDefaultOpts(header http.Header, copySource bool, metadata map[string]string) (opts ObjectOptions, err error) {
 	var clientKey [32]byte
 	var sse encrypt.ServerSide
@@ -81,10 +80,7 @@
 // get ObjectOptions for GET calls from encryption headers
 func getOpts(ctx context.Context, r *http.Request, bucket, object string) (ObjectOptions, error) {
-	var (
-		encryption encrypt.ServerSide
-		opts       ObjectOptions
-	)
+	var opts ObjectOptions
 	var partNumber int
 	var err error
@@ -111,21 +107,6 @@
 		}
 	}
-	if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) {
-		key, err := crypto.SSEC.ParseHTTP(r.Header)
-		if err != nil {
-			return opts, err
-		}
-		derivedKey := deriveClientKey(key, bucket, object)
-		encryption, err = encrypt.NewSSEC(derivedKey[:])
-		logger.CriticalIf(ctx, err)
-		return ObjectOptions{
-			ServerSideEncryption: encryption,
-			VersionID:            vid,
-			PartNumber:           partNumber,
-		}, nil
-	}
-
 	deletePrefix := false
 	if d := r.Header.Get(xhttp.MinIOForceDelete); d != "" {
 		if b, err := strconv.ParseBool(d); err == nil {
@@ -304,8 +285,6 @@ func putOpts(ctx context.Context, r *http.Request, bucket, object string, metada
 		metadata = make(map[string]string)
 	}
-	etag := strings.TrimSpace(r.Header.Get(xhttp.MinIOSourceETag))
-
 	wantCRC, err := hash.GetContentChecksum(r)
 	if err != nil {
 		return opts, InvalidArgument{
@@ -315,29 +294,6 @@
 		}
 	}
-	// In the case of multipart custom format, the metadata needs to be checked in addition to header to see if it
-	// is SSE-S3 encrypted, primarily because S3 protocol does not require SSE-S3 headers in PutObjectPart calls
-	if GlobalGatewaySSE.SSES3() && (crypto.S3.IsRequested(r.Header) || crypto.S3.IsEncrypted(metadata)) {
-		return ObjectOptions{
-			ServerSideEncryption: encrypt.NewSSE(),
-			UserDefined:          metadata,
-			VersionID:            vid,
-			Versioned:            versioned,
-			VersionSuspended:     versionSuspended,
-			MTime:                mtime,
-			PreserveETag:         etag,
-			WantChecksum:         wantCRC,
-		}, nil
-	}
-	if GlobalGatewaySSE.SSEC() && crypto.SSEC.IsRequested(r.Header) {
-		opts, err = getOpts(ctx, r, bucket, object)
-		opts.VersionID = vid
-		opts.Versioned = versioned
-		opts.VersionSuspended = versionSuspended
-		opts.UserDefined = metadata
-		opts.WantChecksum = wantCRC
-		return
-	}
 	if crypto.S3KMS.IsRequested(r.Header) {
 		keyID, context, err := crypto.S3KMS.ParseHTTP(r.Header)
 		if err != nil {
@@ -381,23 +337,7 @@ func copyDstOpts(ctx context.Context, r *http.Request, bucket, object string, me
 // get ObjectOptions for Copy calls with encryption headers provided on the source side
 func copySrcOpts(ctx context.Context, r *http.Request, bucket, object string) (ObjectOptions, error) {
-	var (
-		ssec encrypt.ServerSide
-		opts ObjectOptions
-	)
-
-	if GlobalGatewaySSE.SSEC() && crypto.SSECopy.IsRequested(r.Header) {
-		key, err := crypto.SSECopy.ParseHTTP(r.Header)
-		if err != nil {
-			return opts, err
-		}
-		derivedKey := deriveClientKey(key, bucket, object)
-		ssec, err = encrypt.NewSSEC(derivedKey[:])
-		if err != nil {
-			return opts, err
-		}
-		return ObjectOptions{ServerSideEncryption: encrypt.SSECopy(ssec)}, nil
-	}
+	var opts ObjectOptions
 	// default case of passing encryption headers to backend
 	opts, err := getDefaultOpts(r.Header, false, nil)
diff --git a/cmd/object-api-utils.go b/cmd/object-api-utils.go
index ede145d4e..6891d95eb 100644
--- a/cmd/object-api-utils.go
+++ b/cmd/object-api-utils.go
@@ -940,20 +940,6 @@ func sealETagFn(key crypto.ObjectKey) SealMD5CurrFn {
 	return fn
 }
-// CleanMinioInternalMetadataKeys removes X-Amz-Meta- prefix from minio internal
-// encryption metadata that was sent by minio gateway
-func CleanMinioInternalMetadataKeys(metadata map[string]string) map[string]string {
-	newMeta := make(map[string]string, len(metadata))
-	for k, v := range metadata {
-		if strings.HasPrefix(k, "X-Amz-Meta-X-Minio-Internal-") {
-			newMeta[strings.TrimPrefix(k, "X-Amz-Meta-")] = v
-		} else {
-			newMeta[k] = v
-		}
-	}
-	return newMeta
-}
-
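The removed getOpts and copySrcOpts branches built SSE-C options from a client key via minio-go's encrypt package (after a per-object key derivation step internal to MinIO, not reproduced here). The public half of that flow:

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/minio/minio-go/v7/pkg/encrypt"
)

func main() {
	// encrypt.NewSSEC wants exactly 32 key bytes; a random key stands in
	// for the derived client key used by the deleted code.
	key := make([]byte, 32)
	if _, err := rand.Read(key); err != nil {
		fmt.Println(err)
		return
	}
	sse, err := encrypt.NewSSEC(key)
	if err != nil {
		fmt.Println(err)
		return
	}

	// For the source side of a server-side copy, the same key is wrapped
	// so it marshals as x-amz-copy-source-* headers instead.
	_ = encrypt.SSECopy(sse)
	fmt.Println("SSE-C options ready")
}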
var compressOpts []s2.WriterOption diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 3c47d4076..b4fbd157e 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -130,7 +130,6 @@ func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r return } - // get gateway encryption options opts, err := getOpts(ctx, r, bucket, object) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) @@ -335,7 +334,6 @@ func (api objectAPIHandlers) getObjectHandler(ctx context.Context, objectAPI Obj return } - // get gateway encryption options opts, err := getOpts(ctx, r, bucket, object) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) @@ -1020,16 +1018,9 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re } if crypto.Requested(r.Header) { - if globalIsGateway { - if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - } else { - if !objectAPI.IsEncryptionSupported() { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } + if !objectAPI.IsEncryptionSupported() { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) + return } } @@ -1102,7 +1093,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re sseConfig, _ := globalBucketSSEConfigSys.Get(dstBucket) sseConfig.Apply(r.Header, sse.ApplyOptions{ AutoEncrypt: globalAutoEncryption, - Passthrough: globalIsGateway && globalGatewayName == S3BackendGateway, }) var srcOpts, dstOpts ObjectOptions @@ -1405,9 +1395,6 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - if globalIsGateway { - srcInfo.UserDefined[xhttp.AmzTagDirective] = replaceDirective - } } if objTags != "" { @@ -1645,16 +1632,9 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req } if crypto.Requested(r.Header) { - if globalIsGateway { - if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - } else { - if !objectAPI.IsEncryptionSupported() { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } + if !objectAPI.IsEncryptionSupported() { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) + return } } @@ -1790,7 +1770,6 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req sseConfig, _ := globalBucketSSEConfigSys.Get(bucket) sseConfig.Apply(r.Header, sse.ApplyOptions{ AutoEncrypt: globalAutoEncryption, - Passthrough: globalIsGateway && globalGatewayName == S3BackendGateway, }) actualSize := size @@ -1805,7 +1784,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - if err = actualReader.AddChecksum(r, globalIsGateway); err != nil { + if err = actualReader.AddChecksum(r, false); err != nil { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL) return } @@ -1826,7 +1805,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - if err := hashReader.AddChecksum(r, size < 0 || globalIsGateway); err != nil { + if err := hashReader.AddChecksum(r, size < 
0); err != nil { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL) return } @@ -1834,7 +1813,6 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req rawReader := hashReader pReader := NewPutObjReader(rawReader) - // get gateway encryption options var opts ObjectOptions opts, err = putOpts(ctx, r, bucket, object, metadata) if err != nil { @@ -2025,16 +2003,9 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h } if crypto.Requested(r.Header) { - if globalIsGateway { - if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - } else { - if !objectAPI.IsEncryptionSupported() { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } + if !objectAPI.IsEncryptionSupported() { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) + return } } @@ -2139,7 +2110,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - if err = hreader.AddChecksum(r, globalIsGateway); err != nil { + if err = hreader.AddChecksum(r, false); err != nil { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL) return } @@ -2153,7 +2124,6 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h sseConfig, _ := globalBucketSSEConfigSys.Get(bucket) sseConfig.Apply(r.Header, sse.ApplyOptions{ AutoEncrypt: globalAutoEncryption, - Passthrough: globalIsGateway && globalGatewayName == S3BackendGateway, }) retPerms := isPutActionAllowed(ctx, getRequestAuthType(r), bucket, object, r, iampolicy.PutObjectRetentionAction) diff --git a/cmd/object-multipart-handlers.go b/cmd/object-multipart-handlers.go index d215c4255..5c499b6b2 100644 --- a/cmd/object-multipart-handlers.go +++ b/cmd/object-multipart-handlers.go @@ -68,16 +68,9 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r } if crypto.Requested(r.Header) { - if globalIsGateway { - if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - } else { - if !objectAPI.IsEncryptionSupported() { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } + if !objectAPI.IsEncryptionSupported() { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) + return } } @@ -98,7 +91,6 @@ func (api objectAPIHandlers) NewMultipartUploadHandler(w http.ResponseWriter, r sseConfig, _ := globalBucketSSEConfigSys.Get(bucket) sseConfig.Apply(r.Header, sse.ApplyOptions{ AutoEncrypt: globalAutoEncryption, - Passthrough: globalIsGateway && globalGatewayName == S3BackendGateway, }) // Validate storage class metadata if present @@ -245,16 +237,9 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http } if crypto.Requested(r.Header) { - if globalIsGateway { - if crypto.SSEC.IsRequested(r.Header) && !objectAPI.IsEncryptionSupported() { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - } else { - if !objectAPI.IsEncryptionSupported() { - writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } + if !objectAPI.IsEncryptionSupported() { + writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) + return } } @@ -390,7 +375,7 
@@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - if err = actualReader.AddChecksum(r, globalIsGateway); err != nil { + if err = actualReader.AddChecksum(r, false); err != nil { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL) return } @@ -411,7 +396,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } - if err := hashReader.AddChecksum(r, size < 0 || globalIsGateway); err != nil { + if err := hashReader.AddChecksum(r, size < 0); err != nil { writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidChecksum), r.URL) return } diff --git a/cmd/s3-zip-handlers.go b/cmd/s3-zip-handlers.go index ffcafac6c..e7375ba95 100644 --- a/cmd/s3-zip-handlers.go +++ b/cmd/s3-zip-handlers.go @@ -20,7 +20,6 @@ package cmd import ( "bytes" "context" - "encoding/base64" "errors" "fmt" "io" @@ -78,7 +77,6 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context, return } - // get gateway encryption options opts, err := getOpts(ctx, r, bucket, zipPath) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) @@ -501,34 +499,20 @@ func updateObjectMetadataWithZipInfo(ctx context.Context, objectAPI ObjectLayer, } srcInfo.UserDefined[archiveTypeMetadataKey] = archiveType - var zipInfoStr string - if globalIsGateway { - zipInfoStr = base64.StdEncoding.EncodeToString(zipInfo) - } else { - zipInfoStr = string(zipInfo) + zipInfoStr := string(zipInfo) + popts := ObjectOptions{ + MTime: srcInfo.ModTime, + VersionID: srcInfo.VersionID, + EvalMetadataFn: func(oi ObjectInfo) error { + oi.UserDefined[archiveTypeMetadataKey] = archiveType + oi.UserDefined[archiveInfoMetadataKey] = zipInfoStr + return nil + }, } - if globalIsGateway { - srcInfo.UserDefined[archiveInfoMetadataKey] = zipInfoStr - - // Use CopyObject API only for Gateway mode. - if _, err = objectAPI.CopyObject(ctx, bucket, object, bucket, object, srcInfo, opts, opts); err != nil { - return nil, err - } - } else { - popts := ObjectOptions{ - MTime: srcInfo.ModTime, - VersionID: srcInfo.VersionID, - EvalMetadataFn: func(oi ObjectInfo) error { - oi.UserDefined[archiveTypeMetadataKey] = archiveType - oi.UserDefined[archiveInfoMetadataKey] = zipInfoStr - return nil - }, - } - // For all other modes use in-place update to update metadata on a specific version. - if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil { - return nil, err - } + // Use in-place update to set metadata on a specific version; the gateway-only CopyObject path is gone. + if _, err = objectAPI.PutObjectMetadata(ctx, bucket, object, popts); err != nil { + return nil, err } return zipInfo, nil diff --git a/cmd/server-main.go b/cmd/server-main.go index 83ff74518..7ad8ac511 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -657,7 +657,7 @@ func serverMain(ctx *cli.Context) { // initialize the new disk cache objects. if globalCacheConfig.Enabled { - logger.Info(color.Yellow("WARNING: Drive caching is deprecated for single/multi drive MinIO setups. 
Please migrate to using MinIO S3 gateway instead of drive caching")) + logger.Info(color.Yellow("WARNING: Drive caching is deprecated for single/multi drive MinIO setups.")) var cacheAPI CacheObjectLayer cacheAPI, err = newServerCacheObjects(GlobalContext, globalCacheConfig) logger.FatalIf(err, "Unable to initialize drive caching") @@ -705,17 +705,5 @@ func serverMain(ctx *cli.Context) { // Initialize object layer with the supplied disks, objectLayer is nil upon any error. func newObjectLayer(ctx context.Context, endpointServerPools EndpointServerPools) (newObject ObjectLayer, err error) { - // For FS only, directly use the disk. - if endpointServerPools.NEndpoints() == 1 { - // Initialize new FS object layer. - newObject, err = NewFSObjectLayer(ctx, endpointServerPools[0].Endpoints[0].Path) - if err == nil { - return newObject, nil - } - if err != nil && err != errFreshDisk { - return newObject, err - } - } - return newErasureServerPools(ctx, endpointServerPools) } diff --git a/cmd/setup-type.go b/cmd/setup-type.go index c540f2ecc..1da83da72 100644 --- a/cmd/setup-type.go +++ b/cmd/setup-type.go @@ -35,9 +35,6 @@ const ( // DistErasureSetupType - Distributed Erasure setup type enum. DistErasureSetupType - - // GatewaySetupType - gateway setup type enum. - GatewaySetupType ) func (setupType SetupType) String() string { @@ -50,8 +47,6 @@ func (setupType SetupType) String() string { return globalMinioModeErasure case DistErasureSetupType: return globalMinioModeDistErasure - case GatewaySetupType: - return globalMinioModeGatewayPrefix } return "unknown" diff --git a/cmd/signals.go b/cmd/signals.go index 6606fc240..ae58a9266 100644 --- a/cmd/signals.go +++ b/cmd/signals.go @@ -79,15 +79,11 @@ func handleSignals() { logger.LogIf(context.Background(), err) exit(stopProcess()) case osSignal := <-globalOSSignalCh: - if !globalIsGateway { - globalReplicationPool.SaveState(context.Background()) - } + globalReplicationPool.SaveState(context.Background()) logger.Info("Exiting on signal: %s", strings.ToUpper(osSignal.String())) exit(stopProcess()) case signal := <-globalServiceSignalCh: - if !globalIsGateway { - globalReplicationPool.SaveState(context.Background()) - } + globalReplicationPool.SaveState(context.Background()) switch signal { case serviceRestart: logger.Info("Restarting on service signal") diff --git a/cmd/signature-v4-utils.go b/cmd/signature-v4-utils.go index c090f1d2d..fea7420d6 100644 --- a/cmd/signature-v4-utils.go +++ b/cmd/signature-v4-utils.go @@ -141,7 +141,7 @@ func isValidRegion(reqRegion string, confRegion string) bool { // check if the access key is valid and recognized, additionally // also returns if the access key is owner/admin. func checkKeyValid(r *http.Request, accessKey string) (auth.Credentials, bool, APIErrorCode) { - if !globalIAMSys.Initialized() && !globalIsGateway { + if !globalIAMSys.Initialized() { // Check if server has initialized, then only proceed // to check for IAM users otherwise its okay for clients // to retry with 503 errors when server is coming up. diff --git a/cmd/storage-errors.go b/cmd/storage-errors.go index dfbdc854c..7686a29b6 100644 --- a/cmd/storage-errors.go +++ b/cmd/storage-errors.go @@ -103,9 +103,6 @@ var errBitrotHashAlgoInvalid = StorageErr("bit-rot hash algorithm is invalid") // errCrossDeviceLink - rename across devices not allowed. var errCrossDeviceLink = StorageErr("Rename across devices not allowed, please fix your backend configuration") -// errMinDiskSize - cannot create volume or files when disk size is less than threshold. 
-var errMinDiskSize = StorageErr("The drive size is less than 900MiB threshold") - // errLessData - returned when less data available than what was requested. var errLessData = StorageErr("less data available than what was requested") @@ -119,9 +116,6 @@ var errDoneForNow = errors.New("done for now") // to proceed to next entry. var errSkipFile = errors.New("skip this file") -// Returned by FS drive mode when a fresh disk is specified. -var errFreshDisk = errors.New("FS backend requires existing drive") - // errXLBackend XL drive mode requires fresh deployment. var errXLBackend = errors.New("XL backend requires fresh drive") diff --git a/cmd/tier-handlers.go b/cmd/tier-handlers.go index 068d9c9ba..4708f9b00 100644 --- a/cmd/tier-handlers.go +++ b/cmd/tier-handlers.go @@ -74,11 +74,6 @@ func (api adminAPIHandlers) AddTierHandler(w http.ResponseWriter, r *http.Reques defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - objAPI, cred := validateAdminReq(ctx, w, r, iampolicy.SetTierAction) if objAPI == nil || globalNotificationSys == nil || globalTierConfigMgr == nil { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) @@ -132,11 +127,6 @@ func (api adminAPIHandlers) ListTierHandler(w http.ResponseWriter, r *http.Reque defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - objAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListTierAction) if objAPI == nil || globalNotificationSys == nil || globalTierConfigMgr == nil { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) @@ -158,11 +148,6 @@ func (api adminAPIHandlers) EditTierHandler(w http.ResponseWriter, r *http.Reque defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - objAPI, cred := validateAdminReq(ctx, w, r, iampolicy.SetTierAction) if objAPI == nil || globalNotificationSys == nil || globalTierConfigMgr == nil { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) @@ -210,11 +195,6 @@ func (api adminAPIHandlers) RemoveTierHandler(w http.ResponseWriter, r *http.Req defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - objAPI, _ := validateAdminReq(ctx, w, r, iampolicy.SetTierAction) if objAPI == nil || globalNotificationSys == nil || globalTierConfigMgr == nil { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) @@ -247,11 +227,6 @@ func (api adminAPIHandlers) VerifyTierHandler(w http.ResponseWriter, r *http.Req defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - if globalIsGateway { - writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - objAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListTierAction) if objAPI == nil || globalNotificationSys == nil || globalTierConfigMgr == nil { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) @@ -273,11 +248,6 @@ func (api adminAPIHandlers) TierStatsHandler(w http.ResponseWriter, r *http.Requ defer logger.AuditLog(ctx, w, r, mustGetClaimsFromToken(r)) - if globalIsGateway { - writeErrorResponseJSON(ctx, 
w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL) - return - } - objAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListTierAction) if objAPI == nil || globalNotificationSys == nil || globalTierConfigMgr == nil { writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL) diff --git a/cmd/tier.go b/cmd/tier.go index 8ead2de46..27cf4972b 100644 --- a/cmd/tier.go +++ b/cmd/tier.go @@ -415,10 +415,5 @@ func (config *TierConfigMgr) Reset() { // Init initializes tier configuration reading from objAPI func (config *TierConfigMgr) Init(ctx context.Context, objAPI ObjectLayer) error { - // In gateway mode, we don't support ILM tier configuration. - if globalIsGateway { - return nil - } - return config.Reload(ctx, objAPI) } diff --git a/cmd/tree-walk.go b/cmd/tree-walk.go deleted file mode 100644 index 1636705b5..000000000 --- a/cmd/tree-walk.go +++ /dev/null @@ -1,296 +0,0 @@ -// Copyright (c) 2015-2021 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cmd - -import ( - "context" - "sort" - "strings" -) - -// TreeWalkResult - Tree walk result carries results of tree walking. -type TreeWalkResult struct { - entry string - isEmptyDir bool - end bool -} - -// Return entries that have prefix prefixEntry. -// The supplied entries are modified and the returned string is a subslice of entries. -func filterMatchingPrefix(entries []string, prefixEntry string) []string { - if len(entries) == 0 || prefixEntry == "" { - return entries - } - // Write to the beginning of entries. - dst := entries[:0] - for _, s := range entries { - if !HasPrefix(s, prefixEntry) { - continue - } - dst = append(dst, s) - } - return dst -} - -// xl.ListDir returns entries with trailing "/" for directories. At the object layer -// we need to remove this trailing "/" for objects and retain "/" for prefixes before -// sorting because the trailing "/" can affect the sorting results for certain cases. -// Ex. lets say entries = ["a-b/", "a/"] and both are objects. -// -// sorting with out trailing "/" = ["a", "a-b"] -// sorting with trailing "/" = ["a-b/", "a/"] -// -// Hence if entries[] does not have a case like the above example then isLeaf() check -// can be delayed till the entry is pushed into the TreeWalkResult channel. -// delayIsLeafCheck() returns true if isLeaf can be delayed or false if -// isLeaf should be done in listDir() -func delayIsLeafCheck(entries []string) bool { - for i, entry := range entries { - if HasSuffix(entry, globalDirSuffixWithSlash) { - return false - } - if i == len(entries)-1 { - break - } - // If any byte in the "entry" string is less than '/' then the - // next "entry" should not contain '/' at the same same byte position. 
- for j := 0; j < len(entry); j++ { - if entry[j] < '/' { - if len(entries[i+1]) > j { - if entries[i+1][j] == '/' { - return false - } - } - } - } - } - return true -} - -// ListDirFunc - "listDir" function of type listDirFunc returned by listDirFactory() - explained below. -type ListDirFunc func(bucket, prefixDir, prefixEntry string) (emptyDir bool, entries []string, delayIsLeaf bool) - -// IsLeafFunc - A function isLeaf of type isLeafFunc is used to detect if an -// entry is a leaf entry. There are 2 scenarios where isLeaf should behave -// differently depending on the backend: -// 1. FS backend object listing - isLeaf is true if the entry -// has no trailing "/" -// 2. Erasure backend object listing - isLeaf is true if the entry -// is a directory and contains xl.meta -type IsLeafFunc func(string, string) bool - -// IsLeafDirFunc - A function isLeafDir of type isLeafDirFunc is used to detect -// if an entry is empty directory. -type IsLeafDirFunc func(string, string) bool - -func filterListEntries(bucket, prefixDir string, entries []string, prefixEntry string, isLeaf IsLeafFunc) ([]string, bool) { - // Filter entries that have the prefix prefixEntry. - entries = filterMatchingPrefix(entries, prefixEntry) - - // Listing needs to be sorted. - sort.Slice(entries, func(i, j int) bool { - if !HasSuffix(entries[i], globalDirSuffixWithSlash) && !HasSuffix(entries[j], globalDirSuffixWithSlash) { - return entries[i] < entries[j] - } - first := entries[i] - second := entries[j] - if HasSuffix(first, globalDirSuffixWithSlash) { - first = strings.TrimSuffix(first, globalDirSuffixWithSlash) + slashSeparator - } - if HasSuffix(second, globalDirSuffixWithSlash) { - second = strings.TrimSuffix(second, globalDirSuffixWithSlash) + slashSeparator - } - return first < second - }) - - // Can isLeaf() check be delayed till when it has to be sent down the - // TreeWalkResult channel? - delayIsLeaf := delayIsLeafCheck(entries) - if delayIsLeaf { - return entries, true - } - - // isLeaf() check has to happen here so that trailing "/" for objects can be removed. - for i, entry := range entries { - if isLeaf(bucket, pathJoin(prefixDir, entry)) { - entries[i] = strings.TrimSuffix(entry, slashSeparator) - } - } - - // Sort again after removing trailing "/" for objects as the previous sort - // does not hold good anymore. - sort.Slice(entries, func(i, j int) bool { - if !HasSuffix(entries[i], globalDirSuffix) && !HasSuffix(entries[j], globalDirSuffix) { - return entries[i] < entries[j] - } - first := entries[i] - second := entries[j] - if HasSuffix(first, globalDirSuffix) { - first = strings.TrimSuffix(first, globalDirSuffix) + slashSeparator - } - if HasSuffix(second, globalDirSuffix) { - second = strings.TrimSuffix(second, globalDirSuffix) + slashSeparator - } - if first == second { - return HasSuffix(entries[i], globalDirSuffix) - } - return first < second - }) - return entries, false -} - -// treeWalk walks directory tree recursively pushing TreeWalkResult into the channel as and when it encounters files. 
-func doTreeWalk(ctx context.Context, bucket, prefixDir, entryPrefixMatch, marker string, recursive bool, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, resultCh chan TreeWalkResult, endWalkCh <-chan struct{}, isEnd bool) (emptyDir bool, treeErr error) { - // Example: - // if prefixDir="one/two/three/" and marker="four/five.txt" treeWalk is recursively - // called with prefixDir="one/two/three/four/" and marker="five.txt" - - var markerBase, markerDir string - if marker != "" { - // Ex: if marker="four/five.txt", markerDir="four/" markerBase="five.txt" - markerSplit := strings.SplitN(marker, SlashSeparator, 2) - markerDir = markerSplit[0] - if len(markerSplit) == 2 { - markerDir += SlashSeparator - markerBase = markerSplit[1] - } - } - - emptyDir, entries, delayIsLeaf := listDir(bucket, prefixDir, entryPrefixMatch) - // When isleaf check is delayed, make sure that it is set correctly here. - if delayIsLeaf && isLeaf == nil || isLeafDir == nil { - return false, errInvalidArgument - } - - // For an empty list return right here. - if emptyDir { - return true, nil - } - - // example: - // If markerDir="four/" Search() returns the index of "four/" in the sorted - // entries list so we skip all the entries till "four/" - idx := sort.Search(len(entries), func(i int) bool { - return entries[i] >= markerDir - }) - entries = entries[idx:] - // For an empty list after search through the entries, return right here. - if len(entries) == 0 { - return false, nil - } - - for i, entry := range entries { - var leaf, leafDir bool - - // Decision to do isLeaf check was pushed from listDir() to here. - if delayIsLeaf { - leaf = isLeaf(bucket, pathJoin(prefixDir, entry)) - if leaf { - entry = strings.TrimSuffix(entry, slashSeparator) - } - } else { - leaf = !HasSuffix(entry, slashSeparator) - } - - if HasSuffix(entry, slashSeparator) { - leafDir = isLeafDir(bucket, pathJoin(prefixDir, entry)) - } - - isDir := !leafDir && !leaf - - if i == 0 && markerDir == entry { - if !recursive { - // Skip as the marker would already be listed in the previous listing. - continue - } - if recursive && !isDir { - // We should not skip for recursive listing and if markerDir is a directory - // for ex. if marker is "four/five.txt" markerDir will be "four/" which - // should not be skipped, instead it will need to be treeWalk()'ed into. - - // Skip if it is a file though as it would be listed in previous listing. - continue - } - } - if recursive && isDir { - // If the entry is a directory, we will need recurse into it. - markerArg := "" - if entry == markerDir { - // We need to pass "five.txt" as marker only if we are - // recursing into "four/" - markerArg = markerBase - } - prefixMatch := "" // Valid only for first level treeWalk and empty for subdirectories. - // markIsEnd is passed to this entry's treeWalk() so that treeWalker.end can be marked - // true at the end of the treeWalk stream. - markIsEnd := i == len(entries)-1 && isEnd - emptyDir, err := doTreeWalk(ctx, bucket, pathJoin(prefixDir, entry), prefixMatch, markerArg, recursive, - listDir, isLeaf, isLeafDir, resultCh, endWalkCh, markIsEnd) - if err != nil { - return false, err - } - - // A nil totalFound means this is an empty directory that - // needs to be sent to the result channel, otherwise continue - // to the next entry. - if !emptyDir { - continue - } - } - - // EOF is set if we are at last entry and the caller indicated we at the end. 
- isEOF := ((i == len(entries)-1) && isEnd) - select { - case <-endWalkCh: - return false, errWalkAbort - case resultCh <- TreeWalkResult{entry: pathJoin(prefixDir, entry), isEmptyDir: leafDir, end: isEOF}: - } - } - - // Everything is listed. - return false, nil -} - -// Initiate a new treeWalk in a goroutine. -func startTreeWalk(ctx context.Context, bucket, prefix, marker string, recursive bool, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc, endWalkCh <-chan struct{}) chan TreeWalkResult { - // Example 1 - // If prefix is "one/two/three/" and marker is "one/two/three/four/five.txt" - // treeWalk is called with prefixDir="one/two/three/" and marker="four/five.txt" - // and entryPrefixMatch="" - - // Example 2 - // if prefix is "one/two/th" and marker is "one/two/three/four/five.txt" - // treeWalk is called with prefixDir="one/two/" and marker="three/four/five.txt" - // and entryPrefixMatch="th" - - resultCh := make(chan TreeWalkResult, maxObjectList) - entryPrefixMatch := prefix - prefixDir := "" - lastIndex := strings.LastIndex(prefix, SlashSeparator) - if lastIndex != -1 { - entryPrefixMatch = prefix[lastIndex+1:] - prefixDir = prefix[:lastIndex+1] - } - marker = strings.TrimPrefix(marker, prefixDir) - go func() { - isEnd := true // Indication to start walking the tree with end as true. - doTreeWalk(ctx, bucket, prefixDir, entryPrefixMatch, marker, recursive, listDir, isLeaf, isLeafDir, resultCh, endWalkCh, isEnd) - close(resultCh) - }() - return resultCh -} diff --git a/cmd/tree-walk_test.go b/cmd/tree-walk_test.go deleted file mode 100644 index 11cf5462f..000000000 --- a/cmd/tree-walk_test.go +++ /dev/null @@ -1,494 +0,0 @@ -// Copyright (c) 2015-2021 MinIO, Inc. -// -// This file is part of MinIO Object Storage stack -// -// This program is free software: you can redistribute it and/or modify -// it under the terms of the GNU Affero General Public License as published by -// the Free Software Foundation, either version 3 of the License, or -// (at your option) any later version. -// -// This program is distributed in the hope that it will be useful -// but WITHOUT ANY WARRANTY; without even the implied warranty of -// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -// GNU Affero General Public License for more details. -// -// You should have received a copy of the GNU Affero General Public License -// along with this program. If not, see . - -package cmd - -import ( - "context" - "fmt" - "reflect" - "sort" - "strings" - "testing" - "time" -) - -// Fixed volume name that could be used across tests -const volume = "testvolume" - -// Test for filterMatchingPrefix. -func TestFilterMatchingPrefix(t *testing.T) { - entries := []string{"a", "aab", "ab", "abbbb", "zzz"} - testCases := []struct { - prefixEntry string - result []string - }{ - { - // Empty prefix should match all entries. - "", - []string{"a", "aab", "ab", "abbbb", "zzz"}, - }, - { - "a", - []string{"a", "aab", "ab", "abbbb"}, - }, - { - "aa", - []string{"aab"}, - }, - { - // Does not match any of the entries. - "c", - []string{}, - }, - } - for i, testCase := range testCases { - expected := testCase.result - got := filterMatchingPrefix(entries, testCase.prefixEntry) - if !reflect.DeepEqual(expected, got) { - t.Errorf("Test %d : expected %v, got %v", i+1, expected, got) - } - } -} - -// Helper function that creates a volume and files in it. -func createNamespace(disk StorageAPI, volume string, files []string) error { - // Make a volume. 
- err := disk.MakeVol(context.Background(), volume) - if err != nil { - return err - } - - // Create files. - for _, file := range files { - err = disk.AppendFile(context.Background(), volume, file, []byte{}) - if err != nil { - return err - } - } - return err -} - -// Returns function "listDir" of the type listDirFunc. -// disks - used for doing disk.ListDir() -func listDirFactory(ctx context.Context, disk StorageAPI, isLeaf IsLeafFunc) ListDirFunc { - return func(volume, dirPath, dirEntry string) (emptyDir bool, entries []string, delayIsLeaf bool) { - entries, err := disk.ListDir(ctx, volume, dirPath, -1) - if err != nil { - return false, nil, false - } - if len(entries) == 0 { - return true, nil, false - } - entries, delayIsLeaf = filterListEntries(volume, dirPath, entries, dirEntry, isLeaf) - return false, entries, delayIsLeaf - } -} - -// Test if tree walker returns entries matching prefix alone are received -// when a non empty prefix is supplied. -func testTreeWalkPrefix(t *testing.T, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc) { - // Start the tree walk go-routine. - prefix := "d/" - endWalkCh := make(chan struct{}) - twResultCh := startTreeWalk(context.Background(), volume, prefix, "", true, listDir, isLeaf, isLeafDir, endWalkCh) - - // Check if all entries received on the channel match the prefix. - for res := range twResultCh { - if !HasPrefix(res.entry, prefix) { - t.Errorf("Entry %s doesn't match prefix %s", res.entry, prefix) - } - } -} - -// Test if entries received on tree walk's channel appear after the supplied marker. -func testTreeWalkMarker(t *testing.T, listDir ListDirFunc, isLeaf IsLeafFunc, isLeafDir IsLeafDirFunc) { - // Start the tree walk go-routine. - prefix := "" - endWalkCh := make(chan struct{}) - twResultCh := startTreeWalk(context.Background(), volume, prefix, "d/g", true, listDir, isLeaf, isLeafDir, endWalkCh) - - // Check if only 3 entries, namely d/g/h, i/j/k, lmn are received on the channel. - expectedCount := 3 - actualCount := 0 - for range twResultCh { - actualCount++ - } - if expectedCount != actualCount { - t.Errorf("Expected %d entries, actual no. of entries were %d", expectedCount, actualCount) - } -} - -// Test tree-walk. -func TestTreeWalk(t *testing.T) { - fsDir := t.TempDir() - - endpoints := mustGetNewEndpoints(fsDir) - disk, err := newStorageAPI(endpoints[0]) - if err != nil { - t.Fatalf("Unable to create StorageAPI: %s", err) - } - - files := []string{ - "d/e", - "d/f", - "d/g/h", - "i/j/k", - "lmn", - } - err = createNamespace(disk, volume, files) - if err != nil { - t.Fatal(err) - } - - isLeaf := func(bucket, leafPath string) bool { - return !strings.HasSuffix(leafPath, slashSeparator) - } - - isLeafDir := func(bucket, leafPath string) bool { - entries, _ := disk.ListDir(context.Background(), bucket, leafPath, 1) - return len(entries) == 0 - } - - listDir := listDirFactory(context.Background(), disk, isLeaf) - - // Simple test for prefix based walk. - testTreeWalkPrefix(t, listDir, isLeaf, isLeafDir) - - // Simple test when marker is set. - testTreeWalkMarker(t, listDir, isLeaf, isLeafDir) -} - -// Test if tree walk go-routine exits cleanly if tree walk is aborted because of timeout. -func TestTreeWalkTimeout(t *testing.T) { - fsDir := t.TempDir() - endpoints := mustGetNewEndpoints(fsDir) - disk, err := newStorageAPI(endpoints[0]) - if err != nil { - t.Fatalf("Unable to create StorageAPI: %s", err) - } - var myfiles []string - // Create maxObjectsList+1 number of entries. 
- for i := 0; i < maxObjectList+1; i++ { - myfiles = append(myfiles, fmt.Sprintf("file.%d", i)) - } - err = createNamespace(disk, volume, myfiles) - if err != nil { - t.Fatal(err) - } - - isLeaf := func(bucket, leafPath string) bool { - return !strings.HasSuffix(leafPath, slashSeparator) - } - - isLeafDir := func(bucket, leafPath string) bool { - entries, _ := disk.ListDir(context.Background(), bucket, leafPath, 1) - return len(entries) == 0 - } - - listDir := listDirFactory(context.Background(), disk, isLeaf) - - // TreeWalk pool with 2 seconds timeout for tree-walk go routines. - pool := NewTreeWalkPool(2 * time.Second) - - endWalkCh := make(chan struct{}) - prefix := "" - marker := "" - recursive := true - resultCh := startTreeWalk(context.Background(), volume, prefix, marker, recursive, listDir, isLeaf, isLeafDir, endWalkCh) - - params := listParams{ - bucket: volume, - recursive: recursive, - } - // Add Treewalk to the pool. - pool.Set(params, resultCh, endWalkCh) - - // Wait for the Treewalk to timeout. - <-time.After(3 * time.Second) - - // Read maxObjectList number of entries from the channel. - // maxObjectsList number of entries would have been filled into the resultCh - // buffered channel. After the timeout resultCh would get closed and hence the - // maxObjectsList+1 entry would not be sent in the channel. - i := 0 - for range resultCh { - i++ - if i == maxObjectList { - break - } - } - - // The last entry will not be received as the Treewalk goroutine would have exited. - _, ok := <-resultCh - if ok { - t.Error("Tree-walk go routine has not exited after timeout.") - } -} - -// TestRecursiveWalk - tests if treeWalk returns entries correctly with and -// without recursively traversing prefixes. -func TestRecursiveTreeWalk(t *testing.T) { - // Create a backend directories fsDir1. - fsDir1 := t.TempDir() - - endpoints := mustGetNewEndpoints(fsDir1) - disk1, err := newStorageAPI(endpoints[0]) - if err != nil { - t.Fatalf("Unable to create StorageAPI: %s", err) - } - - isLeaf := func(bucket, leafPath string) bool { - return !strings.HasSuffix(leafPath, slashSeparator) - } - - isLeafDir := func(bucket, leafPath string) bool { - entries, _ := disk1.ListDir(context.Background(), bucket, leafPath, 1) - return len(entries) == 0 - } - - // Create listDir function. - listDir := listDirFactory(context.Background(), disk1, isLeaf) - - // Create the namespace. 
- files := []string{ - "d/e", - "d/f", - "d/g/h", - "i/j/k", - "lmn", - } - err = createNamespace(disk1, volume, files) - if err != nil { - t.Fatal(err) - } - - endWalkCh := make(chan struct{}) - testCases := []struct { - prefix string - marker string - recursive bool - expected map[string]struct{} - }{ - // with no prefix, no marker and no recursive traversal - {"", "", false, map[string]struct{}{ - "d/": {}, - "i/": {}, - "lmn": {}, - }}, - // with no prefix, no marker and recursive traversal - {"", "", true, map[string]struct{}{ - "d/f": {}, - "d/g/h": {}, - "d/e": {}, - "i/j/k": {}, - "lmn": {}, - }}, - // with no prefix, marker and no recursive traversal - {"", "d/e", false, map[string]struct{}{ - "d/f": {}, - "d/g/": {}, - "i/": {}, - "lmn": {}, - }}, - // with no prefix, marker and recursive traversal - {"", "d/e", true, map[string]struct{}{ - "d/f": {}, - "d/g/h": {}, - "i/j/k": {}, - "lmn": {}, - }}, - // with prefix, no marker and no recursive traversal - {"d/", "", false, map[string]struct{}{ - "d/e": {}, - "d/f": {}, - "d/g/": {}, - }}, - // with prefix, no marker and no recursive traversal - {"d/", "", true, map[string]struct{}{ - "d/e": {}, - "d/f": {}, - "d/g/h": {}, - }}, - // with prefix, marker and no recursive traversal - {"d/", "d/e", false, map[string]struct{}{ - "d/f": {}, - "d/g/": {}, - }}, - // with prefix, marker and recursive traversal - {"d/", "d/e", true, map[string]struct{}{ - "d/f": {}, - "d/g/h": {}, - }}, - } - for i, testCase := range testCases { - testCase := testCase - t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) { - for entry := range startTreeWalk(context.Background(), volume, - testCase.prefix, testCase.marker, testCase.recursive, - listDir, isLeaf, isLeafDir, endWalkCh) { - if _, found := testCase.expected[entry.entry]; !found { - t.Errorf("Expected %s, but couldn't find", entry.entry) - } - } - }) - } -} - -func TestSortedness(t *testing.T) { - // Create a backend directories fsDir1. - fsDir1 := t.TempDir() - - endpoints := mustGetNewEndpoints(fsDir1) - disk1, err := newStorageAPI(endpoints[0]) - if err != nil { - t.Fatalf("Unable to create StorageAPI: %s", err) - } - - isLeaf := func(bucket, leafPath string) bool { - return !strings.HasSuffix(leafPath, slashSeparator) - } - - isLeafDir := func(bucket, leafPath string) bool { - entries, _ := disk1.ListDir(context.Background(), bucket, leafPath, 1) - return len(entries) == 0 - } - - // Create listDir function. - listDir := listDirFactory(context.Background(), disk1, isLeaf) - - // Create the namespace. 
- files := []string{ - "d/e", - "d/f", - "d/g/h", - "i/j/k", - "lmn", - } - err = createNamespace(disk1, volume, files) - if err != nil { - t.Fatal(err) - } - - endWalkCh := make(chan struct{}) - testCases := []struct { - prefix string - marker string - recursive bool - }{ - // with no prefix, no marker and no recursive traversal - {"", "", false}, - // with no prefix, no marker and recursive traversal - {"", "", true}, - // with no prefix, marker and no recursive traversal - {"", "d/e", false}, - // with no prefix, marker and recursive traversal - {"", "d/e", true}, - // with prefix, no marker and no recursive traversal - {"d/", "", false}, - // with prefix, no marker and no recursive traversal - {"d/", "", true}, - // with prefix, marker and no recursive traversal - {"d/", "d/e", false}, - // with prefix, marker and recursive traversal - {"d/", "d/e", true}, - } - for i, test := range testCases { - var actualEntries []string - for entry := range startTreeWalk(context.Background(), volume, - test.prefix, test.marker, test.recursive, - listDir, isLeaf, isLeafDir, endWalkCh) { - actualEntries = append(actualEntries, entry.entry) - } - if !sort.IsSorted(sort.StringSlice(actualEntries)) { - t.Error(i+1, "Expected entries to be sort, but it wasn't") - } - } -} - -func TestTreeWalkIsEnd(t *testing.T) { - // Create a backend directories fsDir1. - fsDir1 := t.TempDir() - - endpoints := mustGetNewEndpoints(fsDir1) - disk1, err := newStorageAPI(endpoints[0]) - if err != nil { - t.Fatalf("Unable to create StorageAPI: %s", err) - } - - isLeaf := func(bucket, leafPath string) bool { - return !strings.HasSuffix(leafPath, slashSeparator) - } - - isLeafDir := func(bucket, leafPath string) bool { - entries, _ := disk1.ListDir(context.Background(), bucket, leafPath, 1) - return len(entries) == 0 - } - - // Create listDir function. - listDir := listDirFactory(context.Background(), disk1, isLeaf) - - // Create the namespace. 
- files := []string{ - "d/e", - "d/f", - "d/g/h", - "i/j/k", - "lmn", - } - err = createNamespace(disk1, volume, files) - if err != nil { - t.Fatal(err) - } - - endWalkCh := make(chan struct{}) - testCases := []struct { - prefix string - marker string - recursive bool - expectedEntry string - }{ - // with no prefix, no marker and no recursive traversal - {"", "", false, "lmn"}, - // with no prefix, no marker and recursive traversal - {"", "", true, "lmn"}, - // with no prefix, marker and no recursive traversal - {"", "d/e", false, "lmn"}, - // with no prefix, marker and recursive traversal - {"", "d/e", true, "lmn"}, - // with prefix, no marker and no recursive traversal - {"d/", "", false, "d/g/"}, - // with prefix, no marker and no recursive traversal - {"d/", "", true, "d/g/h"}, - // with prefix, marker and no recursive traversal - {"d/", "d/e", false, "d/g/"}, - // with prefix, marker and recursive traversal - {"d/", "d/e", true, "d/g/h"}, - } - for i, test := range testCases { - var entry TreeWalkResult - for entry = range startTreeWalk(context.Background(), volume, test.prefix, - test.marker, test.recursive, listDir, isLeaf, isLeafDir, endWalkCh) { - } - if entry.entry != test.expectedEntry { - t.Errorf("Test %d: Expected entry %s, but received %s with the EOF marker", i, test.expectedEntry, entry.entry) - } - if !entry.end { - t.Errorf("Test %d: Last entry %s, doesn't have EOF marker set", i, entry.entry) - } - } -} diff --git a/cmd/utils.go b/cmd/utils.go index dc9866130..97f6b5e9e 100644 --- a/cmd/utils.go +++ b/cmd/utils.go @@ -46,18 +46,21 @@ import ( "github.com/felixge/fgprof" "github.com/gorilla/mux" "github.com/minio/madmin-go" + "github.com/minio/minio-go/v7" miniogopolicy "github.com/minio/minio-go/v7/pkg/policy" "github.com/minio/minio/internal/config" "github.com/minio/minio/internal/config/api" xtls "github.com/minio/minio/internal/config/identity/tls" "github.com/minio/minio/internal/fips" "github.com/minio/minio/internal/handlers" + "github.com/minio/minio/internal/hash" xhttp "github.com/minio/minio/internal/http" ioutilx "github.com/minio/minio/internal/ioutil" "github.com/minio/minio/internal/logger" "github.com/minio/minio/internal/logger/message/audit" "github.com/minio/pkg/certs" "github.com/minio/pkg/env" + xnet "github.com/minio/pkg/net" "golang.org/x/oauth2" ) @@ -87,6 +90,79 @@ func IsErr(err error, errs ...error) bool { return false } +// ErrorRespToObjectError converts minio-go SDK errors to MinIO object layer errors. +func ErrorRespToObjectError(err error, params ...string) error { + if err == nil { + return nil + } + + bucket := "" + object := "" + if len(params) >= 1 { + bucket = params[0] + } + if len(params) == 2 { + object = params[1] + } + + if xnet.IsNetworkOrHostDown(err, false) { + return BackendDown{Err: err.Error()} + } + + minioErr, ok := err.(minio.ErrorResponse) + if !ok { + // We don't interpret non-MinIO errors, since only minio.ErrorResponse + // carries the Code and StatusCode needed to map to object layer errors. 
+ return err + } + + switch minioErr.Code { + case "PreconditionFailed": + err = PreConditionFailed{} + case "InvalidRange": + err = InvalidRange{} + case "BucketAlreadyOwnedByYou": + err = BucketAlreadyOwnedByYou{} + case "BucketNotEmpty": + err = BucketNotEmpty{} + case "NoSuchBucketPolicy": + err = BucketPolicyNotFound{} + case "NoSuchLifecycleConfiguration": + err = BucketLifecycleNotFound{} + case "InvalidBucketName": + err = BucketNameInvalid{Bucket: bucket} + case "InvalidPart": + err = InvalidPart{} + case "NoSuchBucket": + err = BucketNotFound{Bucket: bucket} + case "NoSuchKey": + if object != "" { + err = ObjectNotFound{Bucket: bucket, Object: object} + } else { + err = BucketNotFound{Bucket: bucket} + } + case "XMinioInvalidObjectName": + err = ObjectNameInvalid{} + case "AccessDenied": + err = PrefixAccessDenied{ + Bucket: bucket, + Object: object, + } + case "XAmzContentSHA256Mismatch": + err = hash.SHA256Mismatch{} + case "NoSuchUpload": + err = InvalidUploadID{} + case "EntityTooSmall": + err = PartTooSmall{} + } + + switch minioErr.StatusCode { + case http.StatusMethodNotAllowed: + err = toObjectErr(errMethodNotAllowed, bucket, object) + } + return err +} + // returns 'true' if either string has space in the // - beginning of a string // OR @@ -120,9 +196,6 @@ func path2BucketObject(s string) (bucket, prefix string) { return path2BucketObjectWithBasePath("", s) } -// CloneMSS is an exposed function of cloneMSS for gateway usage. -var CloneMSS = cloneMSS - // cloneMSS will clone a map[string]string. // If input is nil an empty map is returned, not nil. func cloneMSS(v map[string]string) map[string]string { @@ -182,9 +255,6 @@ const ( // Maximum Part ID for multipart upload is 10000 // (Acceptable values range from 1 to 10000 inclusive) globalMaxPartID = 10000 - - // Default values used while communicating for gateway communication - defaultDialTimeout = 5 * time.Second ) // isMaxObjectSize - verify if max object size @@ -615,10 +685,10 @@ func newCustomHTTPTransport(tlsConfig *tls.Config, dialTimeout time.Duration) fu } } -// NewGatewayHTTPTransportWithClientCerts returns a new http configuration +// NewHTTPTransportWithClientCerts returns a new http configuration // used while communicating with the cloud backends. -func NewGatewayHTTPTransportWithClientCerts(clientCert, clientKey string) *http.Transport { - transport := newGatewayHTTPTransport(1 * time.Minute) +func NewHTTPTransportWithClientCerts(clientCert, clientKey string) *http.Transport { + transport := newHTTPTransport(1 * time.Minute) if clientCert != "" && clientKey != "" { ctx, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() @@ -636,19 +706,22 @@ func NewGatewayHTTPTransportWithClientCerts(clientCert, clientKey string) *http. return transport } -// NewGatewayHTTPTransport returns a new http configuration +// NewHTTPTransport returns a new http configuration // used while communicating with the cloud backends. 
-func NewGatewayHTTPTransport() *http.Transport { - return newGatewayHTTPTransport(1 * time.Minute) +func NewHTTPTransport() *http.Transport { + return newHTTPTransport(1 * time.Minute) } -func newGatewayHTTPTransport(timeout time.Duration) *http.Transport { +// Default values for dial timeout +const defaultDialTimeout = 5 * time.Second + +func newHTTPTransport(timeout time.Duration) *http.Transport { tr := newCustomHTTPTransport(&tls.Config{ RootCAs: globalRootCAs, ClientSessionCache: tls.NewLRUClientSessionCache(tlsClientSessionCacheSize), }, defaultDialTimeout)() - // Customize response header timeout for gateway transport. + // Customize response header timeout tr.ResponseHeaderTimeout = timeout return tr } @@ -735,6 +808,20 @@ func ceilFrac(numerator, denominator int64) (ceil int64) { return } +// cleanMinioInternalMetadataKeys removes X-Amz-Meta- prefix from minio internal +// encryption metadata. +func cleanMinioInternalMetadataKeys(metadata map[string]string) map[string]string { + newMeta := make(map[string]string, len(metadata)) + for k, v := range metadata { + if strings.HasPrefix(k, "X-Amz-Meta-X-Minio-Internal-") { + newMeta[strings.TrimPrefix(k, "X-Amz-Meta-")] = v + } else { + newMeta[k] = v + } + } + return newMeta +} + // pathClean is like path.Clean but does not return "." for // empty inputs, instead returns "empty" as is. func pathClean(p string) string { @@ -893,8 +980,6 @@ func getMinioMode() string { mode = globalMinioModeDistErasure } else if globalIsErasure { mode = globalMinioModeErasure - } else if globalIsGateway { - mode = globalMinioModeGatewayPrefix + globalGatewayName } else if globalIsErasureSD { mode = globalMinioModeErasureSD } diff --git a/cmd/utils_test.go b/cmd/utils_test.go index 2a0861882..463f12285 100644 --- a/cmd/utils_test.go +++ b/cmd/utils_test.go @@ -18,13 +18,11 @@ package cmd import ( - "bytes" "encoding/json" "errors" "fmt" "net/http" "net/url" - "os" "reflect" "strings" "testing" @@ -324,51 +322,6 @@ func TestContains(t *testing.T) { } } -// Test jsonLoad. -func TestJSONLoad(t *testing.T) { - format := newFormatFSV1() - b, err := json.Marshal(format) - if err != nil { - t.Fatal(err) - } - var gotFormat formatFSV1 - if err = jsonLoad(bytes.NewReader(b), &gotFormat); err != nil { - t.Fatal(err) - } - if *format != gotFormat { - t.Fatal("jsonLoad() failed to decode json") - } -} - -// Test jsonSave. -func TestJSONSave(t *testing.T) { - f, err := os.CreateTemp("", "") - if err != nil { - t.Fatal(err) - } - defer os.Remove(f.Name()) - - // Test to make sure formatFSSave overwrites and does not append. 
- format := newFormatFSV1() - if err = jsonSave(f, format); err != nil { - t.Fatal(err) - } - fi1, err := f.Stat() - if err != nil { - t.Fatal(err) - } - if err = jsonSave(f, format); err != nil { - t.Fatal(err) - } - fi2, err := f.Stat() - if err != nil { - t.Fatal(err) - } - if fi1.Size() != fi2.Size() { - t.Fatal("Size should not differs after jsonSave()", fi1.Size(), fi2.Size(), f.Name()) - } -} - // Test ceilFrac func TestCeilFrac(t *testing.T) { cases := []struct { @@ -480,9 +433,6 @@ func TestGetMinioMode(t *testing.T) { globalIsDistErasure, globalIsErasure = false, false testMinioMode(globalMinioModeFS) - - globalIsGateway, globalGatewayName = true, "azure" - testMinioMode(globalMinioModeGatewayPrefix + globalGatewayName) } func TestTimedValue(t *testing.T) { diff --git a/cmd/warm-backend-minio.go b/cmd/warm-backend-minio.go index 8207b37f4..30f9914bc 100644 --- a/cmd/warm-backend-minio.go +++ b/cmd/warm-backend-minio.go @@ -42,7 +42,7 @@ func newWarmBackendMinIO(conf madmin.TierMinIO) (*warmBackendMinIO, error) { creds := credentials.NewStaticV4(conf.AccessKey, conf.SecretKey, "") getRemoteTierTargetInstanceTransportOnce.Do(func() { - getRemoteTierTargetInstanceTransport = newGatewayHTTPTransport(10 * time.Minute) + getRemoteTierTargetInstanceTransport = newHTTPTransport(10 * time.Minute) }) opts := &minio.Options{ Creds: creds, diff --git a/cmd/warm-backend-s3.go b/cmd/warm-backend-s3.go index 1626682c4..110e27a15 100644 --- a/cmd/warm-backend-s3.go +++ b/cmd/warm-backend-s3.go @@ -118,7 +118,7 @@ func newWarmBackendS3(conf madmin.TierS3) (*warmBackendS3, error) { creds = credentials.NewStaticV4(conf.AccessKey, conf.SecretKey, "") } getRemoteTierTargetInstanceTransportOnce.Do(func() { - getRemoteTierTargetInstanceTransport = newGatewayHTTPTransport(10 * time.Minute) + getRemoteTierTargetInstanceTransport = newHTTPTransport(10 * time.Minute) }) opts := &minio.Options{ Creds: creds, diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go index 7f2f89e6c..845da8f35 100644 --- a/cmd/xl-storage.go +++ b/cmd/xl-storage.go @@ -488,7 +488,7 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates // if no xl.meta/xl.json found, skip the file. return sizeSummary{}, errSkipFile } - stopFn := globalScannerMetrics.log(scannerMetricScanObject, s.diskPath, PathJoin(item.bucket, item.objectPath())) + stopFn := globalScannerMetrics.log(scannerMetricScanObject, s.diskPath, pathJoin(item.bucket, item.objectPath())) defer stopFn() doneSz := globalScannerMetrics.timeSize(scannerMetricReadMetadata) diff --git a/cmd/xl-storage_test.go b/cmd/xl-storage_test.go index 8b511868e..0f32ee8bd 100644 --- a/cmd/xl-storage_test.go +++ b/cmd/xl-storage_test.go @@ -1317,6 +1317,7 @@ func TestXLStorageReadFileWithVerify(t *testing.T) { // TestXLStorageFormatFileChange - to test if changing the diskID makes the calls fail. func TestXLStorageFormatFileChange(t *testing.T) { + volume := "fail-vol" xlStorage, _, err := newXLStorageTestSetup(t) if err != nil { t.Fatalf("Unable to create xlStorage test setup, %s", err) diff --git a/docs/bucket/notifications/README.md b/docs/bucket/notifications/README.md index d0d58ef4c..d4a7140bf 100644 --- a/docs/bucket/notifications/README.md +++ b/docs/bucket/notifications/README.md @@ -2,8 +2,6 @@ Events occurring on objects in a bucket can be monitored using bucket event notifications. -> NOTE: Gateway mode does not support bucket notifications (except NAS gateway). 
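With the gateway exception removed above, bucket event notifications behave the same way on every MinIO server deployment. As a hedged illustration (not part of this patch), a client could watch a bucket with minio-go v7 roughly as follows; the endpoint, credentials, and bucket name below are placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
)

func main() {
	// Placeholder endpoint and credentials.
	clnt, err := minio.New("play.min.io", &minio.Options{
		Creds:  credentials.NewStaticV4("ACCESS-KEY", "SECRET-KEY", ""),
		Secure: true,
	})
	if err != nil {
		log.Fatalln(err)
	}

	// Stream object-created events for the placeholder bucket "mybucket".
	for info := range clnt.ListenBucketNotification(context.Background(), "mybucket", "", "", []string{"s3:ObjectCreated:*"}) {
		if info.Err != nil {
			log.Fatalln(info.Err)
		}
		for _, record := range info.Records {
			log.Printf("%s: %s/%s", record.EventName, record.S3.Bucket.Name, record.S3.Object.Key)
		}
	}
}
```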
- Various event types supported by MinIO server are | Supported Object Event Types | | | diff --git a/docs/bucket/quota/README.md b/docs/bucket/quota/README.md index a4924ef96..003ad5d40 100644 --- a/docs/bucket/quota/README.md +++ b/docs/bucket/quota/README.md @@ -4,8 +4,6 @@ Buckets can be configured to have `Hard` quota - it disallows writes to the bucket after configured quota limit is reached. -> NOTE: Bucket quotas are not supported under gateway or standalone single disk deployments. - ## Prerequisites - Install MinIO - [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#procedure). diff --git a/docs/compression/README.md b/docs/compression/README.md index 10c71a42f..8cf9fa4bd 100644 --- a/docs/compression/README.md +++ b/docs/compression/README.md @@ -116,10 +116,6 @@ Below is a list of common files and content-types which are typically not suitab All files with these extensions and mime types are excluded from compression, even if compression is enabled for all types. -### 5. Notes - -- MinIO does not support compression for Gateway implementations. - ## To test the setup To test this setup, practice put calls to the server using `mc` and use `mc ls` on diff --git a/docs/config/README.md b/docs/config/README.md index ae610bfce..d3370b3fc 100644 --- a/docs/config/README.md +++ b/docs/config/README.md @@ -89,38 +89,6 @@ MINIO_STORAGE_CLASS_RRS (string) set the parity count for reduced redun MINIO_STORAGE_CLASS_COMMENT (sentence) optionally add a comment to this setting ``` -### Cache - -MinIO provides caching storage tier for primarily gateway deployments, allowing you to cache content for faster reads, cost savings on repeated downloads from the cloud. - -``` -KEY: -cache add caching storage tier - -ARGS: -drives* (csv) comma separated mountpoints e.g. "/optane1,/optane2" -expiry (number) cache expiry duration in days e.g. "90" -quota (number) limit cache drive usage in percentage e.g. "90" -exclude (csv) comma separated wildcard exclusion patterns e.g. "bucket/*.tmp,*.exe" -after (number) minimum number of access before caching an object -comment (sentence) optionally add a comment to this setting -``` - -or environment variables - -``` -KEY: -cache add caching storage tier - -ARGS: -MINIO_CACHE_DRIVES* (csv) comma separated mountpoints e.g. "/optane1,/optane2" -MINIO_CACHE_EXPIRY (number) cache expiry duration in days e.g. "90" -MINIO_CACHE_QUOTA (number) limit cache drive usage in percentage e.g. "90" -MINIO_CACHE_EXCLUDE (csv) comma separated wildcard exclusion patterns e.g. "bucket/*.tmp,*.exe" -MINIO_CACHE_AFTER (number) minimum number of access before caching an object -MINIO_CACHE_COMMENT (sentence) optionally add a comment to this setting -``` - #### Etcd MinIO supports storing encrypted IAM assets in etcd, if KMS is configured. Please refer to how to encrypt your config and IAM credentials [here](https://github.com/minio/minio/blob/master/docs/kms/IAM.md). @@ -283,8 +251,6 @@ Example: the following setting will decrease the scanner speed by a factor of 3, Once set the scanner settings are automatically applied without the need for server restarts. -> NOTE: Data usage scanner is not supported under Gateway deployments. - ### Healing Healing is enabled by default. The following configuration settings allow for more staggered delay in terms of healing. The healing system by default adapts to the system speed and pauses up to '1sec' per object when the system has `max_io` number of concurrent requests. 
It is possible to adjust the `max_sleep` and `max_io` values thereby increasing the healing speed. The delays between each operation of the healer can be adjusted by the `mc admin config set alias/ heal max_sleep=1s` and maximum concurrent requests allowed before we start slowing things down can be configured with `mc admin config set alias/ heal max_io=30` . By default the wait delay is `1sec` beyond 10 concurrent operations. This means the healer will sleep *1 second* at max for each heal operation if there are more than *10* concurrent client requests. @@ -310,8 +276,6 @@ Example: The following settings will increase the heal operation speed by allowi Once set the healer settings are automatically applied without the need for server restarts. -> NOTE: Healing is not supported for Gateway deployments. - ## Environment only settings (not in config) ### Browser diff --git a/docs/disk-caching/DESIGN.md b/docs/disk-caching/DESIGN.md deleted file mode 100644 index 571c5bf4f..000000000 --- a/docs/disk-caching/DESIGN.md +++ /dev/null @@ -1,117 +0,0 @@ -# Disk Caching Design [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) - -This document explains some basic assumptions and design approach, limits of the disk caching feature. If you're looking to get started with disk cache, we suggest you go through the [getting started document](https://github.com/minio/minio/blob/master/docs/disk-caching/README.md) first. - -## Supported environment variables - -| Environment | Description | -| :---------------------- | ------------------------------------------------------------ | -| MINIO_CACHE_DRIVES | list of mounted cache drives or directories separated by "," | -| MINIO_CACHE_EXCLUDE | list of cache exclusion patterns separated by "," | -| MINIO_CACHE_QUOTA | maximum permitted usage of the cache in percentage (0-100) | -| MINIO_CACHE_AFTER | minimum number of access before caching an object | -| MINIO_CACHE_WATERMARK_LOW | % of cache quota at which cache eviction stops | -| MINIO_CACHE_WATERMARK_HIGH | % of cache quota at which cache eviction starts | -| MINIO_CACHE_RANGE | set to "on" or "off" caching of independent range requests per object, defaults to "on" | -| MINIO_CACHE_COMMIT | set to 'writeback' or 'writethrough' for upload caching | - -## Use-cases - -The edge serves as a gateway cache, creating an intermediary between the application and the public cloud. In this scenario, the gateways are backed by servers with a number of either hard drives or flash drives and are deployed in edge data centers. All access to the public cloud goes through these caches (write-through cache), so data is uploaded to the public cloud with strict consistency guarantee. Subsequent reads are served from the cache based on ETAG match or the cache control headers. - -This architecture reduces costs by decreasing the bandwidth needed to transfer data, improves performance by keeping data cached closer to the application and also reduces the operational cost - the data is still kept in the public cloud, just cached at the edge. - -Following example shows: - -- start MinIO gateway to s3 with edge caching enabled on '/mnt/drive1', '/mnt/drive2' and '/mnt/export1 ... /mnt/export24' drives -- exclude all objects under 'mybucket', exclude all objects with '.pdf' as extension. -- cache only those objects accessed atleast 3 times. -- cache garbage collection triggers in at high water mark (i.e. 
cache disk usage reaches 90% of cache quota) or at 72% and evicts oldest objects by access time until low watermark is reached ( 70% of cache quota) , i.e. 63% of disk usage. - -```sh -export MINIO_CACHE_DRIVES="/mnt/drive1,/mnt/drive2,/mnt/export{1..24}" -export MINIO_CACHE_EXCLUDE="mybucket/*,*.pdf" -export MINIO_CACHE_QUOTA=80 -export MINIO_CACHE_AFTER=3 -export MINIO_CACHE_WATERMARK_LOW=70 -export MINIO_CACHE_WATERMARK_HIGH=90 - -minio gateway s3 https://s3.amazonaws.com -``` - -### Run MinIO gateway with cache on Docker Container - -### Stable - -Cache drives need to have `strictatime` or `relatime` enabled for disk caching feature. In this example, mount the xfs file system on /mnt/cache with `strictatime` or `relatime` enabled. - -```sh -truncate -s 4G /tmp/data -``` - -### Build xfs filesystem on /tmp/data - -``` -mkfs.xfs /tmp/data -``` - -### Create mount dir - -``` -sudo mkdir /mnt/cache # -``` - -### Mount xfs on /mnt/cache with atime - -``` -sudo mount -o relatime /tmp/data /mnt/cache -``` - -### Start using the cached drive with S3 gateway - -``` -podman run --net=host -e MINIO_ROOT_USER={s3-access-key} -e MINIO_ROOT_PASSWORD={s3-secret-key} \ - -e MINIO_CACHE_DRIVES=/cache -e MINIO_CACHE_QUOTA=99 -e MINIO_CACHE_AFTER=0 \ - -e MINIO_CACHE_WATERMARK_LOW=90 -e MINIO_CACHE_WATERMARK_HIGH=95 \ - -v /mnt/cache:/cache quay.io/minio/minio gateway s3 --console-address ":9001" -``` - -## Assumptions - -- Disk cache quota defaults to 80% of your drive capacity. -- The cache drives are required to be a filesystem mount point with [`atime`](http://kerolasa.github.io/filetimes.html) support to be enabled on the drive. Alternatively writable directories with atime support can be specified in MINIO_CACHE_DRIVES -- Garbage collection sweep happens whenever cache disk usage reaches high watermark with respect to the configured cache quota , GC evicts least recently accessed objects until cache low watermark is reached with respect to the configured cache quota. Garbage collection runs a cache eviction sweep at 30 minute intervals. -- An object is only cached when drive has sufficient disk space. - -## Behavior - -Disk caching caches objects for **downloaded** objects i.e - -- Caches new objects for entries not found in cache while downloading. Otherwise serves from the cache. -- Bitrot protection is added to cached content and verified when object is served from cache. -- When an object is deleted, corresponding entry in cache if any is deleted as well. -- Cache continues to work for read-only operations such as GET, HEAD when backend is offline. -- Cache-Control and Expires headers can be used to control how long objects stay in the cache. ETag of cached objects are not validated with backend until expiry time as per the Cache-Control or Expires header is met. -- All range GET requests are cached by default independently, this may be not desirable in all situations when cache storage is limited and where downloading an entire object at once might be more optimal. To optionally turn this feature off, and allow downloading entire object in the background `export MINIO_CACHE_RANGE=off`. -- To ensure security guarantees, encrypted objects are normally not cached. However, if you wish to encrypt cached content on disk, you can set MINIO_CACHE_ENCRYPTION_SECRET_KEY environment variable to set a cache KMS -master key to automatically encrypt all cached content. - - Note that cache KMS master key is not recommended for use in production deployments. 
If the MinIO server/gateway machine is ever compromised, the cache KMS master key must also be treated as compromised. - Support for external KMS to manage cache KMS keys is on the roadmap,and would be ideal for production use cases. - -- `MINIO_CACHE_COMMIT` setting of `writethrough` allows caching of single and multipart uploads synchronously if enabled. By default, however single PUT operations are cached asynchronously on write without any special setting. - -- Partially cached stale uploads older than 24 hours are automatically cleaned up. - -- Expiration happens automatically based on the configured interval as explained above, frequently accessed objects stay alive in cache for a significantly longer time. - -> NOTE: `MINIO_CACHE_COMMIT` also has a value of `writeback` which allows staging single uploads in cache before committing to remote. It is not possible to stage multipart uploads in the cache for consistency reasons - hence, multipart uploads will be cached synchronously even if `writeback` is set. - -### Crash Recovery - -Upon restart of minio gateway after a running minio process is killed or crashes, disk caching resumes automatically. The garbage collection cycle resumes and any previously cached entries are served from cache. - -## Limits - -- Bucket policies are not cached, so anonymous operations are not supported when backend is offline. -- Objects are distributed using deterministic hashing among the list of configured cache drives. If one or more drives go offline, or cache drive configuration is altered in any way, performance may degrade to O(n) lookup time depending on the number of disks in cache. diff --git a/docs/disk-caching/README.md b/docs/disk-caching/README.md deleted file mode 100644 index 8f2bd6cfe..000000000 --- a/docs/disk-caching/README.md +++ /dev/null @@ -1,45 +0,0 @@ -# Disk Cache Quickstart Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) - -Disk caching feature here refers to the use of caching disks to store content closer to the tenants. For instance, if you access an object from a lets say `gateway s3` setup and download the object that gets cached, each subsequent request on the object gets served directly from the cache drives until it expires. This feature allows MinIO users to have - -- Object to be delivered with the best possible performance. -- Dramatic improvements for time to first byte for any object. - -## Get started - -### 1. Prerequisites - -Install MinIO - [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linuxe). - -### 2. Run MinIO gateway with cache - -Disk caching can be enabled by setting the `cache` environment variables for MinIO gateway . `cache` environment variables takes the mounted drive(s) or directory paths, any wildcard patterns to exclude from being cached,low and high watermarks for garbage collection and the minimum accesses before caching an object. - -Following example uses `/mnt/drive1`, `/mnt/drive2` ,`/mnt/cache1` ... `/mnt/cache3` for caching, while excluding all objects under bucket `mybucket` and all objects with '.pdf' as extension on a s3 gateway setup. Objects are cached if they have been accessed three times or more.Cache max usage is restricted to 80% of disk capacity in this example. Garbage collection is triggered when high watermark is reached - i.e. at 72% of cache disk usage and clears least recently accessed entries until the disk usage drops to low watermark - i.e. 
-
-## Explore Further
-
-- [Disk cache design](https://github.com/minio/minio/blob/master/docs/disk-caching/DESIGN.md)
-- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart)
-- [Use `aws-cli` with MinIO Server](https://min.io/docs/minio/linux/integrations/aws-cli-with-minio.html)
-- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html)
-- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html)
diff --git a/docs/gateway/README.md b/docs/gateway/README.md
deleted file mode 100644
index 7ee8d49f6..000000000
--- a/docs/gateway/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# MinIO Gateway [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io)
-
-**The MinIO Gateway has been deprecated as of February 2022 and removed from MinIO as of July 2022.**
-**See https://blog.min.io/deprecation-of-the-minio-gateway/ for more information.**
-
-## Support
-
-Gateway implementations are frozen and are not accepting any new features. Please report any bugs at . If you are an existing customer please login to for production support.
-
-## Implementations
-
-MinIO Gateway adds an Amazon S3 compatibility layer to third party NAS and Cloud Storage vendors. MinIO Gateway is implemented to facilitate migration of existing data from your existing legacy or cloud vendors to MinIO distributed server deployments.
-
-- [NAS](https://github.com/minio/minio/blob/master/docs/gateway/nas.md)
-- [S3](https://github.com/minio/minio/blob/master/docs/gateway/s3.md)
diff --git a/docs/gateway/nas.md b/docs/gateway/nas.md
deleted file mode 100644
index 3f1377720..000000000
--- a/docs/gateway/nas.md
+++ /dev/null
@@ -1,114 +0,0 @@
-# MinIO NAS Gateway [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io)
-
-> NAS gateway is deprecated and will be removed in the future; no more fresh deployments are supported.
-
-MinIO Gateway adds Amazon S3 compatibility to NAS storage. You may run multiple minio instances on the same shared NAS volume as a distributed object gateway.
-
-## Support
-
-Gateway implementations are frozen and are not accepting any new features. Please report any bugs at . If you are an existing customer please login to for production support.
-
-## Run MinIO Gateway for NAS Storage
-
-### Using Docker
-
-Please ensure to replace `/shared/nasvol` with the actual mount path.
-
-```
-podman run \
-  -p 9000:9000 \
-  -p 9001:9001 \
-  --name nas-s3 \
-  -e "MINIO_ROOT_USER=minio" \
-  -e "MINIO_ROOT_PASSWORD=minio123" \
-  -v /shared/nasvol:/container/vol \
-  quay.io/minio/minio gateway nas /container/vol --console-address ":9001"
-```
-
-### Using Binary
-
-```
-export MINIO_ROOT_USER=minio
-export MINIO_ROOT_PASSWORD=minio123
-minio gateway nas /shared/nasvol
-```
-
-## Test using MinIO Console
-
-MinIO Gateway comes with an embedded web based object browser. Point your web browser at the console address (see `--console-address` above) to ensure that your server has started successfully.
-
-| Dashboard | Creating a bucket |
-| ------------- | ------------- |
-| ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic1.png?raw=true) | ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic2.png?raw=true) |
-
-## Test using MinIO Client `mc`
-
-`mc` provides a modern alternative to UNIX commands such as ls, cat, cp, mirror, diff, etc. It supports filesystems and Amazon S3 compatible cloud storage services.
-
-### Configure `mc`
-
-```
-mc alias set mynas http://gateway-ip:9000 access_key secret_key
-```
-
-### List buckets on nas
-
-```
-mc ls mynas
-[2017-02-22 01:50:43 PST]     0B ferenginar/
-[2017-02-26 21:43:51 PST]     0B my-bucket/
-[2017-02-26 22:10:11 PST]     0B test-bucket1/
-```
-
-## Breaking changes
-
-There will be a breaking change after the release version 'RELEASE.2020-06-22T03-12-50Z'.
-
-### The file-based config settings are deprecated in NAS
-
-The support for admin config APIs will be removed. This will include getters and setters like `mc admin config get` and `mc admin config set` and any other `mc admin config` options. The reason for this change is to avoid unnecessary reloads of the config from the disk, and to comply with the environment variable based settings like other gateways.
-
-### Migration guide
-
-The users who have been using the older config approach should migrate to ENV settings by setting environment variables accordingly.
-
-For example,
-
-Consider the following webhook target config.
-
-```
-notify_webhook:1 endpoint=http://localhost:8080/ auth_token= queue_limit=0 queue_dir=/tmp/webhk client_cert= client_key=
-```
-
-The corresponding environment variable setting can be
-
-```
-export MINIO_NOTIFY_WEBHOOK_ENABLE_1=on
-export MINIO_NOTIFY_WEBHOOK_ENDPOINT_1=http://localhost:8080/
-export MINIO_NOTIFY_WEBHOOK_QUEUE_DIR_1=/tmp/webhk
-```
-
-> NOTE: Please check the docs for the corresponding ENV setting. Alternatively, we can obtain other ENVs in the form `mc admin config set alias/ --env`
-
-## Symlink support
-
-The NAS gateway implementation allows symlinks to regular files.
-
-### Behavior
-
-- For reads, symlinks resolve to the file the symlink points to.
-- For deletes
-  - Deleting a symlink deletes the symlink but not the real file to which the symlink points.
-  - Deleting the real file a symlink points to automatically makes the dangling symlink invisible.
-
-#### Caveats
-
-- Following directory symlinks is disallowed to avoid security issues, and leaving them as-is in the namespace would make the namespace very inconsistent.
-
-*Directory symlinks are not and will not be supported as there are no safe ways to handle them.*
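-
-As a quick illustration of the read behavior (paths, alias, and bucket name are illustrative, assuming the gateway exports `/shared/nasvol` as configured above):
-
-```sh
-# create a file and a symlink to it inside a bucket directory on the NAS volume
-echo "hello" > /shared/nasvol/mybucket/original.txt
-ln -s original.txt /shared/nasvol/mybucket/link.txt
-
-# a read through the gateway resolves the symlink to the real file
-mc cat mynas/mybucket/link.txt
-
-# deleting the symlink leaves original.txt untouched
-mc rm mynas/mybucket/link.txt
-```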
-
-## Explore Further
-
-- [`mc` command-line interface](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart)
-- [`aws` command-line interface](https://min.io/docs/minio/linux/integrations/aws-cli-with-minio.html)
-- [`minio-go` Go SDK](https://min.io/docs/minio/linux/developers/go/minio-go.html)
diff --git a/docs/gateway/s3.md b/docs/gateway/s3.md
deleted file mode 100644
index 616f2218a..000000000
--- a/docs/gateway/s3.md
+++ /dev/null
@@ -1,160 +0,0 @@
-# MinIO S3 Gateway [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io)
-
-MinIO S3 Gateway adds MinIO features like MinIO Console and disk caching to AWS S3 or any other AWS S3 compatible service.
-
-## Support
-
-Gateway implementations are frozen and are not accepting any new features. Please report any bugs at . If you are an existing customer please login to for production support.
-
-## Run MinIO Gateway for AWS S3
-
-As a prerequisite to run MinIO S3 gateway, you need a valid AWS S3 access key and secret key by default. Optionally, you can also set a custom access/secret key, when you have rotating AWS IAM credentials or AWS credentials through environment variables (e.g. AWS_ACCESS_KEY_ID).
-
-### Using Docker
-
-```
-podman run \
-  -p 9000:9000 \
-  -p 9001:9001 \
-  --name minio-s3 \
-  -e "MINIO_ROOT_USER=aws_s3_access_key" \
-  -e "MINIO_ROOT_PASSWORD=aws_s3_secret_key" \
-  quay.io/minio/minio gateway s3 --console-address ":9001"
-```
-
-### Using Binary
-
-```
-export MINIO_ROOT_USER=aws_s3_access_key
-export MINIO_ROOT_PASSWORD=aws_s3_secret_key
-minio gateway s3
-```
-
-### Using Binary in EC2
-
-Using IAM rotating credentials for AWS S3
-
-If you are using an S3 enabled IAM role on an EC2 instance for S3 access, MinIO will still require the env vars MINIO_ROOT_USER and MINIO_ROOT_PASSWORD to be set for its internal use. These may be set to any value which meets the length requirements. Access key length should be at least 3, and secret key length at least 8 characters.
-
-```
-export MINIO_ROOT_USER=custom_access_key
-export MINIO_ROOT_PASSWORD=custom_secret_key
-minio gateway s3
-```
-
-MinIO gateway will automatically look for a list of credential styles in the following order, if your backend URL is AWS S3.
-
-- AWS env vars (i.e. AWS_ACCESS_KEY_ID)
-- AWS creds file (i.e. AWS_SHARED_CREDENTIALS_FILE or ~/.aws/credentials)
-- IAM profile based credentials. (performs an HTTP call to a pre-defined endpoint, only valid inside configured ec2 instances)
-
-These are the minimum permissions required if you wish to provide restricted access with your AWS credentials; please make sure you have the following IAM policy attached for your AWS user or roles.
-
-```json
-{
-  "Version": "2012-10-17",
-  "Statement": [
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:GetBucketLocation"
-      ],
-      "Resource": [
-        "arn:aws:s3:::*"
-      ]
-    },
-    {
-      "Effect": "Allow",
-      "Action": [
-        "s3:PutObject",
-        "s3:GetObject",
-        "s3:ListBucket",
-        "s3:DeleteObject",
-        "s3:GetBucketAcl"
-      ],
-      "Resource": [
-        "arn:aws:s3:::mybucket",
-        "arn:aws:s3:::mybucket/*"
-      ]
-    }
-  ]
-}
-```
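-
-For reference, such a policy could be attached to an IAM user with the AWS CLI along these lines (user and policy names are placeholders):
-
-```sh
-# save the JSON above as minio-gateway-policy.json, then attach it
-aws iam put-user-policy \
-  --user-name my-gateway-user \
-  --policy-name minio-gateway-access \
-  --policy-document file://minio-gateway-policy.json
-```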
-
-## Run MinIO Gateway for AWS S3 compatible services
-
-As a prerequisite to run MinIO S3 gateway on an AWS S3 compatible service, you need a valid access key, secret key and service endpoint.
-
-## Run MinIO Gateway with double-encryption
-
-MinIO gateway to S3 supports encryption of data at rest. Three types of encryption modes are supported:
-
-- encryption can be set to ``pass-through`` to the backend, for SSE-S3 only; SSE-C is not allowed to pass through
-- ``single encryption`` (at the gateway)
-- ``double encryption`` (single encryption at the gateway and pass-through to the backend)
-
-This can be specified by setting the MINIO_GATEWAY_SSE environment variable. If MINIO_GATEWAY_SSE and KMS are not set up, all encryption headers are passed through to the backend. If KMS environment variables are set up, ``single encryption`` is automatically performed at the gateway and the encrypted object is saved at the backend.
-
-To specify ``double encryption``, the MINIO_GATEWAY_SSE environment variable needs to be set to "s3" for sse-s3
-and "c" for sse-c encryption. More than one encryption option can be set, delimited by ";". Objects are encrypted at the gateway and the gateway also does a pass-through to the backend. Note that in the case of SSE-C encryption, the gateway derives a unique SSE-C key for pass-through from the SSE-C client key using a key derivation function (KDF).
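-
-To summarize, the three modes map to environment settings roughly as follows (a sketch; the KMS itself is configured separately via the `MINIO_KMS_KES_*` variables shown below):
-
-```sh
-# pass-through: leave MINIO_GATEWAY_SSE unset and configure no KMS;
-# all encryption headers are forwarded to the backend as-is
-
-# single encryption: configure KMS only; objects are encrypted at the gateway
-
-# double encryption: configure KMS and additionally set
-export MINIO_GATEWAY_SSE="s3;c"   # "s3", "c", or both, delimited by ";"
-```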
-
-```sh
-curl -sSL --tlsv1.2 \
-  -O 'https://raw.githubusercontent.com/minio/kes/master/root.key' \
-  -O 'https://raw.githubusercontent.com/minio/kes/master/root.cert'
-```
-
-```sh
-export MINIO_GATEWAY_SSE="s3;c"
-export MINIO_KMS_KES_ENDPOINT=https://play.min.io:7373
-export MINIO_KMS_KES_KEY_FILE=root.key
-export MINIO_KMS_KES_CERT_FILE=root.cert
-export MINIO_KMS_KES_KEY_NAME=my-minio-key
-minio gateway s3
-```
-
-### Using Docker (double encryption)
-
-```
-podman run -p 9000:9000 --name minio-s3 \
-  -e "MINIO_ROOT_USER=access_key" \
-  -e "MINIO_ROOT_PASSWORD=secret_key" \
-  quay.io/minio/minio gateway s3 https://s3_compatible_service_endpoint:port
-```
-
-### Using Binary (double encryption)
-
-```
-export MINIO_ROOT_USER=access_key
-export MINIO_ROOT_PASSWORD=secret_key
-minio gateway s3 https://s3_compatible_service_endpoint:port
-```
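-
-To verify that objects written through the gateway carry encryption metadata, one can inspect them, for example with `mc stat` (alias, bucket, and object names are illustrative):
-
-```sh
-mc alias set mygw http://localhost:9000 access_key secret_key
-mc cp ./object.bin mygw/mybucket/
-mc stat mygw/mybucket/object.bin   # the metadata section reports the server-side encryption applied
-```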
-
-## MinIO Caching
-
-MinIO edge caching allows storing content closer to the applications. Frequently accessed objects are stored in a local disk based cache. Edge caching with the MinIO gateway feature allows
-
-- Dramatic improvements in time to first byte for any object.
-- Avoiding S3 [data transfer charges](https://aws.amazon.com/s3/pricing/).
-
-## MinIO Console
-
-MinIO Gateway comes with an embedded web based object browser. Point your web browser at the console address (see `--console-address` above) to ensure that your server has started successfully.
-
-| Dashboard | Creating a bucket |
-| ------------- | ------------- |
-| ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic1.png?raw=true) | ![Dashboard](https://github.com/minio/minio/blob/master/docs/screenshots/pic2.png?raw=true) |
-
-With MinIO S3 gateway, you can use MinIO Console to explore AWS S3 based objects.
-
-### Known limitations
-
-- Bucket Notification APIs are not supported.
-- Bucket Locking APIs are not supported.
-- Versioned buckets on AWS S3 are not supported from the gateway layer. Gateway is meant to be used with non-versioned buckets.
-
-## Explore Further
-
-- [`mc` command-line interface](https://min.io/docs/minio/linux/reference/minio-mc.html)
-- [`aws` command-line interface](https://min.io/docs/minio/linux/integrations/aws-cli-with-minio.html)
-- [`minio-go` Go SDK](https://min.io/docs/minio/linux/developers/go/minio-go.html)
diff --git a/docs/metrics/healthcheck/README.md b/docs/metrics/healthcheck/README.md
index 91582fc8d..0b3c686e0 100644
--- a/docs/metrics/healthcheck/README.md
+++ b/docs/metrics/healthcheck/README.md
@@ -4,7 +4,7 @@ MinIO server exposes three un-authenticated, healthcheck endpoints liveness prob
 
 ## Liveness probe
 
-This probe always responds with '200 OK'. Only fails if 'etcd' is configured and unreachable. This behavior is specific to gateway. When liveness probe fails, Kubernetes like platforms restart the container.
+This probe always responds with '200 OK'. Only fails if 'etcd' is configured and unreachable. When liveness probe fails, Kubernetes like platforms restart the container.
 
 ```
 livenessProbe:
@@ -21,7 +21,7 @@ livenessProbe:
 
 ## Readiness probe
 
-This probe always responds with '200 OK'. Only fails if 'etcd' is configured and unreachable. This behavior is specific to gateway. When readiness probe fails, Kubernetes like platforms turn-off routing to the container.
+This probe always responds with '200 OK'. Only fails if 'etcd' is configured and unreachable. When readiness probe fails, Kubernetes like platforms turn-off routing to the container.
 
 ```
 readinessProbe:
diff --git a/docs/multi-user/README.md b/docs/multi-user/README.md
index 56f6c720e..6fc92c075 100644
--- a/docs/multi-user/README.md
+++ b/docs/multi-user/README.md
@@ -10,7 +10,7 @@ In this document we will explain in detail on how to configure multiple users.
 
 - Install mc - [MinIO Client Quickstart Guide](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart)
 - Install MinIO - [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linux)
-- Configure etcd (optional needed only in gateway or federation mode) - [Etcd V3 Quickstart Guide](https://github.com/minio/minio/blob/master/docs/sts/etcd.md)
+- Configure etcd - [Etcd V3 Quickstart Guide](https://github.com/minio/minio/blob/master/docs/sts/etcd.md)
 
 ### 2. Create a new user with canned policy
diff --git a/docs/shared-backend/DESIGN.md b/docs/shared-backend/DESIGN.md
deleted file mode 100644
index 4755c62c1..000000000
--- a/docs/shared-backend/DESIGN.md
+++ /dev/null
@@ -1,138 +0,0 @@
-# Introduction [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io)
-
-This feature allows MinIO to serve a shared NAS drive across multiple MinIO instances. There are no special configuration changes required to enable this feature. Access to files stored on the NAS volume is locked and synchronized by default.
-
-## Motivation
-
-Since MinIO instances serve the purpose of a single tenant, there is an increasing requirement where users want to run multiple MinIO instances on the same backend, which is managed by an existing NAS (NFS, GlusterFS, other distributed filesystems) rather than a local disk. This feature is also implemented with minimal disruption in mind for the user and the overall UI.
-
-## Restrictions
-
-* A PutObject() is blocked and waits if another GetObject() is in progress.
-* A CompleteMultipartUpload() is blocked and waits if another PutObject() or GetObject() is in progress.
-* Cannot run FS mode as a remote disk RPC.
-
-## How To Run?
-
-Running MinIO instances on a shared backend is no different from running on a stand-alone disk. There are no special configuration changes required to enable this feature. Access to files stored on the NAS volume is locked and synchronized by default. The following examples will clarify this further for each operating system of your choice:
-
-### Ubuntu 16.04 LTS
-
-Example 1: Start MinIO instance on a shared backend mounted and available at `/path/to/nfs-volume`.
-
-On linux server1
-
-```shell
-minio gateway nas /path/to/nfs-volume
-```
-
-On linux server2
-
-```shell
-minio gateway nas /path/to/nfs-volume
-```
-
-### Windows 2012 Server
-
-Example 1: Start MinIO instance on a shared backend mounted and available at `\\remote-server\cifs`.
-
-On windows server1
-
-```cmd
-minio.exe gateway nas \\remote-server\cifs\data
-```
-
-On windows server2
-
-```cmd
-minio.exe gateway nas \\remote-server\cifs\data
-```
-
-Alternatively, if `\\remote-server\cifs` is mounted as the `D:\` drive:
-
-On windows server1
-
-```cmd
-minio.exe gateway nas D:\data
-```
-
-On windows server2
-
-```cmd
-minio.exe gateway nas D:\data
-```
-
-## Architecture
-
-### POSIX/Win32 Locks
-
-#### Lock process
-
-Within the same MinIO instance, locking is handled by the existing in-memory namespace locks (**sync.RWMutex** et al.). To synchronize locks between many MinIO instances, we leverage POSIX `fcntl()` locks on Unixes and the `LockFileEx()` Win32 API on Windows. A write lock request blocks if there are any read locks held by a neighboring MinIO instance on the same path. So does a read lock request if there is any active write lock in progress.
-
-#### Unlock process
-
-Unlocking of filesystem locks happens by simply closing the file descriptor (fd) which was initially requested for the lock operation. Closing the fd tells the kernel to relinquish all the locks held on the path by the current process. This gets trickier when there are many readers on the same path within the same process; it would mean that closing an fd relinquishes locks for all concurrent readers as well. To properly manage this situation a simple fd reference count is implemented: the same fd is shared between many readers. When readers start closing the fd, we reduce the reference count; once the reference count reaches zero, we can be sure that there are no more active readers, so we proceed and close the underlying file descriptor, which relinquishes the read lock held on the path.
-
-This doesn't apply to writes because there is always one writer and many readers for any unique object.
-
-### Handling Concurrency
-
-An example here shows how the contention is handled with GetObject().
-
-GetObject() holds a read lock on `fs.json`.
-
-```go
-	fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
-	rlk, err := fs.rwPool.Open(fsMetaPath)
-	if err != nil {
-		return toObjectErr(err, bucket, object)
-	}
-	defer rlk.Close()
-
-... you can perform other operations here ...
-
-	_, err = io.Copy(writer, reader)
-
-... after successful copy operation unlocks the read lock ...
-```
-
-A concurrent PutObject is requested on the same object; PutObject() attempts a write lock on `fs.json`.
-
-```go
-	fsMetaPath := pathJoin(fs.fsPath, minioMetaBucket, bucketMetaPrefix, bucket, object, fsMetaJSONFile)
-	wlk, err := fs.rwPool.Create(fsMetaPath)
-	if err != nil {
-		return ObjectInfo{}, toObjectErr(err, bucket, object)
-	}
-	// This close will allow for locks to be synchronized on `fs.json`.
-	defer wlk.Close()
-```
-
-From the snippets above, one can notice that until GetObject() finishes writing to the client, the following portion of the PutObject() code will block.
-
-```go
-	wlk, err := fs.rwPool.Create(fsMetaPath)
-```
-
-This restriction is needed so that corrupted data is not returned to the client in between I/O. The logic works vice-versa as well: during an on-going PutObject(), a GetObject() would wait for the PutObject() to complete.
-
-### Caveats (concurrency)
-
-Consider for example 3 servers sharing the same backend
-
-On minio1
-
-* DeleteObject(object1) --> lock acquired on `fs.json` while object1 is being deleted.
-
-On minio2
-
-* PutObject(object1) --> lock waiting until DeleteObject finishes.
-
-On minio3
-
-* PutObject(object1) --> (concurrent request during PutObject minio2 checking if `fs.json` exists)
-
-Once the lock is acquired, minio2 validates whether the file really exists, to avoid obtaining a lock on an fd which has already been deleted. But this situation is a race with a third server which may be attempting to write the same file before minio2 can validate whether the file exists. It is potentially possible that `fs.json` has been created in the meantime, so the lock acquired by minio2 might be invalid, and this can lead to inconsistency.
-
-This is a known problem and cannot be solved by POSIX fcntl locks. These are considered to be the limits of a shared filesystem.
diff --git a/docs/shared-backend/README.md b/docs/shared-backend/README.md
deleted file mode 100644
index 843d78ba7..000000000
--- a/docs/shared-backend/README.md
+++ /dev/null
@@ -1,70 +0,0 @@
-# Shared Backend MinIO Quickstart Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/)
-
-MinIO shared mode lets you use a single [NAS](https://en.wikipedia.org/wiki/Network-attached_storage) (like NFS, GlusterFS, and other
-distributed filesystems) as the storage backend for multiple MinIO servers. Synchronization among MinIO servers is taken care of by design.
-Read more about the MinIO shared mode design [here](https://github.com/minio/minio/blob/master/docs/shared-backend/DESIGN.md).
-
-MinIO shared mode is developed to solve several real world use cases, without any special configuration changes. Some of these are
-
-- You have already invested in NAS and would like to use MinIO to add S3 compatibility to your storage tier.
-- You need to use NAS with an S3 interface due to your application architecture requirements.
-- You expect huge traffic and need a load balanced S3 compatible server, serving files from a single NAS backend.
-
-With a proxy running in front of multiple, shared mode MinIO servers, it is very easy to create a Highly Available, load balanced, AWS S3 compatible storage system.
-
-## Get started
-
-If you're familiar with stand-alone MinIO setup, the installation and running remain the same.
-
-## 1. Prerequisites
-
-Install MinIO - [MinIO Quickstart Guide](https://min.io/docs/minio/linux/index.html#quickstart-for-linux).
-
-## 2. Run MinIO on Shared Backend
-
-To run MinIO shared backend instances, you need to start multiple MinIO servers pointing to the same backend storage. We'll see examples on how to do this in the following sections.
-
-- All the nodes running shared MinIO need to have the same access key and secret key. To achieve this, we export the access key and secret key as environment variables on all the nodes before executing the MinIO server command.
-- The drive paths below are for demonstration purposes only; you need to replace these with the actual drive paths/folders.
-
-### MinIO shared mode on Ubuntu 16.04 LTS
-
-You'll need the path to the shared volume, e.g. `/path/to/nfs-volume`. Then run the following commands on all the nodes you'd like to launch MinIO.
-
-```sh
-export MINIO_ROOT_USER=
-export MINIO_ROOT_PASSWORD=
-minio gateway nas /path/to/nfs-volume
-```
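-
-For a quick single-host simulation of shared mode, one could also start two gateway processes on different ports against the same volume (ports here are arbitrary):
-
-```sh
-export MINIO_ROOT_USER=minio
-export MINIO_ROOT_PASSWORD=minio123
-minio gateway nas /path/to/nfs-volume --address ":9000" --console-address ":9001" &
-minio gateway nas /path/to/nfs-volume --address ":9002" --console-address ":9003" &
-```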
-
-### MinIO shared mode on Windows 2012 Server
-
-You'll need the path to the shared volume, e.g. `\\remote-server\smb`. Then run the following commands on all the nodes you'd like to launch MinIO.
-
-```cmd
-set MINIO_ROOT_USER=my-username
-set MINIO_ROOT_PASSWORD=my-password
-minio.exe gateway nas \\remote-server\smb\export
-```
-
-### Windows Tip
-
-If a remote volume, e.g. `\\remote-server\smb`, is mounted as a drive, e.g. `M:\`, you can use the [`net use`](https://technet.microsoft.com/en-us/library/bb490717.aspx) command to map the drive to a folder.
-
-```cmd
-set MINIO_ROOT_USER=my-username
-set MINIO_ROOT_PASSWORD=my-password
-net use m: \\remote-server\smb\export /P:Yes
-minio.exe gateway nas M:\export
-```
-
-## 3. Test your setup
-
-To test this setup, access the MinIO server via browser or [`mc`](https://min.io/docs/minio/linux/reference/minio-mc.html#quickstart). You’ll see the uploaded files are accessible from all the MinIO shared backend endpoints.
-
-## Explore Further
-
-- [Use `mc` with MinIO Server](https://min.io/docs/minio/linux/reference/minio-mc.html)
-- [Use `aws-cli` with MinIO Server](https://min.io/docs/minio/linux/integrations/aws-cli-with-minio.html)
-- [Use `minio-go` SDK with MinIO Server](https://min.io/docs/minio/linux/developers/go/minio-go.html)
-- [The MinIO documentation website](https://min.io/docs/minio/linux/index.html)
diff --git a/docs/sts/README.md b/docs/sts/README.md
index 72aea5f2b..87098ce6c 100644
--- a/docs/sts/README.md
+++ b/docs/sts/README.md
@@ -42,7 +42,7 @@ In this document we will explain in detail on how to configure all the prerequis
 ### Prerequisites
 
 - [Configuring keycloak](https://github.com/minio/minio/blob/master/docs/sts/keycloak.md) or [Configuring Casdoor](https://github.com/minio/minio/blob/master/docs/sts/casdoor.md)
-- [Configuring etcd (optional needed only in gateway or federation mode)](https://github.com/minio/minio/blob/master/docs/sts/etcd.md)
+- [Configuring etcd](https://github.com/minio/minio/blob/master/docs/sts/etcd.md)
 
 ### Setup MinIO with Identity Provider
 
@@ -68,21 +68,6 @@ export MINIO_IDENTITY_OPENID_CLIENT_ID="843351d4-1080-11ea-aa20-271ecba3924a"
 minio server /mnt/data
 ```
 
-### Setup MinIO Gateway with Keycloak and Etcd
-
-Make sure we have followed the previous step and configured each software independently. Once done, we can proceed to use the MinIO STS API and MinIO gateway to use these credentials to perform object API operations.
-
-> NOTE: MinIO gateway requires etcd to be configured to use the STS API.
-
-```
-export MINIO_ROOT_USER=aws_access_key
-export MINIO_ROOT_PASSWORD=aws_secret_key
-export MINIO_IDENTITY_OPENID_CONFIG_URL=http://localhost:8080/auth/realms/demo/.well-known/openid-configuration
-export MINIO_IDENTITY_OPENID_CLIENT_ID="843351d4-1080-11ea-aa20-271ecba3924a"
-export MINIO_ETCD_ENDPOINTS=http://localhost:2379
-minio gateway s3
-```
-
 ### Using WebIdentity API
 
 On another terminal run `web-identity.go`, a sample client application which obtains JWT id_tokens from an identity provider, in our case it's Keycloak.
Uses the returned id_token response to get new temporary credentials from the MinIO server using the STS API call `AssumeRoleWithWebIdentity`. diff --git a/helm/minio/Chart.yaml b/helm/minio/Chart.yaml index 0ced4c58d..3744d29f5 100644 --- a/helm/minio/Chart.yaml +++ b/helm/minio/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v1 description: Multi-Cloud Object Storage name: minio -version: 4.1.0 +version: 5.0.0 appVersion: RELEASE.2022-10-24T18-35-07Z keywords: - minio diff --git a/helm/minio/templates/gateway-deployment.yaml b/helm/minio/templates/gateway-deployment.yaml deleted file mode 100644 index b14f86bd0..000000000 --- a/helm/minio/templates/gateway-deployment.yaml +++ /dev/null @@ -1,173 +0,0 @@ -{{- if eq .Values.mode "gateway" }} -{{ $scheme := "http" }} -{{- if .Values.tls.enabled }} -{{ $scheme = "https" }} -{{ end }} -{{ $bucketRoot := or ($.Values.bucketRoot) ($.Values.mountPath) }} -apiVersion: {{ template "minio.deployment.apiVersion" . }} -kind: Deployment -metadata: - name: {{ template "minio.fullname" . }} - namespace: {{ .Release.Namespace | quote }} - labels: - app: {{ template "minio.name" . }} - chart: {{ template "minio.chart" . }} - release: {{ .Release.Name }} - heritage: {{ .Release.Service }} -{{- if .Values.additionalLabels }} -{{ toYaml .Values.additionalLabels | trimSuffix "\n" | indent 4 }} -{{- end }} -{{- if .Values.additionalAnnotations }} - annotations: -{{ toYaml .Values.additionalAnnotations | trimSuffix "\n" | indent 4 }} -{{- end }} -spec: - strategy: - type: {{ .Values.DeploymentUpdate.type }} - {{- if eq .Values.DeploymentUpdate.type "RollingUpdate" }} - rollingUpdate: - maxSurge: {{ .Values.DeploymentUpdate.maxSurge }} - maxUnavailable: {{ .Values.DeploymentUpdate.maxUnavailable }} - {{- end}} - replicas: {{ .Values.gateway.replicas }} - selector: - matchLabels: - app: {{ template "minio.name" . }} - release: {{ .Release.Name }} - template: - metadata: - name: {{ template "minio.fullname" . }} - labels: - app: {{ template "minio.name" . }} - release: {{ .Release.Name }} -{{- if .Values.podLabels }} -{{ toYaml .Values.podLabels | indent 8 }} -{{- end }} - annotations: -{{- if not .Values.ignoreChartChecksums }} - checksum/secrets: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} -{{- end }} -{{- if .Values.podAnnotations }} -{{ toYaml .Values.podAnnotations | trimSuffix "\n" | indent 8 }} -{{- end }} - spec: - {{- if .Values.priorityClassName }} - priorityClassName: "{{ .Values.priorityClassName }}" - {{- end }} -{{- if and .Values.securityContext.enabled .Values.persistence.enabled }} - securityContext: - runAsUser: {{ .Values.securityContext.runAsUser }} - runAsGroup: {{ .Values.securityContext.runAsGroup }} - fsGroup: {{ .Values.securityContext.fsGroup }} -{{- end }} -{{ if .Values.serviceAccount.create }} - serviceAccountName: {{ .Values.serviceAccount.name }} -{{- end }} - containers: - - name: {{ .Chart.Name }} - image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" - imagePullPolicy: {{ .Values.image.pullPolicy }} - command: - - "/bin/sh" - - "-ce" - {{- if eq .Values.gateway.type "nas" }} - - "/usr/bin/docker-entrypoint.sh minio gateway nas {{ $bucketRoot }} -S {{ .Values.certsPath }} --address :{{ .Values.minioAPIPort }} --console-address :{{ .Values.minioConsolePort }} {{- template "minio.extraArgs" . 
}} " - {{- end }} - volumeMounts: - - name: minio-user - mountPath: "/tmp/credentials" - readOnly: true - {{- if .Values.persistence.enabled }} - - name: export - mountPath: {{ .Values.mountPath }} - {{- if .Values.persistence.subPath }} - subPath: "{{ .Values.persistence.subPath }}" - {{- end }} - {{- end }} - {{- if .Values.extraSecret }} - - name: extra-secret - mountPath: "/tmp/minio-config-env" - {{- end }} - {{- include "minio.tlsKeysVolumeMount" . | indent 12 }} - ports: - - name: {{ $scheme }} - containerPort: {{ .Values.minioAPIPort }} - - name: {{ $scheme }}-console - containerPort: {{ .Values.minioConsolePort }} - env: - - name: MINIO_ROOT_USER - valueFrom: - secretKeyRef: - name: {{ template "minio.secretName" . }} - key: rootUser - - name: MINIO_ROOT_PASSWORD - valueFrom: - secretKeyRef: - name: {{ template "minio.secretName" . }} - key: rootPassword - {{- if .Values.extraSecret }} - - name: MINIO_CONFIG_ENV_FILE - value: "/tmp/minio-config-env/config.env" - {{- end}} - {{- if .Values.metrics.serviceMonitor.public }} - - name: MINIO_PROMETHEUS_AUTH_TYPE - value: "public" - {{- end}} - {{- if .Values.etcd.endpoints }} - - name: MINIO_ETCD_ENDPOINTS - value: {{ join "," .Values.etcd.endpoints | quote }} - {{- if .Values.etcd.clientCert }} - - name: MINIO_ETCD_CLIENT_CERT - value: "/tmp/credentials/etcd_client.crt" - {{- end }} - {{- if .Values.etcd.clientCertKey }} - - name: MINIO_ETCD_CLIENT_CERT_KEY - value: "/tmp/credentials/etcd_client.key" - {{- end }} - {{- if .Values.etcd.pathPrefix }} - - name: MINIO_ETCD_PATH_PREFIX - value: {{ .Values.etcd.pathPrefix }} - {{- end }} - {{- if .Values.etcd.corednsPathPrefix }} - - name: MINIO_ETCD_COREDNS_PATH - value: {{ .Values.etcd.corednsPathPrefix }} - {{- end }} - {{- end }} - {{- range $key, $val := .Values.environment }} - - name: {{ $key }} - value: {{ $val | quote }} - {{- end}} - resources: -{{ toYaml .Values.resources | indent 12 }} -{{- with .Values.nodeSelector }} - nodeSelector: -{{ toYaml . | indent 8 }} -{{- end }} -{{- include "minio.imagePullSecrets" . | indent 6 }} -{{- with .Values.affinity }} - affinity: -{{ toYaml . | indent 8 }} -{{- end }} -{{- with .Values.tolerations }} - tolerations: -{{ toYaml . | indent 8 }} -{{- end }} - volumes: - - name: export - {{- if .Values.persistence.enabled }} - persistentVolumeClaim: - claimName: {{ .Values.persistence.existingClaim | default (include "minio.fullname" .) }} - {{- else }} - emptyDir: {} - {{- end }} - - name: minio-user - secret: - secretName: {{ template "minio.secretName" . }} - {{- if .Values.extraSecret }} - - name: extra-secret - secret: - secretName: {{ .Values.extraSecret }} - {{- end }} - {{- include "minio.tlsKeysVolume" . | indent 8 }} -{{- end }} diff --git a/helm/minio/values.yaml b/helm/minio/values.yaml index 0ad74ca64..4dd63e697 100644 --- a/helm/minio/values.yaml +++ b/helm/minio/values.yaml @@ -28,8 +28,8 @@ mcImage: tag: RELEASE.2022-10-20T23-26-33Z pullPolicy: IfNotPresent -## minio mode, i.e. standalone or distributed or gateway. -mode: distributed ## other supported values are "standalone", "gateway" +## minio mode, i.e. standalone or distributed +mode: distributed ## other supported values are "standalone" ## Additional labels to include with deployment or statefulset additionalLabels: {} @@ -118,11 +118,6 @@ replicas: 16 # Number of expanded MinIO clusters pools: 1 -# Deploy if 'mode == gateway' - 4 replicas. -gateway: - type: "nas" # currently only "nas" are supported. 
- replicas: 4 - ## TLS Settings for MinIO tls: enabled: false diff --git a/index.yaml b/index.yaml index e9f5bf9ae..c5a4480b5 100644 --- a/index.yaml +++ b/index.yaml @@ -3,7 +3,29 @@ entries: minio: - apiVersion: v1 appVersion: RELEASE.2022-10-24T18-35-07Z - created: "2022-10-24T13:36:32.958627308-07:00" + created: "2022-10-24T16:33:53.41075068-07:00" + description: Multi-Cloud Object Storage + digest: 6215c800d84fd4c40e4fb4142645fc1c6a039c251776a3cc8c11a24b9e3b59c7 + home: https://min.io + icon: https://min.io/resources/img/logo/MINIO_wordmark.png + keywords: + - minio + - storage + - object-storage + - s3 + - cluster + maintainers: + - email: dev@minio.io + name: MinIO, Inc + name: minio + sources: + - https://github.com/minio/minio + urls: + - https://charts.min.io/helm-releases/minio-5.0.0.tgz + version: 5.0.0 + - apiVersion: v1 + appVersion: RELEASE.2022-10-24T18-35-07Z + created: "2022-10-24T16:33:53.397345207-07:00" description: Multi-Cloud Object Storage digest: 2d3d884490ea1127742f938bc9382844bae713caae08b3308f766f3c9000659a home: https://min.io @@ -25,7 +47,7 @@ entries: version: 4.1.0 - apiVersion: v1 appVersion: RELEASE.2022-09-17T00-09-45Z - created: "2022-10-24T13:36:32.946784661-07:00" + created: "2022-10-24T16:33:53.347100337-07:00" description: Multi-Cloud Object Storage digest: 6f16f2dbfed91ab81a7fae60b6ea32f554365bd27bf5fda55b64a0fa264f4252 home: https://min.io @@ -47,7 +69,7 @@ entries: version: 4.0.15 - apiVersion: v1 appVersion: RELEASE.2022-09-01T23-53-36Z - created: "2022-10-24T13:36:32.945387849-07:00" + created: "2022-10-24T16:33:53.33575129-07:00" description: Multi-Cloud Object Storage digest: 35d89d8f49d53ea929466fb88ee26123431326033f1387e6b2d536a629c0a398 home: https://min.io @@ -69,7 +91,7 @@ entries: version: 4.0.14 - apiVersion: v1 appVersion: RELEASE.2022-08-22T23-53-06Z - created: "2022-10-24T13:36:32.944043161-07:00" + created: "2022-10-24T16:33:53.32653023-07:00" description: Multi-Cloud Object Storage digest: 5b86937ca88d9f6046141fdc2b1cc54760435ed92d289cd0a115fa7148781d4e home: https://min.io @@ -91,7 +113,7 @@ entries: version: 4.0.13 - apiVersion: v1 appVersion: RELEASE.2022-08-13T21-54-44Z - created: "2022-10-24T13:36:32.942615607-07:00" + created: "2022-10-24T16:33:53.319145721-07:00" description: Multi-Cloud Object Storage digest: 2d9c227c0f46ea8bdef4d760c212156fd4c6623ddc5406779c569fe925527787 home: https://min.io @@ -113,7 +135,7 @@ entries: version: 4.0.12 - apiVersion: v1 appVersion: RELEASE.2022-08-05T23-27-09Z - created: "2022-10-24T13:36:32.940413814-07:00" + created: "2022-10-24T16:33:53.316096268-07:00" description: Multi-Cloud Object Storage digest: 6caaffcb636e040cd7e8bc4883a1674a673757f4781c32d53b5ec0f41fea3944 home: https://min.io @@ -135,7 +157,7 @@ entries: version: 4.0.11 - apiVersion: v1 appVersion: RELEASE.2022-08-02T23-59-16Z - created: "2022-10-24T13:36:32.939217336-07:00" + created: "2022-10-24T16:33:53.313166523-07:00" description: Multi-Cloud Object Storage digest: 841d87788fb094d6a7d8a91e91821fe1e847bc952e054c781fc93742d112e18a home: https://min.io @@ -157,7 +179,7 @@ entries: version: 4.0.10 - apiVersion: v1 appVersion: RELEASE.2022-08-02T23-59-16Z - created: "2022-10-24T13:36:32.957369673-07:00" + created: "2022-10-24T16:33:53.394107185-07:00" description: Multi-Cloud Object Storage digest: 6f1a78382df3215deac07495a5e7de7009a1153b4cf6cb565630652a69aec4cf home: https://min.io @@ -179,7 +201,7 @@ entries: version: 4.0.9 - apiVersion: v1 appVersion: RELEASE.2022-07-29T19-40-48Z - created: 
"2022-10-24T13:36:32.955924583-07:00" + created: "2022-10-24T16:33:53.391342493-07:00" description: Multi-Cloud Object Storage digest: d11db37963636922cb778b6bc0ad2ca4724cb391ea7b785995ada52467d7dd83 home: https://min.io @@ -201,7 +223,7 @@ entries: version: 4.0.8 - apiVersion: v1 appVersion: RELEASE.2022-07-26T00-53-03Z - created: "2022-10-24T13:36:32.954618396-07:00" + created: "2022-10-24T16:33:53.388420109-07:00" description: Multi-Cloud Object Storage digest: ca775e08c84331bb5029d4d29867d30c16e2c62e897788eb432212a756e91e4e home: https://min.io @@ -223,7 +245,7 @@ entries: version: 4.0.7 - apiVersion: v1 appVersion: RELEASE.2022-05-08T23-50-31Z - created: "2022-10-24T13:36:32.953270447-07:00" + created: "2022-10-24T16:33:53.381475272-07:00" description: Multi-Cloud Object Storage digest: 06542b8f3d149d5908b15de9a8d6f8cf304af0213830be56dc315785d14f9ccd home: https://min.io @@ -245,7 +267,7 @@ entries: version: 4.0.6 - apiVersion: v1 appVersion: RELEASE.2022-05-08T23-50-31Z - created: "2022-10-24T13:36:32.95208926-07:00" + created: "2022-10-24T16:33:53.377390619-07:00" description: Multi-Cloud Object Storage digest: dd2676362f067454a496cdd293609d0c904b08f521625af49f95402a024ba1f5 home: https://min.io @@ -267,7 +289,7 @@ entries: version: 4.0.5 - apiVersion: v1 appVersion: RELEASE.2022-05-08T23-50-31Z - created: "2022-10-24T13:36:32.950941486-07:00" + created: "2022-10-24T16:33:53.370521675-07:00" description: Multi-Cloud Object Storage digest: bab9ef192d4eda4c572ad0ce0cf551736c847f582d1837d6833ee10543c23167 home: https://min.io @@ -289,7 +311,7 @@ entries: version: 4.0.4 - apiVersion: v1 appVersion: RELEASE.2022-05-08T23-50-31Z - created: "2022-10-24T13:36:32.949723705-07:00" + created: "2022-10-24T16:33:53.361762168-07:00" description: Multi-Cloud Object Storage digest: c770bb9841c76576e4e8573f78b0ec33e0d729504c9667e67ad62d48df5ed64c home: https://min.io @@ -311,7 +333,7 @@ entries: version: 4.0.3 - apiVersion: v1 appVersion: RELEASE.2022-05-08T23-50-31Z - created: "2022-10-24T13:36:32.948006485-07:00" + created: "2022-10-24T16:33:53.352482938-07:00" description: Multi-Cloud Object Storage digest: 95835f4199d963e2a23a2493610b348e6f2ff8b71c1a648c4a3b84af9b7a83eb home: https://min.io @@ -333,7 +355,7 @@ entries: version: 4.0.2 - apiVersion: v1 appVersion: RELEASE.2022-04-30T22-23-53Z - created: "2022-10-24T13:36:32.937920314-07:00" + created: "2022-10-24T16:33:53.310224854-07:00" description: Multi-Cloud Object Storage digest: 55a088c403b056e1f055a97426aa11759c3d6cbad38face170fe6cbbec7d568f home: https://min.io @@ -355,7 +377,7 @@ entries: version: 4.0.1 - apiVersion: v1 appVersion: RELEASE.2022-04-26T01-20-24Z - created: "2022-10-24T13:36:32.936711722-07:00" + created: "2022-10-24T16:33:53.307447803-07:00" description: Multi-Cloud Object Storage digest: f541237e24336ec3f7f45ae0d523fef694e3a2f9ef648c5b11c15734db6ba2b2 home: https://min.io @@ -377,7 +399,7 @@ entries: version: 4.0.0 - apiVersion: v1 appVersion: RELEASE.2022-04-16T04-26-02Z - created: "2022-10-24T13:36:32.9354864-07:00" + created: "2022-10-24T16:33:53.304712458-07:00" description: Multi-Cloud Object Storage digest: edc0c3dd6d5246a06b74ba16bb4aff80a6d7225dc9aecf064fd89a8af371b9c1 home: https://min.io @@ -399,7 +421,7 @@ entries: version: 3.6.6 - apiVersion: v1 appVersion: RELEASE.2022-04-12T06-55-35Z - created: "2022-10-24T13:36:32.933655413-07:00" + created: "2022-10-24T16:33:53.285241241-07:00" description: Multi-Cloud Object Storage digest: 211e89f6b9eb0b9a3583abaa127be60e1f9717a098e6b2858cb9dc1cc50c1650 home: 
https://min.io @@ -421,7 +443,7 @@ entries: version: 3.6.5 - apiVersion: v1 appVersion: RELEASE.2022-04-09T15-09-52Z - created: "2022-10-24T13:36:32.932341206-07:00" + created: "2022-10-24T16:33:53.279745562-07:00" description: Multi-Cloud Object Storage digest: 534a879d73b370a18b554b93d0930e1c115419619c4ce4ec7dbaae632acacf06 home: https://min.io @@ -443,7 +465,7 @@ entries: version: 3.6.4 - apiVersion: v1 appVersion: RELEASE.2022-03-24T00-43-44Z - created: "2022-10-24T13:36:32.931195936-07:00" + created: "2022-10-24T16:33:53.274344261-07:00" description: Multi-Cloud Object Storage digest: 99508b20eb0083a567dcccaf9a6c237e09575ed1d70cd2e8333f89c472d13d75 home: https://min.io @@ -465,7 +487,7 @@ entries: version: 3.6.3 - apiVersion: v1 appVersion: RELEASE.2022-03-17T06-34-49Z - created: "2022-10-24T13:36:32.929969618-07:00" + created: "2022-10-24T16:33:53.272620165-07:00" description: Multi-Cloud Object Storage digest: b4cd25611ca322b1d23d23112fdfa6b068fd91eefe0b0663b88ff87ea4282495 home: https://min.io @@ -487,7 +509,7 @@ entries: version: 3.6.2 - apiVersion: v1 appVersion: RELEASE.2022-03-14T18-25-24Z - created: "2022-10-24T13:36:32.928577403-07:00" + created: "2022-10-24T16:33:53.270869928-07:00" description: Multi-Cloud Object Storage digest: d75b88162bfe54740a233bcecf87328bba2ae23d170bec3a35c828bc6fdc224c home: https://min.io @@ -509,7 +531,7 @@ entries: version: 3.6.1 - apiVersion: v1 appVersion: RELEASE.2022-03-11T23-57-45Z - created: "2022-10-24T13:36:32.926852299-07:00" + created: "2022-10-24T16:33:53.269124841-07:00" description: Multi-Cloud Object Storage digest: 22e53a1184a21a679bc7d8b94e955777f3506340fc29da5ab0cb6d729bdbde8d home: https://min.io @@ -531,7 +553,7 @@ entries: version: 3.6.0 - apiVersion: v1 appVersion: RELEASE.2022-03-03T21-21-16Z - created: "2022-10-24T13:36:32.925645769-07:00" + created: "2022-10-24T16:33:53.26740353-07:00" description: Multi-Cloud Object Storage digest: 6fda968d3fdfd60470c0055a4e1a3bd8e5aee9ad0af5ba2fb7b7b926fdc9e4a0 home: https://min.io @@ -553,7 +575,7 @@ entries: version: 3.5.9 - apiVersion: v1 appVersion: RELEASE.2022-02-26T02-54-46Z - created: "2022-10-24T13:36:32.924591097-07:00" + created: "2022-10-24T16:33:53.265675059-07:00" description: Multi-Cloud Object Storage digest: 8e015369048a3a82bbd53ad36696786f18561c6b25d14eee9e2c93a7336cef46 home: https://min.io @@ -575,7 +597,7 @@ entries: version: 3.5.8 - apiVersion: v1 appVersion: RELEASE.2022-02-18T01-50-10Z - created: "2022-10-24T13:36:32.923538843-07:00" + created: "2022-10-24T16:33:53.262741599-07:00" description: Multi-Cloud Object Storage digest: cb3543fe748e5f0d59b3ccf4ab9af8e10b731405ae445d1f5715e30013632373 home: https://min.io @@ -597,7 +619,7 @@ entries: version: 3.5.7 - apiVersion: v1 appVersion: RELEASE.2022-02-18T01-50-10Z - created: "2022-10-24T13:36:32.922490534-07:00" + created: "2022-10-24T16:33:53.260866605-07:00" description: Multi-Cloud Object Storage digest: f2e359fa5eefffc59abb3d14a8fa94b11ddeaa99f6cd8dd5f40f4e04121000d6 home: https://min.io @@ -619,7 +641,7 @@ entries: version: 3.5.6 - apiVersion: v1 appVersion: RELEASE.2022-02-16T00-35-27Z - created: "2022-10-24T13:36:32.921238551-07:00" + created: "2022-10-24T16:33:53.259208697-07:00" description: Multi-Cloud Object Storage digest: 529d56cca9d83a3d0e5672e63b6e87b5bcbe10a6b45f7a55ba998cceb32f9c81 home: https://min.io @@ -641,7 +663,7 @@ entries: version: 3.5.5 - apiVersion: v1 appVersion: RELEASE.2022-02-12T00-51-25Z - created: "2022-10-24T13:36:32.919640665-07:00" + created: "2022-10-24T16:33:53.257446217-07:00" 
description: Multi-Cloud Object Storage digest: 3d530598f8ece67bec5b7f990d206584893987c713502f9228e4ee24b5535414 home: https://min.io @@ -663,7 +685,7 @@ entries: version: 3.5.4 - apiVersion: v1 appVersion: RELEASE.2022-02-12T00-51-25Z - created: "2022-10-24T13:36:32.918594306-07:00" + created: "2022-10-24T16:33:53.255870599-07:00" description: Multi-Cloud Object Storage digest: 53937031348b29615f07fc4869b2d668391d8ba9084630a497abd7a7dea9dfb0 home: https://min.io @@ -685,7 +707,7 @@ entries: version: 3.5.3 - apiVersion: v1 appVersion: RELEASE.2022-02-07T08-17-33Z - created: "2022-10-24T13:36:32.917421865-07:00" + created: "2022-10-24T16:33:53.254386142-07:00" description: Multi-Cloud Object Storage digest: 68d643414ff0d565716c5715034fcbf1af262e041915a5c02eb51ec1a65c1ea0 home: https://min.io @@ -707,7 +729,7 @@ entries: version: 3.5.2 - apiVersion: v1 appVersion: RELEASE.2022-02-01T18-00-14Z - created: "2022-10-24T13:36:32.916369587-07:00" + created: "2022-10-24T16:33:53.251927589-07:00" description: Multi-Cloud Object Storage digest: a3e855ed0f31233b989fffd775a29d6fbfa0590089010ff16783fd7f142ef6e7 home: https://min.io @@ -729,7 +751,7 @@ entries: version: 3.5.1 - apiVersion: v1 appVersion: RELEASE.2022-02-01T18-00-14Z - created: "2022-10-24T13:36:32.915338807-07:00" + created: "2022-10-24T16:33:53.250459119-07:00" description: Multi-Cloud Object Storage digest: b1b0ae3c54b4260a698753e11d7781bb8ddc67b7e3fbf0af82796e4cd4ef92a3 home: https://min.io @@ -751,7 +773,7 @@ entries: version: 3.5.0 - apiVersion: v1 appVersion: RELEASE.2022-01-28T02-28-16Z - created: "2022-10-24T13:36:32.914387572-07:00" + created: "2022-10-24T16:33:53.248957191-07:00" description: Multi-Cloud Object Storage digest: fecf25d2d3fb208c6f894fed642a60780a570b7f6d0adddde846af7236dc80aa home: https://min.io @@ -773,7 +795,7 @@ entries: version: 3.4.8 - apiVersion: v1 appVersion: RELEASE.2022-01-25T19-56-04Z - created: "2022-10-24T13:36:32.912675297-07:00" + created: "2022-10-24T16:33:53.247465369-07:00" description: Multi-Cloud Object Storage digest: c78008caa5ce98f64c887630f59d0cbd481cb3f19a7d4e9d3e81bf4e1e45cadc home: https://min.io @@ -795,7 +817,7 @@ entries: version: 3.4.7 - apiVersion: v1 appVersion: RELEASE.2022-01-08T03-11-54Z - created: "2022-10-24T13:36:32.911466146-07:00" + created: "2022-10-24T16:33:53.245981461-07:00" description: Multi-Cloud Object Storage digest: 8f2e2691bf897f74ff094dd370ec56ba9d417e5e8926710c14c2ba346330238d home: https://min.io @@ -817,7 +839,7 @@ entries: version: 3.4.6 - apiVersion: v1 appVersion: RELEASE.2022-01-04T07-41-07Z - created: "2022-10-24T13:36:32.910272698-07:00" + created: "2022-10-24T16:33:53.244460437-07:00" description: Multi-Cloud Object Storage digest: bacd140f0016fab35f516bde787da6449b3a960c071fad9e4b6563118033ac84 home: https://min.io @@ -839,7 +861,7 @@ entries: version: 3.4.5 - apiVersion: v1 appVersion: RELEASE.2021-12-29T06-49-06Z - created: "2022-10-24T13:36:32.909028855-07:00" + created: "2022-10-24T16:33:53.242570045-07:00" description: Multi-Cloud Object Storage digest: 48a453ea5ffeef25933904caefd9470bfb26224dfc2d1096bd0031467ba53007 home: https://min.io @@ -861,7 +883,7 @@ entries: version: 3.4.4 - apiVersion: v1 appVersion: RELEASE.2021-12-20T22-07-16Z - created: "2022-10-24T13:36:32.907822321-07:00" + created: "2022-10-24T16:33:53.240628569-07:00" description: Multi-Cloud Object Storage digest: 47ef4a930713b98f9438ceca913c6e700f85bb25dba5624b056486254b5f0c60 home: https://min.io @@ -883,7 +905,7 @@ entries: version: 3.4.3 - apiVersion: v1 appVersion: 
RELEASE.2021-12-20T22-07-16Z - created: "2022-10-24T13:36:32.906537425-07:00" + created: "2022-10-24T16:33:53.23900453-07:00" description: Multi-Cloud Object Storage digest: d6763f7e2ea66810bd55eb225579a9c3b968f9ae1256f45fd469362e55d846ff home: https://min.io @@ -905,7 +927,7 @@ entries: version: 3.4.2 - apiVersion: v1 appVersion: RELEASE.2021-12-10T23-03-39Z - created: "2022-10-24T13:36:32.904878892-07:00" + created: "2022-10-24T16:33:53.236570801-07:00" description: Multi-Cloud Object Storage digest: 2fb822c87216ba3fc2ae51a54a0a3e239aa560d86542991504a841cc2a2b9a37 home: https://min.io @@ -927,7 +949,7 @@ entries: version: 3.4.1 - apiVersion: v1 appVersion: RELEASE.2021-12-18T04-42-33Z - created: "2022-10-24T13:36:32.903957404-07:00" + created: "2022-10-24T16:33:53.234884893-07:00" description: Multi-Cloud Object Storage digest: fa8ba1aeb1a15316c6be8403416a5e6b5e6139b7166592087e7bddc9e6db5453 home: https://min.io @@ -949,7 +971,7 @@ entries: version: 3.4.0 - apiVersion: v1 appVersion: RELEASE.2021-12-10T23-03-39Z - created: "2022-10-24T13:36:32.902979181-07:00" + created: "2022-10-24T16:33:53.233323683-07:00" description: Multi-Cloud Object Storage digest: b9b0af9ca50b8d00868e1f1b989dca275829d9110af6de91bb9b3a398341e894 home: https://min.io @@ -971,7 +993,7 @@ entries: version: 3.3.4 - apiVersion: v1 appVersion: RELEASE.2021-12-10T23-03-39Z - created: "2022-10-24T13:36:32.901872776-07:00" + created: "2022-10-24T16:33:53.231791004-07:00" description: Multi-Cloud Object Storage digest: f8b22a5b8fe95a7ddf61b825e17d11c9345fb10e4c126b0d78381608aa300a08 home: https://min.io @@ -993,7 +1015,7 @@ entries: version: 3.3.3 - apiVersion: v1 appVersion: RELEASE.2021-12-10T23-03-39Z - created: "2022-10-24T13:36:32.900759974-07:00" + created: "2022-10-24T16:33:53.229433719-07:00" description: Multi-Cloud Object Storage digest: c48d474f269427abe5ab446f00687d0625b3d1adfc5c73bdb4b21ca9e42853fb home: https://min.io @@ -1015,7 +1037,7 @@ entries: version: 3.3.2 - apiVersion: v1 appVersion: RELEASE.2021-11-24T23-19-33Z - created: "2022-10-24T13:36:32.899563506-07:00" + created: "2022-10-24T16:33:53.228009196-07:00" description: Multi-Cloud Object Storage digest: 7c3da39d9b0090cbf5efedf0cc163a1e2df05becc5152c3add8e837384690bc4 home: https://min.io @@ -1037,7 +1059,7 @@ entries: version: 3.3.1 - apiVersion: v1 appVersion: RELEASE.2021-11-24T23-19-33Z - created: "2022-10-24T13:36:32.897936163-07:00" + created: "2022-10-24T16:33:53.226614437-07:00" description: Multi-Cloud Object Storage digest: 50d6590b4cc779c40f81cc13b1586fbe508aa7f3230036c760bfc5f4154fbce4 home: https://min.io @@ -1059,7 +1081,7 @@ entries: version: 3.3.0 - apiVersion: v1 appVersion: RELEASE.2021-10-13T00-23-17Z - created: "2022-10-24T13:36:32.896736618-07:00" + created: "2022-10-24T16:33:53.225150798-07:00" description: Multi-Cloud Object Storage digest: 5b797b7208cd904c11a76cd72938c8652160cb5fcd7f09fa41e4e703e6d64054 home: https://min.io @@ -1081,7 +1103,7 @@ entries: version: 3.2.0 - apiVersion: v1 appVersion: RELEASE.2021-10-10T16-53-30Z - created: "2022-10-24T13:36:32.895565174-07:00" + created: "2022-10-24T16:33:53.22375416-07:00" description: Multi-Cloud Object Storage digest: e084ac4bb095f071e59f8f08bd092e4ab2404c1ddadacfdce7dbe248f1bafff8 home: https://min.io @@ -1103,7 +1125,7 @@ entries: version: 3.1.9 - apiVersion: v1 appVersion: RELEASE.2021-10-06T23-36-31Z - created: "2022-10-24T13:36:32.8943929-07:00" + created: "2022-10-24T16:33:53.222326562-07:00" description: Multi-Cloud Object Storage digest: 
2890430a8d9487d1fa5508c26776e4881d0086b2c052aa6bdc65c0e4423b9159 home: https://min.io @@ -1125,7 +1147,7 @@ entries: version: 3.1.8 - apiVersion: v1 appVersion: RELEASE.2021-10-02T16-31-05Z - created: "2022-10-24T13:36:32.893377358-07:00" + created: "2022-10-24T16:33:53.220812632-07:00" description: Multi-Cloud Object Storage digest: 01a92196af6c47e3a01e1c68d7cf693a8bc487cba810c2cecff155071e4d6a11 home: https://min.io @@ -1147,7 +1169,7 @@ entries: version: 3.1.7 - apiVersion: v1 appVersion: RELEASE.2021-09-18T18-09-59Z - created: "2022-10-24T13:36:32.892415357-07:00" + created: "2022-10-24T16:33:53.218478862-07:00" description: Multi-Cloud Object Storage digest: e779d73f80b75f33b9c9d995ab10fa455c9c57ee575ebc54e06725a64cd04310 home: https://min.io @@ -1169,7 +1191,7 @@ entries: version: 3.1.6 - apiVersion: v1 appVersion: RELEASE.2021-09-18T18-09-59Z - created: "2022-10-24T13:36:32.891298462-07:00" + created: "2022-10-24T16:33:53.217059239-07:00" description: Multi-Cloud Object Storage digest: 19de4bbc8a400f0c2a94c5e85fc25c9bfc666e773fb3e368dd621d5a57dd1c2a home: https://min.io @@ -1191,7 +1213,7 @@ entries: version: 3.1.5 - apiVersion: v1 appVersion: RELEASE.2021-09-18T18-09-59Z - created: "2022-10-24T13:36:32.889868055-07:00" + created: "2022-10-24T16:33:53.21565796-07:00" description: Multi-Cloud Object Storage digest: f789d93a171296dd01af0105a5ce067c663597afbb2432faeda293b752b355c0 home: https://min.io @@ -1213,7 +1235,7 @@ entries: version: 3.1.4 - apiVersion: v1 appVersion: RELEASE.2021-09-09T21-37-07Z - created: "2022-10-24T13:36:32.888801692-07:00" + created: "2022-10-24T16:33:53.214194448-07:00" description: Multi-Cloud Object Storage digest: e2eb34d31560b012ef6581f0ff6004ea4376c968cbe0daed2d8f3a614a892afb home: https://min.io @@ -1235,7 +1257,7 @@ entries: version: 3.1.3 - apiVersion: v1 appVersion: RELEASE.2021-09-09T21-37-07Z - created: "2022-10-24T13:36:32.887716046-07:00" + created: "2022-10-24T16:33:53.212752163-07:00" description: Multi-Cloud Object Storage digest: 8d7e0cc46b3583abd71b97dc0c071f98321101f90eca17348f1e9e0831be64cd home: https://min.io @@ -1257,7 +1279,7 @@ entries: version: 3.1.2 - apiVersion: v1 appVersion: RELEASE.2021-09-09T21-37-07Z - created: "2022-10-24T13:36:32.886871767-07:00" + created: "2022-10-24T16:33:53.211258112-07:00" description: Multi-Cloud Object Storage digest: 50dcbf366b1b21f4a6fc429d0b884c0c7ff481d0fb95c5e9b3ae157c348dd124 home: https://min.io @@ -1279,7 +1301,7 @@ entries: version: 3.1.1 - apiVersion: v1 appVersion: RELEASE.2021-09-09T21-37-07Z - created: "2022-10-24T13:36:32.885997189-07:00" + created: "2022-10-24T16:33:53.208934303-07:00" description: Multi-Cloud Object Storage digest: 6c01af55d2e2e5f716eabf6fef3a92a8464d0674529e9bacab292e5478a73b7a home: https://min.io @@ -1301,7 +1323,7 @@ entries: version: 3.1.0 - apiVersion: v1 appVersion: RELEASE.2021-09-03T03-56-13Z - created: "2022-10-24T13:36:32.884972178-07:00" + created: "2022-10-24T16:33:53.207464754-07:00" description: Multi-Cloud Object Storage digest: 18e10be4d0458bc590ca9abf753227e0c70f60511495387b8d4fb15a4daf932e home: https://min.io @@ -1323,7 +1345,7 @@ entries: version: 3.0.2 - apiVersion: v1 appVersion: RELEASE.2021-08-31T05-46-54Z - created: "2022-10-24T13:36:32.883399218-07:00" + created: "2022-10-24T16:33:53.20559794-07:00" description: Multi-Cloud Object Storage digest: f5b6e7f6272a9e71aef3b75555f6f756a39eef65cb78873f26451dba79b19906 home: https://min.io @@ -1345,7 +1367,7 @@ entries: version: 3.0.1 - apiVersion: v1 appVersion: RELEASE.2021-08-31T05-46-54Z - 
created: "2022-10-24T13:36:32.882514205-07:00" + created: "2022-10-24T16:33:53.204280042-07:00" description: Multi-Cloud Object Storage digest: 6d2ee1336c412affaaf209fdb80215be2a6ebb23ab2443adbaffef9e7df13fab home: https://min.io @@ -1367,7 +1389,7 @@ entries: version: 3.0.0 - apiVersion: v1 appVersion: RELEASE.2021-08-31T05-46-54Z - created: "2022-10-24T13:36:32.88164439-07:00" + created: "2022-10-24T16:33:53.203016336-07:00" description: Multi-Cloud Object Storage digest: 0a004aaf5bb61deed6a5c88256d1695ebe2f9ff1553874a93e4acfd75e8d339b home: https://min.io @@ -1387,7 +1409,7 @@ entries: version: 2.0.1 - apiVersion: v1 appVersion: RELEASE.2021-08-25T00-41-18Z - created: "2022-10-24T13:36:32.88087136-07:00" + created: "2022-10-24T16:33:53.201702448-07:00" description: Multi-Cloud Object Storage digest: fcd944e837ee481307de6aa3d387ea18c234f995a84c15abb211aab4a4054afc home: https://min.io @@ -1407,7 +1429,7 @@ entries: version: 2.0.0 - apiVersion: v1 appVersion: RELEASE.2021-08-25T00-41-18Z - created: "2022-10-24T13:36:32.879926305-07:00" + created: "2022-10-24T16:33:53.200425938-07:00" description: Multi-Cloud Object Storage digest: 7b6c033d43a856479eb493ab8ca05b230f77c3e42e209e8f298fac6af1a9796f home: https://min.io @@ -1427,7 +1449,7 @@ entries: version: 1.0.5 - apiVersion: v1 appVersion: RELEASE.2021-08-25T00-41-18Z - created: "2022-10-24T13:36:32.879023548-07:00" + created: "2022-10-24T16:33:53.198865308-07:00" description: Multi-Cloud Object Storage digest: abd221245ace16c8e0c6c851cf262d1474a5219dcbf25c4b2e7b77142f9c59ed home: https://min.io @@ -1447,7 +1469,7 @@ entries: version: 1.0.4 - apiVersion: v1 appVersion: RELEASE.2021-08-20T18-32-01Z - created: "2022-10-24T13:36:32.87797421-07:00" + created: "2022-10-24T16:33:53.196314211-07:00" description: Multi-Cloud Object Storage digest: 922a333f5413d1042f7aa81929f43767f6ffca9b260c46713f04ce1dda86d57d home: https://min.io @@ -1467,7 +1489,7 @@ entries: version: 1.0.3 - apiVersion: v1 appVersion: RELEASE.2021-08-20T18-32-01Z - created: "2022-10-24T13:36:32.871696966-07:00" + created: "2022-10-24T16:33:53.194755983-07:00" description: High Performance, Kubernetes Native Object Storage digest: 10e22773506bbfb1c66442937956534cf4057b94f06a977db78b8cd223588388 home: https://min.io @@ -1487,7 +1509,7 @@ entries: version: 1.0.2 - apiVersion: v1 appVersion: RELEASE.2021-08-20T18-32-01Z - created: "2022-10-24T13:36:32.870742213-07:00" + created: "2022-10-24T16:33:53.193457062-07:00" description: High Performance, Kubernetes Native Object Storage digest: ef86ab6df23d6942705da9ef70991b649638c51bc310587d37a425268ba4a06c home: https://min.io @@ -1507,7 +1529,7 @@ entries: version: 1.0.1 - apiVersion: v1 appVersion: RELEASE.2021-08-17T20-53-08Z - created: "2022-10-24T13:36:32.869620886-07:00" + created: "2022-10-24T16:33:53.192158201-07:00" description: High Performance, Kubernetes Native Object Storage digest: 1add7608692cbf39aaf9b1252530e566f7b2f306a14e390b0f49b97a20f2b188 home: https://min.io @@ -1525,4 +1547,4 @@ entries: urls: - https://charts.min.io/helm-releases/minio-1.0.0.tgz version: 1.0.0 -generated: "2022-10-24T13:36:32.868331318-07:00" +generated: "2022-10-24T16:33:53.190475725-07:00" diff --git a/internal/auth/credentials.go b/internal/auth/credentials.go index a5eb4b29b..90336ac48 100644 --- a/internal/auth/credentials.go +++ b/internal/auth/credentials.go @@ -40,7 +40,7 @@ const ( // There is no max length enforcement for access keys accessKeyMaxLen = 20 - // Minimum length for MinIO secret key for both server and gateway mode. 
+	// Minimum length for MinIO secret key for the server
 	secretKeyMinLen = 8
 
 	// Maximum secret key length for MinIO, this
diff --git a/internal/bucket/encryption/bucket-sse-config.go b/internal/bucket/encryption/bucket-sse-config.go
index dbf65dc57..ef403a47b 100644
--- a/internal/bucket/encryption/bucket-sse-config.go
+++ b/internal/bucket/encryption/bucket-sse-config.go
@@ -124,7 +124,6 @@ func ParseBucketSSEConfig(r io.Reader) (*BucketSSEConfig, error) {
 // when bucketSSEConfig is empty.
 type ApplyOptions struct {
 	AutoEncrypt bool
-	Passthrough bool // Set to 'true' for S3 gateway mode.
 }
 
 // Apply applies the SSE bucket configuration on the given HTTP headers and
@@ -139,11 +138,7 @@ func (b *BucketSSEConfig) Apply(headers http.Header, opts ApplyOptions) {
 	}
 	if b == nil {
 		if opts.AutoEncrypt {
-			if !opts.Passthrough {
-				headers.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
-			} else {
-				headers.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionAES)
-			}
+			headers.Set(xhttp.AmzServerSideEncryption, xhttp.AmzEncryptionKMS)
 		}
 		return
 	}
diff --git a/internal/config/errors.go b/internal/config/errors.go
index 83802a825..12e1f10b3 100644
--- a/internal/config/errors.go
+++ b/internal/config/errors.go
@@ -19,12 +19,6 @@ package config
 
 // UI errors
 var (
-	ErrInvalidFSValue = newErrFn(
-		"Invalid drive path",
-		"Please provide an existing deployment with MinIO",
-		"MinIO does not support newer NAS gateway deployments anymore refer https://github.com/minio/minio/issues/14331",
-	)
-
 	ErrInvalidXLValue = newErrFn(
 		"Invalid drive path",
 		"Please provide a fresh drive for single drive MinIO setup",
@@ -109,12 +103,6 @@ var (
 		"MINIO_CACHE_WATERMARK_HIGH: Valid cache high watermark value must be between 0-100",
 	)
 
-	ErrInvalidCacheEncryptionKey = newErrFn(
-		"Invalid cache encryption master key value",
-		"Please check the passed value",
-		"MINIO_CACHE_ENCRYPTION_SECRET_KEY: For more information, please refer to https://blog.min.io/deprecation-of-the-minio-gateway/",
-	)
-
 	ErrInvalidCacheRange = newErrFn(
 		"Invalid cache range value",
 		"Please check the passed value",
@@ -284,18 +272,6 @@ Example 1:
 		"Compress extensions/mime-types are delimited by `,`. For eg, MINIO_COMPRESS_MIME_TYPES=\"A,B,C\"",
 	)
 
-	ErrInvalidGWSSEValue = newErrFn(
-		"Invalid gateway SSE value",
-		"Please check the passed value",
-		"MINIO_GATEWAY_SSE: Gateway SSE accepts only C and S3 as valid values. Delimit by `;` to set more than one value",
-	)
-
-	ErrInvalidGWSSEEnvValue = newErrFn(
-		"Invalid gateway SSE configuration",
-		"",
-		"Refer to https://min.io/docs/minio/linux/administration/server-side-encryption/server-side-encryption-sse-kms.html#quickstart for setting up SSE",
-	)
-
 	ErrInvalidReplicationWorkersValue = newErrFn(
 		"Invalid value for replication workers",
 		"",
diff --git a/internal/logger/logger.go b/internal/logger/logger.go
index 4e0976664..6fa87c387 100644
--- a/internal/logger/logger.go
+++ b/internal/logger/logger.go
@@ -63,7 +63,6 @@ const TimeFormat string = "15:04:05 MST 01/02/2006"
 
 var matchingFuncNames = [...]string{
 	"http.HandlerFunc.ServeHTTP",
 	"cmd.serverMain",
-	"cmd.StartGateway",
 	// add more here ..
 }
diff --git a/main.go b/main.go
index 47aaecd99..54fb45490 100644
--- a/main.go
+++ b/main.go
@@ -24,9 +24,6 @@ import (
 	_ "github.com/minio/minio/internal/init"
 
 	minio "github.com/minio/minio/cmd"
-
-	// Import gateway
-	_ "github.com/minio/minio/cmd/gateway"
 )
 
 func main() {
diff --git a/main_contrib_test.go b/main_contrib_test.go
deleted file mode 100644
index 6f0ce4776..000000000
--- a/main_contrib_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-//go:build testrunmain
-
-/*
- * MinIO Object Storage (c) 2021 MinIO, Inc.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package main
-
-import (
-	"log"
-	"os"
-	"os/signal"
-	"strings"
-	"syscall"
-	"testing"
-
-	minio "github.com/minio/minio/cmd"
-	_ "github.com/minio/minio/cmd/gateway"
-)
-
-// TestRunMain takes arguments from APP_ARGS env variable and calls minio.Main(args)
-// 1. Build and RUN test executable:
-// $ go test -tags testrunmain -covermode count -coverpkg="./..." -c -tags testrunmain
-// $ APP_ARGS="server /tmp/test" ./minio.test -test.run "^TestRunMain$" -test.coverprofile coverage.cov
-//
-// 1. As an alternative you can also run the system under test by just by calling "go test"
-// $ APP_ARGS="server /tmp/test" go test -cover -tags testrunmain -covermode count -coverpkg="./..." -coverprofile=coverage.cov
-//
-// 2. Run System-Tests (when using GitBash prefix this line with MSYS_NO_PATHCONV=1)
-// Note the SERVER_ENDPOINT must be reachable from inside the docker container (so don't use localhost!)
-//
-// $ podman run -e MINT_MODE=full -e SERVER_ENDPOINT=192.168.47.11:9000 -e ACCESS_KEY=minioadmin -e SECRET_KEY=minioadmin -v /tmp/mint/log:/mint/log minio/mint
-//
-// 3. Stop system under test by sending SIGTERM
-// $ ctrl+c
-//
-// 4. Optionally transform coverage file to HTML
-// $ go tool cover -html=./coverage.cov -o coverage.html
-//
-// 5. Optionally transform the coverage file to .csv
-// $ cat coverage.cov | sed -E 's/mode: .*/source;from;to;stmnts;count/g' | sed -E 's/:| |,/;/g' > coverage.csv
-func TestRunMain(t *testing.T) {
-	cancelChan := make(chan os.Signal, 1)
-	// catch SIGETRM or SIGINTERRUPT. The test must gracefully end to complete the test coverage.
-	signal.Notify(cancelChan, syscall.SIGTERM, syscall.SIGINT)
-	go func() {
-		// start minio server with params from env variable APP_ARGS
-		args := os.Getenv("APP_ARGS")
-		if args == "" {
-			log.Printf("No environment variable APP_ARGS found. Starting minio without parameters ...")
-		} else {
-			log.Printf("Starting \"minio %v\" ...", args)
-		}
-		minio.Main(strings.Split("minio.test "+args, " "))
-	}()
-	sig := <-cancelChan
-	log.Printf("Caught SIGTERM %v", sig)
-	log.Print("You might want to transform the coverage.cov file to .html by calling:")
-	log.Print("$ go tool cover -html=./coverage.cov -o coverage.html")
-	// shutdown other goroutines gracefully
-	// close other resources
-}