diff --git a/cmd/auth-handler.go b/cmd/auth-handler.go index e54403f51..5793f30e3 100644 --- a/cmd/auth-handler.go +++ b/cmd/auth-handler.go @@ -567,7 +567,7 @@ func isReqAuthenticated(ctx context.Context, r *http.Request, region string, sty // Verify 'Content-Md5' and/or 'X-Amz-Content-Sha256' if present. // The verification happens implicit during reading. - reader, err := hash.NewReader(r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) + reader, err := hash.NewReader(ctx, r.Body, -1, clientETag.String(), hex.EncodeToString(contentSHA256), -1) if err != nil { return toAPIErrorCode(ctx, err) } diff --git a/cmd/batch-handlers.go b/cmd/batch-handlers.go index d02a6f32b..39c525db4 100644 --- a/cmd/batch-handlers.go +++ b/cmd/batch-handlers.go @@ -166,7 +166,7 @@ func (r *BatchJobReplicateV1) ReplicateFromSource(ctx context.Context, api Objec } defer rd.Close() - hr, err := hash.NewReader(rd, objInfo.Size, "", "", objInfo.Size) + hr, err := hash.NewReader(ctx, rd, objInfo.Size, "", "", objInfo.Size) if err != nil { return err } @@ -229,7 +229,7 @@ func (r *BatchJobReplicateV1) copyWithMultipartfromSource(ctx context.Context, a } defer rd.Close() - hr, err = hash.NewReader(io.LimitReader(rd, objInfo.Size), objInfo.Size, "", "", objInfo.Size) + hr, err = hash.NewReader(ctx, io.LimitReader(rd, objInfo.Size), objInfo.Size, "", "", objInfo.Size) if err != nil { return err } diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 767f71d51..1e219c1da 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -1139,7 +1139,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h return } - hashReader, err := hash.NewReader(reader, fileSize, "", "", fileSize) + hashReader, err := hash.NewReader(ctx, reader, fileSize, "", "", fileSize) if err != nil { logger.LogIf(ctx, err) writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) @@ -1254,7 +1254,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w 
http.ResponseWriter, r *h return } // do not try to verify encrypted content/ - hashReader, err = hash.NewReader(reader, -1, "", "", -1) + hashReader, err = hash.NewReader(ctx, reader, -1, "", "", -1) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go index 72508b013..bd3a34cf5 100644 --- a/cmd/bucket-replication.go +++ b/cmd/bucket-replication.go @@ -1571,7 +1571,7 @@ func replicateObjectWithMultipart(ctx context.Context, c *minio.Core, bucket, ob ) for _, partInfo := range objInfo.Parts { - hr, err = hash.NewReader(io.LimitReader(r, partInfo.ActualSize), partInfo.ActualSize, "", "", partInfo.ActualSize) + hr, err = hash.NewReader(ctx, io.LimitReader(r, partInfo.ActualSize), partInfo.ActualSize, "", "", partInfo.ActualSize) if err != nil { return err } diff --git a/cmd/config-common.go b/cmd/config-common.go index fd6d7dd26..71140aa3d 100644 --- a/cmd/config-common.go +++ b/cmd/config-common.go @@ -71,7 +71,7 @@ func deleteConfig(ctx context.Context, objAPI objectDeleter, configFile string) } func saveConfigWithOpts(ctx context.Context, store objectIO, configFile string, data []byte, opts ObjectOptions) error { - hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data))) + hashReader, err := hash.NewReader(ctx, bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data))) if err != nil { return err } diff --git a/cmd/data-usage-cache.go b/cmd/data-usage-cache.go index f6e1895c8..be40fd3ed 100644 --- a/cmd/data-usage-cache.go +++ b/cmd/data-usage-cache.go @@ -998,7 +998,7 @@ func (d *dataUsageCache) save(ctx context.Context, store objectIO, name string) return err } - hr, err := hash.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()), "", "", int64(buf.Len())) + hr, err := hash.NewReader(ctx, bytes.NewReader(buf.Bytes()), int64(buf.Len()), "", "", int64(buf.Len())) if err != nil { 
return err } diff --git a/cmd/disk-cache-backend.go b/cmd/disk-cache-backend.go index 9813ded53..2cfa80600 100644 --- a/cmd/disk-cache-backend.go +++ b/cmd/disk-cache-backend.go @@ -1441,7 +1441,7 @@ func newCachePartEncryptReader(ctx context.Context, bucket, object string, partI info := ObjectInfo{Size: size} wantSize = info.EncryptedSize() } - hReader, err := hash.NewReader(content, wantSize, "", "", size) + hReader, err := hash.NewReader(ctx, content, wantSize, "", "", size) if err != nil { return nil, err } diff --git a/cmd/disk-cache.go b/cmd/disk-cache.go index e1e720012..f85640fb6 100644 --- a/cmd/disk-cache.go +++ b/cmd/disk-cache.go @@ -734,7 +734,7 @@ func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r * defer cLock.Unlock(lkctx) // Initialize pipe to stream data to backend pipeReader, pipeWriter := io.Pipe() - hashReader, err := hash.NewReader(pipeReader, size, "", "", r.ActualSize()) + hashReader, err := hash.NewReader(ctx, pipeReader, size, "", "", r.ActualSize()) if err != nil { return } @@ -795,7 +795,7 @@ func (c *cacheObjects) uploadObject(ctx context.Context, oi ObjectInfo) { if st == CommitComplete || st.String() == "" { return } - hashReader, err := hash.NewReader(cReader, oi.Size, "", "", oi.Size) + hashReader, err := hash.NewReader(ctx, cReader, oi.Size, "", "", oi.Size) if err != nil { return } @@ -1059,7 +1059,7 @@ func (c *cacheObjects) PutObjectPart(ctx context.Context, bucket, object, upload info = PartInfo{} // Initialize pipe to stream data to backend pipeReader, pipeWriter := io.Pipe() - hashReader, err := hash.NewReader(pipeReader, size, "", "", data.ActualSize()) + hashReader, err := hash.NewReader(ctx, pipeReader, size, "", "", data.ActualSize()) if err != nil { return } diff --git a/cmd/erasure-object.go b/cmd/erasure-object.go index 9c2b39161..9015d68ae 100644 --- a/cmd/erasure-object.go +++ b/cmd/erasure-object.go @@ -2155,7 +2155,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx 
context.Context, bucket s return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object)) } defer gr.Close() - hashReader, err := hash.NewReader(gr, gr.ObjInfo.Size, "", "", gr.ObjInfo.Size) + hashReader, err := hash.NewReader(ctx, gr, gr.ObjInfo.Size, "", "", gr.ObjInfo.Size) if err != nil { return setRestoreHeaderFn(oi, toObjectErr(err, bucket, object)) } @@ -2180,7 +2180,7 @@ func (er erasureObjects) restoreTransitionedObject(ctx context.Context, bucket s // rehydrate the parts back on disk as per the original xl.meta prior to transition for _, partInfo := range oi.Parts { - hr, err := hash.NewReader(io.LimitReader(gr, partInfo.Size), partInfo.Size, "", "", partInfo.Size) + hr, err := hash.NewReader(ctx, io.LimitReader(gr, partInfo.Size), partInfo.Size, "", "", partInfo.Size) if err != nil { return setRestoreHeaderFn(oi, err) } diff --git a/cmd/erasure-server-pool-decom.go b/cmd/erasure-server-pool-decom.go index 95b787680..0912110a7 100644 --- a/cmd/erasure-server-pool-decom.go +++ b/cmd/erasure-server-pool-decom.go @@ -608,7 +608,7 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri defer z.AbortMultipartUpload(ctx, bucket, objInfo.Name, res.UploadID, ObjectOptions{}) parts := make([]CompletePart, len(objInfo.Parts)) for i, part := range objInfo.Parts { - hr, err := hash.NewReader(io.LimitReader(gr, part.Size), part.Size, "", "", part.ActualSize) + hr, err := hash.NewReader(ctx, io.LimitReader(gr, part.Size), part.Size, "", "", part.ActualSize) if err != nil { return fmt.Errorf("decommissionObject: hash.NewReader() %w", err) } @@ -642,7 +642,7 @@ func (z *erasureServerPools) decommissionObject(ctx context.Context, bucket stri return err } - hr, err := hash.NewReader(io.LimitReader(gr, objInfo.Size), objInfo.Size, "", "", actualSize) + hr, err := hash.NewReader(ctx, io.LimitReader(gr, objInfo.Size), objInfo.Size, "", "", actualSize) if err != nil { return fmt.Errorf("decommissionObject: hash.NewReader() %w", err) } diff --git 
a/cmd/erasure-server-pool-rebalance.go b/cmd/erasure-server-pool-rebalance.go index ac7ca1c05..58e7c8676 100644 --- a/cmd/erasure-server-pool-rebalance.go +++ b/cmd/erasure-server-pool-rebalance.go @@ -736,7 +736,7 @@ func (z *erasureServerPools) rebalanceObject(ctx context.Context, bucket string, parts := make([]CompletePart, len(oi.Parts)) for i, part := range oi.Parts { - hr, err := hash.NewReader(io.LimitReader(gr, part.Size), part.Size, "", "", part.ActualSize) + hr, err := hash.NewReader(ctx, io.LimitReader(gr, part.Size), part.Size, "", "", part.ActualSize) if err != nil { return fmt.Errorf("rebalanceObject: hash.NewReader() %w", err) } @@ -766,7 +766,7 @@ func (z *erasureServerPools) rebalanceObject(ctx context.Context, bucket string, return err } - hr, err := hash.NewReader(gr, oi.Size, "", "", actualSize) + hr, err := hash.NewReader(ctx, gr, oi.Size, "", "", actualSize) if err != nil { return fmt.Errorf("rebalanceObject: hash.NewReader() %w", err) } diff --git a/cmd/metacache-set.go b/cmd/metacache-set.go index b4b5dc1ed..b6ff0e04e 100644 --- a/cmd/metacache-set.go +++ b/cmd/metacache-set.go @@ -761,7 +761,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache return nil } o.debugln(color.Green("saveMetaCacheStream:")+" saving block", b.n, "to", o.objectPath(b.n)) - r, err := hash.NewReader(bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data))) + r, err := hash.NewReader(ctx, bytes.NewReader(b.data), int64(len(b.data)), "", "", int64(len(b.data))) logger.LogIf(ctx, err) custom := b.headerKV() _, err = er.putMetacacheObject(ctx, o.objectPath(b.n), NewPutObjReader(r), ObjectOptions{ diff --git a/cmd/object-api-utils.go b/cmd/object-api-utils.go index 899fd1b11..b7135ed3f 100644 --- a/cmd/object-api-utils.go +++ b/cmd/object-api-utils.go @@ -342,6 +342,15 @@ func mustGetUUID() string { return u.String() } +// mustGetUUIDBytes - get a random UUID as 16 bytes unencoded. 
+func mustGetUUIDBytes() []byte { + u, err := uuid.NewRandom() + if err != nil { + logger.CriticalIf(GlobalContext, err) + } + return u[:] +} + // Create an s3 compatible MD5sum for complete multipart transaction. func getCompleteMultipartMD5(parts []CompletePart) string { var finalMD5Bytes []byte diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 4a1a58642..69073e969 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -1182,7 +1182,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re compressMetadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2 compressMetadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(actualSize, 10) - reader = etag.NewReader(reader, nil) + reader = etag.NewReader(ctx, reader, nil, nil) wantEncryption := crypto.Requested(r.Header) s2c, cb := newS2CompressReader(reader, actualSize, wantEncryption) dstOpts.IndexCB = cb @@ -1195,7 +1195,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re reader = gr } - srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualSize) + srcInfo.Reader, err = hash.NewReader(ctx, reader, length, "", "", actualSize) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -1316,7 +1316,7 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re } // do not try to verify encrypted content - srcInfo.Reader, err = hash.NewReader(reader, targetSize, "", "", actualSize) + srcInfo.Reader, err = hash.NewReader(ctx, reader, targetSize, "", "", actualSize) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -1743,7 +1743,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req metadata[ReservedMetadataPrefix+"compression"] = compressionAlgorithmV2 metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10) - actualReader, err := hash.NewReader(reader, size, md5hex, 
sha256hex, actualSize) + actualReader, err := hash.NewReader(ctx, reader, size, md5hex, sha256hex, actualSize) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -1763,8 +1763,20 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req md5hex = "" // Do not try to verify the content. sha256hex = "" } - - hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize) + var hashReader *hash.Reader + // Optimization: use a random UUID as the ETag for SSE-KMS or SSE-C uploads when the client did not send Content-Md5 + if !etag.ContentMD5Requested(r.Header) && (crypto.S3KMS.IsRequested(r.Header) || crypto.SSEC.IsRequested(r.Header)) { + hashReader, err = hash.NewReaderWithOpts(ctx, reader, hash.Options{ + Size: size, + MD5Hex: md5hex, + SHA256Hex: sha256hex, + ActualSize: actualSize, + DisableMD5: false, + ForceMD5: mustGetUUIDBytes(), + }) + } else { + hashReader, err = hash.NewReader(ctx, reader, size, md5hex, sha256hex, actualSize) + } if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -1860,7 +1872,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req } // do not try to verify encrypted content - hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize) + hashReader, err = hash.NewReader(ctx, etag.Wrap(reader, hashReader), wantSize, "", "", actualSize) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -2077,7 +2089,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h } } - hreader, err := hash.NewReader(reader, size, md5hex, sha256hex, size) + hreader, err := hash.NewReader(ctx, reader, size, md5hex, sha256hex, size) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -2128,7 +2140,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h metadata[ReservedMetadataPrefix+"compression"] = 
compressionAlgorithmV2 metadata[ReservedMetadataPrefix+"actual-size"] = strconv.FormatInt(size, 10) - actualReader, err := hash.NewReader(reader, size, "", "", actualSize) + actualReader, err := hash.NewReader(ctx, reader, size, "", "", actualSize) if err != nil { return err } @@ -2142,7 +2154,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h size = -1 // Since compressed size is un-predictable. } - hashReader, err := hash.NewReader(reader, size, "", "", actualSize) + hashReader, err := hash.NewReader(ctx, reader, size, "", "", actualSize) if err != nil { return err } @@ -2212,7 +2224,7 @@ func (api objectAPIHandlers) PutObjectExtractHandler(w http.ResponseWriter, r *h } // do not try to verify encrypted content - hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize) + hashReader, err = hash.NewReader(ctx, etag.Wrap(reader, hashReader), wantSize, "", "", actualSize) if err != nil { return err } diff --git a/cmd/object-multipart-handlers.go b/cmd/object-multipart-handlers.go index 0e4b22ba1..321aeaf39 100644 --- a/cmd/object-multipart-handlers.go +++ b/cmd/object-multipart-handlers.go @@ -448,7 +448,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt } actualPartSize = length - var reader io.Reader = etag.NewReader(gr, nil) + var reader io.Reader = etag.NewReader(ctx, gr, nil, nil) mi, err := objectAPI.GetMultipartInfo(ctx, dstBucket, dstObject, uploadID, dstOpts) if err != nil { @@ -471,7 +471,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt length = -1 } - srcInfo.Reader, err = hash.NewReader(reader, length, "", "", actualPartSize) + srcInfo.Reader, err = hash.NewReader(ctx, reader, length, "", "", actualPartSize) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -526,7 +526,7 @@ func (api objectAPIHandlers) CopyObjectPartHandler(w http.ResponseWriter, r *htt wantSize = 
info.EncryptedSize() } - srcInfo.Reader, err = hash.NewReader(reader, wantSize, "", "", actualPartSize) + srcInfo.Reader, err = hash.NewReader(ctx, reader, wantSize, "", "", actualPartSize) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -715,7 +715,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http _, isCompressed := mi.UserDefined[ReservedMetadataPrefix+"compression"] var idxCb func() []byte if isCompressed { - actualReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize) + actualReader, err := hash.NewReader(ctx, reader, size, md5hex, sha256hex, actualSize) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -736,7 +736,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http sha256hex = "" } - hashReader, err := hash.NewReader(reader, size, md5hex, sha256hex, actualSize) + hashReader, err := hash.NewReader(ctx, reader, size, md5hex, sha256hex, actualSize) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -798,7 +798,7 @@ func (api objectAPIHandlers) PutObjectPartHandler(w http.ResponseWriter, r *http wantSize = info.EncryptedSize() } // do not try to verify encrypted content - hashReader, err = hash.NewReader(etag.Wrap(reader, hashReader), wantSize, "", "", actualSize) + hashReader, err = hash.NewReader(ctx, etag.Wrap(reader, hashReader), wantSize, "", "", actualSize) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return diff --git a/cmd/post-policy-fan-out.go b/cmd/post-policy-fan-out.go index 42091c588..1bb637e72 100644 --- a/cmd/post-policy-fan-out.go +++ b/cmd/post-policy-fan-out.go @@ -61,7 +61,7 @@ func fanOutPutObject(ctx context.Context, bucket string, objectAPI ObjectLayer, ActualSize: -1, DisableMD5: true, } - hr, err := hash.NewReaderWithOpts(bytes.NewReader(fanOutBuf), hopts) + hr, err := hash.NewReaderWithOpts(ctx, bytes.NewReader(fanOutBuf), 
hopts) if err != nil { errs[idx] = err return @@ -91,7 +91,7 @@ func fanOutPutObject(ctx context.Context, bucket string, objectAPI ObjectLayer, } // do not try to verify encrypted content/ - hr, err = hash.NewReader(encrd, -1, "", "", -1) + hr, err = hash.NewReader(ctx, encrd, -1, "", "", -1) if err != nil { errs[idx] = err return diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index 635b37119..8bf12bdff 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -162,7 +162,7 @@ func calculateSignedChunkLength(chunkDataSize int64) int64 { } func mustGetPutObjReader(t TestErrHandler, data io.Reader, size int64, md5hex, sha256hex string) *PutObjReader { - hr, err := hash.NewReader(data, size, md5hex, sha256hex, size) + hr, err := hash.NewReader(context.Background(), data, size, md5hex, sha256hex, size) if err != nil { t.Fatal(err) } diff --git a/cmd/tier.go b/cmd/tier.go index ea1e28994..498285a4f 100644 --- a/cmd/tier.go +++ b/cmd/tier.go @@ -287,7 +287,7 @@ func (config *TierConfigMgr) getDriver(tierName string) (d WarmBackend, err erro // using a PutObject API. PutObjReader encrypts json encoded tier configurations // if KMS is enabled, otherwise simply yields the json encoded bytes as is. // Similarly, ObjectOptions value depends on KMS' status. 
-func (config *TierConfigMgr) configReader() (*PutObjReader, *ObjectOptions, error) { +func (config *TierConfigMgr) configReader(ctx context.Context) (*PutObjReader, *ObjectOptions, error) { b, err := config.Bytes() if err != nil { return nil, nil, err } @@ -295,7 +295,7 @@ func (config *TierConfigMgr) configReader() (*PutObjReader, *ObjectOptions, erro payloadSize := int64(len(b)) br := bytes.NewReader(b) - hr, err := hash.NewReader(br, payloadSize, "", "", payloadSize) + hr, err := hash.NewReader(ctx, br, payloadSize, "", "", payloadSize) if err != nil { return nil, nil, err } @@ -318,7 +318,7 @@ func (config *TierConfigMgr) configReader() (*PutObjReader, *ObjectOptions, erro Size: payloadSize, } encSize := info.EncryptedSize() - encHr, err := hash.NewReader(encBr, encSize, "", "", encSize) + encHr, err := hash.NewReader(ctx, encBr, encSize, "", "", encSize) if err != nil { return nil, nil, err } @@ -371,7 +371,7 @@ func (config *TierConfigMgr) Save(ctx context.Context, objAPI ObjectLayer) error return errServerNotInitialized } - pr, opts, err := globalTierConfigMgr.configReader() + pr, opts, err := globalTierConfigMgr.configReader(ctx) if err != nil { return err } diff --git a/internal/etag/etag.go b/internal/etag/etag.go index 63f182c52..e18bf7748 100644 --- a/internal/etag/etag.go +++ b/internal/etag/etag.go @@ -119,6 +119,7 @@ import ( "github.com/minio/minio/internal/fips" "github.com/minio/minio/internal/hash/sha256" + xhttp "github.com/minio/minio/internal/http" "github.com/minio/sio" ) @@ -263,6 +264,12 @@ func FromContentMD5(h http.Header) (ETag, error) { return ETag(b), nil } +// ContentMD5Requested - returns true if the request carries a Content-Md5 header +func ContentMD5Requested(h http.Header) bool { + _, ok := h[xhttp.ContentMD5] + return ok +} + // Multipart computes an S3 multipart ETag given a list of // S3 singlepart ETags. It returns nil if the list of // ETags is empty. 
diff --git a/internal/etag/etag_test.go b/internal/etag/etag_test.go index f649034c4..288be862e 100644 --- a/internal/etag/etag_test.go +++ b/internal/etag/etag_test.go @@ -18,6 +18,7 @@ package etag import ( + "context" "io" "net/http" "strings" @@ -137,7 +138,7 @@ var readerTests = []struct { // Reference values computed by: echo | m func TestReader(t *testing.T) { for i, test := range readerTests { - reader := NewReader(strings.NewReader(test.Content), test.ETag) + reader := NewReader(context.Background(), strings.NewReader(test.Content), test.ETag, nil) if _, err := io.Copy(io.Discard, reader); err != nil { t.Fatalf("Test %d: read failed: %v", i, err) } diff --git a/internal/etag/reader.go b/internal/etag/reader.go index 3dd9c41c9..4dc110b28 100644 --- a/internal/etag/reader.go +++ b/internal/etag/reader.go @@ -18,6 +18,7 @@ package etag import ( + "context" "crypto/md5" "fmt" "hash" @@ -102,12 +103,19 @@ type Reader struct { // If the provided etag is not nil the returned // Reader compares the etag with the computed // MD5 sum once the r returns io.EOF. 
-func NewReader(r io.Reader, etag ETag) *Reader { +func NewReader(ctx context.Context, r io.Reader, etag ETag, forceMD5 []byte) *Reader { if er, ok := r.(*Reader); ok { if er.readN == 0 && Equal(etag, er.checksum) { return er } } + if len(forceMD5) != 0 { + return &Reader{ + src: r, + md5: NewUUIDHash(forceMD5), + checksum: etag, + } + } return &Reader{ src: r, md5: md5.New(), @@ -153,3 +161,40 @@ type VerifyError struct { func (v VerifyError) Error() string { return fmt.Sprintf("etag: expected ETag %q does not match computed ETag %q", v.Expected, v.Computed) } + +// UUIDHash - a hash.Hash implementation that ignores its input and returns a fixed UUID as the checksum +type UUIDHash struct { + uuid []byte +} + +// Write - implement hash.Hash Write +func (u UUIDHash) Write(p []byte) (n int, err error) { + return len(p), nil +} + +// Sum - implement hash.Hash Sum; always returns the fixed UUID bytes (ignores b) +func (u UUIDHash) Sum(b []byte) []byte { + return u.uuid +} + +// Reset - implement hash.Hash Reset +func (u UUIDHash) Reset() { + return +} + +// Size - implement hash.Hash Size +func (u UUIDHash) Size() int { + return len(u.uuid) +} + +// BlockSize - implement hash.Hash BlockSize +func (u UUIDHash) BlockSize() int { + return md5.BlockSize +} + +var _ hash.Hash = &UUIDHash{} + +// NewUUIDHash - new UUIDHash +func NewUUIDHash(uuid []byte) *UUIDHash { + return &UUIDHash{uuid: uuid} +} diff --git a/internal/hash/reader.go b/internal/hash/reader.go index 3b3d895bd..f849a69f0 100644 --- a/internal/hash/reader.go +++ b/internal/hash/reader.go @@ -19,6 +19,7 @@ package hash import ( "bytes" + "context" "encoding/base64" "encoding/hex" "errors" @@ -71,13 +72,14 @@ type Options struct { Size int64 ActualSize int64 DisableMD5 bool + ForceMD5 []byte } // NewReaderWithOpts is like NewReader but takes `Options` as argument, allowing // callers to indicate if they want to disable md5sum checksum. 
-func NewReaderWithOpts(src io.Reader, opts Options) (*Reader, error) { +func NewReaderWithOpts(ctx context.Context, src io.Reader, opts Options) (*Reader, error) { // return hard limited reader - return newReader(src, opts.Size, opts.MD5Hex, opts.SHA256Hex, opts.ActualSize, opts.DisableMD5) + return newReader(ctx, src, opts.Size, opts.MD5Hex, opts.SHA256Hex, opts.ActualSize, opts.DisableMD5, opts.ForceMD5) } // NewReader returns a new Reader that wraps src and computes @@ -95,11 +97,11 @@ func NewReaderWithOpts(src io.Reader, opts Options) (*Reader, error) { // checksums multiple times. // NewReader enforces S3 compatibility strictly by ensuring caller // does not send more content than specified size. -func NewReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*Reader, error) { - return newReader(src, size, md5Hex, sha256Hex, actualSize, false) +func NewReader(ctx context.Context, src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) (*Reader, error) { + return newReader(ctx, src, size, md5Hex, sha256Hex, actualSize, false, nil) } -func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64, disableMD5 bool) (*Reader, error) { +func newReader(ctx context.Context, src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64, disableMD5 bool, forceMD5 []byte) (*Reader, error) { MD5, err := hex.DecodeString(md5Hex) if err != nil { return nil, BadDigest{ // TODO(aead): Return an error that indicates that an invalid ETag has been specified @@ -153,7 +155,7 @@ func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i r := ioutil.HardLimitReader(src, size) if !disableMD5 { if _, ok := src.(etag.Tagger); !ok { - src = etag.NewReader(r, MD5) + src = etag.NewReader(ctx, r, MD5, forceMD5) } else { src = etag.Wrap(r, src) } @@ -162,7 +164,7 @@ func newReader(src io.Reader, size int64, md5Hex, sha256Hex string, actualSize i } } else if _, ok := src.(etag.Tagger); !ok { if 
!disableMD5 { - src = etag.NewReader(src, MD5) + src = etag.NewReader(ctx, src, MD5, forceMD5) } } var h hash.Hash diff --git a/internal/hash/reader_test.go b/internal/hash/reader_test.go index c3f9ffb19..314efc57e 100644 --- a/internal/hash/reader_test.go +++ b/internal/hash/reader_test.go @@ -19,6 +19,7 @@ package hash import ( "bytes" + "context" "encoding/base64" "encoding/hex" "fmt" @@ -30,7 +31,7 @@ import ( // Tests functions like Size(), MD5*(), SHA256*() func TestHashReaderHelperMethods(t *testing.T) { - r, err := NewReader(bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 4) + r, err := NewReader(context.Background(), bytes.NewReader([]byte("abcd")), 4, "e2fc714c4727ee9395f324cd2e7f331f", "88d4266fd4e6338d13b845fcf289579d209c897823b9217da3e161936f031589", 4) if err != nil { t.Fatal(err) } @@ -194,7 +195,7 @@ func TestHashReaderVerification(t *testing.T) { } for i, testCase := range testCases { t.Run(fmt.Sprintf("case-%d", i+1), func(t *testing.T) { - r, err := NewReader(testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize) + r, err := NewReader(context.Background(), testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize) if err != nil { t.Fatalf("Test %q: Initializing reader failed %s", testCase.desc, err) } @@ -213,7 +214,7 @@ func TestHashReaderVerification(t *testing.T) { } func mustReader(t *testing.T, src io.Reader, size int64, md5Hex, sha256Hex string, actualSize int64) *Reader { - r, err := NewReader(src, size, md5Hex, sha256Hex, actualSize) + r, err := NewReader(context.Background(), src, size, md5Hex, sha256Hex, actualSize) if err != nil { t.Fatal(err) } @@ -303,7 +304,7 @@ func TestHashReaderInvalidArguments(t *testing.T) { for i, testCase := range testCases { t.Run(fmt.Sprintf("case-%d", i+1), func(t *testing.T) { - _, err := NewReader(testCase.src, testCase.size, testCase.md5hex, 
testCase.sha256hex, testCase.actualSize) + _, err := NewReader(context.Background(), testCase.src, testCase.size, testCase.md5hex, testCase.sha256hex, testCase.actualSize) if err != nil && testCase.success { t.Errorf("Test %q: Expected success, but got error %s instead", testCase.desc, err) }