remove checkBucketExist check entirely to avoid fan-out calls (#18917)

Put, List and Multipart operations all rely heavily on a
GetBucketInfo() call to verify, on every request, whether the
bucket exists. This check carries a large performance cost
when many servers are involved.

We previously optimized this path by vectorizing the bucket
calls; however, that is not enough. Beyond 100 nodes the check
becomes clearly visible in overall performance.
Harshavardhana 2024-01-30 12:43:25 -08:00 committed by GitHub
parent a669946357
commit 80ca120088
38 changed files with 408 additions and 291 deletions
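
The shape of the change, in brief: instead of checking bucket existence up front with a fan-out GetBucketInfo(), the caller's bucket name is threaded down to the storage layer as a new leading origvolume/origbucket argument on calls such as CreateFile, ReadVersion, WriteMetadata and ListDir; drives report errVolumeNotFound when that bucket is gone, and callers map it to a bucket-level error. A minimal, self-contained sketch of the idea — disk and fakeDisk are hypothetical stand-ins, not MinIO code:

package main

import (
	"context"
	"errors"
	"fmt"
)

var errVolumeNotFound = errors.New("volume not found")

// disk is a trimmed-down stand-in for MinIO's StorageAPI interface.
type disk interface {
	CreateFile(ctx context.Context, origvolume, volume, path string, size int64) error
}

// fakeDisk shows the new contract: the drive validates the originating
// bucket inside the same call that does the work, so no separate
// GetBucketInfo() fan-out is needed beforehand.
type fakeDisk struct{ buckets map[string]bool }

var _ disk = fakeDisk{}

func (f fakeDisk) CreateFile(ctx context.Context, origvolume, volume, path string, size int64) error {
	if origvolume != "" && !f.buckets[origvolume] {
		return errVolumeNotFound // callers map this to BucketNotFound
	}
	return nil // pretend the write succeeded
}

func main() {
	d := fakeDisk{buckets: map[string]bool{"photos": true}}
	ctx := context.Background()
	// origvolume carries the user's bucket; volume is the internal target.
	fmt.Println(d.CreateFile(ctx, "photos", ".minio.sys/tmp", "part.1", -1))  // <nil>
	fmt.Println(d.CreateFile(ctx, "missing", ".minio.sys/tmp", "part.1", -1)) // volume not found
}

An empty origvolume means "nothing to validate"; the diff below passes "" at internal call sites (metadata buckets, tests) and the request's bucket on user-facing paths.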


@@ -90,7 +90,7 @@ func newStreamingBitrotWriterBuffer(w io.Writer, algo BitrotAlgorithm, shardSize
 }

 // Returns streaming bitrot writer implementation.
-func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
+func newStreamingBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
 	r, w := io.Pipe()
 	h := algo.New()
@@ -110,7 +110,7 @@ func newStreamingBitrotWriter(disk StorageAPI, volume, filePath string, length i
 			bitrotSumsTotalSize := ceilFrac(length, shardSize) * int64(h.Size()) // Size used for storing bitrot checksums.
 			totalFileSize = bitrotSumsTotalSize + length
 		}
-		r.CloseWithError(disk.CreateFile(context.TODO(), volume, filePath, totalFileSize, r))
+		r.CloseWithError(disk.CreateFile(context.TODO(), origvolume, volume, filePath, totalFileSize, r))
 	}()
 	return bw
 }


@@ -102,9 +102,9 @@ func BitrotAlgorithmFromString(s string) (a BitrotAlgorithm) {
 	return
 }

-func newBitrotWriter(disk StorageAPI, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
+func newBitrotWriter(disk StorageAPI, origvolume, volume, filePath string, length int64, algo BitrotAlgorithm, shardSize int64) io.Writer {
 	if algo == HighwayHash256S {
-		return newStreamingBitrotWriter(disk, volume, filePath, length, algo, shardSize)
+		return newStreamingBitrotWriter(disk, origvolume, volume, filePath, length, algo, shardSize)
 	}
 	return newWholeBitrotWriter(disk, volume, filePath, algo, shardSize)
 }


@@ -36,7 +36,7 @@ func testBitrotReaderWriterAlgo(t *testing.T, bitrotAlgo BitrotAlgorithm) {
 	disk.MakeVol(context.Background(), volume)
-	writer := newBitrotWriter(disk, volume, filePath, 35, bitrotAlgo, 10)
+	writer := newBitrotWriter(disk, "", volume, filePath, 35, bitrotAlgo, 10)
 	_, err = writer.Write([]byte("aaaaaaaaaa"))
 	if err != nil {


@@ -3393,7 +3393,7 @@ func (p *ReplicationPool) persistToDrive(ctx context.Context, v MRFReplicateEntr
 	for _, localDrive := range localDrives {
 		r := newReader()
-		err := localDrive.CreateFile(ctx, minioMetaBucket, pathJoin(replicationMRFDir, globalLocalNodeNameHex+".bin"), -1, r)
+		err := localDrive.CreateFile(ctx, "", minioMetaBucket, pathJoin(replicationMRFDir, globalLocalNodeNameHex+".bin"), -1, r)
 		r.Close()
 		if err == nil {
 			break


@@ -106,7 +106,7 @@ func TestErasureDecode(t *testing.T) {
 		buffer := make([]byte, test.blocksize, 2*test.blocksize)
 		writers := make([]io.Writer, len(disks))
 		for i, disk := range disks {
-			writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize())
+			writers[i] = newBitrotWriter(disk, "", "testbucket", "object", erasure.ShardFileSize(test.data), writeAlgorithm, erasure.ShardSize())
 		}
 		n, err := erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
 		closeBitrotWriters(writers)
@@ -228,7 +228,7 @@ func TestErasureDecodeRandomOffsetLength(t *testing.T) {
 		if disk == nil {
 			continue
 		}
-		writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(length), DefaultBitrotAlgorithm, erasure.ShardSize())
+		writers[i] = newBitrotWriter(disk, "", "testbucket", "object", erasure.ShardFileSize(length), DefaultBitrotAlgorithm, erasure.ShardSize())
 	}
 	// 10000 iterations with random offsets and lengths.
@@ -297,7 +297,7 @@ func benchmarkErasureDecode(data, parity, dataDown, parityDown int, size int64,
 		if disk == nil {
 			continue
 		}
-		writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
+		writers[i] = newBitrotWriter(disk, "", "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
 	}
 	content := make([]byte, size)


@@ -41,7 +41,7 @@ func (a badDisk) ReadFileStream(ctx context.Context, volume, path string, offset
 	return nil, errFaultyDisk
 }

-func (a badDisk) CreateFile(ctx context.Context, volume, path string, size int64, reader io.Reader) error {
+func (a badDisk) CreateFile(ctx context.Context, origvolume, volume, path string, size int64, reader io.Reader) error {
 	return errFaultyDisk
 }
@@ -103,7 +103,7 @@ func TestErasureEncode(t *testing.T) {
 		if disk == OfflineDisk {
 			continue
 		}
-		writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
+		writers[i] = newBitrotWriter(disk, "", "testbucket", "object", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
 	}
 	n, err := erasure.Encode(context.Background(), bytes.NewReader(data[test.offset:]), writers, buffer, erasure.dataBlocks+1)
 	closeBitrotWriters(writers)
@@ -127,7 +127,7 @@ func TestErasureEncode(t *testing.T) {
 		if disk == nil {
 			continue
 		}
-		writers[i] = newBitrotWriter(disk, "testbucket", "object2", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
+		writers[i] = newBitrotWriter(disk, "", "testbucket", "object2", erasure.ShardFileSize(int64(len(data[test.offset:]))), test.algorithm, erasure.ShardSize())
 	}
 	for j := range disks[:test.offDisks] {
 		switch w := writers[j].(type) {
@@ -192,7 +192,7 @@ func benchmarkErasureEncode(data, parity, dataDown, parityDown int, size int64,
 			Recursive: false,
 			Immediate: false,
 		})
-		writers[i] = newBitrotWriter(disk, "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
+		writers[i] = newBitrotWriter(disk, "", "testbucket", "object", erasure.ShardFileSize(size), DefaultBitrotAlgorithm, erasure.ShardSize())
 	}
 	_, err := erasure.Encode(context.Background(), bytes.NewReader(content), writers, buffer, erasure.dataBlocks+1)
 	closeBitrotWriters(writers)


@@ -86,7 +86,7 @@ func TestErasureHeal(t *testing.T) {
 		buffer := make([]byte, test.blocksize, 2*test.blocksize)
 		writers := make([]io.Writer, len(disks))
 		for i, disk := range disks {
-			writers[i] = newBitrotWriter(disk, "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
+			writers[i] = newBitrotWriter(disk, "", "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
 		}
 		_, err = erasure.Encode(context.Background(), bytes.NewReader(data), writers, buffer, erasure.dataBlocks+1)
 		closeBitrotWriters(writers)
@@ -128,7 +128,7 @@ func TestErasureHeal(t *testing.T) {
 			continue
 		}
 		os.Remove(pathJoin(disk.String(), "testbucket", "testobject"))
-		staleWriters[i] = newBitrotWriter(disk, "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
+		staleWriters[i] = newBitrotWriter(disk, "", "testbucket", "testobject", erasure.ShardFileSize(test.size), test.algorithm, erasure.ShardSize())
 	}
 	// test case setup is complete - now call Heal()


@@ -246,7 +246,7 @@ func TestListOnlineDisks(t *testing.T) {
 			t.Fatalf("Failed to putObject %v", err)
 		}
-		partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false, true)
+		partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
 		fi, err := getLatestFileInfo(ctx, partsMetadata, z.serverPools[0].sets[0].defaultParityCount, errs)
 		if err != nil {
 			t.Fatalf("Failed to getLatestFileInfo %v", err)
@@ -424,7 +424,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
 			t.Fatalf("Failed to putObject %v", err)
 		}
-		partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", true, true)
+		partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", true, true)
 		fi, err := getLatestFileInfo(ctx, partsMetadata, z.serverPools[0].sets[0].defaultParityCount, errs)
 		if err != nil {
 			t.Fatalf("Failed to getLatestFileInfo %v", err)
@@ -437,7 +437,7 @@ func TestListOnlineDisksSmallObjects(t *testing.T) {
 			partsMetadata[j].ModTime = test.modTimes[j]
 		}
-		if erasureDisks, err = writeUniqueFileInfo(ctx, erasureDisks, bucket, object, partsMetadata, diskCount(erasureDisks)); err != nil {
+		if erasureDisks, err = writeUniqueFileInfo(ctx, erasureDisks, "", bucket, object, partsMetadata, diskCount(erasureDisks)); err != nil {
 			t.Fatal(ctx, err)
 		}
@@ -534,7 +534,7 @@ func TestDisksWithAllParts(t *testing.T) {
 		t.Fatalf("Failed to putObject %v", err)
 	}
-	_, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false, true)
+	_, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
 	readQuorum := len(erasureDisks) / 2
 	if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
 		t.Fatalf("Failed to read xl meta data %v", reducedErr)
@@ -542,7 +542,7 @@ func TestDisksWithAllParts(t *testing.T) {
 	// Test 1: Test that all disks are returned without any failures with
 	// unmodified meta data
-	partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false, true)
+	partsMetadata, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
 	if err != nil {
 		t.Fatalf("Failed to read xl meta data %v", err)
 	}


@@ -230,7 +230,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
 	}

 	// Re-read when we have lock...
-	partsMetadata, errs := readAllFileInfo(ctx, storageDisks, bucket, object, versionID, true, true)
+	partsMetadata, errs := readAllFileInfo(ctx, storageDisks, "", bucket, object, versionID, true, true)
 	if isAllNotFound(errs) {
 		err := errFileNotFound
 		if versionID != "" {
@@ -518,7 +518,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
 			inlineBuffers[i] = bytes.NewBuffer(make([]byte, 0, erasure.ShardFileSize(latestMeta.Size)+32))
 			writers[i] = newStreamingBitrotWriterBuffer(inlineBuffers[i], DefaultBitrotAlgorithm, erasure.ShardSize())
 		} else {
-			writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, partPath,
+			writers[i] = newBitrotWriter(disk, bucket, minioMetaTmpBucket, partPath,
 				tillOffset, DefaultBitrotAlgorithm, erasure.ShardSize())
 		}
 	}
@@ -796,7 +796,7 @@ func statAllDirs(ctx context.Context, storageDisks []StorageAPI, bucket, prefix
 		}
 		index := index
 		g.Go(func() error {
-			entries, err := storageDisks[index].ListDir(ctx, bucket, prefix, 1)
+			entries, err := storageDisks[index].ListDir(ctx, "", bucket, prefix, 1)
 			if err != nil {
 				return err
 			}
@@ -810,6 +810,10 @@ func statAllDirs(ctx context.Context, storageDisks []StorageAPI, bucket, prefix
 	return g.Wait()
 }

+func isAllVolumeNotFound(errs []error) bool {
+	return countErrs(errs, errVolumeNotFound) == len(errs)
+}
+
 // isAllNotFound will return if any element of the error slice is not
 // errFileNotFound, errFileVersionNotFound or errVolumeNotFound.
 // A 0 length slice will always return false.
@@ -993,7 +997,7 @@ func (er erasureObjects) HealObject(ctx context.Context, bucket, object, version
 	// Perform quick read without lock.
 	// This allows to quickly check if all is ok or all are missing.
-	_, errs := readAllFileInfo(healCtx, storageDisks, bucket, object, versionID, false, false)
+	_, errs := readAllFileInfo(healCtx, storageDisks, "", bucket, object, versionID, false, false)
 	if isAllNotFound(errs) {
 		err := errFileNotFound
 		if versionID != "" {


@@ -266,7 +266,7 @@ func TestHealing(t *testing.T) {
 	}
 	disk := er.getDisks()[0]
-	fileInfoPreHeal, err := disk.ReadVersion(context.Background(), bucket, object, "", ReadOptions{ReadData: false, Healing: true})
+	fileInfoPreHeal, err := disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -289,7 +289,7 @@ func TestHealing(t *testing.T) {
 		t.Fatal(err)
 	}
-	fileInfoPostHeal, err := disk.ReadVersion(context.Background(), bucket, object, "", ReadOptions{ReadData: false, Healing: true})
+	fileInfoPostHeal, err := disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -308,7 +308,7 @@ func TestHealing(t *testing.T) {
 	// gone down when an object was replaced by a new object.
 	fileInfoOutDated := fileInfoPreHeal
 	fileInfoOutDated.ModTime = time.Now()
-	err = disk.WriteMetadata(context.Background(), bucket, object, fileInfoOutDated)
+	err = disk.WriteMetadata(context.Background(), "", bucket, object, fileInfoOutDated)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -318,7 +318,7 @@ func TestHealing(t *testing.T) {
 		t.Fatal(err)
 	}
-	fileInfoPostHeal, err = disk.ReadVersion(context.Background(), bucket, object, "", ReadOptions{ReadData: false, Healing: true})
+	fileInfoPostHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -426,11 +426,11 @@ func TestHealingVersioned(t *testing.T) {
 	}
 	disk := er.getDisks()[0]
-	fileInfoPreHeal1, err := disk.ReadVersion(context.Background(), bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true})
+	fileInfoPreHeal1, err := disk.ReadVersion(context.Background(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
-	fileInfoPreHeal2, err := disk.ReadVersion(context.Background(), bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true})
+	fileInfoPreHeal2, err := disk.ReadVersion(context.Background(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -453,11 +453,11 @@ func TestHealingVersioned(t *testing.T) {
 		t.Fatal(err)
 	}
-	fileInfoPostHeal1, err := disk.ReadVersion(context.Background(), bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true})
+	fileInfoPostHeal1, err := disk.ReadVersion(context.Background(), "", bucket, object, oi1.VersionID, ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
-	fileInfoPostHeal2, err := disk.ReadVersion(context.Background(), bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true})
+	fileInfoPostHeal2, err := disk.ReadVersion(context.Background(), "", bucket, object, oi2.VersionID, ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -479,7 +479,7 @@ func TestHealingVersioned(t *testing.T) {
 	// gone down when an object was replaced by a new object.
 	fileInfoOutDated := fileInfoPreHeal1
 	fileInfoOutDated.ModTime = time.Now()
-	err = disk.WriteMetadata(context.Background(), bucket, object, fileInfoOutDated)
+	err = disk.WriteMetadata(context.Background(), "", bucket, object, fileInfoOutDated)
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -489,7 +489,7 @@ func TestHealingVersioned(t *testing.T) {
 		t.Fatal(err)
 	}
-	fileInfoPostHeal1, err = disk.ReadVersion(context.Background(), bucket, object, "", ReadOptions{ReadData: false, Healing: true})
+	fileInfoPostHeal1, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -499,7 +499,7 @@ func TestHealingVersioned(t *testing.T) {
 		t.Fatal("HealObject failed")
 	}
-	fileInfoPostHeal2, err = disk.ReadVersion(context.Background(), bucket, object, "", ReadOptions{ReadData: false, Healing: true})
+	fileInfoPostHeal2, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -637,7 +637,7 @@ func TestHealingDanglingObject(t *testing.T) {
 	// Restore...
 	setDisks(orgDisks[:4]...)
-	fileInfoPreHeal, err := disks[0].ReadVersion(context.Background(), bucket, object, "", ReadOptions{ReadData: false, Healing: true})
+	fileInfoPreHeal, err := disks[0].ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -654,7 +654,7 @@ func TestHealingDanglingObject(t *testing.T) {
 		t.Fatal(err)
 	}
-	fileInfoPostHeal, err := disks[0].ReadVersion(context.Background(), bucket, object, "", ReadOptions{ReadData: false, Healing: true})
+	fileInfoPostHeal, err := disks[0].ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -684,7 +684,7 @@ func TestHealingDanglingObject(t *testing.T) {
 	setDisks(orgDisks[:4]...)
 	disk := getDisk(0)
-	fileInfoPreHeal, err = disk.ReadVersion(context.Background(), bucket, object, "", ReadOptions{ReadData: false, Healing: true})
+	fileInfoPreHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -702,7 +702,7 @@ func TestHealingDanglingObject(t *testing.T) {
 	}
 	disk = getDisk(0)
-	fileInfoPostHeal, err = disk.ReadVersion(context.Background(), bucket, object, "", ReadOptions{ReadData: false, Healing: true})
+	fileInfoPostHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -733,7 +733,7 @@ func TestHealingDanglingObject(t *testing.T) {
 	setDisks(orgDisks[:4]...)
 	disk = getDisk(0)
-	fileInfoPreHeal, err = disk.ReadVersion(context.Background(), bucket, object, "", ReadOptions{ReadData: false, Healing: true})
+	fileInfoPreHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -751,7 +751,7 @@ func TestHealingDanglingObject(t *testing.T) {
 	}
 	disk = getDisk(0)
-	fileInfoPostHeal, err = disk.ReadVersion(context.Background(), bucket, object, "", ReadOptions{ReadData: false, Healing: true})
+	fileInfoPostHeal, err = disk.ReadVersion(context.Background(), "", bucket, object, "", ReadOptions{ReadData: false, Healing: true})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -836,7 +836,7 @@ func TestHealCorrectQuorum(t *testing.T) {
 		er := set.sets[0]
 		erasureDisks := er.getDisks()
-		fileInfos, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false, true)
+		fileInfos, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
 		nfi, err := getLatestFileInfo(ctx, fileInfos, er.defaultParityCount, errs)
 		if errors.Is(err, errFileNotFound) {
 			continue
@@ -858,12 +858,12 @@ func TestHealCorrectQuorum(t *testing.T) {
 			t.Fatal(err)
 		}
-		fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "", false, true)
+		fileInfos, errs = readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
 		if countErrs(errs, nil) != len(fileInfos) {
 			t.Fatal("Expected all xl.meta healed, but partial heal detected")
 		}
-		fileInfos, errs = readAllFileInfo(ctx, erasureDisks, minioMetaBucket, cfgFile, "", false, true)
+		fileInfos, errs = readAllFileInfo(ctx, erasureDisks, "", minioMetaBucket, cfgFile, "", false, true)
 		nfi, err = getLatestFileInfo(ctx, fileInfos, er.defaultParityCount, errs)
 		if errors.Is(err, errFileNotFound) {
 			continue
@@ -885,7 +885,7 @@ func TestHealCorrectQuorum(t *testing.T) {
 			t.Fatal(err)
 		}
-		fileInfos, errs = readAllFileInfo(ctx, erasureDisks, minioMetaBucket, cfgFile, "", false, true)
+		fileInfos, errs = readAllFileInfo(ctx, erasureDisks, "", minioMetaBucket, cfgFile, "", false, true)
 		if countErrs(errs, nil) != len(fileInfos) {
 			t.Fatal("Expected all xl.meta healed, but partial heal detected")
 		}
@@ -970,7 +970,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
 			t.Fatalf("Failed to heal object - %v", err)
 		}
-		fileInfos, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false, true)
+		fileInfos, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
 		fi, err := getLatestFileInfo(ctx, fileInfos, er.defaultParityCount, errs)
 		if err != nil {
 			t.Fatalf("Failed to getLatestFileInfo - %v", err)
@@ -998,7 +998,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
 			t.Errorf("Expected nil but received %v", err)
 		}
-		fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "", false, true)
+		fileInfos, errs = readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
 		nfi, err := getLatestFileInfo(ctx, fileInfos, er.defaultParityCount, errs)
 		if err != nil {
 			t.Fatalf("Failed to getLatestFileInfo - %v", err)
@@ -1029,7 +1029,7 @@ func TestHealObjectCorruptedPools(t *testing.T) {
 			t.Errorf("Expected nil but received %v", err)
 		}
-		fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "", false, true)
+		fileInfos, errs = readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
 		nfi, err = getLatestFileInfo(ctx, fileInfos, er.defaultParityCount, errs)
 		if err != nil {
 			t.Fatalf("Failed to getLatestFileInfo - %v", err)
@@ -1133,7 +1133,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
 	firstDisk := erasureDisks[0]

 	// Test 1: Remove the object backend files from the first disk.
-	fileInfos, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false, true)
+	fileInfos, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
 	fi, err := getLatestFileInfo(ctx, fileInfos, er.defaultParityCount, errs)
 	if err != nil {
 		t.Fatalf("Failed to getLatestFileInfo - %v", err)
@@ -1156,7 +1156,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
 		t.Errorf("Expected xl.meta file to be present but stat failed - %v", err)
 	}
-	fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "", false, true)
+	fileInfos, errs = readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
 	nfi1, err := getLatestFileInfo(ctx, fileInfos, er.defaultParityCount, errs)
 	if err != nil {
 		t.Fatalf("Failed to getLatestFileInfo - %v", err)
@@ -1179,7 +1179,7 @@ func TestHealObjectCorruptedXLMeta(t *testing.T) {
 		t.Errorf("Expected nil but received %v", err)
 	}
-	fileInfos, errs = readAllFileInfo(ctx, erasureDisks, bucket, object, "", false, true)
+	fileInfos, errs = readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
 	nfi2, err := getLatestFileInfo(ctx, fileInfos, er.defaultParityCount, errs)
 	if err != nil {
 		t.Fatalf("Failed to getLatestFileInfo - %v", err)
@@ -1277,7 +1277,7 @@ func TestHealObjectCorruptedParts(t *testing.T) {
 	firstDisk := erasureDisks[0]
 	secondDisk := erasureDisks[1]
-	fileInfos, errs := readAllFileInfo(ctx, erasureDisks, bucket, object, "", false, true)
+	fileInfos, errs := readAllFileInfo(ctx, erasureDisks, "", bucket, object, "", false, true)
 	fi, err := getLatestFileInfo(ctx, fileInfos, er.defaultParityCount, errs)
 	if err != nil {
 		t.Fatalf("Failed to getLatestFileInfo - %v", err)


@@ -156,8 +156,8 @@ var readFileInfoIgnoredErrs = append(objectOpIgnoredErrs,
 	io.EOF, // some times we would read without locks, ignore these errors
 )

-func readFileInfo(ctx context.Context, disk StorageAPI, bucket, object, versionID string, opts ReadOptions) (FileInfo, error) {
-	fi, err := disk.ReadVersion(ctx, bucket, object, versionID, opts)
+func readFileInfo(ctx context.Context, disk StorageAPI, origbucket, bucket, object, versionID string, opts ReadOptions) (FileInfo, error) {
+	fi, err := disk.ReadVersion(ctx, origbucket, bucket, object, versionID, opts)
 	if err != nil && !IsErr(err, readFileInfoIgnoredErrs...) {
 		logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)",
@@ -170,7 +170,7 @@ func readFileInfo(ctx context.Context, disk StorageAPI, bucket, object, versionI
 // Reads all `xl.meta` metadata as a FileInfo slice.
 // Returns error slice indicating the failed metadata reads.
-func readAllFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, versionID string, readData, healing bool) ([]FileInfo, []error) {
+func readAllFileInfo(ctx context.Context, disks []StorageAPI, origbucket string, bucket, object, versionID string, readData, healing bool) ([]FileInfo, []error) {
 	metadataArray := make([]FileInfo, len(disks))
 	opts := ReadOptions{
@@ -186,7 +186,7 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, bucket, object, ve
 			if disks[index] == nil {
 				return errDiskNotFound
 			}
-			metadataArray[index], err = readFileInfo(ctx, disks[index], bucket, object, versionID, opts)
+			metadataArray[index], err = readFileInfo(ctx, disks[index], origbucket, bucket, object, versionID, opts)
 			return err
 		}, index)
 	}


@@ -381,7 +381,7 @@ func pickValidFileInfo(ctx context.Context, metaArr []FileInfo, modTime time.Tim
 }

 // writeUniqueFileInfo - writes unique `xl.meta` content for each disk concurrently.
-func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) {
+func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, origbucket, bucket, prefix string, files []FileInfo, quorum int) ([]StorageAPI, error) {
 	g := errgroup.WithNErrs(len(disks))

 	// Start writing `xl.meta` to all disks in parallel.
@@ -395,7 +395,7 @@ func writeUniqueFileInfo(ctx context.Context, disks []StorageAPI, bucket, prefix
 			fi := files[index]
 			fi.Erasure.Index = index + 1
 			if fi.IsValid() {
-				return disks[index].WriteMetadata(ctx, bucket, prefix, fi)
+				return disks[index].WriteMetadata(ctx, origbucket, bucket, prefix, fi)
 			}
 			return errCorruptedFormat
 		}, index)


@@ -61,7 +61,7 @@ func (er erasureObjects) getMultipartSHADir(bucket, object string) string {
 // checkUploadIDExists - verify if a given uploadID exists and is valid.
 func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object, uploadID string, write bool) (fi FileInfo, metArr []FileInfo, err error) {
 	defer func() {
-		if errors.Is(err, errFileNotFound) || errors.Is(err, errVolumeNotFound) {
+		if errors.Is(err, errFileNotFound) {
 			err = errUploadIDNotFound
 		}
 	}()
@@ -71,7 +71,7 @@ func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object
 	storageDisks := er.getDisks()

 	// Read metadata associated with the object from all disks.
-	partsMetadata, errs := readAllFileInfo(ctx, storageDisks, minioMetaMultipartBucket,
+	partsMetadata, errs := readAllFileInfo(ctx, storageDisks, bucket, minioMetaMultipartBucket,
 		uploadIDPath, "", false, false)

 	readQuorum, writeQuorum, err := objectQuorumFromMeta(ctx, partsMetadata, errs, er.defaultParityCount)
@@ -87,15 +87,14 @@ func (er erasureObjects) checkUploadIDExists(ctx context.Context, bucket, object
 	// List all online disks.
 	_, modTime, etag := listOnlineDisks(storageDisks, partsMetadata, errs, quorum)

+	var reducedErr error
 	if write {
-		reducedErr := reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
-		if errors.Is(reducedErr, errErasureWriteQuorum) {
-			return fi, nil, reducedErr
-		}
+		reducedErr = reduceWriteQuorumErrs(ctx, errs, objectOpIgnoredErrs, writeQuorum)
 	} else {
-		if reducedErr := reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum); reducedErr != nil {
-			return fi, nil, reducedErr
-		}
+		reducedErr = reduceReadQuorumErrs(ctx, errs, objectOpIgnoredErrs, readQuorum)
+	}
+	if reducedErr != nil {
+		return fi, nil, reducedErr
 	}

 	// Pick one from the first valid metadata.
@@ -200,7 +199,7 @@ func (er erasureObjects) cleanupStaleUploadsOnDisk(ctx context.Context, disk Sto
 	readDirFn(pathJoin(diskPath, minioMetaMultipartBucket), func(shaDir string, typ os.FileMode) error {
 		readDirFn(pathJoin(diskPath, minioMetaMultipartBucket, shaDir), func(uploadIDDir string, typ os.FileMode) error {
 			uploadIDPath := pathJoin(shaDir, uploadIDDir)
-			fi, err := disk.ReadVersion(ctx, minioMetaMultipartBucket, uploadIDPath, "", ReadOptions{})
+			fi, err := disk.ReadVersion(ctx, "", minioMetaMultipartBucket, uploadIDPath, "", ReadOptions{})
 			if err != nil {
 				return nil
 			}
@@ -281,15 +280,14 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
 		if !disk.IsOnline() {
 			continue
 		}
-		uploadIDs, err = disk.ListDir(ctx, minioMetaMultipartBucket, er.getMultipartSHADir(bucket, object), -1)
+		uploadIDs, err = disk.ListDir(ctx, bucket, minioMetaMultipartBucket, er.getMultipartSHADir(bucket, object), -1)
 		if err != nil {
 			if errors.Is(err, errDiskNotFound) {
 				continue
 			}
-			if errors.Is(err, errFileNotFound) || errors.Is(err, errVolumeNotFound) {
+			if errors.Is(err, errFileNotFound) {
 				return result, nil
 			}
-			logger.LogIf(ctx, err)
 			return result, toObjectErr(err, bucket, object)
 		}
 		break
@@ -486,8 +484,8 @@ func (er erasureObjects) newMultipartUpload(ctx context.Context, bucket string,
 	uploadIDPath := er.getUploadIDDir(bucket, object, uploadUUID)

 	// Write updated `xl.meta` to all disks.
-	if _, err := writeUniqueFileInfo(ctx, onlineDisks, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
-		return nil, toObjectErr(err, minioMetaMultipartBucket, uploadIDPath)
+	if _, err := writeUniqueFileInfo(ctx, onlineDisks, bucket, minioMetaMultipartBucket, uploadIDPath, partsMetadata, writeQuorum); err != nil {
+		return nil, toObjectErr(err, bucket, object)
 	}
 	return &NewMultipartUploadResult{
 		UploadID: uploadID,
@@ -582,6 +580,13 @@ func writeAllDisks(ctx context.Context, disks []StorageAPI, dstBucket, dstEntry
 func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, r *PutObjReader, opts ObjectOptions) (pi PartInfo, err error) {
 	auditObjectErasureSet(ctx, object, &er)

+	data := r.Reader
+	// Validate input data size and it can never be less than zero.
+	if data.Size() < -1 {
+		logger.LogIf(ctx, errInvalidArgument, logger.Application)
+		return pi, toObjectErr(errInvalidArgument)
+	}
+
 	// Read lock for upload id.
 	// Only held while reading the upload metadata.
 	uploadIDRLock := er.NewNSLock(bucket, pathJoin(object, uploadID))
@@ -596,16 +601,12 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 	// Validates if upload ID exists.
 	fi, _, err := er.checkUploadIDExists(rctx, bucket, object, uploadID, true)
 	if err != nil {
+		if errors.Is(err, errVolumeNotFound) {
+			return pi, toObjectErr(err, bucket)
+		}
 		return pi, toObjectErr(err, bucket, object, uploadID)
 	}

-	data := r.Reader
-	// Validate input data size and it can never be less than zero.
-	if data.Size() < -1 {
-		logger.LogIf(rctx, errInvalidArgument, logger.Application)
-		return pi, toObjectErr(errInvalidArgument)
-	}
-
 	// Write lock for this part ID, only hold it if we are planning to read from the
 	// streamto avoid any concurrent updates.
 	//
@@ -681,7 +682,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 		if disk == nil {
 			continue
 		}
-		writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tmpPartPath, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize())
+		writers[i] = newBitrotWriter(disk, bucket, minioMetaTmpBucket, tmpPartPath, erasure.ShardFileSize(data.Size()), DefaultBitrotAlgorithm, erasure.ShardSize())
 	}

 	toEncode := io.Reader(data)
@@ -794,6 +795,9 @@ func (er erasureObjects) GetMultipartInfo(ctx context.Context, bucket, object, u
 	fi, _, err := er.checkUploadIDExists(ctx, bucket, object, uploadID, false)
 	if err != nil {
+		if errors.Is(err, errVolumeNotFound) {
+			return result, toObjectErr(err, bucket)
+		}
 		return result, toObjectErr(err, bucket, object, uploadID)
 	}
@@ -979,6 +983,9 @@ func (er erasureObjects) CompleteMultipartUpload(ctx context.Context, bucket str
 	fi, partsMetadata, err := er.checkUploadIDExists(ctx, bucket, object, uploadID, true)
 	if err != nil {
+		if errors.Is(err, errVolumeNotFound) {
+			return oi, toObjectErr(err, bucket)
+		}
 		return oi, toObjectErr(err, bucket, object, uploadID)
 	}
@@ -1337,6 +1344,9 @@ func (er erasureObjects) AbortMultipartUpload(ctx context.Context, bucket, objec
 	// Validates if upload ID exists.
 	if _, _, err = er.checkUploadIDExists(ctx, bucket, object, uploadID, false); err != nil {
+		if errors.Is(err, errVolumeNotFound) {
+			return toObjectErr(err, bucket)
+		}
 		return toObjectErr(err, bucket, object, uploadID)
 	}
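
With the up-front bucket check gone, the multipart entry points above distinguish a missing bucket from a missing upload after the fact: errVolumeNotFound from checkUploadIDExists is attributed to the bucket alone, anything else to the upload ID. A runnable sketch of that error routing, with stub helpers standing in for the real checkUploadIDExists and toObjectErr:

package main

import (
	"context"
	"errors"
	"fmt"
)

var errVolumeNotFound = errors.New("volume not found")

// Stub: pretend the whole bucket is missing.
func checkUploadIDExists(ctx context.Context, bucket, object, uploadID string) error {
	return errVolumeNotFound
}

// Stub: the real toObjectErr maps internal errors to S3-level ones; with
// only a bucket argument, errVolumeNotFound becomes a BucketNotFound-style error.
func toObjectErr(err error, params ...string) error {
	return fmt.Errorf("%v (%v)", err, params)
}

// The post-commit shape of AbortMultipartUpload's error handling.
func abortMultipartUpload(ctx context.Context, bucket, object, uploadID string) error {
	if err := checkUploadIDExists(ctx, bucket, object, uploadID); err != nil {
		if errors.Is(err, errVolumeNotFound) {
			return toObjectErr(err, bucket) // bucket-level error
		}
		return toObjectErr(err, bucket, object, uploadID) // upload-level error
	}
	return nil // ... proceed to delete the upload's metadata ...
}

func main() {
	fmt.Println(abortMultipartUpload(context.Background(), "photos", "obj", "uid"))
}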


@@ -91,7 +91,7 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
 	// Read metadata associated with the object from all disks.
 	if srcOpts.VersionID != "" {
-		metaArr, errs = readAllFileInfo(ctx, storageDisks, srcBucket, srcObject, srcOpts.VersionID, true, false)
+		metaArr, errs = readAllFileInfo(ctx, storageDisks, "", srcBucket, srcObject, srcOpts.VersionID, true, false)
 	} else {
 		metaArr, errs = readAllXL(ctx, storageDisks, srcBucket, srcObject, true, false, true)
 	}
@@ -179,7 +179,7 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
 	}

 	// Write unique `xl.meta` for each disk.
-	if _, err = writeUniqueFileInfo(ctx, onlineDisks, srcBucket, srcObject, metaArr, writeQuorum); err != nil {
+	if _, err = writeUniqueFileInfo(ctx, onlineDisks, "", srcBucket, srcObject, metaArr, writeQuorum); err != nil {
 		return oi, toObjectErr(err, srcBucket, srcObject)
 	}
@@ -791,7 +791,7 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
 		if opts.VersionID != "" {
 			// Read a specific version ID
-			fi, err = readFileInfo(ctx, disk, bucket, object, opts.VersionID, ropts)
+			fi, err = readFileInfo(ctx, disk, "", bucket, object, opts.VersionID, ropts)
 		} else {
 			// Read the latest version
 			rfi, err = readRawFileInfo(ctx, disk, bucket, object, readData)
@@ -1219,7 +1219,7 @@ func (er erasureObjects) putMetacacheObject(ctx context.Context, key string, r *
 		}
 	}

-	if _, err = writeUniqueFileInfo(ctx, onlineDisks, minioMetaBucket, key, partsMetadata, writeQuorum); err != nil {
+	if _, err = writeUniqueFileInfo(ctx, onlineDisks, "", minioMetaBucket, key, partsMetadata, writeQuorum); err != nil {
 		return ObjectInfo{}, toObjectErr(err, minioMetaBucket, key)
 	}
@@ -1449,7 +1449,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 			continue
 		}
-		writers[i] = newBitrotWriter(disk, minioMetaTmpBucket, tempErasureObj, shardFileSize, DefaultBitrotAlgorithm, erasure.ShardSize())
+		writers[i] = newBitrotWriter(disk, bucket, minioMetaTmpBucket, tempErasureObj, shardFileSize, DefaultBitrotAlgorithm, erasure.ShardSize())
 	}

 	toEncode := io.Reader(data)
@@ -1469,7 +1469,7 @@ func (er erasureObjects) putObject(ctx context.Context, bucket string, object st
 	n, erasureErr := erasure.Encode(ctx, toEncode, writers, buffer, writeQuorum)
 	closeBitrotWriters(writers)
 	if erasureErr != nil {
-		return ObjectInfo{}, toObjectErr(erasureErr, minioMetaTmpBucket, tempErasureObj)
+		return ObjectInfo{}, toObjectErr(erasureErr, bucket, object)
 	}

 	// Should return IncompleteBody{} error when reader has fewer bytes
@@ -2057,7 +2057,7 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object s
 	// Read metadata associated with the object from all disks.
 	if opts.VersionID != "" {
-		metaArr, errs = readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false, false)
+		metaArr, errs = readAllFileInfo(ctx, disks, "", bucket, object, opts.VersionID, false, false)
 	} else {
 		metaArr, errs = readAllXL(ctx, disks, bucket, object, false, false, true)
 	}
@@ -2130,7 +2130,7 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
 	// Read metadata associated with the object from all disks.
 	if opts.VersionID != "" {
-		metaArr, errs = readAllFileInfo(ctx, disks, bucket, object, opts.VersionID, false, false)
+		metaArr, errs = readAllFileInfo(ctx, disks, "", bucket, object, opts.VersionID, false, false)
 	} else {
 		metaArr, errs = readAllXL(ctx, disks, bucket, object, false, false, true)
 	}
@@ -2458,7 +2458,7 @@ func (er erasureObjects) DecomTieredObject(ctx context.Context, bucket, object s
 	var onlineDisks []StorageAPI
 	onlineDisks, partsMetadata = shuffleDisksAndPartsMetadata(storageDisks, partsMetadata, fi)

-	if _, err := writeUniqueFileInfo(ctx, onlineDisks, bucket, object, partsMetadata, writeQuorum); err != nil {
+	if _, err := writeUniqueFileInfo(ctx, onlineDisks, "", bucket, object, partsMetadata, writeQuorum); err != nil {
 		return toObjectErr(err, bucket, object)
 	}


@@ -521,7 +521,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
 	}
 	for _, disk := range xl.getDisks() {
-		files, _ := disk.ListDir(ctx, bucket, object, -1)
+		files, _ := disk.ListDir(ctx, "", bucket, object, -1)
 		for _, file := range files {
 			if file != "xl.meta" {
 				disk.Delete(ctx, bucket, pathJoin(object, file), DeleteOptions{
@@ -625,7 +625,7 @@ func TestHeadObjectNoQuorum(t *testing.T) {
 		t.Fatal(err)
 	}
 	for _, disk := range xl.getDisks() {
-		files, _ := disk.ListDir(ctx, bucket, object, -1)
+		files, _ := disk.ListDir(ctx, "", bucket, object, -1)
 		for _, file := range files {
 			if file != "xl.meta" {
 				disk.Delete(ctx, bucket, pathJoin(object, file), DeleteOptions{
@@ -908,7 +908,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
 		t.Fatalf("Failed to putObject %v", err)
 	}

-	parts1, errs1 := readAllFileInfo(ctx, erasureDisks, bucket, object1, "", false, false)
+	parts1, errs1 := readAllFileInfo(ctx, erasureDisks, "", bucket, object1, "", false, false)
 	parts1SC := globalStorageClass

 	// Object for test case 2 - No StorageClass defined, MetaData in PutObject requesting RRS Class
@@ -920,7 +920,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
 		t.Fatalf("Failed to putObject %v", err)
 	}

-	parts2, errs2 := readAllFileInfo(ctx, erasureDisks, bucket, object2, "", false, false)
+	parts2, errs2 := readAllFileInfo(ctx, erasureDisks, "", bucket, object2, "", false, false)
 	parts2SC := globalStorageClass

 	// Object for test case 3 - No StorageClass defined, MetaData in PutObject requesting Standard Storage Class
@@ -932,7 +932,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
 		t.Fatalf("Failed to putObject %v", err)
 	}

-	parts3, errs3 := readAllFileInfo(ctx, erasureDisks, bucket, object3, "", false, false)
+	parts3, errs3 := readAllFileInfo(ctx, erasureDisks, "", bucket, object3, "", false, false)
 	parts3SC := globalStorageClass

 	// Object for test case 4 - Standard StorageClass defined as Parity 6, MetaData in PutObject requesting Standard Storage Class
@@ -950,7 +950,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
 		t.Fatalf("Failed to putObject %v", err)
 	}

-	parts4, errs4 := readAllFileInfo(ctx, erasureDisks, bucket, object4, "", false, false)
+	parts4, errs4 := readAllFileInfo(ctx, erasureDisks, "", bucket, object4, "", false, false)
 	parts4SC := storageclass.Config{
 		Standard: storageclass.StorageClass{
 			Parity: 6,
@@ -973,7 +973,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
 		t.Fatalf("Failed to putObject %v", err)
 	}

-	parts5, errs5 := readAllFileInfo(ctx, erasureDisks, bucket, object5, "", false, false)
+	parts5, errs5 := readAllFileInfo(ctx, erasureDisks, "", bucket, object5, "", false, false)
 	parts5SC := globalStorageClass

 	// Object for test case 6 - RRS StorageClass defined as Parity 2, MetaData in PutObject requesting Standard Storage Class
@@ -994,7 +994,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
 		t.Fatalf("Failed to putObject %v", err)
 	}

-	parts6, errs6 := readAllFileInfo(ctx, erasureDisks, bucket, object6, "", false, false)
+	parts6, errs6 := readAllFileInfo(ctx, erasureDisks, "", bucket, object6, "", false, false)
 	parts6SC := storageclass.Config{
 		RRS: storageclass.StorageClass{
 			Parity: 2,
@@ -1017,7 +1017,7 @@ func testObjectQuorumFromMeta(obj ObjectLayer, instanceType string, dirs []strin
 		t.Fatalf("Failed to putObject %v", err)
 	}

-	parts7, errs7 := readAllFileInfo(ctx, erasureDisks, bucket, object7, "", false, false)
+	parts7, errs7 := readAllFileInfo(ctx, erasureDisks, "", bucket, object7, "", false, false)
 	parts7SC := storageclass.Config{
 		Standard: storageclass.StorageClass{
 			Parity: 5,


@@ -1312,7 +1312,10 @@ func (z *erasureServerPools) ListObjectVersions(ctx context.Context, bucket, pre
 	merged, err := z.listPath(ctx, &opts)
 	if err != nil && err != io.EOF {
-		return loi, err
+		if !isErrBucketNotFound(err) {
+			logger.LogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket)
+		}
+		return loi, toObjectErr(err, bucket)
 	}
 	defer merged.truncate(0) // Release when returning
@@ -1461,6 +1464,12 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma
 			}
 			return loi, nil
 		}
+		if isErrBucketNotFound(err) {
+			return loi, err
+		}
+		if contextCanceled(ctx) {
+			return ListObjectsInfo{}, ctx.Err()
+		}
 	}

 	if len(prefix) > 0 && maxKeys == 1 && marker == "" {
@@ -1486,7 +1495,9 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma
 			loi.Objects = append(loi.Objects, objInfo)
 			return loi, nil
 		}
+		if isErrBucketNotFound(err) {
+			return ListObjectsInfo{}, err
+		}
 		if contextCanceled(ctx) {
 			return ListObjectsInfo{}, ctx.Err()
 		}
@@ -1497,7 +1508,7 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma
 		if !isErrBucketNotFound(err) {
 			logger.LogOnceIf(ctx, err, "erasure-list-objects-path-"+bucket)
 		}
-		return loi, err
+		return loi, toObjectErr(err, bucket)
 	}

 	merged.forwardPast(opts.Marker)
@@ -1542,7 +1553,7 @@ func (z *erasureServerPools) ListObjects(ctx context.Context, bucket, prefix, ma
 }

 func (z *erasureServerPools) ListMultipartUploads(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, maxUploads int) (ListMultipartsInfo, error) {
-	if err := checkListMultipartArgs(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter, z); err != nil {
+	if err := checkListMultipartArgs(ctx, bucket, prefix, keyMarker, uploadIDMarker, delimiter); err != nil {
 		return ListMultipartsInfo{}, err
 	}
@@ -1961,7 +1972,7 @@ func (z *erasureServerPools) HealBucket(ctx context.Context, bucket string, opts
 // error walker returns error. Optionally if context.Done() is received
 // then Walk() stops the walker.
 func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts WalkOptions) error {
-	if err := checkListObjsArgs(ctx, bucket, prefix, "", z); err != nil {
+	if err := checkListObjsArgs(ctx, bucket, prefix, ""); err != nil {
 		// Upon error close the channel.
 		xioutil.SafeClose(results)
 		return err
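
checkListObjsArgs and checkListMultipartArgs drop their ObjectLayer parameter because they no longer probe bucket existence; they validate only the request itself, and a missing bucket now surfaces from the listing path. A hypothetical, simplified version of such purely syntactic validation (the real rules in MinIO differ in detail):

package main

import (
	"errors"
	"fmt"
	"strings"
)

var errInvalidArgument = errors.New("invalid argument")

// checkListObjsArgs, simplified: no ObjectLayer and no GetBucketInfo round
// trip, just checks on the request parameters themselves.
func checkListObjsArgs(bucket, prefix, marker string) error {
	if bucket == "" || strings.Contains(bucket, "/") {
		return errInvalidArgument // bucket name must be syntactically valid
	}
	if marker != "" && !strings.HasPrefix(marker, prefix) {
		return fmt.Errorf("marker %q does not match prefix %q: %w", marker, prefix, errInvalidArgument)
	}
	return nil
}

func main() {
	fmt.Println(checkListObjsArgs("photos", "2024/", "2024/jan/x")) // <nil>
	fmt.Println(checkListObjsArgs("photos", "2024/", "zzz"))        // marker mismatch
}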


@@ -733,7 +733,7 @@ func listDeletedBuckets(ctx context.Context, storageDisks []StorageAPI, delBucke
 			// we ignore disk not found errors
 			return nil
 		}
-		volsInfo, err := storageDisks[index].ListDir(ctx, minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix), -1)
+		volsInfo, err := storageDisks[index].ListDir(ctx, "", minioMetaBucket, pathJoin(bucketMetaPrefix, deletedBucketsPrefix), -1)
 		if err != nil {
 			if errors.Is(err, errFileNotFound) {
 				return nil


@@ -58,9 +58,10 @@ func renameAllBucketMetacache(epPath string) error {
 // Other important fields are Limit, Marker.
 // List ID always derived from the Marker.
 func (z *erasureServerPools) listPath(ctx context.Context, o *listPathOptions) (entries metaCacheEntriesSorted, err error) {
-	if err := checkListObjsArgs(ctx, o.Bucket, o.Prefix, o.Marker, z); err != nil {
+	if err := checkListObjsArgs(ctx, o.Bucket, o.Prefix, o.Marker); err != nil {
 		return entries, err
 	}
 	// Marker points to before the prefix, just ignore it.
 	if o.Marker < o.Prefix {
 		o.Marker = ""
@@ -313,6 +314,9 @@ func (z *erasureServerPools) listMerged(ctx context.Context, o listPathOptions,
 	}
 	if isAllNotFound(errs) {
+		if isAllVolumeNotFound(errs) {
+			return errVolumeNotFound
+		}
 		return nil
 	}
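
The isAllVolumeNotFound helper used above is not shown in this diff; a plausible sketch, assuming it mirrors the existing isAllNotFound convention (name and shape below are inferred, not taken from the commit):

    // report true only when every drive returned errVolumeNotFound, i.e. the
    // bucket directory is missing everywhere rather than merely unhealed.
    func isAllVolumeNotFoundSketch(errs []error) bool {
        for _, err := range errs {
            if err == nil || !errors.Is(err, errVolumeNotFound) {
                return false
            }
        }
        return len(errs) > 0
    }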

View File

@@ -427,7 +427,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
 			if !disk.IsOnline() {
 				continue
 			}
-			_, err := disk.ReadVersion(ctx, minioMetaBucket,
+			_, err := disk.ReadVersion(ctx, "", minioMetaBucket,
 				o.objectPath(0), "", ReadOptions{})
 			if err != nil {
 				time.Sleep(retryDelay250)
@@ -504,7 +504,7 @@ func (er *erasureObjects) streamMetadataParts(ctx context.Context, o listPathOpt
 			if !disk.IsOnline() {
 				continue
 			}
-			_, err := disk.ReadVersion(ctx, "", minioMetaBucket,
+			_, err := disk.ReadVersion(ctx, "", minioMetaBucket,
 				o.objectPath(partN), "", ReadOptions{})
 			if err != nil {
 				time.Sleep(retryDelay250)
@@ -1057,7 +1057,7 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
 	for {
 		// Get the top entry from each
 		var current metaCacheEntry
-		var atEOF, fnf, hasErr, agree int
+		var atEOF, fnf, vnf, hasErr, agree int
 		for i := range topEntries {
 			topEntries[i] = metaCacheEntry{}
 		}
@@ -1083,6 +1083,11 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
 			errDiskNotFound.Error():
 				atEOF++
 				fnf++
+				// This is a special case, to handle bucket does
+				// not exist situations.
+				if errors.Is(err, errVolumeNotFound) {
+					vnf++
+				}
 				continue
 			}
 			hasErr++
@@ -1140,6 +1145,10 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
 			return errors.New(strings.Join(combinedErr, ", "))
 		}
+		if vnf == len(readers) {
+			return errVolumeNotFound
+		}
 		// Break if all at EOF or error.
 		if atEOF+hasErr == len(readers) {
 			if hasErr > 0 && opts.finished != nil {
@@ -1147,9 +1156,11 @@ func listPathRaw(ctx context.Context, opts listPathRawOptions) (err error) {
 			}
 			break
 		}
 		if fnf == len(readers) {
 			return errFileNotFound
 		}
 		if agree == len(readers) {
 			// Everybody agreed
 			for _, r := range readers {
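
The tally semantics above: vnf counts only errVolumeNotFound, while fnf also counts errFileNotFound and errDiskNotFound, and only unanimity is allowed to declare the bucket itself gone. Illustrative outcomes for a four-reader set (drive counts are an example, not from the commit):

    // 4 of 4 readers: errVolumeNotFound -> vnf == len(readers) -> errVolumeNotFound (bucket gone)
    // 1 of 4 readers: errVolumeNotFound -> vnf == 1             -> no conclusion; listing continues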

View File

@@ -73,12 +73,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
 	if !skipAccessChecks(opts.Bucket) {
 		// Stat a volume entry.
 		if err = Access(volumeDir); err != nil {
-			if osIsNotExist(err) {
-				return errVolumeNotFound
-			} else if isSysErrIO(err) {
-				return errFaultyDisk
-			}
-			return err
+			return convertAccessError(err, errVolumeAccessDenied)
 		}
 	}
@@ -169,7 +164,7 @@ func (s *xlStorage) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Writ
 	if s.walkMu != nil {
 		s.walkMu.Lock()
 	}
-	entries, err := s.ListDir(ctx, opts.Bucket, current, -1)
+	entries, err := s.ListDir(ctx, "", opts.Bucket, current, -1)
 	if s.walkMu != nil {
 		s.walkMu.Unlock()
 	}
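
convertAccessError consolidates the deleted inline branches; its behavior can be inferred from them, with the permission arm assumed from the errVolumeAccessDenied argument (a sketch, not the committed implementation):

    func convertAccessErrorSketch(err, permErr error) error {
        switch {
        case osIsNotExist(err):
            return errVolumeNotFound // missing bucket directory
        case isSysErrIO(err):
            return errFaultyDisk // I/O trouble, not a missing volume
        case osIsPermission(err):
            return permErr // caller picks which permission error to surface
        default:
            return err
        }
    }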

View File

@@ -172,11 +172,11 @@ func (d *naughtyDisk) WalkDir(ctx context.Context, opts WalkDirOptions, wr io.Wr
 	return d.disk.WalkDir(ctx, opts, wr)
 }
-func (d *naughtyDisk) ListDir(ctx context.Context, volume, dirPath string, count int) (entries []string, err error) {
+func (d *naughtyDisk) ListDir(ctx context.Context, origvolume, volume, dirPath string, count int) (entries []string, err error) {
 	if err := d.calcError(); err != nil {
 		return []string{}, err
 	}
-	return d.disk.ListDir(ctx, volume, dirPath, count)
+	return d.disk.ListDir(ctx, origvolume, volume, dirPath, count)
 }
 func (d *naughtyDisk) ReadFile(ctx context.Context, volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error) {
@@ -193,11 +193,11 @@ func (d *naughtyDisk) ReadFileStream(ctx context.Context, volume, path string, o
 	return d.disk.ReadFileStream(ctx, volume, path, offset, length)
 }
-func (d *naughtyDisk) CreateFile(ctx context.Context, volume, path string, size int64, reader io.Reader) error {
+func (d *naughtyDisk) CreateFile(ctx context.Context, origvolume, volume, path string, size int64, reader io.Reader) error {
 	if err := d.calcError(); err != nil {
 		return err
 	}
-	return d.disk.CreateFile(ctx, volume, path, size, reader)
+	return d.disk.CreateFile(ctx, origvolume, volume, path, size, reader)
 }
 func (d *naughtyDisk) AppendFile(ctx context.Context, volume string, path string, buf []byte) error {
@@ -246,11 +246,11 @@ func (d *naughtyDisk) DeleteVersions(ctx context.Context, volume string, version
 	return d.disk.DeleteVersions(ctx, volume, versions, opts)
 }
-func (d *naughtyDisk) WriteMetadata(ctx context.Context, volume, path string, fi FileInfo) (err error) {
+func (d *naughtyDisk) WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) (err error) {
 	if err := d.calcError(); err != nil {
 		return err
 	}
-	return d.disk.WriteMetadata(ctx, volume, path, fi)
+	return d.disk.WriteMetadata(ctx, origvolume, volume, path, fi)
 }
 func (d *naughtyDisk) UpdateMetadata(ctx context.Context, volume, path string, fi FileInfo, opts UpdateMetadataOpts) (err error) {
@@ -267,11 +267,11 @@ func (d *naughtyDisk) DeleteVersion(ctx context.Context, volume, path string, fi
 	return d.disk.DeleteVersion(ctx, volume, path, fi, forceDelMarker, opts)
 }
-func (d *naughtyDisk) ReadVersion(ctx context.Context, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
+func (d *naughtyDisk) ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
 	if err := d.calcError(); err != nil {
 		return FileInfo{}, err
 	}
-	return d.disk.ReadVersion(ctx, volume, path, versionID, opts)
+	return d.disk.ReadVersion(ctx, origvolume, volume, path, versionID, opts)
 }
 func (d *naughtyDisk) WriteAll(ctx context.Context, volume string, path string, b []byte) (err error) {

View File

@@ -40,7 +40,7 @@ func checkDelObjArgs(ctx context.Context, bucket, object string) error {
 // Checks bucket and object name validity, returns nil if both are valid.
 func checkBucketAndObjectNames(ctx context.Context, bucket, object string) error {
 	// Verify if bucket is valid.
-	if !isMinioMetaBucketName(bucket) && s3utils.CheckValidBucketName(bucket) != nil {
+	if !isMinioMetaBucketName(bucket) && s3utils.CheckValidBucketNameStrict(bucket) != nil {
 		return BucketNameInvalid{Bucket: bucket}
 	}
 	// Verify if object is valid.
@@ -58,15 +58,12 @@ func checkBucketAndObjectNames(ctx context.Context, bucket, object string) error
 }
 // Checks for all ListObjects arguments validity.
-func checkListObjsArgs(ctx context.Context, bucket, prefix, marker string, obj getBucketInfoI) error {
-	// Verify if bucket exists before validating object name.
-	// This is done on purpose since the order of errors is
-	// important here bucket does not exist error should
-	// happen before we return an error for invalid object name.
-	// FIXME: should be moved to handler layer.
-	if err := checkBucketExist(ctx, bucket, obj); err != nil {
-		return err
+func checkListObjsArgs(ctx context.Context, bucket, prefix, marker string) error {
+	// Verify if bucket is valid.
+	if !isMinioMetaBucketName(bucket) && s3utils.CheckValidBucketNameStrict(bucket) != nil {
+		return BucketNameInvalid{Bucket: bucket}
 	}
 	// Validates object prefix validity after bucket exists.
 	if !IsValidObjectPrefix(prefix) {
 		logger.LogIf(ctx, ObjectNameInvalid{
@@ -82,8 +79,8 @@ func checkListObjsArgs(ctx context.Context, bucket, prefix, marker string, obj g
 }
 // Checks for all ListMultipartUploads arguments validity.
-func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string, obj ObjectLayer) error {
-	if err := checkListObjsArgs(ctx, bucket, prefix, keyMarker, obj); err != nil {
+func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uploadIDMarker, delimiter string) error {
+	if err := checkListObjsArgs(ctx, bucket, prefix, keyMarker); err != nil {
 		return err
 	}
 	if uploadIDMarker != "" {
@@ -143,14 +140,11 @@ func checkAbortMultipartArgs(ctx context.Context, bucket, object, uploadID strin
 	return checkMultipartObjectArgs(ctx, bucket, object, uploadID, obj)
 }
-// Checks Object arguments validity, also validates if bucket exists.
+// Checks Object arguments validity.
 func checkObjectArgs(ctx context.Context, bucket, object string, obj ObjectLayer) error {
-	// Verify if bucket exists before validating object name.
-	// This is done on purpose since the order of errors is
-	// important here bucket does not exist error should
-	// happen before we return an error for invalid object name.
-	if err := checkBucketExist(ctx, bucket, obj); err != nil {
-		return err
+	// Verify if bucket is valid.
+	if !isMinioMetaBucketName(bucket) && s3utils.CheckValidBucketNameStrict(bucket) != nil {
+		return BucketNameInvalid{Bucket: bucket}
 	}
 	if err := checkObjectNameForLengthAndSlash(bucket, object); err != nil {
@@ -168,8 +162,13 @@ func checkObjectArgs(ctx context.Context, bucket, object string, obj ObjectLayer
 	return nil
 }
-// Checks for PutObject arguments validity, also validates if bucket exists.
+// Checks for PutObject arguments validity.
 func checkPutObjectArgs(ctx context.Context, bucket, object string) error {
+	// Verify if bucket is valid.
+	if !isMinioMetaBucketName(bucket) && s3utils.CheckValidBucketNameStrict(bucket) != nil {
+		return BucketNameInvalid{Bucket: bucket}
+	}
 	if err := checkObjectNameForLengthAndSlash(bucket, object); err != nil {
 		return err
 	}
@@ -182,16 +181,3 @@ func checkPutObjectArgs(ctx context.Context, bucket, object string) error {
 	}
 	return nil
 }
-type getBucketInfoI interface {
-	GetBucketInfo(ctx context.Context, bucket string, opts BucketOptions) (bucketInfo BucketInfo, err error)
-}
-// Checks whether bucket exists and returns appropriate error if not.
-func checkBucketExist(ctx context.Context, bucket string, obj getBucketInfoI) error {
-	_, err := obj.GetBucketInfo(ctx, bucket, BucketOptions{})
-	if err != nil {
-		return err
-	}
-	return nil
-}
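
The net effect of this file: argument checks are now purely local string validation, so malformed names fail fast without touching any drive, and only well-formed names proceed far enough for the drives to report a missing bucket. A self-contained sketch using the same minio-go helper (the bucket names are the test fixtures below; results assume standard strict S3 naming rules):

    package main

    import (
        "fmt"

        "github.com/minio/minio-go/v7/pkg/s3utils"
    )

    func main() {
        // names that previously round-tripped to GetBucketInfo now fail locally:
        for _, name := range []string{".test", "Test", "---", "ad"} {
            fmt.Println(name, "->", s3utils.CheckValidBucketNameStrict(name)) // all non-nil
        }
        // a well-formed name passes validation; whether it exists is decided
        // later by the drives (errVolumeNotFound -> BucketNotFound).
        fmt.Println(s3utils.CheckValidBucketNameStrict("volatile-bucket-1")) // <nil>
    }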

View File

@@ -155,16 +155,21 @@ func testListObjectsVersionedFolders(obj ObjectLayer, instanceType string, t1 Te
 	for i, testCase := range testCases {
 		testCase := testCase
 		t.Run(fmt.Sprintf("%s-Test%d", instanceType, i+1), func(t *testing.T) {
-			t.Log("ListObjects, bucket:", testCase.bucketName, "prefix:",
-				testCase.prefix, "marker:", testCase.marker, "delimiter:",
-				testCase.delimiter, "maxkeys:", testCase.maxKeys)
 			var err error
 			var resultL ListObjectsInfo
 			var resultV ListObjectVersionsInfo
 			if testCase.versioned {
+				t.Log("ListObjectVersions, bucket:", testCase.bucketName, "prefix:",
+					testCase.prefix, "marker:", testCase.marker, "delimiter:",
+					testCase.delimiter, "maxkeys:", testCase.maxKeys)
 				resultV, err = obj.ListObjectVersions(context.Background(), testCase.bucketName,
 					testCase.prefix, testCase.marker, "", testCase.delimiter, testCase.maxKeys)
 			} else {
+				t.Log("ListObjects, bucket:", testCase.bucketName, "prefix:",
+					testCase.prefix, "marker:", testCase.marker, "delimiter:",
+					testCase.delimiter, "maxkeys:", testCase.maxKeys)
 				resultL, err = obj.ListObjects(context.Background(), testCase.bucketName,
 					testCase.prefix, testCase.marker, testCase.delimiter, testCase.maxKeys)
 			}
@@ -819,16 +824,14 @@ func _testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler, v
 		shouldPass bool
 	}{
 		// Test cases with invalid bucket names ( Test number 1-4 ).
-		{".test", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: ".test"}, false},
-		{"Test", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "Test"}, false},
-		{"---", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "---"}, false},
-		{"ad", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "ad"}, false},
-		// Using an existing file for bucket name, but its not a directory (5).
-		{"simple-file.txt", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "simple-file.txt"}, false},
+		{".test", "", "", "", 0, ListObjectsInfo{}, BucketNameInvalid{Bucket: ".test"}, false},
+		{"Test", "", "", "", 0, ListObjectsInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
+		{"---", "", "", "", 0, ListObjectsInfo{}, BucketNameInvalid{Bucket: "---"}, false},
+		{"ad", "", "", "", 0, ListObjectsInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
 		// Valid bucket names, but they do not exist (6-8).
-		{"volatile-bucket-1", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
-		{"volatile-bucket-2", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
-		{"volatile-bucket-3", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
+		{"volatile-bucket-1", "", "", "", 1000, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
+		{"volatile-bucket-2", "", "", "", 1000, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
+		{"volatile-bucket-3", "", "", "", 1000, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
 		// If marker is *after* the last possible object from the prefix it should return an empty list.
 		{"test-bucket-list-object", "Asia", "europe-object", "", 0, ListObjectsInfo{}, nil, true},
 		// If the marker is *before* the first possible object from the prefix it should return the first object.
@@ -1564,16 +1567,14 @@ func testListObjectVersions(obj ObjectLayer, instanceType string, t1 TestErrHand
 		shouldPass bool
 	}{
 		// Test cases with invalid bucket names ( Test number 1-4).
-		{".test", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: ".test"}, false},
-		{"Test", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "Test"}, false},
-		{"---", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "---"}, false},
-		{"ad", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "ad"}, false},
-		// Using an existing file for bucket name, but its not a directory (5).
-		{"simple-file.txt", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "simple-file.txt"}, false},
+		{".test", "", "", "", 0, ListObjectsInfo{}, BucketNameInvalid{Bucket: ".test"}, false},
+		{"Test", "", "", "", 0, ListObjectsInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
+		{"---", "", "", "", 0, ListObjectsInfo{}, BucketNameInvalid{Bucket: "---"}, false},
+		{"ad", "", "", "", 0, ListObjectsInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
 		// Valid bucket names, but they do not exist (6-8).
-		{"volatile-bucket-1", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
-		{"volatile-bucket-2", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
-		{"volatile-bucket-3", "", "", "", 0, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
+		{"volatile-bucket-1", "", "", "", 1000, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
+		{"volatile-bucket-2", "", "", "", 1000, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
+		{"volatile-bucket-3", "", "", "", 1000, ListObjectsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
 		// If marker is *after* the last possible object from the prefix it should return an empty list.
 		{"test-bucket-list-object", "Asia", "europe-object", "", 0, ListObjectsInfo{}, nil, true},
 		// Setting a non-existing directory to be prefix (10-11).

View File

@@ -113,7 +113,7 @@ func testObjectAbortMultipartUpload(obj ObjectLayer, instanceType string, t Test
 		uploadID string
 		expectedErrType error
 	}{
-		{"--", object, uploadID, BucketNotFound{}},
+		{"--", object, uploadID, BucketNameInvalid{}},
 		{"foo", object, uploadID, BucketNotFound{}},
 		{bucket, object, "foo-foo", InvalidUploadID{}},
 		{bucket, object, uploadID, nil},
@@ -194,6 +194,19 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
 		// Failed to create NewMultipartUpload, abort.
 		t.Fatalf("%s : %s", instanceType, err.Error())
 	}
+	err = obj.MakeBucket(context.Background(), "abc", MakeBucketOptions{})
+	if err != nil {
+		// Failed to create newbucket, abort.
+		t.Fatalf("%s : %s", instanceType, err.Error())
+	}
+	resN, err := obj.NewMultipartUpload(context.Background(), "abc", "def", opts)
+	if err != nil {
+		// Failed to create NewMultipartUpload, abort.
+		t.Fatalf("%s : %s", instanceType, err.Error())
+	}
 	uploadID := res.UploadID
 	// Creating a dummy bucket for tests.
 	err = obj.MakeBucket(context.Background(), "unused-bucket", MakeBucketOptions{})
@@ -202,6 +215,8 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
 		t.Fatalf("%s : %s", instanceType, err.Error())
 	}
+	obj.DeleteBucket(context.Background(), "abc", DeleteBucketOptions{})
 	// Collection of non-exhaustive PutObjectPart test cases, valid errors
 	// and success responses.
 	testCases := []struct {
@@ -221,19 +236,19 @@ func testObjectAPIPutObjectPart(obj ObjectLayer, instanceType string, t TestErrH
 	}{
 		// Test case 1-4.
 		// Cases with invalid bucket name.
-		{bucketName: ".test", objName: "obj", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket not found: .test")},
-		{bucketName: "------", objName: "obj", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket not found: ------")},
+		{bucketName: ".test", objName: "obj", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket name invalid: .test")},
+		{bucketName: "------", objName: "obj", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket name invalid: ------")},
 		{
 			bucketName: "$this-is-not-valid-too", objName: "obj", PartID: 1,
-			expectedError: fmt.Errorf("%s", "Bucket not found: $this-is-not-valid-too"),
+			expectedError: fmt.Errorf("%s", "Bucket name invalid: $this-is-not-valid-too"),
 		},
-		{bucketName: "a", objName: "obj", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket not found: a")},
+		{bucketName: "a", objName: "obj", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket name invalid: a")},
 		// Test case - 5.
 		// Case with invalid object names.
 		{bucketName: bucket, PartID: 1, expectedError: fmt.Errorf("%s", "Object name invalid: minio-bucket/")},
 		// Test case - 6.
 		// Valid object and bucket names but non-existent bucket.
-		{bucketName: "abc", objName: "def", PartID: 1, expectedError: fmt.Errorf("%s", "Bucket not found: abc")},
+		{bucketName: "abc", objName: "def", uploadID: resN.UploadID, PartID: 1, expectedError: fmt.Errorf("%s", "Bucket not found: abc")},
 		// Test Case - 7.
 		// Existing bucket, but using a bucket on which NewMultipartUpload is not Initiated.
 		{bucketName: "unused-bucket", objName: "def", uploadID: "xyz", PartID: 1, expectedError: fmt.Errorf("%s", "Invalid upload id xyz")},
@@ -1041,10 +1056,10 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
 		shouldPass bool
 	}{
 		// Test cases with invalid bucket names ( Test number 1-4 ).
-		{".test", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: ".test"}, false},
-		{"Test", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "Test"}, false},
-		{"---", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "---"}, false},
-		{"ad", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "ad"}, false},
+		{".test", "", "", "", "", 0, ListMultipartsInfo{}, BucketNameInvalid{Bucket: ".test"}, false},
+		{"Test", "", "", "", "", 0, ListMultipartsInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
+		{"---", "", "", "", "", 0, ListMultipartsInfo{}, BucketNameInvalid{Bucket: "---"}, false},
+		{"ad", "", "", "", "", 0, ListMultipartsInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
 		// Valid bucket names, but they do not exist (Test number 5-7).
 		{"volatile-bucket-1", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
 		{"volatile-bucket-2", "", "", "", "", 0, ListMultipartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
@@ -1357,15 +1372,15 @@ func testListObjectPartsDiskNotFound(obj ObjectLayer, instanceType string, disks
 		shouldPass bool
 	}{
 		// Test cases with invalid bucket names (Test number 1-4).
-		{".test", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: ".test"}, false},
-		{"Test", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "Test"}, false},
-		{"---", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "---"}, false},
-		{"ad", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "ad"}, false},
+		{".test", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: ".test"}, false},
+		{"Test", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
+		{"---", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "---"}, false},
+		{"ad", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
 		// Test cases for listing uploadID with single part.
 		// Valid bucket names, but they do not exist (Test number 5-7).
-		{"volatile-bucket-1", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
-		{"volatile-bucket-2", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
-		{"volatile-bucket-3", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
+		{"volatile-bucket-1", "test1", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
+		{"volatile-bucket-2", "test1", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
+		{"volatile-bucket-3", "test1", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
 		// Test case for Asserting for invalid objectName (Test number 8).
 		{bucketNames[0], "", "", 0, 0, ListPartsInfo{}, ObjectNameInvalid{Bucket: bucketNames[0]}, false},
 		// Asserting for Invalid UploadID (Test number 9).
@@ -1594,15 +1609,15 @@ func testListObjectParts(obj ObjectLayer, instanceType string, t TestErrHandler)
 		shouldPass bool
 	}{
 		// Test cases with invalid bucket names (Test number 1-4).
-		{".test", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: ".test"}, false},
-		{"Test", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "Test"}, false},
-		{"---", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "---"}, false},
-		{"ad", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "ad"}, false},
+		{".test", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: ".test"}, false},
+		{"Test", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "Test"}, false},
+		{"---", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "---"}, false},
+		{"ad", "", "", 0, 0, ListPartsInfo{}, BucketNameInvalid{Bucket: "ad"}, false},
 		// Test cases for listing uploadID with single part.
 		// Valid bucket names, but they do not exist (Test number 5-7).
-		{"volatile-bucket-1", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
-		{"volatile-bucket-2", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
-		{"volatile-bucket-3", "", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
+		{"volatile-bucket-1", "test1", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-1"}, false},
+		{"volatile-bucket-2", "test1", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-2"}, false},
+		{"volatile-bucket-3", "test1", "", 0, 0, ListPartsInfo{}, BucketNotFound{Bucket: "volatile-bucket-3"}, false},
 		// Test case for Asserting for invalid objectName (Test number 8).
 		{bucketNames[0], "", "", 0, 0, ListPartsInfo{}, ObjectNameInvalid{Bucket: bucketNames[0]}, false},
 		// Asserting for Invalid UploadID (Test number 9).
@@ -1809,15 +1824,15 @@ func testObjectCompleteMultipartUpload(obj ObjectLayer, instanceType string, t T
 		shouldPass bool
 	}{
 		// Test cases with invalid bucket names (Test number 1-4).
-		{".test", "", "", []CompletePart{}, "", BucketNotFound{Bucket: ".test"}, false},
-		{"Test", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "Test"}, false},
-		{"---", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "---"}, false},
-		{"ad", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "ad"}, false},
+		{".test", "", "", []CompletePart{}, "", BucketNameInvalid{Bucket: ".test"}, false},
+		{"Test", "", "", []CompletePart{}, "", BucketNameInvalid{Bucket: "Test"}, false},
+		{"---", "", "", []CompletePart{}, "", BucketNameInvalid{Bucket: "---"}, false},
+		{"ad", "", "", []CompletePart{}, "", BucketNameInvalid{Bucket: "ad"}, false},
 		// Test cases for listing uploadID with single part.
 		// Valid bucket names, but they do not exist (Test number 5-7).
-		{"volatile-bucket-1", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "volatile-bucket-1"}, false},
-		{"volatile-bucket-2", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "volatile-bucket-2"}, false},
-		{"volatile-bucket-3", "", "", []CompletePart{}, "", BucketNotFound{Bucket: "volatile-bucket-3"}, false},
+		{"volatile-bucket-1", "test1", "", []CompletePart{}, "", BucketNotFound{Bucket: "volatile-bucket-1"}, false},
+		{"volatile-bucket-2", "test1", "", []CompletePart{}, "", BucketNotFound{Bucket: "volatile-bucket-2"}, false},
+		{"volatile-bucket-3", "test1", "", []CompletePart{}, "", BucketNotFound{Bucket: "volatile-bucket-3"}, false},
 		// Test case for Asserting for invalid objectName (Test number 8).
 		{bucketNames[0], "", "", []CompletePart{}, "", ObjectNameInvalid{Bucket: bucketNames[0]}, false},
 		// Asserting for Invalid UploadID (Test number 9).

View File

@@ -81,13 +81,13 @@ func testObjectAPIPutObject(obj ObjectLayer, instanceType string, t TestErrHandl
 		expectedError error
 	}{
 		// Cases with invalid bucket name.
-		0: {bucketName: ".test", objName: "obj", inputData: []byte(""), expectedError: BucketNotFound{Bucket: ".test"}},
-		1: {bucketName: "------", objName: "obj", inputData: []byte(""), expectedError: BucketNotFound{Bucket: "------"}},
+		0: {bucketName: ".test", objName: "obj", inputData: []byte(""), expectedError: BucketNameInvalid{Bucket: ".test"}},
+		1: {bucketName: "------", objName: "obj", inputData: []byte(""), expectedError: BucketNameInvalid{Bucket: "------"}},
 		2: {
 			bucketName: "$this-is-not-valid-too", objName: "obj", inputData: []byte(""),
-			expectedError: BucketNotFound{Bucket: "$this-is-not-valid-too"},
+			expectedError: BucketNameInvalid{Bucket: "$this-is-not-valid-too"},
 		},
-		3: {bucketName: "a", objName: "obj", inputData: []byte(""), expectedError: BucketNotFound{Bucket: "a"}},
+		3: {bucketName: "a", objName: "obj", inputData: []byte(""), expectedError: BucketNameInvalid{Bucket: "a"}},
 		// Case with invalid object names.
 		4: {bucketName: bucket, inputData: []byte(""), expectedError: ObjectNameInvalid{Bucket: bucket, Object: ""}},

View File

@@ -111,7 +111,7 @@ func testPathTraversalExploit(obj ObjectLayer, instanceType, bucketName string,
 	z := obj.(*erasureServerPools)
 	xl := z.serverPools[0].sets[0]
 	erasureDisks := xl.getDisks()
-	parts, errs := readAllFileInfo(ctx, erasureDisks, bucketName, objectName, "", false, false)
+	parts, errs := readAllFileInfo(ctx, erasureDisks, "", bucketName, objectName, "", false, false)
 	for i := range parts {
 		if errs[i] == nil {
 			if parts[i].Name == objectName {

View File

@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2022 MinIO, Inc.
+// Copyright (c) 2015-2023 MinIO, Inc.
 //
 // This file is part of MinIO Object Storage stack
 //

View File

@@ -164,7 +164,7 @@ func testPostPolicyReservedBucketExploit(obj ObjectLayer, instanceType string, d
 	z := obj.(*erasureServerPools)
 	xl := z.serverPools[0].sets[0]
 	erasureDisks := xl.getDisks()
-	parts, errs := readAllFileInfo(ctx, erasureDisks, bucketName, objectName+"/upload.txt", "", false, false)
+	parts, errs := readAllFileInfo(ctx, erasureDisks, "", bucketName, objectName+"/upload.txt", "", false, false)
 	for i := range parts {
 		if errs[i] == nil {
 			if parts[i].Name == objectName+"/upload.txt" {

View File

@@ -393,6 +393,7 @@ type DeleteVersionHandlerParams struct {
 type MetadataHandlerParams struct {
 	DiskID     string             `msg:"id"`
 	Volume     string             `msg:"v"`
+	OrigVolume string             `msg:"ov"`
 	FilePath   string             `msg:"fp"`
 	UpdateOpts UpdateMetadataOpts `msg:"uo"`
 	FI         FileInfo           `msg:"fi"`

View File

@@ -2942,6 +2942,12 @@ func (z *MetadataHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) {
 				err = msgp.WrapError(err, "Volume")
 				return
 			}
+		case "ov":
+			z.OrigVolume, err = dc.ReadString()
+			if err != nil {
+				err = msgp.WrapError(err, "OrigVolume")
+				return
+			}
 		case "fp":
 			z.FilePath, err = dc.ReadString()
 			if err != nil {
@@ -2996,9 +3002,9 @@ func (z *MetadataHandlerParams) DecodeMsg(dc *msgp.Reader) (err error) {
 // EncodeMsg implements msgp.Encodable
 func (z *MetadataHandlerParams) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 5
+	// map header, size 6
 	// write "id"
-	err = en.Append(0x85, 0xa2, 0x69, 0x64)
+	err = en.Append(0x86, 0xa2, 0x69, 0x64)
 	if err != nil {
 		return
 	}
@@ -3017,6 +3023,16 @@ func (z *MetadataHandlerParams) EncodeMsg(en *msgp.Writer) (err error) {
 		err = msgp.WrapError(err, "Volume")
 		return
 	}
+	// write "ov"
+	err = en.Append(0xa2, 0x6f, 0x76)
+	if err != nil {
+		return
+	}
+	err = en.WriteString(z.OrigVolume)
+	if err != nil {
+		err = msgp.WrapError(err, "OrigVolume")
+		return
+	}
 	// write "fp"
 	err = en.Append(0xa2, 0x66, 0x70)
 	if err != nil {
@@ -3059,13 +3075,16 @@ func (z *MetadataHandlerParams) EncodeMsg(en *msgp.Writer) (err error) {
 // MarshalMsg implements msgp.Marshaler
 func (z *MetadataHandlerParams) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
-	// map header, size 5
+	// map header, size 6
 	// string "id"
-	o = append(o, 0x85, 0xa2, 0x69, 0x64)
+	o = append(o, 0x86, 0xa2, 0x69, 0x64)
 	o = msgp.AppendString(o, z.DiskID)
 	// string "v"
 	o = append(o, 0xa1, 0x76)
 	o = msgp.AppendString(o, z.Volume)
+	// string "ov"
+	o = append(o, 0xa2, 0x6f, 0x76)
+	o = msgp.AppendString(o, z.OrigVolume)
 	// string "fp"
 	o = append(o, 0xa2, 0x66, 0x70)
 	o = msgp.AppendString(o, z.FilePath)
@@ -3115,6 +3134,12 @@ func (z *MetadataHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) {
 				err = msgp.WrapError(err, "Volume")
 				return
 			}
+		case "ov":
+			z.OrigVolume, bts, err = msgp.ReadStringBytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "OrigVolume")
+				return
+			}
 		case "fp":
 			z.FilePath, bts, err = msgp.ReadStringBytes(bts)
 			if err != nil {
@@ -3170,7 +3195,7 @@ func (z *MetadataHandlerParams) UnmarshalMsg(bts []byte) (o []byte, err error) {
 // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
 func (z *MetadataHandlerParams) Msgsize() (s int) {
-	s = 1 + 3 + msgp.StringPrefixSize + len(z.DiskID) + 2 + msgp.StringPrefixSize + len(z.Volume) + 3 + msgp.StringPrefixSize + len(z.FilePath) + 3 + 1 + 3 + msgp.BoolSize + 3 + z.FI.Msgsize()
+	s = 1 + 3 + msgp.StringPrefixSize + len(z.DiskID) + 2 + msgp.StringPrefixSize + len(z.Volume) + 3 + msgp.StringPrefixSize + len(z.OrigVolume) + 3 + msgp.StringPrefixSize + len(z.FilePath) + 3 + 1 + 3 + msgp.BoolSize + 3 + z.FI.Msgsize()
 	return
 }
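
The generated byte literals are easier to audit with the MessagePack rules in mind: 0x85 and 0x86 are fixmap headers for 5- and 6-entry maps (hence the header change once OrigVolume joins the struct), and 0xa2 0x6f 0x76 is the two-byte fixstr "ov". Because decoding is keyed by map entries, an unknown "ov" key would simply be skipped by older decoders; the hard compatibility gate is the storage REST version bump below. A quick standalone check using the msgp runtime (not part of the commit):

    package main

    import (
        "fmt"

        "github.com/tinylib/msgp/msgp"
    )

    func main() {
        // 0xa2 = fixstr of length 2; 0x6f 0x76 spell "ov", the key the
        // generated EncodeMsg/MarshalMsg append for the new field.
        key, rest, err := msgp.ReadStringBytes([]byte{0xa2, 0x6f, 0x76})
        fmt.Println(key, len(rest), err) // ov 0 <nil>
    }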

View File

@@ -82,17 +82,17 @@ type StorageAPI interface {
 	// Metadata operations
 	DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool, opts DeleteOptions) error
 	DeleteVersions(ctx context.Context, volume string, versions []FileInfoVersions, opts DeleteOptions) []error
-	WriteMetadata(ctx context.Context, volume, path string, fi FileInfo) error
+	WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) error
 	UpdateMetadata(ctx context.Context, volume, path string, fi FileInfo, opts UpdateMetadataOpts) error
-	ReadVersion(ctx context.Context, volume, path, versionID string, opts ReadOptions) (FileInfo, error)
+	ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (FileInfo, error)
 	ReadXL(ctx context.Context, volume, path string, readData bool) (RawFileInfo, error)
 	RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (uint64, error)
 	// File operations.
-	ListDir(ctx context.Context, volume, dirPath string, count int) ([]string, error)
+	ListDir(ctx context.Context, origvolume, volume, dirPath string, count int) ([]string, error)
 	ReadFile(ctx context.Context, volume string, path string, offset int64, buf []byte, verifier *BitrotVerifier) (n int64, err error)
 	AppendFile(ctx context.Context, volume string, path string, buf []byte) (err error)
-	CreateFile(ctx context.Context, volume, path string, size int64, reader io.Reader) error
+	CreateFile(ctx context.Context, origvolume, volume, path string, size int64, reader io.Reader) error
 	ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error)
 	RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) error
 	CheckParts(ctx context.Context, volume string, path string, fi FileInfo) error
@@ -198,7 +198,7 @@ func (p *unrecognizedDisk) DeleteVol(ctx context.Context, volume string, forceDe
 	return errDiskNotFound
 }
-func (p *unrecognizedDisk) ListDir(ctx context.Context, volume, dirPath string, count int) ([]string, error) {
+func (p *unrecognizedDisk) ListDir(ctx context.Context, origvolume, volume, dirPath string, count int) ([]string, error) {
 	return nil, errDiskNotFound
 }
@@ -210,7 +210,7 @@ func (p *unrecognizedDisk) AppendFile(ctx context.Context, volume string, path s
 	return errDiskNotFound
 }
-func (p *unrecognizedDisk) CreateFile(ctx context.Context, volume, path string, size int64, reader io.Reader) error {
+func (p *unrecognizedDisk) CreateFile(ctx context.Context, origvolume, volume, path string, size int64, reader io.Reader) error {
 	return errDiskNotFound
 }
@@ -260,11 +260,11 @@ func (p *unrecognizedDisk) UpdateMetadata(ctx context.Context, volume, path stri
 	return errDiskNotFound
 }
-func (p *unrecognizedDisk) WriteMetadata(ctx context.Context, volume, path string, fi FileInfo) (err error) {
+func (p *unrecognizedDisk) WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) (err error) {
 	return errDiskNotFound
 }
-func (p *unrecognizedDisk) ReadVersion(ctx context.Context, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
+func (p *unrecognizedDisk) ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
 	return fi, errDiskNotFound
 }
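
Why two volume arguments: for multipart and metadata traffic the physical volume is often an internal one under .minio.sys, while the bucket whose existence needs revalidating is the user's. Hypothetical call sites (partPath and the bucket name are made up for illustration):

    // staged multipart write: the drive verifies that "mybucket" (origvolume)
    // still exists while writing under the hidden multipart volume, replacing
    // the old GetBucketInfo fan-out.
    err := disk.CreateFile(ctx, "mybucket", minioMetaMultipartBucket, partPath, size, reader)

    // system-namespace reads pass "" and skip the extra check entirely,
    // matching the minioMetaBucket call sites earlier in this commit.
    fi, err := disk.ReadVersion(ctx, "", minioMetaBucket, partPath, "", ReadOptions{})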

View File

@@ -366,11 +366,13 @@ func (client *storageRESTClient) AppendFile(ctx context.Context, volume string,
 	return err
 }
-func (client *storageRESTClient) CreateFile(ctx context.Context, volume, path string, size int64, reader io.Reader) error {
+func (client *storageRESTClient) CreateFile(ctx context.Context, origvolume, volume, path string, size int64, reader io.Reader) error {
 	values := make(url.Values)
 	values.Set(storageRESTVolume, volume)
 	values.Set(storageRESTFilePath, path)
 	values.Set(storageRESTLength, strconv.Itoa(int(size)))
+	values.Set(storageRESTOrigVolume, origvolume)
 	respBody, err := client.call(ctx, storageRESTMethodCreateFile, values, io.NopCloser(reader), size)
 	defer xhttp.DrainBody(respBody)
 	if err != nil {
@@ -380,12 +382,13 @@ func (client *storageRESTClient) CreateFile(ctx context.Context, volume, path st
 	return err
 }
-func (client *storageRESTClient) WriteMetadata(ctx context.Context, volume, path string, fi FileInfo) error {
+func (client *storageRESTClient) WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) error {
 	_, err := storageWriteMetadataHandler.Call(ctx, client.gridConn, &MetadataHandlerParams{
 		DiskID:     client.diskID,
-		Volume:   volume,
-		FilePath: path,
-		FI:       fi,
+		OrigVolume: origvolume,
+		Volume:     volume,
+		FilePath:   path,
+		FI:         fi,
 	})
 	return toStorageErr(err)
 }
@@ -478,16 +481,17 @@ func readMsgpReaderPoolPut(r *msgp.Reader) {
 	}
 }
-func (client *storageRESTClient) ReadVersion(ctx context.Context, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
+func (client *storageRESTClient) ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
 	// Use websocket when not reading data.
 	if !opts.ReadData {
 		resp, err := storageReadVersionHandler.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{
 			storageRESTDiskID:     client.diskID,
-			storageRESTVolume:    volume,
-			storageRESTFilePath:  path,
-			storageRESTVersionID: versionID,
-			storageRESTReadData:  strconv.FormatBool(opts.ReadData),
-			storageRESTHealing:   strconv.FormatBool(opts.Healing),
+			storageRESTOrigVolume: origvolume,
+			storageRESTVolume:     volume,
+			storageRESTFilePath:   path,
+			storageRESTVersionID:  versionID,
+			storageRESTReadData:   strconv.FormatBool(opts.ReadData),
+			storageRESTHealing:    strconv.FormatBool(opts.Healing),
 		}))
 		if err != nil {
 			return fi, toStorageErr(err)
@@ -496,6 +500,7 @@ func (client *storageRESTClient) ReadVersion(ctx context.Context, volume, path,
 	}
 	values := make(url.Values)
+	values.Set(storageRESTOrigVolume, origvolume)
 	values.Set(storageRESTVolume, volume)
 	values.Set(storageRESTFilePath, path)
 	values.Set(storageRESTVersionID, versionID)
@@ -612,11 +617,13 @@ func (client *storageRESTClient) ReadFile(ctx context.Context, volume string, pa
 }
 // ListDir - lists a directory.
-func (client *storageRESTClient) ListDir(ctx context.Context, volume, dirPath string, count int) (entries []string, err error) {
+func (client *storageRESTClient) ListDir(ctx context.Context, origvolume, volume, dirPath string, count int) (entries []string, err error) {
 	values := make(url.Values)
 	values.Set(storageRESTVolume, volume)
 	values.Set(storageRESTDirPath, dirPath)
 	values.Set(storageRESTCount, strconv.Itoa(count))
+	values.Set(storageRESTOrigVolume, origvolume)
 	respBody, err := client.call(ctx, storageRESTMethodListDir, values, nil, -1)
 	if err != nil {
 		return nil, err

View File

@@ -20,7 +20,9 @@ package cmd
 //go:generate msgp -file $GOFILE -unexported
 const (
-	storageRESTVersion       = "v55" // ReadAll, RenameFile migrate to websockets
+	// Added orig-volume support for CreateFile, WriteMetadata, ReadVersion, ListDir
+	// this is needed for performance optimization on bucket checks.
+	storageRESTVersion       = "v56"
 	storageRESTVersionPrefix = SlashSeparator + storageRESTVersion
 	storageRESTPrefix        = minioReservedBucketPath + "/storage"
 )
@@ -43,7 +45,6 @@ const (
 	storageRESTMethodStatInfoFile   = "/statfile"
 	storageRESTMethodReadMultiple   = "/readmultiple"
 	storageRESTMethodCleanAbandoned = "/cleanabandoned"
-	storageRESTMethodLinkXL         = "/linkxl"
 )
 const (
@@ -69,6 +70,7 @@ const (
 	storageRESTGlob        = "glob"
 	storageRESTMetrics     = "metrics"
 	storageRESTDriveQuorum = "drive-quorum"
+	storageRESTOrigVolume  = "orig-volume"
 )
 type nsScannerOptions struct {
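
Bumping storageRESTVersion is the compatibility gate for the signature changes: the version string is baked into every storage REST route, so during a rolling upgrade a v55 peer simply fails to reach v56 endpoints instead of silently dropping the new orig-volume parameter. Illustrative route composition, derived from the constants visible above (path shown for ListDir):

    // storageRESTPrefix + storageRESTVersionPrefix + storageRESTMethodListDir
    // => "/minio/storage" + "/v56" + "/listdir"
    const exampleListDirRoute = "/minio/storage/v56/listdir"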

View File

@@ -330,8 +330,10 @@ func (s *storageRESTServer) CreateFileHandler(w http.ResponseWriter, r *http.Req
 	if !s.IsValid(w, r) {
 		return
 	}
 	volume := r.Form.Get(storageRESTVolume)
 	filePath := r.Form.Get(storageRESTFilePath)
+	origvolume := r.Form.Get(storageRESTOrigVolume)
 	fileSizeStr := r.Form.Get(storageRESTLength)
 	fileSize, err := strconv.Atoi(fileSizeStr)
@@ -341,7 +343,7 @@ func (s *storageRESTServer) CreateFileHandler(w http.ResponseWriter, r *http.Req
 	}
 	done, body := keepHTTPReqResponseAlive(w, r)
-	done(s.getStorage().CreateFile(r.Context(), volume, filePath, int64(fileSize), body))
+	done(s.getStorage().CreateFile(r.Context(), origvolume, volume, filePath, int64(fileSize), body))
 }
 var storageDeleteVersionHandler = grid.NewSingleHandler[*DeleteVersionHandlerParams, grid.NoPayload](grid.HandlerDeleteVersion, func() *DeleteVersionHandlerParams {
@@ -371,6 +373,7 @@ func (s *storageRESTServer) ReadVersionHandlerWS(params *grid.MSS) (*FileInfo, *
 	if !s.checkID(params.Get(storageRESTDiskID)) {
 		return nil, grid.NewRemoteErr(errDiskNotFound)
 	}
+	origvolume := params.Get(storageRESTOrigVolume)
 	volume := params.Get(storageRESTVolume)
 	filePath := params.Get(storageRESTFilePath)
 	versionID := params.Get(storageRESTVersionID)
@@ -384,7 +387,7 @@ func (s *storageRESTServer) ReadVersionHandlerWS(params *grid.MSS) (*FileInfo, *
 		return nil, grid.NewRemoteErr(err)
 	}
-	fi, err := s.getStorage().ReadVersion(context.Background(), volume, filePath, versionID, ReadOptions{ReadData: readData, Healing: healing})
+	fi, err := s.getStorage().ReadVersion(context.Background(), origvolume, volume, filePath, versionID, ReadOptions{ReadData: readData, Healing: healing})
 	if err != nil {
 		return nil, grid.NewRemoteErr(err)
 	}
@@ -396,6 +399,7 @@ func (s *storageRESTServer) ReadVersionHandler(w http.ResponseWriter, r *http.Re
 	if !s.IsValid(w, r) {
 		return
 	}
+	origvolume := r.Form.Get(storageRESTOrigVolume)
 	volume := r.Form.Get(storageRESTVolume)
 	filePath := r.Form.Get(storageRESTFilePath)
 	versionID := r.Form.Get(storageRESTVersionID)
@@ -409,7 +413,7 @@ func (s *storageRESTServer) ReadVersionHandler(w http.ResponseWriter, r *http.Re
 		s.writeErrorResponse(w, err)
 		return
 	}
-	fi, err := s.getStorage().ReadVersion(r.Context(), volume, filePath, versionID, ReadOptions{ReadData: readData, Healing: healing})
+	fi, err := s.getStorage().ReadVersion(r.Context(), origvolume, volume, filePath, versionID, ReadOptions{ReadData: readData, Healing: healing})
 	if err != nil {
 		s.writeErrorResponse(w, err)
 		return
@@ -427,10 +431,12 @@ func (s *storageRESTServer) WriteMetadataHandler(p *MetadataHandlerParams) (np g
 	if !s.checkID(p.DiskID) {
 		return grid.NewNPErr(errDiskNotFound)
 	}
 	volume := p.Volume
 	filePath := p.FilePath
+	origvolume := p.OrigVolume
-	err := s.getStorage().WriteMetadata(context.Background(), volume, filePath, p.FI)
+	err := s.getStorage().WriteMetadata(context.Background(), origvolume, volume, filePath, p.FI)
 	return np, grid.NewRemoteErr(err)
 }
@@ -650,13 +656,14 @@ func (s *storageRESTServer) ListDirHandler(w http.ResponseWriter, r *http.Reques
 	}
 	volume := r.Form.Get(storageRESTVolume)
 	dirPath := r.Form.Get(storageRESTDirPath)
+	origvolume := r.Form.Get(storageRESTOrigVolume)
 	count, err := strconv.Atoi(r.Form.Get(storageRESTCount))
 	if err != nil {
 		s.writeErrorResponse(w, err)
 		return
 	}
-	entries, err := s.getStorage().ListDir(r.Context(), volume, dirPath, count)
+	entries, err := s.getStorage().ListDir(r.Context(), origvolume, volume, dirPath, count)
 	if err != nil {
 		s.writeErrorResponse(w, err)
 		return
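
On the wire, the handlers above read the new parameter straight from the form values; an empty orig-volume skips the extra existence check (see the xl-storage ListDir change below), so callers that do not need bucket revalidation pay nothing. A hypothetical request line for a multipart listing (values illustrative, not captured from a real server):

    // POST /minio/storage/v56/listdir?volume=.minio.sys%2Fmultipart
    //        &dir-path=<uploadID prefix>&count=-1&orig-volume=mybucket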

View File

@@ -97,7 +97,7 @@ func testStorageAPIListDir(t *testing.T, storage StorageAPI) {
 	}
 	for i, testCase := range testCases {
-		result, err := storage.ListDir(context.Background(), testCase.volumeName, testCase.prefix, -1)
+		result, err := storage.ListDir(context.Background(), "", testCase.volumeName, testCase.prefix, -1)
 		expectErr := (err != nil)
 		if expectErr != testCase.expectErr {

View File

@ -439,14 +439,14 @@ func (p *xlStorageDiskIDCheck) DeleteVol(ctx context.Context, volume string, for
return w.Run(func() error { return p.storage.DeleteVol(ctx, volume, forceDelete) }) return w.Run(func() error { return p.storage.DeleteVol(ctx, volume, forceDelete) })
} }
func (p *xlStorageDiskIDCheck) ListDir(ctx context.Context, volume, dirPath string, count int) (s []string, err error) { func (p *xlStorageDiskIDCheck) ListDir(ctx context.Context, origvolume, volume, dirPath string, count int) (s []string, err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricListDir, volume, dirPath) ctx, done, err := p.TrackDiskHealth(ctx, storageMetricListDir, volume, dirPath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
defer done(&err) defer done(&err)
return p.storage.ListDir(ctx, volume, dirPath, count) return p.storage.ListDir(ctx, origvolume, volume, dirPath, count)
} }
// Legacy API - does not have any deadlines // Legacy API - does not have any deadlines
@ -476,14 +476,14 @@ func (p *xlStorageDiskIDCheck) AppendFile(ctx context.Context, volume string, pa
}) })
} }
func (p *xlStorageDiskIDCheck) CreateFile(ctx context.Context, volume, path string, size int64, reader io.Reader) (err error) { func (p *xlStorageDiskIDCheck) CreateFile(ctx context.Context, origvolume, volume, path string, size int64, reader io.Reader) (err error) {
ctx, done, err := p.TrackDiskHealth(ctx, storageMetricCreateFile, volume, path) ctx, done, err := p.TrackDiskHealth(ctx, storageMetricCreateFile, volume, path)
if err != nil { if err != nil {
return err return err
} }
defer done(&err) defer done(&err)
return p.storage.CreateFile(ctx, volume, path, size, io.NopCloser(reader)) return p.storage.CreateFile(ctx, origvolume, volume, path, size, io.NopCloser(reader))
} }
func (p *xlStorageDiskIDCheck) ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error) { func (p *xlStorageDiskIDCheck) ReadFileStream(ctx context.Context, volume, path string, offset, length int64) (io.ReadCloser, error) {
@@ -663,7 +663,7 @@ func (p *xlStorageDiskIDCheck) UpdateMetadata(ctx context.Context, volume, path
 	return w.Run(func() error { return p.storage.UpdateMetadata(ctx, volume, path, fi, opts) })
 }
 
-func (p *xlStorageDiskIDCheck) WriteMetadata(ctx context.Context, volume, path string, fi FileInfo) (err error) {
+func (p *xlStorageDiskIDCheck) WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) (err error) {
 	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricWriteMetadata, volume, path)
 	if err != nil {
 		return err
@@ -671,10 +671,10 @@ func (p *xlStorageDiskIDCheck) WriteMetadata(ctx context.Context, volume, path s
 	defer done(&err)
 
 	w := xioutil.NewDeadlineWorker(globalDriveConfig.GetMaxTimeout())
-	return w.Run(func() error { return p.storage.WriteMetadata(ctx, volume, path, fi) })
+	return w.Run(func() error { return p.storage.WriteMetadata(ctx, origvolume, volume, path, fi) })
 }
 
-func (p *xlStorageDiskIDCheck) ReadVersion(ctx context.Context, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
+func (p *xlStorageDiskIDCheck) ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
 	ctx, done, err := p.TrackDiskHealth(ctx, storageMetricReadVersion, volume, path)
 	if err != nil {
 		return fi, err
@@ -682,7 +682,7 @@ func (p *xlStorageDiskIDCheck) ReadVersion(ctx context.Context, volume, path, ve
 	defer done(&err)
 
 	return xioutil.WithDeadline[FileInfo](ctx, globalDriveConfig.GetMaxTimeout(), func(ctx context.Context) (result FileInfo, err error) {
-		return p.storage.ReadVersion(ctx, volume, path, versionID, opts)
+		return p.storage.ReadVersion(ctx, origvolume, volume, path, versionID, opts)
 	})
 }
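Read together, the diskIDCheck wrappers above pin down how the StorageAPI interface changes. A sketch of the affected method signatures, inferred from those wrappers; the real interface declares many more methods:

// Inferred shapes only. origvolume is the originating bucket whose
// existence the drive now verifies locally, in place of the
// GetBucketInfo fan-out this commit removes.
type StorageAPI interface {
	ListDir(ctx context.Context, origvolume, volume, dirPath string, count int) ([]string, error)
	CreateFile(ctx context.Context, origvolume, volume, path string, size int64, reader io.Reader) error
	WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) error
	ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (FileInfo, error)
	// ... remaining methods unchanged
}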

@@ -1016,11 +1016,23 @@ func (s *xlStorage) DeleteVol(ctx context.Context, volume string, forceDelete bo
 
 // ListDir - return all the entries at the given directory path.
 // If an entry is a directory it will be returned with a trailing SlashSeparator.
-func (s *xlStorage) ListDir(ctx context.Context, volume, dirPath string, count int) (entries []string, err error) {
+func (s *xlStorage) ListDir(ctx context.Context, origvolume, volume, dirPath string, count int) (entries []string, err error) {
 	if contextCanceled(ctx) {
 		return nil, ctx.Err()
 	}
 
+	if origvolume != "" {
+		if !skipAccessChecks(origvolume) {
+			origvolumeDir, err := s.getVolDir(origvolume)
+			if err != nil {
+				return nil, err
+			}
+			if err = Access(origvolumeDir); err != nil {
+				return nil, convertAccessError(err, errVolumeAccessDenied)
+			}
+		}
+	}
+
 	// Verify if volume is valid and it exists.
 	volumeDir, err := s.getVolDir(volume)
 	if err != nil {
@@ -1034,15 +1046,9 @@ func (s *xlStorage) ListDir(ctx context.Context, volume, dirPath string, count i
 		entries, err = readDir(dirPathAbs)
 	}
 	if err != nil {
-		if err == errFileNotFound {
-			if !skipAccessChecks(volume) {
-				if ierr := Access(volumeDir); ierr != nil {
-					if osIsNotExist(ierr) {
-						return nil, errVolumeNotFound
-					} else if isSysErrIO(ierr) {
-						return nil, errFaultyDisk
-					}
-				}
+		if errors.Is(err, errFileNotFound) && !skipAccessChecks(volume) {
+			if ierr := Access(volumeDir); ierr != nil {
+				return nil, convertAccessError(ierr, errVolumeAccessDenied)
 			}
 		}
 		return nil, err
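Two helpers carry most of the weight here but are not shown in the diff. Sketches of both, hedged: convertAccessError is reconstructed from the exact branches it replaces above (not-exist becomes errVolumeNotFound, I/O errors become errFaultyDisk), with the permission case assumed from the errVolumeAccessDenied sentinel every call site passes; skipAccessChecks is assumed to exempt internal minioMeta* volumes, whose existence the server itself guarantees.

// Sketch, reconstructed from the removed branches above.
func convertAccessError(err, permErr error) error {
	switch {
	case osIsNotExist(err):
		return errVolumeNotFound
	case isSysErrIO(err):
		return errFaultyDisk
	case osIsPermission(err):
		return permErr // assumption: permission errors map to the sentinel
	default:
		return err
	}
}

// Sketch, assuming internal metadata volumes are exempt from the check.
func skipAccessChecks(volume string) bool {
	return strings.HasPrefix(volume, minioMetaBucket)
}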
@@ -1225,7 +1231,7 @@ func (s *xlStorage) DeleteVersion(ctx context.Context, volume, path string, fi F
 		metaDataPoolPut(buf) // Never used, return it
 		if fi.Deleted && forceDelMarker {
 			// Create a new xl.meta with a delete marker in it
-			return s.WriteMetadata(ctx, volume, path, fi)
+			return s.WriteMetadata(ctx, "", volume, path, fi)
 		}
 
 		s.RLock()
@@ -1344,8 +1350,22 @@ func (s *xlStorage) UpdateMetadata(ctx context.Context, volume, path string, fi
 }
 
 // WriteMetadata - writes FileInfo metadata for path at `xl.meta`
-func (s *xlStorage) WriteMetadata(ctx context.Context, volume, path string, fi FileInfo) (err error) {
+func (s *xlStorage) WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) (err error) {
 	if fi.Fresh {
+		if origvolume != "" {
+			origvolumeDir, err := s.getVolDir(origvolume)
+			if err != nil {
+				return err
+			}
+
+			if !skipAccessChecks(origvolume) {
+				// Stat a volume entry.
+				if err = Access(origvolumeDir); err != nil {
+					return convertAccessError(err, errVolumeAccessDenied)
+				}
+			}
+		}
+
 		var xlMeta xlMetaV2
 		if err := xlMeta.AddVersion(fi); err != nil {
 			return err
@@ -1521,7 +1541,21 @@ type ReadOptions struct {
 // ReadVersion - reads metadata and returns FileInfo at path `xl.meta`
 // for all objects less than `32KiB` this call returns data as well
 // along with metadata.
-func (s *xlStorage) ReadVersion(ctx context.Context, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
+func (s *xlStorage) ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
+	if origvolume != "" {
+		origvolumeDir, err := s.getVolDir(origvolume)
+		if err != nil {
+			return fi, err
+		}
+
+		if !skipAccessChecks(origvolume) {
+			// Stat a volume entry.
+			if err = Access(origvolumeDir); err != nil {
+				return fi, convertAccessError(err, errVolumeAccessDenied)
+			}
+		}
+	}
+
 	volumeDir, err := s.getVolDir(volume)
 	if err != nil {
 		return fi, err
@@ -1968,7 +2002,21 @@ func (c closeWrapper) Close() error {
 }
 
 // CreateFile - creates the file.
-func (s *xlStorage) CreateFile(ctx context.Context, volume, path string, fileSize int64, r io.Reader) (err error) {
+func (s *xlStorage) CreateFile(ctx context.Context, origvolume, volume, path string, fileSize int64, r io.Reader) (err error) {
+	if origvolume != "" {
+		origvolumeDir, err := s.getVolDir(origvolume)
+		if err != nil {
+			return err
+		}
+
+		if !skipAccessChecks(origvolume) {
+			// Stat a volume entry.
+			if err = Access(origvolumeDir); err != nil {
+				return convertAccessError(err, errVolumeAccessDenied)
+			}
+		}
+	}
+
 	volumeDir, err := s.getVolDir(volume)
 	if err != nil {
 		return err
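The same origvolume guard is now open-coded in ListDir, WriteMetadata, ReadVersion, and CreateFile above. A hypothetical factoring of the pattern, to make the intent explicit: one local Access stat on the originating bucket's directory stands in for the per-operation GetBucketInfo round-trips this commit removes.

// Hypothetical helper (not part of this change) capturing the repeated guard.
func (s *xlStorage) checkOrigVolume(origvolume string) error {
	if origvolume == "" || skipAccessChecks(origvolume) {
		return nil // no origin bucket supplied, or an internal volume
	}
	origvolumeDir, err := s.getVolDir(origvolume)
	if err != nil {
		return err
	}
	if err := Access(origvolumeDir); err != nil {
		return convertAccessError(err, errVolumeAccessDenied)
	}
	return nil
}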
@@ -2355,23 +2403,13 @@ func (s *xlStorage) RenameData(ctx context.Context, srcVolume, srcPath string, f
 	if !skipAccessChecks(srcVolume) {
 		// Stat a volume entry.
 		if err = Access(srcVolumeDir); err != nil {
-			if osIsNotExist(err) {
-				return 0, errVolumeNotFound
-			} else if isSysErrIO(err) {
-				return 0, errFaultyDisk
-			}
-			return 0, err
+			return 0, convertAccessError(err, errVolumeAccessDenied)
 		}
 	}
 
 	if !skipAccessChecks(dstVolume) {
 		if err = Access(dstVolumeDir); err != nil {
-			if osIsNotExist(err) {
-				return 0, errVolumeNotFound
-			} else if isSysErrIO(err) {
-				return 0, errFaultyDisk
-			}
-			return 0, err
+			return 0, convertAccessError(err, errVolumeAccessDenied)
 		}
 	}

@@ -235,7 +235,7 @@ func TestXLStorageReadVersionLegacy(t *testing.T) {
 		t.Fatalf("Unable to create a file \"as-file\", %s", err)
 	}
 
-	fi, err := xlStorage.ReadVersion(context.Background(), "exists-legacy", "as-file", "", ReadOptions{})
+	fi, err := xlStorage.ReadVersion(context.Background(), "", "exists-legacy", "as-file", "", ReadOptions{})
 	if err != nil {
 		t.Fatalf("Unable to read older 'xl.json' content: %s", err)
 	}
@@ -325,7 +325,7 @@ func TestXLStorageReadVersion(t *testing.T) {
 
 	// Run through all the test cases and validate for ReadVersion.
 	for i, testCase := range testCases {
-		_, err = xlStorage.ReadVersion(context.Background(), testCase.volume, testCase.path, "", ReadOptions{})
+		_, err = xlStorage.ReadVersion(context.Background(), "", testCase.volume, testCase.path, "", ReadOptions{})
 		if err != testCase.err {
 			t.Fatalf("TestXLStorage %d: Expected err \"%s\", got err \"%s\"", i+1, testCase.err, err)
 		}
@@ -858,7 +858,7 @@ func TestXLStorageListDir(t *testing.T) {
 
 	for i, testCase := range testCases {
 		var dirList []string
-		dirList, err = xlStorage.ListDir(context.Background(), testCase.srcVol, testCase.srcPath, -1)
+		dirList, err = xlStorage.ListDir(context.Background(), "", testCase.srcVol, testCase.srcPath, -1)
 		if err != testCase.expectedErr {
 			t.Errorf("TestXLStorage case %d: Expected: \"%s\", got: \"%s\"", i+1, testCase.expectedErr, err)
 		}
@@ -1657,7 +1657,7 @@ func TestXLStorageDeleteVersion(t *testing.T) {
 			Checksums: nil,
 		},
 	}
-	if err := xl.WriteMetadata(ctx, volume, object, fi); err != nil {
+	if err := xl.WriteMetadata(ctx, "", volume, object, fi); err != nil {
 		t.Fatalf("Unable to create object, %s", err)
 	}
 }
@@ -1666,7 +1666,7 @@ func TestXLStorageDeleteVersion(t *testing.T) {
 	t.Helper()
 	for i := range versions {
 		shouldExist := !deleted[i]
-		fi, err := xl.ReadVersion(ctx, volume, object, versions[i], ReadOptions{})
+		fi, err := xl.ReadVersion(ctx, "", volume, object, versions[i], ReadOptions{})
 		if shouldExist {
 			if err != nil {
 				t.Fatalf("Version %s should exist, but got err %v", versions[i], err)
checkVerExist(t) checkVerExist(t)
// Meta should be deleted now... // Meta should be deleted now...
fi, err := xl.ReadVersion(ctx, volume, object, "", ReadOptions{}) fi, err := xl.ReadVersion(ctx, "", volume, object, "", ReadOptions{})
if err != errFileNotFound { if err != errFileNotFound {
t.Fatalf("Object %s should not exist, but returned: %#v", object, fi) t.Fatalf("Object %s should not exist, but returned: %#v", object, fi)
} }
@@ -1871,7 +1871,7 @@ func TestXLStorageVerifyFile(t *testing.T) {
 	algo = HighwayHash256S
 	shardSize := int64(1024 * 1024)
 	shard := make([]byte, shardSize)
-	w := newStreamingBitrotWriter(storage, volName, fileName, size, algo, shardSize)
+	w := newStreamingBitrotWriter(storage, "", volName, fileName, size, algo, shardSize)
 	reader := bytes.NewReader(data)
 	for {
 		// Using io.Copy instead of this loop will not work for us as io.Copy
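Note the convention running through every test update above: passing "" for origvolume opts out of the new existence check, so existing call sites keep their behavior, while object-path callers forward the bucket so the drive verifies it as a side effect of the operation. Illustrative call shapes; the second line's volume and path names are hypothetical:

// Tests and internal callers: no origin bucket, the check is skipped.
w := newStreamingBitrotWriter(storage, "", volName, fileName, size, algo, shardSize)

// Object write path (names hypothetical): the user's bucket rides along,
// so the drive confirms it exists while creating the temporary part file.
w = newStreamingBitrotWriter(storage, bucket, minioMetaTmpBucket, tmpPart, size, algo, shardSize)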