fix: walk missing entries with opts.Marker set (#19661)

`opts.Marker` causes many missed entries when used, since results are returned unsorted and pools are listed serially.

Switch to fully concurrent listing and merging across pools, returning sorted entries (a minimal sketch of the merge pattern follows below).

Returning errors on listings is impossible with the current API, so document that.

Return an error immediately if no drives are found, instead of returning an empty listing with no error.
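For intuition, the listing side of the fix is a k-way merge: each erasure set produces an already-sorted stream, and a single merger repeatedly emits the smallest head across all streams, so the combined output stays globally sorted and a marker can be applied safely. A minimal, self-contained sketch of that pattern, using plain strings in place of metaCacheEntry; mergeSorted is a hypothetical stand-in for illustration, not MinIO's mergeEntryChannels:

package main

import "fmt"

// mergeSorted emits the smallest head across all sorted inputs until
// every input is drained, so the combined output is globally sorted.
// Hypothetical stand-in for illustration only.
func mergeSorted(ins []chan string, out chan<- string) {
	defer close(out)
	heads := make([]string, len(ins))
	live := make([]bool, len(ins))
	for i, in := range ins {
		heads[i], live[i] = <-in // prime one head per input
	}
	for {
		best := -1
		for i := range ins {
			if live[i] && (best == -1 || heads[i] < heads[best]) {
				best = i
			}
		}
		if best == -1 {
			return // all inputs drained
		}
		out <- heads[best]
		heads[best], live[best] = <-ins[best] // advance the winner
	}
}

func main() {
	a, b := make(chan string, 2), make(chan string, 2)
	a <- "a/1"
	a <- "c/3"
	b <- "b/2"
	b <- "d/4"
	close(a)
	close(b)
	out := make(chan string)
	go mergeSorted([]chan string{a, b}, out)
	for name := range out {
		fmt.Println(name) // a/1 b/2 c/3 d/4
	}
}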
Klaus Post, 2024-05-03 10:26:51 -07:00, committed by GitHub
commit 4afb59e63f (parent 1526e7ece3)
2 changed files with 182 additions and 154 deletions


@@ -2034,44 +2034,38 @@ func (z *erasureServerPools) HealBucket(ctx context.Context, bucket string, opts
 }
 
 // Walk a bucket, optionally prefix recursively, until we have returned
-// all the content to objectInfo channel, it is callers responsibility
-// to allocate a receive channel for ObjectInfo, upon any unhandled
-// error walker returns error. Optionally if context.Done() is received
-// then Walk() stops the walker.
+// all the contents of the provided bucket+prefix.
+// TODO: Note that most errors will result in a truncated listing.
 func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts WalkOptions) error {
 	if err := checkListObjsArgs(ctx, bucket, prefix, ""); err != nil {
 		// Upon error close the channel.
 		xioutil.SafeClose(results)
 		return err
 	}
-	vcfg, _ := globalBucketVersioningSys.Get(bucket)
 	ctx, cancel := context.WithCancel(ctx)
-	go func() {
-		defer cancel()
-		defer xioutil.SafeClose(results)
-		for _, erasureSet := range z.serverPools {
-			var wg sync.WaitGroup
-			for _, set := range erasureSet.sets {
+	var entries []chan metaCacheEntry
+	for poolIdx, erasureSet := range z.serverPools {
+		for setIdx, set := range erasureSet.sets {
 			set := set
-			wg.Add(1)
-			go func() {
-				defer wg.Done()
+			listOut := make(chan metaCacheEntry, 1)
+			entries = append(entries, listOut)
 			disks, infos, _ := set.getOnlineDisksWithHealingAndInfo(true)
 			if len(disks) == 0 {
 				xioutil.SafeClose(results)
 				cancel()
+				return fmt.Errorf("Walk: no online disks found in pool %d, set %d", poolIdx, setIdx)
 			}
+			go func() {
+				defer xioutil.SafeClose(listOut)
-				send := func(objInfo ObjectInfo) bool {
+				send := func(e metaCacheEntry) {
+					if e.isDir() {
+						// Ignore directories.
+						return
+					}
 					select {
+					case listOut <- e:
 					case <-ctx.Done():
-						return false
-					case results <- objInfo:
-						return true
 					}
 				}
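Per the revised doc comment, the caller still allocates the results channel and Walk closes it; the returned error covers only immediate failures. A hypothetical caller sketch (walkAll is illustrative, not part of the codebase):

func walkAll(ctx context.Context, z *erasureServerPools, bucket, prefix string) error {
	results := make(chan ObjectInfo)
	if err := z.Walk(ctx, bucket, prefix, results, WalkOptions{}); err != nil {
		return err // immediate failure, e.g. no online disks
	}
	for oi := range results {
		fmt.Println(oi.Name) // entries arrive sorted across pools and sets
	}
	return nil // mid-listing errors surface only as a truncated listing
}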
@@ -2108,56 +2102,6 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts WalkOptions) error {
 				if opts.LatestOnly {
 					requestedVersions = 1
 				}
-				loadEntry := func(entry metaCacheEntry) {
-					if entry.isDir() {
-						return
-					}
-					if opts.LatestOnly {
-						fi, err := entry.fileInfo(bucket)
-						if err != nil {
-							cancel()
-							return
-						}
-						if opts.Filter != nil {
-							if opts.Filter(fi) {
-								if !send(fi.ToObjectInfo(bucket, fi.Name, vcfg != nil && vcfg.Versioned(fi.Name))) {
-									return
-								}
-							}
-						} else {
-							if !send(fi.ToObjectInfo(bucket, fi.Name, vcfg != nil && vcfg.Versioned(fi.Name))) {
-								return
-							}
-						}
-					} else {
-						fivs, err := entry.fileInfoVersions(bucket)
-						if err != nil {
-							cancel()
-							return
-						}
-						// Note: entry.fileInfoVersions returns versions sorted in reverse chronological order based on ModTime
-						if opts.VersionsSort == WalkVersionsSortAsc {
-							versionsSorter(fivs.Versions).reverse()
-						}
-						for _, version := range fivs.Versions {
-							if opts.Filter != nil {
-								if opts.Filter(version) {
-									if !send(version.ToObjectInfo(bucket, version.Name, vcfg != nil && vcfg.Versioned(version.Name))) {
-										return
-									}
-								}
-							} else {
-								if !send(version.ToObjectInfo(bucket, version.Name, vcfg != nil && vcfg.Versioned(version.Name))) {
-									return
-								}
-							}
-						}
-					}
-				}
 				// However many we ask, versions must exist on ~50%
 				listingQuorum := (askDisks + 1) / 2
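A quick check of the quorum arithmetic above: (askDisks + 1) / 2 is integer ceil(askDisks/2), i.e. at least half of the queried disks must agree:

for _, askDisks := range []int{1, 2, 3, 4, 5} {
	fmt.Println(askDisks, (askDisks+1)/2) // prints 1 1, 2 1, 3 2, 4 2, 5 3
}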
@@ -2184,13 +2128,13 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts WalkOptions) error {
 					filterPrefix:   filterPrefix,
 					recursive:      true,
 					forwardTo:      opts.Marker,
-					minDisks:       1,
+					minDisks:       listingQuorum,
 					reportNotFound: false,
-					agreed:         loadEntry,
+					agreed:         send,
 					partial: func(entries metaCacheEntries, _ []error) {
 						entry, ok := entries.resolve(&resolver)
 						if ok {
-							loadEntry(*entry)
+							send(*entry)
 						}
 					},
 					finished: nil,
@@ -2203,8 +2147,71 @@ func (z *erasureServerPools) Walk(ctx context.Context, bucket, prefix string, results chan<- ObjectInfo, opts WalkOptions) error {
 				}
 			}()
 		}
-		wg.Wait()
 	}
+
+	// Convert and filter merged entries.
+	merged := make(chan metaCacheEntry, 100)
+	vcfg, _ := globalBucketVersioningSys.Get(bucket)
+	go func() {
+		defer cancel()
+		defer xioutil.SafeClose(results)
+		send := func(oi ObjectInfo) bool {
+			select {
+			case results <- oi:
+				return true
+			case <-ctx.Done():
+				return false
+			}
+		}
+		for entry := range merged {
+			if opts.LatestOnly {
+				fi, err := entry.fileInfo(bucket)
+				if err != nil {
+					return
+				}
+				if opts.Filter != nil {
+					if opts.Filter(fi) {
+						if !send(fi.ToObjectInfo(bucket, fi.Name, vcfg != nil && vcfg.Versioned(fi.Name))) {
+							return
+						}
+					}
+				} else {
+					if !send(fi.ToObjectInfo(bucket, fi.Name, vcfg != nil && vcfg.Versioned(fi.Name))) {
+						return
+					}
+				}
+				continue
+			}
+			fivs, err := entry.fileInfoVersions(bucket)
+			if err != nil {
+				return
+			}
+			// Note: entry.fileInfoVersions returns versions sorted in reverse chronological order based on ModTime
+			if opts.VersionsSort == WalkVersionsSortAsc {
+				versionsSorter(fivs.Versions).reverse()
+			}
+			for _, version := range fivs.Versions {
+				if opts.Filter != nil {
+					if opts.Filter(version) {
+						if !send(version.ToObjectInfo(bucket, version.Name, vcfg != nil && vcfg.Versioned(version.Name))) {
+							return
+						}
+					}
+				} else {
+					if !send(version.ToObjectInfo(bucket, version.Name, vcfg != nil && vcfg.Versioned(version.Name))) {
+						return
+					}
+				}
+			}
+		}
+	}()
+	go func() {
+		// Merge all entries from all disks.
+		// We leave quorum at 1, since entries are already resolved to have the desired quorum.
+		// mergeEntryChannels will close 'merged' channel upon completion or cancellation.
+		storageLogIf(ctx, mergeEntryChannels(ctx, entries, merged, 1))
+	}()
 	return nil
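The send helper in the consumer goroutine above is the standard cancellable-send idiom. A self-contained generic version for illustration (trySend is hypothetical, not part of MinIO):

package main

import (
	"context"
	"fmt"
)

// trySend mirrors the cancellable-send idiom used by the consumer
// goroutine above: block until the value is delivered or the context
// ends, so a stalled receiver can never wedge the producer.
func trySend[T any](ctx context.Context, ch chan<- T, v T) bool {
	select {
	case ch <- v:
		return true
	case <-ctx.Done():
		return false
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	ch := make(chan int) // unbuffered, never read: a bare send would hang
	cancel()
	fmt.Println(trySend(ctx, ch, 42)) // false: context already cancelled
}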


@@ -217,6 +217,27 @@ func prepareErasure(ctx context.Context, nDisks int) (ObjectLayer, []string, error) {
 		return nil, nil, err
 	}
+
+	// Wait up to 10 seconds for disks to come online.
+	pools := obj.(*erasureServerPools)
+	t := time.Now()
+	for _, pool := range pools.serverPools {
+		for _, sets := range pool.erasureDisks {
+			for _, s := range sets {
+				if !s.IsLocal() {
+					for {
+						if s.IsOnline() {
+							break
+						}
+						time.Sleep(100 * time.Millisecond)
+						if time.Since(t) > 10*time.Second {
+							return nil, nil, errors.New("timeout waiting for disk to come online")
+						}
+					}
+				}
+			}
+		}
+	}
 	return obj, fsDirs, nil
 }
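The added wait loop is a plain poll-with-deadline. The same idea as a reusable helper, for illustration only (waitUntil is hypothetical, not in the codebase):

import (
	"errors"
	"time"
)

// waitUntil re-checks cond every interval until it holds or the
// deadline passes; the test setup above inlines this pattern.
func waitUntil(cond func() bool, timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for !cond() {
		if time.Now().After(deadline) {
			return errors.New("timeout waiting for condition")
		}
		time.Sleep(interval)
	}
	return nil
}

With it, the per-disk loop above would reduce to waitUntil(s.IsOnline, 10*time.Second, 100*time.Millisecond).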