fix: use the right channel to feed the data in (#18605)

this PR fixes a regression in batch replication
where we weren't sending any data from the Walk()
results: the wrong channel was being fed — a freshly
created `results` channel that nothing reads — instead
of `walkCh`, which the consumer side actually drains.
Harshavardhana 2023-12-06 18:17:03 -08:00 committed by GitHub
parent 7350a29fec
commit 53ce92b9ca
9 changed files with 34 additions and 39 deletions
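
Before the per-file diff, a minimal sketch of the failure mode, using illustrative names (objectInfo, walk) rather than MinIO's actual API: the producer was handed one channel while the consumer ranged over another, so the consumer never received anything.

package main

import "fmt"

type objectInfo struct{ name string }

// walk stands in for ObjectLayer.Walk: it feeds results into
// whatever channel it is given and closes it when done.
func walk(results chan<- objectInfo) {
	defer close(results)
	for _, n := range []string{"a", "b", "c"} {
		results <- objectInfo{name: n}
	}
}

func main() {
	walkCh := make(chan objectInfo, 100) // the channel the consumer reads

	// Buggy wiring (pre-fix): a second channel is created and handed
	// to the producer, orphaning walkCh; the loop below would then
	// block forever, receiving nothing:
	//   results := make(chan objectInfo, 100)
	//   go walk(results)

	// Fixed wiring: feed the channel the consumer actually reads.
	go walk(walkCh)

	for obj := range walkCh {
		fmt.Println("replicating", obj.name)
	}
}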

@@ -29,5 +29,8 @@ linters:
 issues:
   exclude-use-default: false
   exclude:
+    - "empty-block:"
+    - "unused-parameter:"
+    - "dot-imports:"
     - should have a package comment
     - error strings should not be capitalized or end with punctuation or a newline

@@ -8,7 +8,6 @@ GOOS := $(shell go env GOOS)
 VERSION ?= $(shell git describe --tags)
 TAG ?= "quay.io/minio/minio:$(VERSION)"
-GOLANGCI_VERSION = v1.51.2
 GOLANGCI_DIR = .bin/golangci/$(GOLANGCI_VERSION)
 GOLANGCI = $(GOLANGCI_DIR)/golangci-lint
@@ -23,7 +22,7 @@ help: ## print this help
 getdeps: ## fetch necessary dependencies
 	@mkdir -p ${GOPATH}/bin
-	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOLANGCI_DIR) $(GOLANGCI_VERSION)
+	@echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOLANGCI_DIR)
 	@echo "Installing msgp" && go install -v github.com/tinylib/msgp@6ac204f0b4d48d17ab4fa442134c7fba13127a4e
 	@echo "Installing stringer" && go install -v golang.org/x/tools/cmd/stringer@latest

@@ -1007,12 +1007,9 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
 			// None of the provided metadata filters match skip the object.
 			return false
 		}
-		// if one of source or target is non MinIO, just replicate the top most version like `mc mirror`
-		if (r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3) && !info.IsLatest {
-			return false
-		}
-		return true
+		// if one of source or target is non MinIO, just replicate the top most version like `mc mirror`
+		return !((r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3) && !info.IsLatest)
 	}

 	u, err := url.Parse(r.Target.Endpoint)
@@ -1123,8 +1120,7 @@ func (r *BatchJobReplicateV1) Start(ctx context.Context, api ObjectLayer, job Ba
 	// one of source/target is s3, skip delete marker and all versions under the same object name.
 	s3Type := r.Target.Type == BatchJobReplicateResourceS3 || r.Source.Type == BatchJobReplicateResourceS3
-	results := make(chan ObjectInfo, 100)
-	if err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, results, WalkOptions{
+	if err := api.Walk(ctx, r.Source.Bucket, r.Source.Prefix, walkCh, WalkOptions{
 		Marker:   lastObject,
 		Filter:   selectObj,
 		AskDisks: walkQuorum,
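
This hunk is the actual fix: the locally created `results` channel had no reader, so everything Walk() produced was lost; passing `walkCh` — the channel the surrounding code actually consumes — restores the flow. A side note on design, not part of this patch: one way to make this mistake harder to write is for the producer to allocate and return the channel, sketched below with illustrative names.

package main

import "fmt"

type objectInfo struct{ name string }

// walkObjects allocates the results channel itself and returns a
// receive-only view, so callers cannot wire up the wrong channel.
// Hypothetical variant; MinIO's Walk keeps a caller-supplied channel.
func walkObjects(names []string) <-chan objectInfo {
	out := make(chan objectInfo, 100)
	go func() {
		defer close(out)
		for _, n := range names {
			out <- objectInfo{name: n}
		}
	}()
	return out
}

func main() {
	for obj := range walkObjects([]string{"a", "b", "c"}) {
		fmt.Println("replicating", obj.name)
	}
}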

@@ -337,13 +337,13 @@ type SMA struct {
 	filledBuf bool
 }

-func newSMA(len int) *SMA {
-	if len <= 0 {
-		len = defaultWindowSize
+func newSMA(ln int) *SMA {
+	if ln <= 0 {
+		ln = defaultWindowSize
 	}
 	return &SMA{
-		buf:    make([]float64, len),
-		window: len,
+		buf:    make([]float64, ln),
+		window: ln,
 		idx:    0,
 	}
 }
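
The newSMA change renames the `len` parameter to `ln`, presumably to satisfy revive's redefines-builtin-id check (an assumption; the commit does not name the lint). A parameter called `len` shadows the builtin for the whole function body, as this standalone sketch shows:

package main

import "fmt"

// newBuf mirrors the old newSMA signature: the parameter shadows
// the builtin len inside the function body.
func newBuf(len int) []float64 {
	buf := make([]float64, len)
	// fmt.Println(len(buf)) // would not compile: len is an int here, not a function
	return buf
}

func main() {
	fmt.Println(len(newBuf(4))) // the builtin works fine outside the function
}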

@@ -373,15 +373,14 @@ func checkReplicateDelete(ctx context.Context, bucket string, dobj ObjectToDelet
 		if oi.DeleteMarker && (validReplStatus || replicate) {
 			dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
 			continue
-		} else {
-			// can be the case that other cluster is down and duplicate `mc rm --vid`
-			// is issued - this still needs to be replicated back to the other target
-			if !oi.VersionPurgeStatus.Empty() {
-				replicate = oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed
-				dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
-			}
-			continue
 		}
+		// can be the case that other cluster is down and duplicate `mc rm --vid`
+		// is issued - this still needs to be replicated back to the other target
+		if !oi.VersionPurgeStatus.Empty() {
+			replicate = oi.VersionPurgeStatus == Pending || oi.VersionPurgeStatus == Failed
+			dsc.Set(newReplicateTargetDecision(tgtArn, replicate, sync))
+		}
+		continue
 	}

 	tgt := globalBucketTargetSys.GetRemoteTargetClient(bucket, tgtArn)
 	// the target online status should not be used here while deciding

@@ -581,12 +581,12 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, int
 				foundAny = true
 				break
 			}
-			if next := f.updateCache.searchParent(parent); next == nil {
+			next := f.updateCache.searchParent(parent)
+			if next == nil {
 				foundAny = true
 				break
-			} else {
-				parent = *next
 			}
+			parent = *next
 		}
 		if !foundAny {
 			// Add non-compacted empty entry.

@@ -72,15 +72,15 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
 			// Decrement offset.
 			offset -= int64(len(block))
 			continue
-		} else {
-			// Skip until offset.
-			block = block[offset:]
-			// Reset the offset for next iteration to read everything
-			// from subsequent blocks.
-			offset = 0
 		}
+		// Skip until offset.
+		block = block[offset:]
+		// Reset the offset for next iteration to read everything
+		// from subsequent blocks.
+		offset = 0

 		// We have written all the blocks, write the last remaining block.
 		if write < int64(len(block)) {
 			n, err := dst.Write(block[:write])

@@ -148,12 +148,11 @@ func (config *TierConfigMgr) Remove(ctx context.Context, tier string) error {
 		return err
 	} else if inuse {
 		return errTierBackendNotEmpty
-	} else {
-		config.Lock()
-		delete(config.Tiers, tier)
-		delete(config.drivercache, tier)
-		config.Unlock()
 	}
+	config.Lock()
+	delete(config.Tiers, tier)
+	delete(config.drivercache, tier)
+	config.Unlock()
 	return nil
 }

@@ -1311,9 +1311,8 @@ func (c Config) getTargetKVS(subSys, target string, redactSecrets bool) KVS {
 			// clonedKV := kv
 			// clonedKV.Value = redactedSecret
 			// resultKVS = append(resultKVS, clonedKV)
-		} else {
-			resultKVS = append(resultKVS, kv)
 		}
+		resultKVS = append(resultKVS, kv)
 	}

 	return resultKVS
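
The hunks in the replication delete-check, writeDataBlocks, TierConfigMgr.Remove, and getTargetKVS all apply the same mechanical cleanup: when every earlier branch ends in a terminal statement (return, break, continue), the trailing else is redundant and its body can be outdented. revive calls this pattern indent-error-flow (naming the rule is an assumption). A standalone before/after sketch with invented names:

package main

import (
	"errors"
	"fmt"
)

var errNotEmpty = errors.New("backend not empty")

// before: the success path is buried in an else block.
func removeBefore(inuse bool, err error) error {
	if err != nil {
		return err
	} else if inuse {
		return errNotEmpty
	} else {
		fmt.Println("removing")
	}
	return nil
}

// after: early returns end each branch, so the else disappears
// and the main flow reads top to bottom at one indent level.
func removeAfter(inuse bool, err error) error {
	if err != nil {
		return err
	} else if inuse {
		return errNotEmpty
	}
	fmt.Println("removing")
	return nil
}

func main() {
	fmt.Println(removeBefore(false, nil), removeAfter(false, nil))
}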