remove all frivolous logs that may or may not be actionable (#18922)

for actionable inspections we have `mc support inspect`

we do not need double logging; healing will report the relevant
errors, if any, such as lost quorum.
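
To illustrate the pattern this change moves toward, here is a minimal Go sketch (hypothetical names, not MinIO's actual code): low-level reads return errors unlogged, and a single caller filters expected errors before deciding to log at all, in the spirit of the `IsErr` checks left in place below. This keeps one deduplicated log site instead of one per shard read.

package main

import (
	"errors"
	"log"
)

var (
	errDiskNotFound = errors.New("drive not found")
	errFileNotFound = errors.New("file not found")
)

// isErr reports whether err matches any of the expected errors,
// mirroring the IsErr helper used in the diff below.
func isErr(err error, expected ...error) bool {
	for _, e := range expected {
		if errors.Is(err, e) {
			return true
		}
	}
	return false
}

// readShard stands in for a per-shard read; it propagates the error
// without logging, leaving that decision to its caller.
func readShard() error {
	return errFileNotFound
}

func main() {
	// Log once, and only for unexpected errors; a missing shard is
	// left for healing to report and repair.
	if err := readShard(); err != nil && !isErr(err, errDiskNotFound, errFileNotFound) {
		log.Printf("unexpected shard error: %v", err)
	}
}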
Harshavardhana 2024-01-30 18:11:45 -08:00 committed by GitHub
parent 057192913c
commit caac9d216e
9 changed files with 13 additions and 123 deletions


@@ -20,16 +20,12 @@ package cmd
import (
"bytes"
"context"
"encoding/hex"
"fmt"
"hash"
"io"
"strings"
"sync"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
)
// Calculates bitrot in chunks and writes the hash into the stream.
@@ -154,29 +150,12 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
// Can never happen unless there are programmer bugs
return 0, errUnexpected
}
ignoredErrs := []error{
errDiskNotFound,
}
if strings.HasPrefix(b.volume, minioMetaBucket) {
ignoredErrs = append(ignoredErrs,
errFileNotFound,
errVolumeNotFound,
errFileVersionNotFound,
)
}
if b.rc == nil {
// For the first ReadAt() call we need to open the stream for reading.
b.currOffset = offset
streamOffset := (offset/b.shardSize)*int64(b.h.Size()) + offset
if len(b.data) == 0 && b.tillOffset != streamOffset {
b.rc, err = b.disk.ReadFileStream(context.TODO(), b.volume, b.filePath, streamOffset, b.tillOffset-streamOffset)
if err != nil {
if !IsErr(err, ignoredErrs...) {
logger.LogOnceIf(GlobalContext,
fmt.Errorf("Reading erasure shards at (%s: %s/%s) returned '%w', will attempt to reconstruct if we have quorum",
b.disk, b.volume, b.filePath, err), "bitrot-read-file-stream-"+b.volume+"-"+b.filePath)
}
}
} else {
b.rc = io.NewSectionReader(bytes.NewReader(b.data), streamOffset, b.tillOffset-streamOffset)
}
@@ -198,10 +177,7 @@ func (b *streamingBitrotReader) ReadAt(buf []byte, offset int64) (int, error) {
return 0, err
}
b.h.Write(buf)
if !bytes.Equal(b.h.Sum(nil), b.hashBytes) {
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s - content hash does not match - expected %s, got %s",
b.disk, b.volume, b.filePath, hex.EncodeToString(b.hashBytes), hex.EncodeToString(b.h.Sum(nil))))
return 0, errFileCorrupt
}
b.currOffset += int64(len(buf))


@@ -19,11 +19,8 @@ package cmd
import (
"context"
"fmt"
"hash"
"io"
"github.com/minio/minio/internal/logger"
)
// Implementation to calculate bitrot for the whole file.
@@ -38,12 +35,10 @@ type wholeBitrotWriter struct {
func (b *wholeBitrotWriter) Write(p []byte) (int, error) {
err := b.disk.AppendFile(context.TODO(), b.volume, b.filePath, p)
if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s returned %w", b.disk, err))
return 0, err
}
_, err = b.Hash.Write(p)
if err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s returned %w", b.disk, err))
return 0, err
}
return len(p), nil
@@ -72,12 +67,10 @@ func (b *wholeBitrotReader) ReadAt(buf []byte, offset int64) (n int, err error)
if b.buf == nil {
b.buf = make([]byte, b.tillOffset-offset)
if _, err := b.disk.ReadFile(context.TODO(), b.volume, b.filePath, offset, b.buf, b.verifier); err != nil {
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, err))
return 0, err
}
}
if len(b.buf) < len(buf) {
logger.LogIf(GlobalContext, fmt.Errorf("Drive: %s -> %s/%s returned %w", b.disk, b.volume, b.filePath, errLessData))
return 0, errLessData
}
n = copy(buf, b.buf)


@@ -80,11 +80,9 @@ func (e *Erasure) EncodeData(ctx context.Context, data []byte) ([][]byte, error)
}
encoded, err := e.encoder().Split(data)
if err != nil {
logger.LogIf(ctx, err)
return nil, err
}
if err = e.encoder().Encode(encoded); err != nil {
logger.LogIf(ctx, err)
return nil, err
}
return encoded, nil
@@ -111,11 +109,7 @@ func (e *Erasure) DecodeDataBlocks(data [][]byte) error {
// DecodeDataAndParityBlocks decodes the given erasure-coded data and verifies it.
// It returns an error if the decoding failed.
func (e *Erasure) DecodeDataAndParityBlocks(ctx context.Context, data [][]byte) error {
if err := e.encoder().Reconstruct(data); err != nil {
logger.LogIf(ctx, err)
return err
}
return nil
return e.encoder().Reconstruct(data)
}
// ShardSize - returns the actual shard size from the erasure blockSize.


@@ -26,7 +26,6 @@ import (
"sync/atomic"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
)
// Reads in parallel from readers.
@@ -211,11 +210,9 @@ func (p *parallelReader) Read(dst [][]byte) ([][]byte, error) {
// A set of preferred drives can be supplied. In that case they will be used and the data reconstructed.
func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []io.ReaderAt, offset, length, totalLength int64, prefer []bool) (written int64, derr error) {
if offset < 0 || length < 0 {
logger.LogIf(ctx, errInvalidArgument)
return -1, errInvalidArgument
}
if offset+length > totalLength {
logger.LogIf(ctx, errInvalidArgument)
return -1, errInvalidArgument
}
@@ -269,7 +266,6 @@ func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []io.Read
}
if err = e.DecodeDataBlocks(bufs); err != nil {
logger.LogIf(ctx, err)
return -1, err
}
@@ -282,7 +278,6 @@ func (e Erasure) Decode(ctx context.Context, writer io.Writer, readers []io.Read
}
if bytesWritten != length {
logger.LogIf(ctx, errLessData)
return bytesWritten, errLessData
}
@@ -321,7 +316,6 @@ func (e Erasure) Heal(ctx context.Context, writers []io.Writer, readers []io.Rea
}
if err = e.DecodeDataAndParityBlocks(ctx, bufs); err != nil {
logger.LogOnceIf(ctx, err, "erasure-heal-decode")
return err
}
@@ -332,7 +326,6 @@ func (e Erasure) Heal(ctx context.Context, writers []io.Writer, readers []io.Rea
}
if err = w.Write(ctx, bufs); err != nil {
logger.LogOnceIf(ctx, err, "erasure-heal-write")
return err
}
}


@@ -22,9 +22,6 @@ import (
"fmt"
"io"
"sync"
"github.com/minio/minio/internal/hash"
"github.com/minio/minio/internal/logger"
)
// Writes in parallel to writers
@@ -92,29 +89,26 @@ func (e *Erasure) Encode(ctx context.Context, src io.Reader, writers []io.Writer
io.EOF,
io.ErrUnexpectedEOF,
}...) {
if !hash.IsChecksumMismatch(err) {
logger.LogIf(ctx, err)
}
return 0, err
}
}
eof := err == io.EOF || err == io.ErrUnexpectedEOF
if n == 0 && total != 0 {
// Reached EOF, nothing more to be done.
break
}
// We handle the case where n == 0 and total == 0 by creating empty data and parity files.
blocks, err = e.EncodeData(ctx, buf[:n])
if err != nil {
logger.LogIf(ctx, err)
return 0, err
}
if err = writer.Write(ctx, blocks); err != nil {
logger.LogIf(ctx, err)
return 0, err
}
total += int64(n)
if eof {
break


@@ -20,9 +20,7 @@ package cmd
import (
"context"
"errors"
"fmt"
"hash/crc32"
"io"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/sync/errgroup"
@@ -148,26 +146,6 @@ func hashOrder(key string, cardinality int) []int {
return nums
}
var readFileInfoIgnoredErrs = append(objectOpIgnoredErrs,
errFileNotFound,
errVolumeNotFound,
errFileVersionNotFound,
io.ErrUnexpectedEOF, // sometimes we read without locks; ignore these errors
io.EOF, // sometimes we read without locks; ignore these errors
)
func readFileInfo(ctx context.Context, disk StorageAPI, origbucket, bucket, object, versionID string, opts ReadOptions) (FileInfo, error) {
fi, err := disk.ReadVersion(ctx, origbucket, bucket, object, versionID, opts)
if err != nil && !IsErr(err, readFileInfoIgnoredErrs...) {
logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)",
disk.String(), bucket, object, err),
disk.String())
}
return fi, err
}
// Reads all `xl.meta` metadata as a FileInfo slice.
// Returns error slice indicating the failed metadata reads.
func readAllFileInfo(ctx context.Context, disks []StorageAPI, origbucket string, bucket, object, versionID string, readData, healing bool) ([]FileInfo, []error) {
@@ -186,7 +164,7 @@ func readAllFileInfo(ctx context.Context, disks []StorageAPI, origbucket string,
if disks[index] == nil {
return errDiskNotFound
}
metadataArray[index], err = readFileInfo(ctx, disks[index], origbucket, bucket, object, versionID, opts)
metadataArray[index], err = disks[index].ReadVersion(ctx, origbucket, bucket, object, versionID, opts)
return err
}, index)
}
@@ -330,15 +308,12 @@ var (
// returns an error if totalSize is less than -1, partSize is 0, or partIndex is less than 1.
func calculatePartSizeFromIdx(ctx context.Context, totalSize int64, partSize int64, partIndex int) (currPartSize int64, err error) {
if totalSize < -1 {
logger.LogIf(ctx, errInvalidArgument)
return 0, errInvalidArgument
}
if partSize == 0 {
logger.LogIf(ctx, errPartSizeZero)
return 0, errPartSizeZero
}
if partIndex < 1 {
logger.LogIf(ctx, errPartSizeIndex)
return 0, errPartSizeIndex
}
if totalSize == -1 {


@@ -46,7 +46,6 @@ import (
"github.com/minio/pkg/v2/mimedb"
"github.com/minio/pkg/v2/sync/errgroup"
"github.com/minio/pkg/v2/wildcard"
"github.com/tinylib/msgp/msgp"
)
// List of all errors that can be ignored in object operations.
@@ -546,29 +545,6 @@ func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object st
return m, err
}
var readRawFileInfoErrs = append(objectOpIgnoredErrs,
errFileNotFound,
errFileNameTooLong,
errVolumeNotFound,
errFileVersionNotFound,
io.ErrUnexpectedEOF, // sometimes we read without locks; ignore these errors
io.EOF, // sometimes we read without locks; ignore these errors
msgp.ErrShortBytes,
context.DeadlineExceeded,
context.Canceled,
)
func readRawFileInfo(ctx context.Context, disk StorageAPI, bucket, object string, readData bool) (RawFileInfo, error) {
rf, err := disk.ReadXL(ctx, bucket, object, readData)
if err != nil && !IsErr(err, readRawFileInfoErrs...) {
logger.LogOnceIf(ctx, fmt.Errorf("Drive %s, path (%s/%s) returned an error (%w)",
disk.String(), bucket, object, err),
disk.String())
}
return rf, err
}
func fileInfoFromRaw(ri RawFileInfo, bucket, object string, readData, inclFreeVers, allParts bool) (FileInfo, error) {
var xl xlMetaV2
if err := xl.LoadOrConvert(ri.Buf); err != nil {
@@ -611,7 +587,7 @@ func readAllRawFileInfo(ctx context.Context, disks []StorageAPI, bucket, object
if disks[index] == nil {
return errDiskNotFound
}
rf, err := readRawFileInfo(ctx, disks[index], bucket, object, readData)
rf, err := disks[index].ReadXL(ctx, bucket, object, readData)
if err != nil {
return err
}
@@ -791,10 +767,10 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
if opts.VersionID != "" {
// Read a specific version ID
fi, err = readFileInfo(ctx, disk, "", bucket, object, opts.VersionID, ropts)
fi, err = disk.ReadVersion(ctx, "", bucket, object, opts.VersionID, ropts)
} else {
// Read the latest version
rfi, err = readRawFileInfo(ctx, disk, bucket, object, readData)
rfi, err = disk.ReadXL(ctx, bucket, object, readData)
if err == nil {
fi, err = fileInfoFromRaw(rfi, bucket, object, readData, opts.InclFreeVersions, true)
}
@@ -829,7 +805,11 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
var missingBlocks int
for i := range errs {
if errors.Is(errs[i], errFileNotFound) {
if IsErr(errs[i],
errFileNotFound,
errFileVersionNotFound,
errFileCorrupt,
) {
missingBlocks++
}
}


@@ -25,7 +25,6 @@ import (
"strings"
"github.com/klauspost/reedsolomon"
"github.com/minio/minio/internal/logger"
)
// getDataBlockLen - get length of data blocks from encoded blocks.
@@ -43,19 +42,16 @@ func getDataBlockLen(enBlocks [][]byte, dataBlocks int) int {
func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, dataBlocks int, offset int64, length int64) (int64, error) {
// Offset and length cannot be negative.
if offset < 0 || length < 0 {
logger.LogIf(ctx, errUnexpected)
return 0, errUnexpected
}
// Do we have enough blocks?
if len(enBlocks) < dataBlocks {
logger.LogIf(ctx, fmt.Errorf("diskBlocks(%d)/dataBlocks(%d) - %w", len(enBlocks), dataBlocks, reedsolomon.ErrTooFewShards))
return 0, reedsolomon.ErrTooFewShards
}
// Do we have enough data?
if int64(getDataBlockLen(enBlocks, dataBlocks)) < length {
logger.LogIf(ctx, fmt.Errorf("getDataBlockLen(enBlocks, dataBlocks)(%d)/length(%d) - %w", getDataBlockLen(enBlocks, dataBlocks), length, reedsolomon.ErrShortData))
return 0, reedsolomon.ErrShortData
}
@@ -85,11 +81,6 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
if write < int64(len(block)) {
n, err := dst.Write(block[:write])
if err != nil {
// The writer will be closed in case of range queries, which will emit ErrClosedPipe.
// The reader pipe might be closed at ListObjects; ignore io.EOF.
if err != io.ErrClosedPipe && err != io.EOF {
logger.LogIf(ctx, err)
}
return 0, err
}
totalWritten += int64(n)
@@ -99,11 +90,6 @@ func writeDataBlocks(ctx context.Context, dst io.Writer, enBlocks [][]byte, data
// Copy the block.
n, err := dst.Write(block)
if err != nil {
// The writer will be closed in case of range queries, which will emit ErrClosedPipe.
// The reader pipe might be closed at ListObjects; ignore io.EOF.
if err != io.ErrClosedPipe && err != io.EOF {
logger.LogIf(ctx, err)
}
return 0, err
}


@@ -497,7 +497,6 @@ func (er erasureObjects) nsScanner(ctx context.Context, buckets []BucketInfo, wa
cache.Info.SkipHealing = healing
cache.Info.NextCycle = wantCycle
if cache.Info.Name != bucket.Name {
logger.LogIf(ctx, fmt.Errorf("cache name mismatch: %s != %s", cache.Info.Name, bucket.Name))
cache.Info = dataUsageCacheInfo{
Name: bucket.Name,
LastUpdate: time.Time{},