add ruleguard support, fix all the reported issues (#10335)

Harshavardhana authored 2020-08-24 12:11:20 -07:00, committed by GitHub
parent bc2ebe0021
commit caad314faa
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
46 changed files with 803 additions and 128 deletions

View File

@ -17,11 +17,12 @@ checks:
getdeps:
@mkdir -p ${GOPATH}/bin
@which golangci-lint 1>/dev/null || (echo "Installing golangci-lint" && curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(GOPATH)/bin v1.27.0)
@which ruleguard 1>/dev/null || (echo "Installing ruleguard" && GO111MODULE=off go get github.com/quasilyte/go-ruleguard/...)
crosscompile:
@(env bash $(PWD)/buildscripts/cross-compile.sh)
verifiers: getdeps fmt lint
verifiers: getdeps fmt lint ruleguard
fmt:
@echo "Running $@ check"
@ -33,6 +34,10 @@ lint:
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml
ruleguard:
@echo "Running $@ check"
@ruleguard -rules ruleguard.rules.go github.com/minio/minio/...
# Builds minio, runs the verifiers then runs the tests.
check: test
test: verifiers build
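
For context, the new ruleguard target scans the module with the rules file added at the end of this commit. A minimal sketch (illustrative code of my own, not from the repository) of the two main patterns those rules flag, redundant conversions and == on time.Time:

package main

import (
	"fmt"
	"time"
)

func main() {
	var x int64 = 42
	fmt.Println(int64(x)) // unconvert rule: x is already an int64, the conversion is unnecessary

	now := time.Now()
	later := now.Add(time.Second)
	fmt.Println(now == later) // timeeq rule: compare time.Time with .Equal(), not ==
}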

View File

@ -328,7 +328,7 @@ func (d *dataUsageCache) bucketsUsageInfo(buckets []BucketInfo) map[string]Bucke
flat := d.flatten(*e)
dst[bucket.Name] = BucketUsageInfo{
Size: uint64(flat.Size),
ObjectsCount: uint64(flat.Objects),
ObjectsCount: flat.Objects,
ObjectSizesHistogram: flat.ObjSizes.toMap(),
}
}
@ -345,7 +345,7 @@ func (d *dataUsageCache) bucketUsageInfo(bucket string) BucketUsageInfo {
flat := d.flatten(*e)
return BucketUsageInfo{
Size: uint64(flat.Size),
ObjectsCount: uint64(flat.Objects),
ObjectsCount: flat.Objects,
ObjectSizesHistogram: flat.ObjSizes.toMap(),
}
}

View File

@ -46,7 +46,7 @@ const (
cacheMetaJSONFile = "cache.json"
cacheDataFile = "part.1"
cacheMetaVersion = "1.0.0"
cacheExpiryDays = time.Duration(90 * time.Hour * 24) // defaults to 90 days
cacheExpiryDays = 90 * time.Hour * 24 // defaults to 90 days
// SSECacheEncrypted is the metadata key indicating that the object
// is a cache entry encrypted with cache KMS master key in globalCacheKMS.
SSECacheEncrypted = "X-Minio-Internal-Encrypted-Cache"

View File

@ -102,7 +102,7 @@ func (c *cacheControl) isStale(modTime time.Time) bool {
func cacheControlOpts(o ObjectInfo) *cacheControl {
c := cacheControl{}
m := o.UserDefined
if o.Expires != timeSentinel {
if !o.Expires.Equal(timeSentinel) {
c.expiry = o.Expires
}
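
The switch from == to Equal matters because time.Time carries a Location and, since Go 1.9, a monotonic clock reading; struct equality compares all of that, while Equal compares only the instant. A small standalone illustration, not repository code:

package main

import (
	"fmt"
	"time"
)

func main() {
	t := time.Date(2020, 8, 24, 12, 0, 0, 0, time.UTC)
	u := t.In(time.FixedZone("PDT", -7*60*60)) // same instant, different Location

	fmt.Println(t == u)     // false: struct comparison sees different Locations
	fmt.Println(t.Equal(u)) // true: the instants are identical
}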

View File

@ -45,7 +45,7 @@ func TestGetCacheControlOpts(t *testing.T) {
t.Run("", func(t *testing.T) {
m := make(map[string]string)
m["cache-control"] = testCase.cacheControlHeaderVal
if testCase.expiryHeaderVal != timeSentinel {
if !testCase.expiryHeaderVal.Equal(timeSentinel) {
m["expires"] = testCase.expiryHeaderVal.String()
}
c := cacheControlOpts(ObjectInfo{UserDefined: m, Expires: testCase.expiryHeaderVal})

View File

@ -172,7 +172,7 @@ func getMetadata(objInfo ObjectInfo) map[string]string {
if objInfo.ContentEncoding != "" {
metadata["content-encoding"] = objInfo.ContentEncoding
}
if objInfo.Expires != timeSentinel {
if !objInfo.Expires.Equal(timeSentinel) {
metadata["expires"] = objInfo.Expires.Format(http.TimeFormat)
}
for k, v := range objInfo.UserDefined {

View File

@ -27,7 +27,7 @@ func TestCacheMetadataObjInfo(t *testing.T) {
if objInfo.Size != 0 {
t.Fatal("Unexpected object info value for Size", objInfo.Size)
}
if objInfo.ModTime != timeSentinel {
if !objInfo.ModTime.Equal(timeSentinel) {
t.Fatal("Unexpected object info value for ModTime ", objInfo.ModTime)
}
if objInfo.IsDir {

View File

@ -26,7 +26,7 @@ const (
dynamicTimeoutIncreaseThresholdPct = 0.33 // Upper threshold for failures in order to increase timeout
dynamicTimeoutDecreaseThresholdPct = 0.10 // Lower threshold for failures in order to decrease timeout
dynamicTimeoutLogSize = 16
maxDuration = time.Duration(1<<63 - 1)
maxDuration = 1<<63 - 1
)
// timeouts that are dynamically adapted based on actual usage results
@ -110,7 +110,7 @@ func (dt *dynamicTimeout) adjust(entries [dynamicTimeoutLogSize]time.Duration) {
// We are hitting the timeout relatively few times, so decrease the timeout
average = average * 125 / 100 // Add buffer of 25% on top of average
timeout := (atomic.LoadInt64(&dt.timeout) + int64(average)) / 2 // Middle between current timeout and average success
timeout := (atomic.LoadInt64(&dt.timeout) + average) / 2 // Middle between current timeout and average success
if timeout < dt.minimum {
timeout = dt.minimum
}
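
Spelled out, the decrease branch above pads the observed average success time by 25% and then moves halfway from the current timeout toward that padded value, never going below the configured minimum. A toy sketch with plain int64 values (the real code keeps the timeout in an atomic field):

package main

import "fmt"

func adjustDown(current, average, minimum int64) int64 {
	average = average * 125 / 100      // add a 25% buffer on top of the average
	timeout := (current + average) / 2 // midpoint between current timeout and buffered average
	if timeout < minimum {
		timeout = minimum
	}
	return timeout
}

func main() {
	fmt.Println(adjustDown(10, 2, 1)) // (10 + 2) / 2 = 6
}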

View File

@ -666,7 +666,7 @@ func (o *ObjectInfo) GetDecryptedRange(rs *HTTPRangeSpec) (encOff, encLength, sk
if rs == nil {
// No range, so offsets refer to the whole object.
return 0, int64(o.Size), 0, 0, 0, nil
return 0, o.Size, 0, 0, 0, nil
}
// Assemble slice of (decrypted) part sizes in `sizes`

View File

@ -373,7 +373,7 @@ func TestGetDecryptedRange(t *testing.T) {
sum := int64(0)
for i, s := range sizes {
r[i].Number = i
r[i].Size = int64(getEncSize(s))
r[i].Size = getEncSize(s)
sum += r[i].Size
}
return ObjectInfo{

View File

@ -125,7 +125,7 @@ func (e *Erasure) ShardFileSize(totalLength int64) int64 {
return -1
}
numShards := totalLength / e.blockSize
lastBlockSize := totalLength % int64(e.blockSize)
lastBlockSize := totalLength % e.blockSize
lastShardSize := ceilFrac(lastBlockSize, int64(e.dataBlocks))
return numShards*e.ShardSize() + lastShardSize
}
@ -134,7 +134,7 @@ func (e *Erasure) ShardFileSize(totalLength int64) int64 {
func (e *Erasure) ShardFileOffset(startOffset, length, totalLength int64) int64 {
shardSize := e.ShardSize()
shardFileSize := e.ShardFileSize(totalLength)
endShard := (startOffset + int64(length)) / e.blockSize
endShard := (startOffset + length) / e.blockSize
tillOffset := endShard*shardSize + shardSize
if tillOffset > shardFileSize {
tillOffset = shardFileSize
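
To make the shard-size arithmetic concrete, here is a worked example under the definitions implied above (ShardSize is the ceiling of blockSize divided by dataBlocks, and ceilFrac is ceiling division); the numbers are illustrative:

package main

import "fmt"

func ceilFrac(numerator, denominator int64) int64 {
	if denominator == 0 {
		return 0
	}
	return (numerator + denominator - 1) / denominator
}

func main() {
	const (
		blockSize  int64 = 10 * 1024 * 1024 // 10 MiB erasure block
		dataBlocks int64 = 5
	)
	totalLength := int64(25 * 1024 * 1024) // 25 MiB object

	shardSize := ceilFrac(blockSize, dataBlocks)         // 2 MiB per shard per block
	numShards := totalLength / blockSize                 // 2 full blocks
	lastBlockSize := totalLength % blockSize             // 5 MiB remainder
	lastShardSize := ceilFrac(lastBlockSize, dataBlocks) // 1 MiB

	fmt.Println(numShards*shardSize + lastShardSize) // 5242880 bytes (5 MiB) per drive
}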

View File

@ -83,7 +83,7 @@ func TestCommonTime(t *testing.T) {
for i, testCase := range testCases {
// Obtain a common mod time from modTimes slice.
ctime, _ := commonTime(testCase.times)
if testCase.time != ctime {
if !testCase.time.Equal(ctime) {
t.Fatalf("Test case %d, expect to pass but failed. Wanted modTime: %s, got modTime: %s\n", i+1, testCase.time, ctime)
}
}

View File

@ -365,7 +365,7 @@ func migrateCacheData(ctx context.Context, c *diskCache, bucket, object, oldfile
}
actualSize, _ = sio.EncryptedSize(uint64(st.Size()))
}
_, err = c.bitrotWriteToCache(destDir, cacheDataFile, reader, uint64(actualSize))
_, err = c.bitrotWriteToCache(destDir, cacheDataFile, reader, actualSize)
return err
}

View File

@ -478,7 +478,7 @@ func TestGCSAttrsToObjectInfo(t *testing.T) {
if objInfo.Bucket != attrs.Bucket {
t.Fatalf("Test failed with Bucket mistmatch, expected %s, got %s", attrs.Bucket, objInfo.Bucket)
}
if objInfo.ModTime != attrs.Updated {
if !objInfo.ModTime.Equal(attrs.Updated) {
t.Fatalf("Test failed with ModTime mistmatch, expected %s, got %s", attrs.Updated, objInfo.ModTime)
}
if objInfo.Size != attrs.Size {

View File

@ -36,22 +36,22 @@ func setInternalTCPParameters(c syscall.RawConn) error {
// TCPFastOpenConnect sets the underlying socket to use
// the TCP fast open connect. This feature is supported
// since Linux 4.11.
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_FASTOPEN_CONNECT, 1)
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, unix.TCP_FASTOPEN_CONNECT, 1)
// The time (in seconds) the connection needs to remain idle before
// TCP starts sending keepalive probes, set this to 5 secs
// system defaults to 7200 secs!!!
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, 5)
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, 5)
// Number of probes.
// ~ cat /proc/sys/net/ipv4/tcp_keepalive_probes (defaults to 9, we reduce it to 5)
// 9
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, syscall.TCP_KEEPCNT, 5)
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPCNT, 5)
// Wait time after successful probe in seconds.
// ~ cat /proc/sys/net/ipv4/tcp_keepalive_intvl (defaults to 75 secs, we reduce it to 3 secs)
// 75
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, 3)
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, 3)
})
}
@ -85,7 +85,7 @@ func NewCustomDialContext(dialTimeout time.Duration) DialContext {
// TCPFastOpenConnect sets the underlying socket to use
// the TCP fast open connect. This feature is supported
// since Linux 4.11.
_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, unix.TCP_FASTOPEN_CONNECT, 1)
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, unix.TCP_FASTOPEN_CONNECT, 1)
})
},
}
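
For readers unfamiliar with the pattern above: the fd these SetsockoptInt calls receive comes from a syscall.RawConn Control callback, wired through net.Dialer. A minimal Linux-oriented sketch (the option set here is illustrative, not the repository's tuning):

package main

import (
	"net"
	"syscall"
	"time"
)

func main() {
	d := net.Dialer{
		Timeout: 5 * time.Second,
		Control: func(network, address string, c syscall.RawConn) error {
			return c.Control(func(fd uintptr) {
				// fd is the raw socket descriptor; platform-specific TCP
				// options are applied here before the connection is used.
				_ = syscall.SetsockoptInt(int(fd), syscall.IPPROTO_TCP, syscall.TCP_NODELAY, 1)
			})
		},
	}
	if conn, err := d.Dial("tcp", "example.com:80"); err == nil {
		conn.Close()
	}
}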

View File

@ -106,7 +106,7 @@ func healingMetricsPrometheus(ch chan<- prometheus.Metric) {
}
healMetricsNamespace := "self_heal"
dur := time.Duration(-1)
var dur time.Duration
if !bgSeq.lastHealActivity.IsZero() {
dur = time.Since(bgSeq.lastHealActivity)
}

View File

@ -280,22 +280,21 @@ func TestExtractHostPort(t *testing.T) {
for i, testCase := range testCases {
host, port, err := extractHostPort(testCase.addr)
if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("Test %d: should succeed but failed with err: %v", i+1, err)
}
if testCase.expectedErr == nil && err != nil {
t.Fatalf("Test %d: should succeed but failed with err: %v", i+1, err)
}
if testCase.expectedErr != nil && err == nil {
t.Fatalf("Test %d:, should fail but succeeded.", i+1)
}
if err == nil {
if host != testCase.host {
t.Fatalf("Test %d: expected: %v, found: %v", i+1, testCase.host, host)
}
if port != testCase.port {
t.Fatalf("Test %d: expected: %v, found: %v", i+1, testCase.port, port)
}
}
if testCase.expectedErr != nil {
if err == nil {
t.Fatalf("Test %d:, should fail but succeeded.", i+1)
}
if testCase.expectedErr != nil && err != nil {
if testCase.expectedErr.Error() != err.Error() {
t.Fatalf("Test %d: failed with different error, expected: '%v', found:'%v'.", i+1, testCase.expectedErr, err)
}

View File

@ -22,5 +22,5 @@ package cmd
import "syscall"
func direntInode(dirent *syscall.Dirent) uint64 {
return uint64(dirent.Ino)
return dirent.Ino
}

View File

@ -217,14 +217,14 @@ func (client *peerRESTClient) doNetOBDTest(ctx context.Context, dataSize int64,
finish()
end := time.Now()
latency := float64(end.Sub(start).Seconds())
latency := end.Sub(start).Seconds()
if latency > maxLatencyForSizeThreads(dataSize, threadCount) {
slowSample()
}
/* Throughput = (total data transferred across all threads / time taken) */
throughput := float64(float64((after - before)) / latency)
throughput := float64((after - before)) / latency
latencies = append(latencies, latency)
throughputs = append(throughputs, throughput)
@ -272,7 +272,7 @@ func maxLatencyForSizeThreads(size int64, threadCount uint) float64 {
// 10 Gbit | 2s
// 1 Gbit | inf
throughput := float64(int64(size) * int64(threadCount))
throughput := float64(size * int64(threadCount))
if throughput >= Gbit100 {
return 2.0
} else if throughput >= Gbit40 {

View File

@ -301,8 +301,8 @@ func printStorageInfo(storageInfo StorageInfo) {
func printCacheStorageInfo(storageInfo CacheStorageInfo) {
msg := fmt.Sprintf("%s %s Free, %s Total", color.Blue("Cache Capacity:"),
humanize.IBytes(uint64(storageInfo.Free)),
humanize.IBytes(uint64(storageInfo.Total)))
humanize.IBytes(storageInfo.Free),
humanize.IBytes(storageInfo.Total))
logStartupMessage(msg)
}

View File

@ -62,7 +62,7 @@ func validateCredentialfields(t *testing.T, testNum int, expectedCredentials cre
if expectedCredentials.accessKey != actualCredential.accessKey {
t.Errorf("Test %d: AccessKey mismatch: Expected \"%s\", got \"%s\"", testNum, expectedCredentials.accessKey, actualCredential.accessKey)
}
if expectedCredentials.scope.date != actualCredential.scope.date {
if !expectedCredentials.scope.date.Equal(actualCredential.scope.date) {
t.Errorf("Test %d: Date mismatch:Expected \"%s\", got \"%s\"", testNum, expectedCredentials.scope.date, actualCredential.scope.date)
}
if expectedCredentials.scope.region != actualCredential.scope.region {

View File

@ -184,7 +184,7 @@ func TestParseHexUint(t *testing.T) {
for _, tt := range tests {
got, err := parseHexUint([]byte(tt.in))
if tt.wantErr != "" {
if !strings.Contains(fmt.Sprint(err), tt.wantErr) {
if err != nil && !strings.Contains(err.Error(), tt.wantErr) {
t.Errorf("parseHexUint(%q) = %v, %v; want error %q", tt.in, got, err, tt.wantErr)
}
} else {

View File

@ -19,7 +19,6 @@ package cmd
import (
"context"
"encoding/xml"
"fmt"
"net/http"
xhttp "github.com/minio/minio/cmd/http"
@ -46,7 +45,7 @@ func writeSTSErrorResponse(ctx context.Context, w http.ResponseWriter, isErrCode
stsErrorResponse.RequestID = w.Header().Get(xhttp.AmzRequestID)
stsErrorResponse.Error.Message = err.Description
if errCtxt != nil {
stsErrorResponse.Error.Message = fmt.Sprintf("%v", errCtxt)
stsErrorResponse.Error.Message = errCtxt.Error()
}
var logKind logger.Kind
switch errCode {

View File

@ -158,8 +158,8 @@ func calculateStreamContentLength(dataLen, chunkSize int64) int64 {
if dataLen <= 0 {
return 0
}
chunksCount := int64(dataLen / chunkSize)
remainingBytes := int64(dataLen % chunkSize)
chunksCount := dataLen / chunkSize
remainingBytes := dataLen % chunkSize
var streamLen int64
streamLen += chunksCount * calculateSignedChunkLength(chunkSize)
if remainingBytes > 0 {

View File

@ -75,7 +75,7 @@ func TestReleaseTagToNFromTimeConversion(t *testing.T) {
if err != nil && err.Error() != testCase.errStr {
t.Errorf("Test %d: Expected %v but got %v", i+1, testCase.errStr, err.Error())
}
if err == nil && tagTime != testCase.t {
if err == nil && !tagTime.Equal(testCase.t) {
t.Errorf("Test %d: Expected %v but got %v", i+1, testCase.t, tagTime)
}
}

View File

@ -380,10 +380,10 @@ func (j xlMetaV2Object) ToFileInfo(volume, path string) (FileInfo, error) {
}
fi.Parts = make([]ObjectPartInfo, len(j.PartNumbers))
for i := range fi.Parts {
fi.Parts[i].Number = int(j.PartNumbers[i])
fi.Parts[i].Size = int64(j.PartSizes[i])
fi.Parts[i].Number = j.PartNumbers[i]
fi.Parts[i].Size = j.PartSizes[i]
fi.Parts[i].ETag = j.PartETags[i]
fi.Parts[i].ActualSize = int64(j.PartActualSizes[i])
fi.Parts[i].ActualSize = j.PartActualSizes[i]
}
fi.Erasure.Checksums = make([]ChecksumInfo, len(j.PartSizes))
for i := range fi.Parts {

go.sum (2 changes)
View File

@ -35,6 +35,7 @@ github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmV
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.29.11/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
github.com/bcicen/jstream v0.0.0-20190220045926-16c1f8af81c2 h1:M+TYzBcNIRyzPRg66ndEqUMd7oWDmhvdQmaPC6EZNwM=
github.com/bcicen/jstream v0.0.0-20190220045926-16c1f8af81c2/go.mod h1:RDu/qcrnpEdJC/p8tx34+YBFqqX71lB7dOX9QE+ZC4M=
github.com/beevik/ntp v0.2.0 h1:sGsd+kAXzT0bfVfzJfce04g+dSRfrs+tbQW8lweuYgw=
@ -219,6 +220,7 @@ github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANyt
github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=

View File

@ -241,7 +241,7 @@ func TestExpectedExpiryTime(t *testing.T) {
for i, tc := range testCases {
t.Run(fmt.Sprintf("Test %d", i+1), func(t *testing.T) {
got := expectedExpiryTime(tc.modTime, tc.days)
if got != tc.expected {
if !got.Equal(tc.expected) {
t.Fatalf("Expected %v to be equal to %v", got, tc.expected)
}
})

View File

@ -84,7 +84,7 @@ func GetOBDInfo(ctx context.Context, drive, fsPath string) (Latency, Throughput,
return Latency{}, Throughput{}, fmt.Errorf("Expected to write %d, but only wrote %d", blockSize, n)
}
latencyInSecs := time.Since(startTime).Seconds()
latencies[i] = float64(latencyInSecs)
latencies[i] = latencyInSecs
}
// Sync every full writes fdatasync

View File

@ -1,4 +1,4 @@
// +build linux
// +build linux,!s390x,!arm,!386
/*
* MinIO Cloud Storage, (C) 2015, 2016, 2017 MinIO, Inc.
@ -30,13 +30,13 @@ func GetInfo(path string) (info Info, err error) {
if err != nil {
return Info{}, err
}
reservedBlocks := uint64(s.Bfree) - uint64(s.Bavail)
reservedBlocks := s.Bfree - s.Bavail
info = Info{
Total: uint64(s.Frsize) * (uint64(s.Blocks) - reservedBlocks),
Free: uint64(s.Frsize) * uint64(s.Bavail),
Files: uint64(s.Files),
Ffree: uint64(s.Ffree),
FSType: getFSType(int64(s.Type)),
Total: uint64(s.Frsize) * (s.Blocks - reservedBlocks),
Free: uint64(s.Frsize) * s.Bavail,
Files: s.Files,
Ffree: s.Ffree,
FSType: getFSType(s.Type),
}
// Check for overflows.
// https://github.com/minio/minio/issues/8035

View File

@ -0,0 +1,80 @@
// +build linux,arm linux,386
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package disk
import (
"fmt"
"strconv"
"syscall"
)
// fsType2StringMap - list of filesystems supported on linux
var fsType2StringMap = map[string]string{
"1021994": "TMPFS",
"137d": "EXT",
"4244": "HFS",
"4d44": "MSDOS",
"52654973": "REISERFS",
"5346544e": "NTFS",
"58465342": "XFS",
"61756673": "AUFS",
"6969": "NFS",
"ef51": "EXT2OLD",
"ef53": "EXT4",
"f15f": "ecryptfs",
"794c7630": "overlayfs",
"2fc12fc1": "zfs",
"ff534d42": "cifs",
"53464846": "wslfs",
}
// getFSType returns the filesystem type of the underlying mounted filesystem
func getFSType(ftype int32) string {
fsTypeHex := strconv.FormatInt(int64(ftype), 16)
fsTypeString, ok := fsType2StringMap[fsTypeHex]
if !ok {
return "UNKNOWN"
}
return fsTypeString
}
// GetInfo returns total and free bytes available in a directory, e.g. `/`.
func GetInfo(path string) (info Info, err error) {
s := syscall.Statfs_t{}
err = syscall.Statfs(path, &s)
if err != nil {
return Info{}, err
}
reservedBlocks := s.Bfree - s.Bavail
info = Info{
Total: uint64(s.Frsize) * (s.Blocks - reservedBlocks),
Free: uint64(s.Frsize) * s.Bavail,
Files: s.Files,
Ffree: s.Ffree,
FSType: getFSType(s.Type),
}
// Check for overflows.
// https://github.com/minio/minio/issues/8035
// XFS can show wrong values at times error out
// in such scenarios.
if info.Free > info.Total {
return info, fmt.Errorf("detected free space (%d) > total disk space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
}
return info, nil
}
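
A brief usage sketch for the GetInfo helper added above (the import path is assumed from the repository layout, github.com/minio/minio/pkg/disk):

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio/pkg/disk"
)

func main() {
	info, err := disk.GetInfo("/")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("total=%d free=%d files=%d fstype=%s\n", info.Total, info.Free, info.Files, info.FSType)
}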

View File

@ -0,0 +1,80 @@
// +build linux,s390x
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package disk
import (
"fmt"
"strconv"
"syscall"
)
// fsType2StringMap - list of filesystems supported on linux
var fsType2StringMap = map[string]string{
"1021994": "TMPFS",
"137d": "EXT",
"4244": "HFS",
"4d44": "MSDOS",
"52654973": "REISERFS",
"5346544e": "NTFS",
"58465342": "XFS",
"61756673": "AUFS",
"6969": "NFS",
"ef51": "EXT2OLD",
"ef53": "EXT4",
"f15f": "ecryptfs",
"794c7630": "overlayfs",
"2fc12fc1": "zfs",
"ff534d42": "cifs",
"53464846": "wslfs",
}
// getFSType returns the filesystem type of the underlying mounted filesystem
func getFSType(ftype uint32) string {
fsTypeHex := strconv.FormatUint(uint64(ftype), 16)
fsTypeString, ok := fsType2StringMap[fsTypeHex]
if !ok {
return "UNKNOWN"
}
return fsTypeString
}
// GetInfo returns total and free bytes available in a directory, e.g. `/`.
func GetInfo(path string) (info Info, err error) {
s := syscall.Statfs_t{}
err = syscall.Statfs(path, &s)
if err != nil {
return Info{}, err
}
reservedBlocks := s.Bfree - s.Bavail
info = Info{
Total: uint64(s.Frsize) * (s.Blocks - reservedBlocks),
Free: uint64(s.Frsize) * s.Bavail,
Files: s.Files,
Ffree: s.Ffree,
FSType: getFSType(s.Type),
}
// Check for overflows.
// https://github.com/minio/minio/issues/8035
// XFS can show wrong values at times error out
// in such scenarios.
if info.Free > info.Total {
return info, fmt.Errorf("detected free space (%d) > total disk space (%d), fs corruption at (%s). please run 'fsck'", info.Free, info.Total, path)
}
return info, nil
}

View File

@ -1,4 +1,4 @@
// +build linux
// +build linux,!s390x,!arm,!386
/*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.

View File

@ -45,7 +45,7 @@ func log(format string, data ...interface{}) {
// DRWMutexAcquireTimeout - tolerance limit to wait for lock acquisition before.
const DRWMutexAcquireTimeout = 1 * time.Second // 1 second.
const drwMutexInfinite = time.Duration(1<<63 - 1)
const drwMutexInfinite = 1<<63 - 1
// A DRWMutex is a distributed mutual exclusion lock.
type DRWMutex struct {

View File

@ -46,7 +46,7 @@ func NewLRWMutex() *LRWMutex {
func (lm *LRWMutex) Lock() {
const isWriteLock = true
lm.lockLoop(context.Background(), lm.id, lm.source, time.Duration(math.MaxInt64), isWriteLock)
lm.lockLoop(context.Background(), lm.id, lm.source, math.MaxInt64, isWriteLock)
}
// GetLock tries to get a write lock on lm before the timeout occurs.
@ -63,7 +63,7 @@ func (lm *LRWMutex) GetLock(ctx context.Context, id string, source string, timeo
func (lm *LRWMutex) RLock() {
const isWriteLock = false
lm.lockLoop(context.Background(), lm.id, lm.source, time.Duration(1<<63-1), isWriteLock)
lm.lockLoop(context.Background(), lm.id, lm.source, 1<<63-1, isWriteLock)
}
// GetRLock tries to get a read lock on lm before the timeout occurs.

View File

@ -81,11 +81,11 @@ func (adm AdminClient) newRetryTimer(ctx context.Context, maxRetry int, unit tim
}
//sleep = random_between(0, min(cap, base * 2 ** attempt))
sleep := unit * time.Duration(1<<uint(attempt))
sleep := unit * 1 << uint(attempt)
if sleep > cap {
sleep = cap
}
if jitter != NoJitter {
if jitter > NoJitter {
sleep -= time.Duration(adm.random.Float64() * float64(sleep) * jitter)
}
return sleep

View File

@ -76,11 +76,11 @@ func NewTimerWithJitter(ctx context.Context, unit time.Duration, cap time.Durati
attempt = maxAttempt
}
//sleep = random_between(0, min(cap, base * 2 ** attempt))
sleep := unit * time.Duration(1<<uint(attempt))
sleep := unit * 1 << uint(attempt)
if sleep > cap {
sleep = cap
}
if jitter != NoJitter {
if jitter > NoJitter {
sleep -= time.Duration(rand.Float64() * float64(sleep) * jitter)
}
return sleep
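
The retry delay computed above is the usual capped exponential backoff with subtractive jitter: sleep = min(cap, unit shifted left by attempt), minus a random fraction of itself. A self-contained sketch with illustrative names:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func backoff(unit, capDur time.Duration, attempt uint, jitter float64, rnd *rand.Rand) time.Duration {
	sleep := unit * 1 << attempt // unit * 2^attempt
	if sleep > capDur {
		sleep = capDur
	}
	if jitter > 0 {
		sleep -= time.Duration(rnd.Float64() * float64(sleep) * jitter) // subtract random jitter
	}
	return sleep
}

func main() {
	rnd := rand.New(rand.NewSource(1))
	for attempt := uint(0); attempt < 5; attempt++ {
		fmt.Println(backoff(100*time.Millisecond, time.Second, attempt, 0.5, rnd))
	}
}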

View File

@ -186,11 +186,11 @@ func populate(columnDataMap map[string]*Column, input *jsonValue, tree *schema.T
}
var jsonData []byte
if jsonData, rerr = sjson.SetBytes([]byte{}, "key", key.Value()); err != nil {
if jsonData, rerr = sjson.SetBytes([]byte{}, "key", key.Value()); rerr != nil {
return false
}
if jsonData, rerr = sjson.SetBytes(jsonData, "value", value.Value()); err != nil {
if jsonData, rerr = sjson.SetBytes(jsonData, "value", value.Value()); rerr != nil {
return false
}
@ -199,7 +199,7 @@ func populate(columnDataMap map[string]*Column, input *jsonValue, tree *schema.T
return false
}
if columnDataMap, rerr = populate(columnDataMap, jv, keyValueElement.Children, firstValueRL); err != nil {
if columnDataMap, rerr = populate(columnDataMap, jv, keyValueElement.Children, firstValueRL); rerr != nil {
return false
}
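
The three rerr fixes above are instances of the "wrong err in error check" pattern that the new ruleguard rules at the end of this commit are meant to catch: the call assigns one error variable but the check reads another. A minimal illustration with hypothetical names:

package main

import (
	"fmt"
	"strconv"
)

func parsePort(s string) (port int, err error) {
	n, rerr := strconv.Atoi(s)
	if err != nil { // BUG: tests err (still nil) instead of rerr
		return 0, err
	}
	_ = rerr
	return n, nil
}

func main() {
	port, err := parsePort("not-a-number")
	fmt.Println(port, err) // 0 <nil>: the parse failure was silently swallowed
}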

View File

@ -922,7 +922,7 @@ func (p *Statistics) writeField3(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("null_count", thrift.I64, 3); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:null_count: ", p), err)
}
if err := oprot.WriteI64(int64(*p.NullCount)); err != nil {
if err := oprot.WriteI64(*p.NullCount); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.null_count (3) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -937,7 +937,7 @@ func (p *Statistics) writeField4(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("distinct_count", thrift.I64, 4); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:distinct_count: ", p), err)
}
if err := oprot.WriteI64(int64(*p.DistinctCount)); err != nil {
if err := oprot.WriteI64(*p.DistinctCount); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.distinct_count (4) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -1492,7 +1492,7 @@ func (p *DecimalType) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("scale", thrift.I32, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:scale: ", p), err)
}
if err := oprot.WriteI32(int32(p.Scale)); err != nil {
if err := oprot.WriteI32(p.Scale); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.scale (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -1505,7 +1505,7 @@ func (p *DecimalType) writeField2(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("precision", thrift.I32, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:precision: ", p), err)
}
if err := oprot.WriteI32(int32(p.Precision)); err != nil {
if err := oprot.WriteI32(p.Precision); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.precision (2) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -2020,7 +2020,7 @@ func (p *TimestampType) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("isAdjustedToUTC", thrift.BOOL, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:isAdjustedToUTC: ", p), err)
}
if err := oprot.WriteBool(bool(p.IsAdjustedToUTC)); err != nil {
if err := oprot.WriteBool(p.IsAdjustedToUTC); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.isAdjustedToUTC (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -2171,7 +2171,7 @@ func (p *TimeType) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("isAdjustedToUTC", thrift.BOOL, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:isAdjustedToUTC: ", p), err)
}
if err := oprot.WriteBool(bool(p.IsAdjustedToUTC)); err != nil {
if err := oprot.WriteBool(p.IsAdjustedToUTC); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.isAdjustedToUTC (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -2277,7 +2277,7 @@ func (p *IntType) ReadField1(iprot thrift.TProtocol) error {
if v, err := iprot.ReadByte(); err != nil {
return thrift.PrependError("error reading field 1: ", err)
} else {
temp := int8(v)
temp := v
p.BitWidth = temp
}
return nil
@ -2317,7 +2317,7 @@ func (p *IntType) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("bitWidth", thrift.BYTE, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:bitWidth: ", p), err)
}
if err := oprot.WriteByte(int8(p.BitWidth)); err != nil {
if err := oprot.WriteByte(p.BitWidth); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.bitWidth (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -2330,7 +2330,7 @@ func (p *IntType) writeField2(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("isSigned", thrift.BOOL, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:isSigned: ", p), err)
}
if err := oprot.WriteBool(bool(p.IsSigned)); err != nil {
if err := oprot.WriteBool(p.IsSigned); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.isSigned (2) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -3558,7 +3558,7 @@ func (p *SchemaElement) writeField2(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("type_length", thrift.I32, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:type_length: ", p), err)
}
if err := oprot.WriteI32(int32(*p.TypeLength)); err != nil {
if err := oprot.WriteI32(*p.TypeLength); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.type_length (2) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -3601,7 +3601,7 @@ func (p *SchemaElement) writeField5(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("num_children", thrift.I32, 5); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:num_children: ", p), err)
}
if err := oprot.WriteI32(int32(*p.NumChildren)); err != nil {
if err := oprot.WriteI32(*p.NumChildren); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.num_children (5) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -3631,7 +3631,7 @@ func (p *SchemaElement) writeField7(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("scale", thrift.I32, 7); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:scale: ", p), err)
}
if err := oprot.WriteI32(int32(*p.Scale)); err != nil {
if err := oprot.WriteI32(*p.Scale); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.scale (7) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -3646,7 +3646,7 @@ func (p *SchemaElement) writeField8(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("precision", thrift.I32, 8); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:precision: ", p), err)
}
if err := oprot.WriteI32(int32(*p.Precision)); err != nil {
if err := oprot.WriteI32(*p.Precision); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.precision (8) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -3661,7 +3661,7 @@ func (p *SchemaElement) writeField9(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("field_id", thrift.I32, 9); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:field_id: ", p), err)
}
if err := oprot.WriteI32(int32(*p.FieldID)); err != nil {
if err := oprot.WriteI32(*p.FieldID); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.field_id (9) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -3892,7 +3892,7 @@ func (p *DataPageHeader) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("num_values", thrift.I32, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:num_values: ", p), err)
}
if err := oprot.WriteI32(int32(p.NumValues)); err != nil {
if err := oprot.WriteI32(p.NumValues); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.num_values (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -4161,7 +4161,7 @@ func (p *DictionaryPageHeader) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("num_values", thrift.I32, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:num_values: ", p), err)
}
if err := oprot.WriteI32(int32(p.NumValues)); err != nil {
if err := oprot.WriteI32(p.NumValues); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.num_values (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -4188,7 +4188,7 @@ func (p *DictionaryPageHeader) writeField3(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("is_sorted", thrift.BOOL, 3); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:is_sorted: ", p), err)
}
if err := oprot.WriteBool(bool(*p.IsSorted)); err != nil {
if err := oprot.WriteBool(*p.IsSorted); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.is_sorted (3) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -4494,7 +4494,7 @@ func (p *DataPageHeaderV2) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("num_values", thrift.I32, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:num_values: ", p), err)
}
if err := oprot.WriteI32(int32(p.NumValues)); err != nil {
if err := oprot.WriteI32(p.NumValues); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.num_values (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -4507,7 +4507,7 @@ func (p *DataPageHeaderV2) writeField2(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("num_nulls", thrift.I32, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:num_nulls: ", p), err)
}
if err := oprot.WriteI32(int32(p.NumNulls)); err != nil {
if err := oprot.WriteI32(p.NumNulls); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.num_nulls (2) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -4520,7 +4520,7 @@ func (p *DataPageHeaderV2) writeField3(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("num_rows", thrift.I32, 3); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:num_rows: ", p), err)
}
if err := oprot.WriteI32(int32(p.NumRows)); err != nil {
if err := oprot.WriteI32(p.NumRows); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.num_rows (3) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -4546,7 +4546,7 @@ func (p *DataPageHeaderV2) writeField5(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("definition_levels_byte_length", thrift.I32, 5); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:definition_levels_byte_length: ", p), err)
}
if err := oprot.WriteI32(int32(p.DefinitionLevelsByteLength)); err != nil {
if err := oprot.WriteI32(p.DefinitionLevelsByteLength); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.definition_levels_byte_length (5) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -4559,7 +4559,7 @@ func (p *DataPageHeaderV2) writeField6(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("repetition_levels_byte_length", thrift.I32, 6); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:repetition_levels_byte_length: ", p), err)
}
if err := oprot.WriteI32(int32(p.RepetitionLevelsByteLength)); err != nil {
if err := oprot.WriteI32(p.RepetitionLevelsByteLength); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.repetition_levels_byte_length (6) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -4573,7 +4573,7 @@ func (p *DataPageHeaderV2) writeField7(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("is_compressed", thrift.BOOL, 7); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:is_compressed: ", p), err)
}
if err := oprot.WriteBool(bool(p.IsCompressed)); err != nil {
if err := oprot.WriteBool(p.IsCompressed); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.is_compressed (7) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -4911,7 +4911,7 @@ func (p *PageHeader) writeField2(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("uncompressed_page_size", thrift.I32, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:uncompressed_page_size: ", p), err)
}
if err := oprot.WriteI32(int32(p.UncompressedPageSize)); err != nil {
if err := oprot.WriteI32(p.UncompressedPageSize); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.uncompressed_page_size (2) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -4924,7 +4924,7 @@ func (p *PageHeader) writeField3(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("compressed_page_size", thrift.I32, 3); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:compressed_page_size: ", p), err)
}
if err := oprot.WriteI32(int32(p.CompressedPageSize)); err != nil {
if err := oprot.WriteI32(p.CompressedPageSize); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.compressed_page_size (3) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -4938,7 +4938,7 @@ func (p *PageHeader) writeField4(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("crc", thrift.I32, 4); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:crc: ", p), err)
}
if err := oprot.WriteI32(int32(*p.Crc)); err != nil {
if err := oprot.WriteI32(*p.Crc); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.crc (4) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -5302,7 +5302,7 @@ func (p *SortingColumn) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("column_idx", thrift.I32, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:column_idx: ", p), err)
}
if err := oprot.WriteI32(int32(p.ColumnIdx)); err != nil {
if err := oprot.WriteI32(p.ColumnIdx); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.column_idx (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -5315,7 +5315,7 @@ func (p *SortingColumn) writeField2(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("descending", thrift.BOOL, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:descending: ", p), err)
}
if err := oprot.WriteBool(bool(p.Descending)); err != nil {
if err := oprot.WriteBool(p.Descending); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.descending (2) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -5328,7 +5328,7 @@ func (p *SortingColumn) writeField3(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("nulls_first", thrift.BOOL, 3); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:nulls_first: ", p), err)
}
if err := oprot.WriteBool(bool(p.NullsFirst)); err != nil {
if err := oprot.WriteBool(p.NullsFirst); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.nulls_first (3) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -5511,7 +5511,7 @@ func (p *PageEncodingStats) writeField3(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("count", thrift.I32, 3); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:count: ", p), err)
}
if err := oprot.WriteI32(int32(p.Count)); err != nil {
if err := oprot.WriteI32(p.Count); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.count (3) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -6071,7 +6071,7 @@ func (p *ColumnMetaData) writeField5(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("num_values", thrift.I64, 5); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:num_values: ", p), err)
}
if err := oprot.WriteI64(int64(p.NumValues)); err != nil {
if err := oprot.WriteI64(p.NumValues); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.num_values (5) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -6084,7 +6084,7 @@ func (p *ColumnMetaData) writeField6(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("total_uncompressed_size", thrift.I64, 6); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:total_uncompressed_size: ", p), err)
}
if err := oprot.WriteI64(int64(p.TotalUncompressedSize)); err != nil {
if err := oprot.WriteI64(p.TotalUncompressedSize); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.total_uncompressed_size (6) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -6097,7 +6097,7 @@ func (p *ColumnMetaData) writeField7(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("total_compressed_size", thrift.I64, 7); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:total_compressed_size: ", p), err)
}
if err := oprot.WriteI64(int64(p.TotalCompressedSize)); err != nil {
if err := oprot.WriteI64(p.TotalCompressedSize); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.total_compressed_size (7) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -6133,7 +6133,7 @@ func (p *ColumnMetaData) writeField9(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("data_page_offset", thrift.I64, 9); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:data_page_offset: ", p), err)
}
if err := oprot.WriteI64(int64(p.DataPageOffset)); err != nil {
if err := oprot.WriteI64(p.DataPageOffset); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.data_page_offset (9) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -6147,7 +6147,7 @@ func (p *ColumnMetaData) writeField10(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("index_page_offset", thrift.I64, 10); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:index_page_offset: ", p), err)
}
if err := oprot.WriteI64(int64(*p.IndexPageOffset)); err != nil {
if err := oprot.WriteI64(*p.IndexPageOffset); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.index_page_offset (10) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -6162,7 +6162,7 @@ func (p *ColumnMetaData) writeField11(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("dictionary_page_offset", thrift.I64, 11); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:dictionary_page_offset: ", p), err)
}
if err := oprot.WriteI64(int64(*p.DictionaryPageOffset)); err != nil {
if err := oprot.WriteI64(*p.DictionaryPageOffset); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.dictionary_page_offset (11) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -6505,7 +6505,7 @@ func (p *ColumnChunk) writeField2(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("file_offset", thrift.I64, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:file_offset: ", p), err)
}
if err := oprot.WriteI64(int64(p.FileOffset)); err != nil {
if err := oprot.WriteI64(p.FileOffset); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.file_offset (2) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -6534,7 +6534,7 @@ func (p *ColumnChunk) writeField4(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("offset_index_offset", thrift.I64, 4); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:offset_index_offset: ", p), err)
}
if err := oprot.WriteI64(int64(*p.OffsetIndexOffset)); err != nil {
if err := oprot.WriteI64(*p.OffsetIndexOffset); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.offset_index_offset (4) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -6549,7 +6549,7 @@ func (p *ColumnChunk) writeField5(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("offset_index_length", thrift.I32, 5); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:offset_index_length: ", p), err)
}
if err := oprot.WriteI32(int32(*p.OffsetIndexLength)); err != nil {
if err := oprot.WriteI32(*p.OffsetIndexLength); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.offset_index_length (5) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -6564,7 +6564,7 @@ func (p *ColumnChunk) writeField6(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("column_index_offset", thrift.I64, 6); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:column_index_offset: ", p), err)
}
if err := oprot.WriteI64(int64(*p.ColumnIndexOffset)); err != nil {
if err := oprot.WriteI64(*p.ColumnIndexOffset); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.column_index_offset (6) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -6579,7 +6579,7 @@ func (p *ColumnChunk) writeField7(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("column_index_length", thrift.I32, 7); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:column_index_length: ", p), err)
}
if err := oprot.WriteI32(int32(*p.ColumnIndexLength)); err != nil {
if err := oprot.WriteI32(*p.ColumnIndexLength); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.column_index_length (7) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -6807,7 +6807,7 @@ func (p *RowGroup) writeField2(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("total_byte_size", thrift.I64, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:total_byte_size: ", p), err)
}
if err := oprot.WriteI64(int64(p.TotalByteSize)); err != nil {
if err := oprot.WriteI64(p.TotalByteSize); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.total_byte_size (2) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -6820,7 +6820,7 @@ func (p *RowGroup) writeField3(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("num_rows", thrift.I64, 3); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:num_rows: ", p), err)
}
if err := oprot.WriteI64(int64(p.NumRows)); err != nil {
if err := oprot.WriteI64(p.NumRows); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.num_rows (3) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -7220,7 +7220,7 @@ func (p *PageLocation) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("offset", thrift.I64, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:offset: ", p), err)
}
if err := oprot.WriteI64(int64(p.Offset)); err != nil {
if err := oprot.WriteI64(p.Offset); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.offset (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -7233,7 +7233,7 @@ func (p *PageLocation) writeField2(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("compressed_page_size", thrift.I32, 2); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:compressed_page_size: ", p), err)
}
if err := oprot.WriteI32(int32(p.CompressedPageSize)); err != nil {
if err := oprot.WriteI32(p.CompressedPageSize); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.compressed_page_size (2) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -7246,7 +7246,7 @@ func (p *PageLocation) writeField3(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("first_row_index", thrift.I64, 3); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:first_row_index: ", p), err)
}
if err := oprot.WriteI64(int64(p.FirstRowIndex)); err != nil {
if err := oprot.WriteI64(p.FirstRowIndex); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.first_row_index (3) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -7646,7 +7646,7 @@ func (p *ColumnIndex) writeField1(oprot thrift.TProtocol) (err error) {
return thrift.PrependError("error writing list begin: ", err)
}
for _, v := range p.NullPages {
if err := oprot.WriteBool(bool(v)); err != nil {
if err := oprot.WriteBool(v); err != nil {
return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err)
}
}
@ -7723,7 +7723,7 @@ func (p *ColumnIndex) writeField5(oprot thrift.TProtocol) (err error) {
return thrift.PrependError("error writing list begin: ", err)
}
for _, v := range p.NullCounts {
if err := oprot.WriteI64(int64(v)); err != nil {
if err := oprot.WriteI64(v); err != nil {
return thrift.PrependError(fmt.Sprintf("%T. (0) field write error: ", p), err)
}
}
@ -8058,7 +8058,7 @@ func (p *FileMetaData) writeField1(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("version", thrift.I32, 1); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:version: ", p), err)
}
if err := oprot.WriteI32(int32(p.Version)); err != nil {
if err := oprot.WriteI32(p.Version); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.version (1) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {
@ -8092,7 +8092,7 @@ func (p *FileMetaData) writeField3(oprot thrift.TProtocol) (err error) {
if err := oprot.WriteFieldBegin("num_rows", thrift.I64, 3); err != nil {
return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:num_rows: ", p), err)
}
if err := oprot.WriteI64(int64(p.NumRows)); err != nil {
if err := oprot.WriteI64(p.NumRows); err != nil {
return thrift.PrependError(fmt.Sprintf("%T.num_rows (3) field write error: ", p), err)
}
if err := oprot.WriteFieldEnd(); err != nil {

View File

@ -64,7 +64,7 @@ func (r *Reader) Read(dst sql.Record) (rec sql.Record, rerr error) {
case parquetgen.Type_INT32:
value = int64(v.Value.(int32))
case parquetgen.Type_INT64:
value = int64(v.Value.(int64))
value = v.Value.(int64)
case parquetgen.Type_FLOAT:
value = float64(v.Value.(float32))
case parquetgen.Type_DOUBLE:

View File

@ -246,6 +246,8 @@ func (e *ListExpr) evalNode(r Record) (*Value, error) {
return FromArray(res), nil
}
const floatCmpTolerance = 0.000001
func (e *In) evalInNode(r Record, lhs *Value) (*Value, error) {
// Compare two values in terms of in-ness.
var cmp func(a, b Value) bool
@ -275,7 +277,8 @@ func (e *In) evalInNode(r Record, lhs *Value) (*Value, error) {
aF, aOK := a.ToFloat()
bF, bOK := b.ToFloat()
return aOK && bOK && aF == bF
diff := math.Abs(aF - bF)
return aOK && bOK && diff < floatCmpTolerance
}
var rhs Value

View File

@ -785,6 +785,7 @@ func intCompare(op string, left, right int64) bool {
}
func floatCompare(op string, left, right float64) bool {
diff := math.Abs(left - right)
switch op {
case opLt:
return left < right
@ -795,9 +796,9 @@ func floatCompare(op string, left, right float64) bool {
case opGte:
return left >= right
case opEq:
return left == right
return diff < floatCmpTolerance
case opIneq:
return left != right
return diff > floatCmpTolerance
}
// This case does not happen
return false
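
Exact == and != on floats are replaced with a tolerance check because floating-point rounding makes mathematically equal values compare unequal. A minimal illustration:

package main

import (
	"fmt"
	"math"
)

const floatCmpTolerance = 0.000001

func main() {
	a := 0.1 + 0.2
	b := 0.3

	fmt.Println(a == b)                            // false: a is 0.30000000000000004
	fmt.Println(math.Abs(a-b) < floatCmpTolerance) // true: equal within tolerance
}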

View File

@ -559,7 +559,8 @@ func TestValue_bytesToFloat(t *testing.T) {
value: tt.fields.value,
}
got, got1 := v.bytesToFloat()
if got != tt.want {
diff := math.Abs(got - tt.want)
if diff > floatCmpTolerance {
t.Errorf("bytesToFloat() got = %v, want %v", got, tt.want)
}
if got1 != tt.wantOK {

View File

@ -1,4 +1,4 @@
// +build linux
// +build linux,!arm,!386
/*
* MinIO Cloud Storage, (C) 2016,2017 MinIO, Inc.
@ -64,13 +64,12 @@ func getSysinfoMemoryLimit() (limit uint64, err error) {
// Some fields in syscall.Sysinfo_t have different integer sizes
// in different platform architectures. Cast all fields to uint64.
totalRAM := uint64(si.Totalram)
unit := uint64(si.Unit)
unit := si.Unit
totalRAM := si.Totalram
// Total RAM is always the multiplicative value
// of unit size and total ram.
limit = unit * totalRAM
return limit, nil
return uint64(unit) * totalRAM, nil
}
// GetStats - return system statistics, currently only

View File

@ -0,0 +1,86 @@
// +build linux,arm linux,386
/*
* MinIO Cloud Storage, (C) 2020 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sys
import (
"os"
"syscall"
"github.com/minio/minio/pkg/cgroup"
)
// Get the final system memory limit chosen by the user.
// by default without any configuration on a vanilla Linux
// system you would see physical RAM limit. If cgroup
// is configured at some point in time this function
// would return the memory limit chosen for the given pid.
func getMemoryLimit() (sysLimit uint64, err error) {
if sysLimit, err = getSysinfoMemoryLimit(); err != nil {
// Physical memory info is not accessible, just exit here.
return 0, err
}
// Following code is deliberately ignoring the error.
cGroupLimit, gerr := cgroup.GetMemoryLimit(os.Getpid())
if gerr != nil {
// Upon error just return system limit.
return sysLimit, nil
}
// cgroup limit is lesser than system limit means
// user wants to limit the memory usage further
// treat cgroup limit as the system limit.
if cGroupLimit <= sysLimit {
sysLimit = cGroupLimit
}
// Final system limit.
return sysLimit, nil
}
// Get physical RAM size of the node.
func getSysinfoMemoryLimit() (limit uint64, err error) {
var si syscall.Sysinfo_t
if err = syscall.Sysinfo(&si); err != nil {
return 0, err
}
// Some fields in syscall.Sysinfo_t have different integer sizes
// in different platform architectures. Cast all fields to uint64.
unit := si.Unit
totalRAM := si.Totalram
// Total RAM is always the multiplicative value
// of unit size and total ram.
return uint64(unit) * uint64(totalRAM), nil
}
// GetStats - return system statistics, currently only
// supported value is TotalRAM.
func GetStats() (stats Stats, err error) {
var limit uint64
limit, err = getMemoryLimit()
if err != nil {
return Stats{}, err
}
stats.TotalRAM = limit
return stats, nil
}
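
A short usage sketch for the GetStats path above (import path assumed from the repository layout, github.com/minio/minio/pkg/sys):

package main

import (
	"fmt"
	"log"

	"github.com/minio/minio/pkg/sys"
)

func main() {
	stats, err := sys.GetStats()
	if err != nil {
		log.Fatal(err)
	}
	// TotalRAM is the lower of the sysinfo limit and any cgroup memory limit.
	fmt.Println("effective memory limit (bytes):", stats.TotalRAM)
}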

ruleguard.rules.go (new file, 420 lines)
View File

@ -0,0 +1,420 @@
// +build ignore
package gorules
import "github.com/quasilyte/go-ruleguard/dsl/fluent"
// This is a collection of rules for ruleguard: https://github.com/quasilyte/go-ruleguard
// Remove extra conversions: mdempsky/unconvert
func unconvert(m fluent.Matcher) {
m.Match("int($x)").Where(m["x"].Type.Is("int") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x")
m.Match("float32($x)").Where(m["x"].Type.Is("float32") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x")
m.Match("float64($x)").Where(m["x"].Type.Is("float64") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x")
// m.Match("byte($x)").Where(m["x"].Type.Is("byte")).Report("unnecessary conversion").Suggest("$x")
// m.Match("rune($x)").Where(m["x"].Type.Is("rune")).Report("unnecessary conversion").Suggest("$x")
m.Match("bool($x)").Where(m["x"].Type.Is("bool") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x")
m.Match("int8($x)").Where(m["x"].Type.Is("int8") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x")
m.Match("int16($x)").Where(m["x"].Type.Is("int16") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x")
m.Match("int32($x)").Where(m["x"].Type.Is("int32") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x")
m.Match("int64($x)").Where(m["x"].Type.Is("int64") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x")
m.Match("uint8($x)").Where(m["x"].Type.Is("uint8") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x")
m.Match("uint16($x)").Where(m["x"].Type.Is("uint16") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x")
m.Match("uint32($x)").Where(m["x"].Type.Is("uint32") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x")
m.Match("uint64($x)").Where(m["x"].Type.Is("uint64") && !m["x"].Const).Report("unnecessary conversion").Suggest("$x")
m.Match("time.Duration($x)").Where(m["x"].Type.Is("time.Duration") && !m["x"].Text.Matches("^[0-9]*$")).Report("unnecessary conversion").Suggest("$x")
}
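// Illustrative example of what this rule flags (hypothetical variable n):
//
//	var n int = 42
//	_ = int(n) // reported: unnecessary conversion, suggestion is plain n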
// Don't use == or != with time.Time
// https://github.com/dominikh/go-tools/issues/47 : Wontfix
func timeeq(m fluent.Matcher) {
m.Match("$t0 == $t1").Where(m["t0"].Type.Is("time.Time")).Report("using == with time.Time")
m.Match("$t0 != $t1").Where(m["t0"].Type.Is("time.Time")).Report("using != with time.Time")
}
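// Illustrative example (hypothetical t0, t1 of type time.Time):
//
//	if t0 == t1 { ... }     // reported: using == with time.Time
//	if t0.Equal(t1) { ... } // compares the instants, ignoring monotonic-clock noise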
// Wrong err in error check
func wrongerr(m fluent.Matcher) {
m.Match("if $*_, $err0 := $*_; $err1 != nil { $*_ }").
Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("if $*_, $err0 := $*_; $err1 != nil { $*_ }").
Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("if $*_, $err0 = $*_; $err1 != nil { $*_ }").
Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("if $*_, $err0 = $*_; $err1 != nil { $*_ }").
Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("if $*_, $err0 := $*_; $err1 == nil { $*_ }").
Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("if $*_, $err0 := $*_; $err1 == nil { $*_ }").
Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("if $*_, $err0 = $*_; $err1 == nil { $*_ }").
Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("if $*_, $err0 = $*_; $err1 == nil { $*_ }").
Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("$*_, $err0 := $*_; if $err1 != nil { $*_ }").
Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("$*_, $err0 := $*_; if $err1 != nil { $*_ }").
Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("$*_, $err0 := $*_; if $err1 == nil { $*_ }").
Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("$*_, $err0 := $*_; if $err1 == nil { $*_ }").
Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("$*_, $err0 = $*_; if $err1 != nil { $*_ }").
Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("$*_, $err0 = $*_; if $err1 != nil { $*_ }").
Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("$*_, $err0 = $*_; if $err1 == nil { $*_ }").
Where(m["err0"].Text == "err" && m["err0"].Type.Is("error") && m["err1"].Text != "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
m.Match("$*_, $err0 = $*_; if $err1 == nil { $*_ }").
Where(m["err0"].Text != "err" && m["err0"].Type.Is("error") && m["err1"].Text == "err" && m["err1"].Type.Is("error")).
Report("maybe wrong err in error check")
}
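// Illustrative example of the mismatch this rule looks for (hypothetical names,
// with some outer err of type error already in scope):
//
//	if _, ferr := os.Open(name); err != nil { ... } // reported: assigned ferr, checked err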
// err, but not an error
func errnoterror(m fluent.Matcher) {
// Would be easier to check for all err identifiers instead, but then how do we get the type from m[] ?
m.Match(
"if $*_, err := $x; $err != nil { $*_ } else if $_ { $*_ }",
"if $*_, err := $x; $err != nil { $*_ } else { $*_ }",
"if $*_, err := $x; $err != nil { $*_ }",
"if $*_, err = $x; $err != nil { $*_ } else if $_ { $*_ }",
"if $*_, err = $x; $err != nil { $*_ } else { $*_ }",
"if $*_, err = $x; $err != nil { $*_ }",
"$*_, err := $x; if $err != nil { $*_ } else if $_ { $*_ }",
"$*_, err := $x; if $err != nil { $*_ } else { $*_ }",
"$*_, err := $x; if $err != nil { $*_ }",
"$*_, err = $x; if $err != nil { $*_ } else if $_ { $*_ }",
"$*_, err = $x; if $err != nil { $*_ } else { $*_ }",
"$*_, err = $x; if $err != nil { $*_ }",
).
Where(m["err"].Text == "err" && !m["err"].Type.Is("error") && m["x"].Text != "recover()").
Report("err variable not error type")
}
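// Illustrative example (hypothetical lookup whose second result is a *Node,
// not an error):
//
//	n, err := lookup(key)
//	if err != nil { ... } // reported: err variable not error type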
// Identical if and else bodies
func ifbodythenbody(m fluent.Matcher) {
m.Match("if $*_ { $body } else { $body }").
Report("identical if and else bodies")
// Lots of false positives.
// m.Match("if $*_ { $body } else if $*_ { $body }").
// Report("identical if and else bodies")
}
// Odd inequality: A - B < 0 instead of !=
// Too many false positives.
/*
func subtractnoteq(m fluent.Matcher) {
m.Match("$a - $b < 0").Report("consider $a != $b")
m.Match("$a - $b > 0").Report("consider $a != $b")
m.Match("0 < $a - $b").Report("consider $a != $b")
m.Match("0 > $a - $b").Report("consider $a != $b")
}
*/
// Self-assignment
func selfassign(m fluent.Matcher) {
m.Match("$x = $x").Report("useless self-assignment")
}
// Odd nested ifs
func oddnestedif(m fluent.Matcher) {
m.Match("if $x { if $x { $*_ }; $*_ }",
"if $x == $y { if $x != $y {$*_ }; $*_ }",
"if $x != $y { if $x == $y {$*_ }; $*_ }",
"if $x { if !$x { $*_ }; $*_ }",
"if !$x { if $x { $*_ }; $*_ }").
Report("odd nested ifs")
m.Match("for $x { if $x { $*_ }; $*_ }",
"for $x == $y { if $x != $y {$*_ }; $*_ }",
"for $x != $y { if $x == $y {$*_ }; $*_ }",
"for $x { if !$x { $*_ }; $*_ }",
"for !$x { if $x { $*_ }; $*_ }").
Report("odd nested for/ifs")
}
// odd bitwise expressions
func oddbitwise(m fluent.Matcher) {
m.Match("$x | $x",
"$x | ^$x",
"^$x | $x").
Report("odd bitwise OR")
m.Match("$x & $x",
"$x & ^$x",
"^$x & $x").
Report("odd bitwise AND")
m.Match("$x &^ $x").
Report("odd bitwise AND-NOT")
}
// odd sequence of if tests with return
func ifreturn(m fluent.Matcher) {
m.Match("if $x { return $*_ }; if $x {$*_ }").Report("odd sequence of if test")
m.Match("if $x { return $*_ }; if !$x {$*_ }").Report("odd sequence of if test")
m.Match("if !$x { return $*_ }; if $x {$*_ }").Report("odd sequence of if test")
m.Match("if $x == $y { return $*_ }; if $x != $y {$*_ }").Report("odd sequence of if test")
m.Match("if $x != $y { return $*_ }; if $x == $y {$*_ }").Report("odd sequence of if test")
}
func oddifsequence(m fluent.Matcher) {
/*
m.Match("if $x { $*_ }; if $x {$*_ }").Report("odd sequence of if test")
m.Match("if $x == $y { $*_ }; if $y == $x {$*_ }").Report("odd sequence of if tests")
m.Match("if $x != $y { $*_ }; if $y != $x {$*_ }").Report("odd sequence of if tests")
m.Match("if $x < $y { $*_ }; if $y > $x {$*_ }").Report("odd sequence of if tests")
m.Match("if $x <= $y { $*_ }; if $y >= $x {$*_ }").Report("odd sequence of if tests")
m.Match("if $x > $y { $*_ }; if $y < $x {$*_ }").Report("odd sequence of if tests")
m.Match("if $x >= $y { $*_ }; if $y <= $x {$*_ }").Report("odd sequence of if tests")
*/
}
// odd sequence of nested if tests
func nestedifsequence(m fluent.Matcher) {
/*
m.Match("if $x < $y { if $x >= $y {$*_ }; $*_ }").Report("odd sequence of nested if tests")
m.Match("if $x <= $y { if $x > $y {$*_ }; $*_ }").Report("odd sequence of nested if tests")
m.Match("if $x > $y { if $x <= $y {$*_ }; $*_ }").Report("odd sequence of nested if tests")
m.Match("if $x >= $y { if $x < $y {$*_ }; $*_ }").Report("odd sequence of nested if tests")
*/
}
// odd sequence of assignments
func identicalassignments(m fluent.Matcher) {
m.Match("$x = $y; $y = $x").Report("odd sequence of assignments")
}
func oddcompoundop(m fluent.Matcher) {
m.Match("$x += $x + $_",
"$x += $x - $_").
Report("odd += expression")
m.Match("$x -= $x + $_",
"$x -= $x - $_").
Report("odd -= expression")
}
func constswitch(m fluent.Matcher) {
m.Match("switch $x { $*_ }", "switch $*_; $x { $*_ }").
Where(m["x"].Const && !m["x"].Text.Matches(`^runtime\.`)).
Report("constant switch")
}
func oddcomparisons(m fluent.Matcher) {
m.Match(
"$x - $y == 0",
"$x - $y != 0",
"$x - $y < 0",
"$x - $y <= 0",
"$x - $y > 0",
"$x - $y >= 0",
"$x ^ $y == 0",
"$x ^ $y != 0",
).Report("odd comparison")
}
func oddmathbits(m fluent.Matcher) {
m.Match(
"64 - bits.LeadingZeros64($x)",
"32 - bits.LeadingZeros32($x)",
"16 - bits.LeadingZeros16($x)",
"8 - bits.LeadingZeros8($x)",
).Report("odd math/bits expression: use bits.Len*() instead?")
}
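// Illustrative equivalence: for every uint64 x, including zero,
//
//	64 - bits.LeadingZeros64(x) == bits.Len64(x)
//
// so the shorter bits.Len64(x) is preferred.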
func floateq(m fluent.Matcher) {
m.Match(
"$x == $y",
"$x != $y",
).
Where(m["x"].Type.Is("float32") && !m["x"].Const && !m["y"].Text.Matches("0(.0+)?")).
Report("floating point tested for equality")
m.Match(
"$x == $y",
"$x != $y",
).
Where(m["x"].Type.Is("float64") && !m["x"].Const && !m["y"].Text.Matches("0(.0+)?")).
Report("floating point tested for equality")
m.Match("switch $x { $*_ }", "switch $*_; $x { $*_ }").
Where(m["x"].Type.Is("float32")).
Report("floating point as switch expression")
m.Match("switch $x { $*_ }", "switch $*_; $x { $*_ }").
Where(m["x"].Type.Is("float64")).
Report("floating point as switch expression")
}
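// Illustrative example (hypothetical float64 values a, b):
//
//	if a == b { ... }                  // reported: floating point tested for equality
//	if math.Abs(a-b) < epsilon { ... } // one common alternative, for a caller-chosen epsilon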
func badexponent(m fluent.Matcher) {
m.Match(
"2 ^ $x",
"10 ^ $x",
).
Report("caret (^) is not exponentiation")
}
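// Illustrative example: in Go, ^ is bitwise XOR, not exponentiation.
//
//	x := 2 ^ 10          // reported: evaluates to 8 (0b0010 XOR 0b1010), not 1024
//	y := 1 << 10         // integer power of two
//	z := math.Pow(10, 3) // float64 exponentiation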
func floatloop(m fluent.Matcher) {
m.Match(
"for $i := $x; $i < $y; $i += $z { $*_ }",
"for $i = $x; $i < $y; $i += $z { $*_ }",
).
Where(m["i"].Type.Is("float64")).
Report("floating point for loop counter")
m.Match(
"for $i := $x; $i < $y; $i += $z { $*_ }",
"for $i = $x; $i < $y; $i += $z { $*_ }",
).
Where(m["i"].Type.Is("float32")).
Report("floating point for loop counter")
}
func urlredacted(m fluent.Matcher) {
m.Match(
"log.Println($x, $*_)",
"log.Println($*_, $x, $*_)",
"log.Println($*_, $x)",
"log.Printf($*_, $x, $*_)",
"log.Printf($*_, $x)",
"log.Println($x, $*_)",
"log.Println($*_, $x, $*_)",
"log.Println($*_, $x)",
"log.Printf($*_, $x, $*_)",
"log.Printf($*_, $x)",
).
Where(m["x"].Type.Is("*url.URL")).
Report("consider $x.Redacted() when outputting URLs")
}
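// Illustrative example (hypothetical *url.URL value u that may carry credentials):
//
//	log.Println("fetching", u)            // reported
//	log.Println("fetching", u.Redacted()) // masks any password in the URL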
func sprinterr(m fluent.Matcher) {
m.Match(`fmt.Sprint($err)`,
`fmt.Sprintf("%s", $err)`,
`fmt.Sprintf("%v", $err)`,
).
Where(m["err"].Type.Is("error")).
Report("maybe call $err.Error() instead of fmt.Sprint()?")
}
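// Illustrative example (hypothetical err known to be non-nil):
//
//	fmt.Sprintf("%v", err) // reported
//	err.Error()            // same text without the formatting machinery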
func largeloopcopy(m fluent.Matcher) {
m.Match(
`for $_, $v := range $_ { $*_ }`,
).
Where(m["v"].Type.Size > 512).
Report(`loop copies large value each iteration`)
}
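// Illustrative example (hypothetical slice of a struct type wider than 512 bytes):
//
//	for _, v := range items { use(v) }      // reported: copies each element into v
//	for i := range items { use(&items[i]) } // indexes instead of copying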
func joinpath(m fluent.Matcher) {
m.Match(
`strings.Join($_, "/")`,
`strings.Join($_, "\\")`,
"strings.Join($_, `\\`)",
).
Report(`did you mean path.Join() or filepath.Join() ?`)
}
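// Illustrative example (hypothetical parts []string):
//
//	strings.Join(parts, "/") // reported
//	filepath.Join(parts...)  // platform-aware separators, also cleans the result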
func readfull(m fluent.Matcher) {
m.Match(`$n, $err := io.ReadFull($_, $slice)
if $err != nil || $n != len($slice) {
$*_
}`,
`$n, $err := io.ReadFull($_, $slice)
if $n != len($slice) || $err != nil {
$*_
}`,
`$n, $err = io.ReadFull($_, $slice)
if $err != nil || $n != len($slice) {
$*_
}`,
`$n, $err = io.ReadFull($_, $slice)
if $n != len($slice) || $err != nil {
$*_
}`,
`if $n, $err := io.ReadFull($_, $slice); $n != len($slice) || $err != nil {
$*_
}`,
`if $n, $err := io.ReadFull($_, $slice); $err != nil || $n != len($slice) {
$*_
}`,
`if $n, $err = io.ReadFull($_, $slice); $n != len($slice) || $err != nil {
$*_
}`,
`if $n, $err = io.ReadFull($_, $slice); $err != nil || $n != len($slice) {
$*_
}`,
).Report("io.ReadFull() returns err == nil iff n == len(slice)")
}
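// Illustrative example: io.ReadFull documents that err == nil if and only if
// n == len(buf), so a single error check is enough (hypothetical r and buf):
//
//	if _, err := io.ReadFull(r, buf); err != nil {
//		return err
//	}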
func nilerr(m fluent.Matcher) {
m.Match(
`if err == nil { return err }`,
`if err == nil { return $*_, err }`,
).
Report(`return nil error instead of nil value`)
}
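// Illustrative example: inside a branch that has established err == nil,
// return nil explicitly rather than the err value that happens to be nil:
//
//	if err == nil {
//		return nil // clearer than: return err
//	}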
func mailaddress(m fluent.Matcher) {
m.Match(
"fmt.Sprintf(`\"%s\" <%s>`, $NAME, $EMAIL)",
"fmt.Sprintf(`\"%s\"<%s>`, $NAME, $EMAIL)",
"fmt.Sprintf(`%s <%s>`, $NAME, $EMAIL)",
"fmt.Sprintf(`%s<%s>`, $NAME, $EMAIL)",
`fmt.Sprintf("\"%s\"<%s>", $NAME, $EMAIL)`,
`fmt.Sprintf("\"%s\" <%s>", $NAME, $EMAIL)`,
`fmt.Sprintf("%s<%s>", $NAME, $EMAIL)`,
`fmt.Sprintf("%s <%s>", $NAME, $EMAIL)`,
).
Report("use net/mail Address.String() instead of fmt.Sprintf()").
Suggest("(&mail.Address{Name:$NAME, Address:$EMAIL}).String()")
}
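// Illustrative example (hypothetical name and email strings):
//
//	fmt.Sprintf("%s <%s>", name, email)                   // reported: breaks when the name needs quoting
//	(&mail.Address{Name: name, Address: email}).String()  // net/mail handles quoting and encoding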