Remove deprecated io/ioutil (#15707)

Klaus Post 2022-09-19 20:05:16 +02:00 committed by GitHub
parent 0b6175b742
commit ff12080ff5
89 changed files with 315 additions and 370 deletions
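
The change is mechanical: as of Go 1.16 everything in io/ioutil is available from the io and os packages and io/ioutil is deprecated, so each call site in the diff below is rewritten to its drop-in replacement. A minimal, hypothetical sketch of the mapping this commit applies (standalone example, not code from the MinIO tree):

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

func main() {
	// ioutil.ReadAll -> io.ReadAll
	data, err := io.ReadAll(bytes.NewReader([]byte("hello")))
	if err != nil {
		panic(err)
	}

	// ioutil.NopCloser -> io.NopCloser, ioutil.Discard -> io.Discard
	rc := io.NopCloser(bytes.NewReader(data))
	if _, err := io.Copy(io.Discard, rc); err != nil {
		panic(err)
	}

	// ioutil.TempDir -> os.MkdirTemp (ioutil.TempFile -> os.CreateTemp is analogous)
	dir, err := os.MkdirTemp("", "example")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// ioutil.WriteFile -> os.WriteFile, ioutil.ReadFile -> os.ReadFile
	name := filepath.Join(dir, "file.txt")
	if err := os.WriteFile(name, data, 0o644); err != nil {
		panic(err)
	}
	buf, err := os.ReadFile(name)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(buf))

	// ioutil.ReadDir -> os.ReadDir; returns []fs.DirEntry rather than []fs.FileInfo
	entries, err := os.ReadDir(dir)
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name())
	}
}

The only replacement that is not a pure rename is ioutil.ReadDir; see the note after the hunk where it first appears below.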

View File

@ -25,7 +25,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
@ -74,7 +73,7 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
return
}
data, err := ioutil.ReadAll(r.Body)
data, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@ -671,7 +670,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
if objectAPI == nil {
return
}
data, err := ioutil.ReadAll(r.Body)
data, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@ -871,7 +870,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
bucketPolicyBytes, err := ioutil.ReadAll(io.LimitReader(reader, sz))
bucketPolicyBytes, err := io.ReadAll(io.LimitReader(reader, sz))
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue
@ -1032,7 +1031,7 @@ func (a adminAPIHandlers) ImportBucketMetadataHandler(w http.ResponseWriter, r *
continue
}
case bucketQuotaConfigFile:
data, err := ioutil.ReadAll(reader)
data, err := io.ReadAll(reader)
if err != nil {
rpt.SetStatus(bucket, fileName, err)
continue

View File

@ -22,7 +22,6 @@ import (
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"strings"
"time"
@ -319,7 +318,7 @@ func (a adminAPIHandlers) SRPeerGetIDPSettings(w http.ResponseWriter, r *http.Re
}
func parseJSONBody(ctx context.Context, body io.Reader, v interface{}, encryptionKey string) error {
data, err := ioutil.ReadAll(body)
data, err := io.ReadAll(body)
if err != nil {
return SRError{
Cause: err,

View File

@ -22,7 +22,6 @@ import (
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"os"
"sort"
@ -229,7 +228,7 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
return
}
data, err := ioutil.ReadAll(r.Body)
data, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@ -1464,7 +1463,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
return
}
iamPolicyBytes, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength))
iamPolicyBytes, err := io.ReadAll(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
@ -1848,7 +1847,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
return
}
data, err := ioutil.ReadAll(r.Body)
data, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
@ -1871,7 +1870,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var allPolicies map[string]iampolicy.Policy
data, err = ioutil.ReadAll(f)
data, err = io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, allPoliciesFile, ""), r.URL)
return
@ -1906,7 +1905,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var userAccts map[string]madmin.AddOrUpdateUserReq
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, allUsersFile, ""), r.URL)
return
@ -1983,7 +1982,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var grpInfos map[string]GroupInfo
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, allGroupsFile, ""), r.URL)
return
@ -2021,7 +2020,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var serviceAcctReqs map[string]madmin.SRSvcAccCreate
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, allSvcAcctsFile, ""), r.URL)
return
@ -2117,7 +2116,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var userPolicyMap map[string]MappedPolicy
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, userPolicyMappingsFile, ""), r.URL)
return
@ -2156,7 +2155,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var grpPolicyMap map[string]MappedPolicy
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, groupPolicyMappingsFile, ""), r.URL)
return
@ -2185,7 +2184,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var userPolicyMap map[string]MappedPolicy
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, stsUserPolicyMappingsFile, ""), r.URL)
return
@ -2224,7 +2223,7 @@ func (a adminAPIHandlers) ImportIAM(w http.ResponseWriter, r *http.Request) {
default:
defer f.Close()
var grpPolicyMap map[string]MappedPolicy
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
if err != nil {
writeErrorResponseJSON(ctx, w, importErrorWithAPIErr(ctx, ErrInvalidRequest, err, stsGroupPolicyMappingsFile, ""), r.URL)
return

View File

@ -24,7 +24,7 @@ import (
"encoding/hex"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/url"
"os"
@ -388,7 +388,7 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
req.ContentLength = int64(len(buf))
sum := sha256.Sum256(buf)
req.Header.Set("X-Amz-Content-Sha256", hex.EncodeToString(sum[:]))
req.Body = ioutil.NopCloser(bytes.NewReader(buf))
req.Body = io.NopCloser(bytes.NewReader(buf))
req = signer.SignV4(*req, accessKey, secretKey, "", "")
// 3.1 Execute the request.

View File

@ -27,7 +27,6 @@ import (
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"math"
"math/rand"
"net/http"
@ -758,7 +757,7 @@ func (a adminAPIHandlers) ProfileHandler(w http.ResponseWriter, r *http.Request)
}
}
// read request body
io.CopyN(ioutil.Discard, r.Body, 1)
io.CopyN(io.Discard, r.Body, 1)
globalProfilerMu.Lock()

View File

@ -22,7 +22,6 @@ import (
"context"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
@ -220,7 +219,7 @@ func testServicesCmdHandler(cmd cmdType, t *testing.T) {
adminTestBed.router.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
resp, _ := ioutil.ReadAll(rec.Body)
resp, _ := io.ReadAll(rec.Body)
t.Errorf("Expected to receive %d status code but received %d. Body (%s)",
http.StatusOK, rec.Code, string(resp))
}

View File

@ -25,7 +25,6 @@ import (
"encoding/hex"
"errors"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
@ -333,14 +332,14 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
var locationConstraint string
if action == policy.CreateBucketAction {
// To extract region from XML in request body, get copy of request body.
payload, err := ioutil.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
payload, err := io.ReadAll(io.LimitReader(r.Body, maxLocationConstraintSize))
if err != nil {
logger.LogIf(ctx, err, logger.Application)
return cred, owner, ErrMalformedXML
}
// Populate payload to extract location constraint.
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
r.Body = io.NopCloser(bytes.NewReader(payload))
var s3Error APIErrorCode
locationConstraint, s3Error = parseLocationConstraint(r)
@ -349,7 +348,7 @@ func checkRequestAuthTypeCredential(ctx context.Context, r *http.Request, action
}
// Populate payload again to handle it in HTTP handler.
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
r.Body = io.NopCloser(bytes.NewReader(payload))
}
if cred.AccessKey != "" {
logger.GetReqInfo(ctx).AccessKey = cred.AccessKey

View File

@ -21,7 +21,6 @@ import (
"bytes"
"context"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
@ -44,7 +43,7 @@ func TestGetRequestAuthType(t *testing.T) {
req *http.Request
authT authType
}
nopCloser := ioutil.NopCloser(io.LimitReader(&nullReader{}, 1024))
nopCloser := io.NopCloser(io.LimitReader(&nullReader{}, 1024))
testCases := []testCase{
// Test case - 1
// Check for generic signature v4 header.
@ -406,7 +405,7 @@ func TestIsReqAuthenticated(t *testing.T) {
for i, testCase := range testCases {
s3Error := isReqAuthenticated(ctx, testCase.req, globalSite.Region, serviceS3)
if s3Error != testCase.s3Error {
if _, err := ioutil.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
if _, err := io.ReadAll(testCase.req.Body); toAPIErrorCode(ctx, err) != testCase.s3Error {
t.Fatalf("Test %d: Unexpected S3 error: want %d - got %d (got after reading request %s)", i, testCase.s3Error, s3Error, toAPIError(ctx, err).Code)
}
}

View File

@ -21,7 +21,7 @@ import (
"bytes"
"encoding/xml"
"fmt"
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"strconv"
@ -885,7 +885,7 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa
}
// read the response body.
actualContent, err = ioutil.ReadAll(rec.Body)
actualContent, err = io.ReadAll(rec.Body)
if err != nil {
t.Fatalf("Test %d : MinIO %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}

View File

@ -21,7 +21,6 @@ import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"net/http"
humanize "github.com/dustin/go-humanize"
@ -79,7 +78,7 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht
return
}
bucketPolicyBytes, err := ioutil.ReadAll(io.LimitReader(r.Body, r.ContentLength))
bucketPolicyBytes, err := io.ReadAll(io.LimitReader(r.Body, r.ContentLength))
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL)
return

View File

@ -21,7 +21,6 @@ import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"reflect"
@ -487,7 +486,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, recV4.Code)
}
// read the response body.
bucketPolicyReadBuf, err := ioutil.ReadAll(recV4.Body)
bucketPolicyReadBuf, err := io.ReadAll(recV4.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}
@ -525,7 +524,7 @@ func testGetBucketPolicyHandler(obj ObjectLayer, instanceType, bucketName string
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, recV2.Code)
}
// read the response body.
bucketPolicyReadBuf, err = ioutil.ReadAll(recV2.Body)
bucketPolicyReadBuf, err = io.ReadAll(recV2.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}

View File

@ -27,7 +27,6 @@ import (
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"math/rand"
"net"
"net/http"
@ -564,7 +563,7 @@ func readFromSecret(sp string) (string, error) {
if isFile(pathJoin("/run/secrets/", sp)) {
sp = pathJoin("/run/secrets/", sp)
}
credBuf, err := ioutil.ReadFile(sp)
credBuf, err := os.ReadFile(sp)
if err != nil {
if os.IsNotExist(err) { // ignore if file doesn't exist.
return "", nil

View File

@ -19,7 +19,7 @@ package cmd
import (
"errors"
"io/ioutil"
"os"
"reflect"
"testing"
)
@ -45,7 +45,7 @@ func Test_readFromSecret(t *testing.T) {
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "testfile")
tmpfile, err := os.CreateTemp("", "testfile")
if err != nil {
t.Error(err)
}
@ -157,7 +157,7 @@ MINIO_ROOT_PASSWORD=minio123`,
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
tmpfile, err := ioutil.TempFile("", "testfile")
tmpfile, err := os.CreateTemp("", "testfile")
if err != nil {
t.Error(err)
}

View File

@ -21,7 +21,7 @@ import (
"bytes"
"context"
"errors"
"io/ioutil"
"io"
"net/http"
"github.com/minio/minio/internal/hash"
@ -41,7 +41,7 @@ func readConfigWithMetadata(ctx context.Context, store objectIO, configFile stri
}
defer r.Close()
buf, err := ioutil.ReadAll(r)
buf, err := io.ReadAll(r)
if err != nil {
return nil, ObjectInfo{}, err
}

View File

@ -19,7 +19,6 @@ package cmd
import (
"fmt"
"io/ioutil"
"os"
"testing"
@ -47,7 +46,7 @@ func TestServerConfigMigrateV1(t *testing.T) {
// Create a V1 config json file and store it
configJSON := "{ \"version\":\"1\", \"accessKeyId\":\"abcde\", \"secretAccessKey\":\"abcdefgh\"}"
configPath := rootPath + "/fsUsers.json"
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
if err := os.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}
@ -167,7 +166,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
configPath := rootPath + SlashSeparator + minioConfigFile
// Create a corrupted config file
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0o644); err != nil {
if err := os.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}
// Fire a migrateConfig()
@ -180,7 +179,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
// Create a V2 config json file and store it
configJSON := "{ \"version\":\"2\", \"credentials\": {\"accessKeyId\":\"" + accessKey + "\", \"secretAccessKey\":\"" + secretKey + "\", \"region\":\"us-east-1\"}, \"mongoLogger\":{\"addr\":\"127.0.0.1:3543\", \"db\":\"foodb\", \"collection\":\"foo\"}, \"syslogLogger\":{\"network\":\"127.0.0.1:543\", \"addr\":\"addr\"}, \"fileLogger\":{\"filename\":\"log.out\"}}"
if err := ioutil.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
if err := os.WriteFile(configPath, []byte(configJSON), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}
@ -226,7 +225,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
configPath := rootPath + SlashSeparator + minioConfigFile
// Create a corrupted config file
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0o644); err != nil {
if err := os.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}
@ -320,7 +319,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
for i := 3; i <= 17; i++ {
// Create a corrupted config file
if err := ioutil.WriteFile(configPath, []byte(fmt.Sprintf("{ \"version\":\"%d\", \"credential\": { \"accessKey\": 1 } }", i)),
if err := os.WriteFile(configPath, []byte(fmt.Sprintf("{ \"version\":\"%d\", \"credential\": { \"accessKey\": 1 } }", i)),
0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}
@ -332,7 +331,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
}
// Create a corrupted config file for version '2'.
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0o644); err != nil {
if err := os.WriteFile(configPath, []byte("{ \"version\":\"2\", \"credentials\": { \"accessKeyId\": 1 } }"), 0o644); err != nil {
t.Fatal("Unexpected error: ", err)
}

View File

@ -24,7 +24,6 @@ import (
"encoding/binary"
"errors"
"io"
"io/ioutil"
"os"
"path"
"sort"
@ -296,7 +295,7 @@ func (d *dataUpdateTracker) startSaver(ctx context.Context, interval time.Durati
}
for _, drive := range drives {
cacheFormatPath := pathJoin(drive, dataUpdateTrackerFilename)
err := ioutil.WriteFile(cacheFormatPath, buf.Bytes(), os.ModePerm)
err := os.WriteFile(cacheFormatPath, buf.Bytes(), os.ModePerm)
if err != nil {
if osIsNotExist(err) {
continue

View File

@ -22,7 +22,6 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
@ -502,7 +501,7 @@ func createUsageTestFiles(t *testing.T, base, bucket string, files []usageTestFi
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(filepath.Join(base, bucket, f.name), make([]byte, f.size), os.ModePerm)
err = os.WriteFile(filepath.Join(base, bucket, f.name), make([]byte, f.size), os.ModePerm)
if err != nil {
t.Fatal(err)
}
@ -520,7 +519,7 @@ func generateUsageTestFiles(t *testing.T, base, bucket string, nFolders, nFiles,
}
for j := 0; j < nFiles; j++ {
name := filepath.Join(base, bucket, fmt.Sprint(i), fmt.Sprint(j)+".txt")
err = ioutil.WriteFile(name, pl, os.ModePerm)
err = os.WriteFile(name, pl, os.ModePerm)
if err != nil {
t.Fatal(err)
}

View File

@ -27,7 +27,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"path"
@ -851,7 +850,7 @@ func (c *diskCache) Put(ctx context.Context, bucket, object string, data io.Read
// Caches the object to disk
func (c *diskCache) put(ctx context.Context, bucket, object string, data io.Reader, size int64, rs *HTTPRangeSpec, opts ObjectOptions, incHitsOnly, writeback bool) (oi ObjectInfo, err error) {
if !c.diskSpaceAvailable(size) {
io.Copy(ioutil.Discard, data)
io.Copy(io.Discard, data)
return oi, errDiskFull
}
cachePath := getCacheSHADir(c.dir, bucket, object)
@ -1311,7 +1310,7 @@ func (c *diskCache) NewMultipartUpload(ctx context.Context, bucket, object, uID
func (c *diskCache) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data io.Reader, size int64, opts ObjectOptions) (partInfo PartInfo, err error) {
oi := PartInfo{}
if !c.diskSpaceAvailable(size) {
io.Copy(ioutil.Discard, data)
io.Copy(io.Discard, data)
return oi, errDiskFull
}
cachePath := getMultipartCacheSHADir(c.dir, bucket, object)

View File

@ -21,7 +21,6 @@ package cmd
import (
"errors"
"io/ioutil"
"os"
"github.com/djherbis/atime"
@ -30,7 +29,7 @@ import (
// Return error if Atime is disabled on the O/S
func checkAtimeSupport(dir string) (err error) {
file, err := ioutil.TempFile(dir, "prefix")
file, err := os.CreateTemp(dir, "prefix")
if err != nil {
return
}

View File

@ -23,7 +23,6 @@ package cmd
import (
"errors"
"io"
"io/ioutil"
"os"
"time"
@ -32,7 +31,7 @@ import (
// Return error if Atime is disabled on the O/S
func checkAtimeSupport(dir string) (err error) {
file, err := ioutil.TempFile(dir, "prefix")
file, err := os.CreateTemp(dir, "prefix")
if err != nil {
return
}
@ -45,7 +44,7 @@ func checkAtimeSupport(dir string) (err error) {
// add a sleep to ensure atime change is detected
time.Sleep(10 * time.Millisecond)
if _, err = io.Copy(ioutil.Discard, file); err != nil {
if _, err = io.Copy(io.Discard, file); err != nil {
return
}

View File

@ -22,7 +22,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"testing"
)
@ -46,7 +45,7 @@ type DummyDataGen struct {
// Given the function:
//
// f := func(r io.Reader) string {
// b, _ := ioutil.ReadAll(r)
// b, _ := io.ReadAll(r)
// return string(b)
// }
//
@ -115,7 +114,7 @@ func (d *DummyDataGen) Seek(offset int64, whence int) (int64, error) {
func TestDummyDataGenerator(t *testing.T) {
readAll := func(r io.Reader) string {
b, _ := ioutil.ReadAll(r)
b, _ := io.ReadAll(r)
return string(b)
}
checkEq := func(a, b string) {

View File

@ -25,7 +25,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strconv"
@ -206,7 +205,7 @@ func TestDeleteObjectsVersioned(t *testing.T) {
}
}
if _, err = ioutil.ReadFile(pathJoin(fsDirs[0], bucketName, "dir/obj1", "xl.meta")); err == nil {
if _, err = os.ReadFile(pathJoin(fsDirs[0], bucketName, "dir/obj1", "xl.meta")); err == nil {
t.Fatalf("xl.meta still present after removal")
}
}
@ -543,7 +542,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
}
}
if gr != nil {
_, err = io.Copy(ioutil.Discard, gr)
_, err = io.Copy(io.Discard, gr)
if err != toObjectErr(errErasureReadQuorum, bucket, object) {
t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
}
@ -588,7 +587,7 @@ func TestGetObjectNoQuorum(t *testing.T) {
}
}
if gr != nil {
_, err = io.Copy(ioutil.Discard, gr)
_, err = io.Copy(io.Discard, gr)
if err != toObjectErr(errErasureReadQuorum, bucket, object) {
t.Errorf("Expected GetObject to fail with %v, but failed with %v", toObjectErr(errErasureReadQuorum, bucket, object), err)
}

View File

@ -22,7 +22,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"path/filepath"
@ -421,7 +420,7 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
// get old cached metadata
oldMetaPath := pathJoin(oldCacheBucketsPath, bucket, object, cacheMetaJSONFile)
metaPath := pathJoin(destdir, cacheMetaJSONFile)
metaBytes, err := ioutil.ReadFile(oldMetaPath)
metaBytes, err := os.ReadFile(oldMetaPath)
if err != nil {
return err
}
@ -459,7 +458,7 @@ func migrateOldCache(ctx context.Context, c *diskCache) error {
return err
}
if err = ioutil.WriteFile(metaPath, jsonData, 0o644); err != nil {
if err = os.WriteFile(metaPath, jsonData, 0o644); err != nil {
return err
}
}

View File

@ -23,11 +23,11 @@ import (
"errors"
"fmt"
"io/fs"
"io/ioutil"
"os"
"reflect"
"sync"
humanize "github.com/dustin/go-humanize"
"github.com/dustin/go-humanize"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/config/storageclass"
@ -203,7 +203,7 @@ func formatErasureMigrate(export string) ([]byte, fs.FileInfo, error) {
}
migrate := func(formatPath string, formatData []byte) ([]byte, fs.FileInfo, error) {
if err = ioutil.WriteFile(formatPath, formatData, 0o666); err != nil {
if err = os.WriteFile(formatPath, formatData, 0o666); err != nil {
return nil, nil, err
}
formatFi, err := Lstat(formatPath)

View File

@ -22,7 +22,6 @@ import (
"encoding/hex"
"encoding/json"
"errors"
"io/ioutil"
"os"
"reflect"
"testing"
@ -122,7 +121,7 @@ func TestFormatErasureMigrate(t *testing.T) {
t.Fatal(err)
}
if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
if err = os.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
t.Fatal(err)
}
@ -140,7 +139,7 @@ func TestFormatErasureMigrate(t *testing.T) {
t.Fatalf("expected version: %s, got: %s", formatErasureVersionV3, migratedVersion)
}
b, err = ioutil.ReadFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile))
b, err = os.ReadFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile))
if err != nil {
t.Fatal(err)
}
@ -170,7 +169,7 @@ func TestFormatErasureMigrate(t *testing.T) {
t.Fatal(err)
}
if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
if err = os.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
t.Fatal(err)
}
@ -190,7 +189,7 @@ func TestFormatErasureMigrate(t *testing.T) {
t.Fatal(err)
}
if err = ioutil.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
if err = os.WriteFile(pathJoin(rootPath, minioMetaBucket, formatConfigFile), b, os.FileMode(0o644)); err != nil {
t.Fatal(err)
}

View File

@ -20,7 +20,6 @@ package cmd
import (
"bytes"
"io"
"io/ioutil"
"os"
"path"
"testing"
@ -267,7 +266,7 @@ func TestFSDeletes(t *testing.T) {
if err != nil {
t.Fatal(err)
}
err = ioutil.WriteFile(pathJoin(path, "success-vol", "not-empty", "file"), []byte("data"), 0o777)
err = os.WriteFile(pathJoin(path, "success-vol", "not-empty", "file"), []byte("data"), 0o777)
if err != nil {
t.Fatal(err)
}
@ -363,7 +362,7 @@ func BenchmarkFSDeleteFile(b *testing.B) {
// We need to create and delete the file sequentially inside the benchmark.
for i := 0; i < b.N; i++ {
b.StopTimer()
err = ioutil.WriteFile(filename, []byte("data"), 0o777)
err = os.WriteFile(filename, []byte("data"), 0o777)
if err != nil {
b.Fatal(err)
}
@ -540,7 +539,7 @@ func TestFSRemoveMeta(t *testing.T) {
func TestFSIsFile(t *testing.T) {
filePath := pathJoin(t.TempDir(), "tmpfile")
if err := ioutil.WriteFile(filePath, nil, 0o777); err != nil {
if err := os.WriteFile(filePath, nil, 0o777); err != nil {
t.Fatalf("Unable to create file %s", filePath)
}

View File

@ -22,7 +22,6 @@ import (
"encoding/hex"
"encoding/json"
"io"
"io/ioutil"
"os"
pathutil "path"
@ -216,7 +215,7 @@ func (m *fsMetaV1) ReadFrom(ctx context.Context, lk *lock.LockedFile) (n int64,
return 0, err
}
fsMetaBuf, err = ioutil.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
fsMetaBuf, err = io.ReadAll(io.NewSectionReader(lk, 0, fi.Size()))
if err != nil {
logger.LogIf(ctx, err)
return 0, err

View File

@ -22,7 +22,7 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"io"
"os"
"path/filepath"
"sort"
@ -246,7 +246,7 @@ func (fs *FSObjects) NewMultipartUpload(ctx context.Context, bucket, object stri
return nil, err
}
if err = ioutil.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0o666); err != nil {
if err = os.WriteFile(pathJoin(uploadIDDir, fs.metaJSONFile), fsMetaBytes, 0o666); err != nil {
logger.LogIf(ctx, err)
return nil, err
}
@ -525,7 +525,7 @@ func (fs *FSObjects) ListObjectParts(ctx context.Context, bucket, object, upload
}
defer rc.Close()
fsMetaBytes, err := ioutil.ReadAll(rc)
fsMetaBytes, err := io.ReadAll(rc)
if err != nil {
return result, toObjectErr(err, bucket, object)
}

View File

@ -22,7 +22,6 @@ import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"os/user"
@ -868,7 +867,7 @@ func (fs *FSObjects) getObjectInfoNoFSLock(ctx context.Context, bucket, object s
rc, _, err := fsOpenFile(ctx, fsMetaPath, 0)
if err == nil {
fsMetaBuf, rerr := ioutil.ReadAll(rc)
fsMetaBuf, rerr := io.ReadAll(rc)
rc.Close()
if rerr == nil {
json := jsoniter.ConfigCompatibleWithStandardLibrary

View File

@ -21,7 +21,7 @@ import (
"context"
"errors"
"fmt"
"io/ioutil"
"io"
"log"
"net/url"
"os"
@ -279,7 +279,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
UseTLSConfig(newTLSConfig(getCert)).
UseShutdownTimeout(ctx.Duration("shutdown-timeout")).
UseBaseContext(GlobalContext).
UseCustomLogger(log.New(ioutil.Discard, "", 0)) // Turn-off random logging by Go stdlib
UseCustomLogger(log.New(io.Discard, "", 0)) // Turn-off random logging by Go stdlib
go func() {
globalHTTPServerErrorCh <- httpServer.Start(GlobalContext)

View File

@ -18,8 +18,8 @@
package cmd
import (
"io/ioutil"
"net/http"
"os"
"runtime"
"strconv"
"sync"
@ -56,7 +56,7 @@ type apiConfig struct {
const cgroupLimitFile = "/sys/fs/cgroup/memory/memory.limit_in_bytes"
func cgroupLimit(limitFile string) (limit uint64) {
buf, err := ioutil.ReadFile(limitFile)
buf, err := os.ReadFile(limitFile)
if err != nil {
return 9223372036854771712
}

View File

@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/textproto"
@ -305,7 +304,7 @@ func extractPostPolicyFormValues(ctx context.Context, form *multipart.Form) (fil
b.WriteString(v)
}
fileSize = int64(b.Len())
filePart = ioutil.NopCloser(b)
filePart = io.NopCloser(b)
return filePart, fileName, fileSize, formValues, nil
}

View File

@ -21,7 +21,7 @@ import (
"bytes"
"context"
"encoding/xml"
"io/ioutil"
"io"
"net/http"
"net/textproto"
"os"
@ -45,13 +45,13 @@ func TestIsValidLocationContraint(t *testing.T) {
// Corrupted XML
malformedReq := &http.Request{
Body: ioutil.NopCloser(bytes.NewReader([]byte("<>"))),
Body: io.NopCloser(bytes.NewReader([]byte("<>"))),
ContentLength: int64(len("<>")),
}
// Not an XML
badRequest := &http.Request{
Body: ioutil.NopCloser(bytes.NewReader([]byte("garbage"))),
Body: io.NopCloser(bytes.NewReader([]byte("garbage"))),
ContentLength: int64(len("garbage")),
}
@ -61,7 +61,7 @@ func TestIsValidLocationContraint(t *testing.T) {
createBucketConfig.Location = location
createBucketConfigBytes, _ := xml.Marshal(createBucketConfig)
createBucketConfigBuffer := bytes.NewReader(createBucketConfigBytes)
req.Body = ioutil.NopCloser(createBucketConfigBuffer)
req.Body = io.NopCloser(createBucketConfigBuffer)
req.ContentLength = int64(createBucketConfigBuffer.Len())
return req
}

View File

@ -20,7 +20,7 @@ package cmd
import (
"bufio"
"bytes"
"io/ioutil"
"io"
"net/http"
"net/url"
"sort"
@ -49,7 +49,7 @@ func BenchmarkLockArgs(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
req.Body = ioutil.NopCloser(bytes.NewReader(argBytes))
req.Body = io.NopCloser(bytes.NewReader(argBytes))
getLockArgs(req)
}
}
@ -68,7 +68,7 @@ func BenchmarkLockArgsOld(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
req.Body = ioutil.NopCloser(bytes.NewReader([]byte(`obj.txt`)))
req.Body = io.NopCloser(bytes.NewReader([]byte(`obj.txt`)))
getLockArgsOld(req)
}
}

File diff suppressed because one or more lines are too long

View File

@ -23,7 +23,6 @@ import (
"crypto/md5"
"encoding/hex"
"errors"
"io/ioutil"
"os"
"path"
"testing"
@ -352,7 +351,7 @@ func testObjectAPIPutObjectStaleFiles(obj ObjectLayer, instanceType string, disk
for _, disk := range disks {
tmpMetaDir := path.Join(disk, minioMetaTmpBucket)
files, err := ioutil.ReadDir(tmpMetaDir)
files, err := os.ReadDir(tmpMetaDir)
if err != nil {
t.Fatal(err)
}
@ -431,9 +430,9 @@ func testObjectAPIMultipartPutObjectStaleFiles(obj ObjectLayer, instanceType str
for _, disk := range disks {
tmpMetaDir := path.Join(disk, minioMetaTmpBucket)
files, err := ioutil.ReadDir(tmpMetaDir)
files, err := os.ReadDir(tmpMetaDir)
if err != nil {
// Its OK to have non-existen tmpMetaDir.
// It's OK to have non-existing tmpMetaDir.
if osIsNotExist(err) {
continue
}
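Of the replacements in this commit, ioutil.ReadDir -> os.ReadDir is the one with a changed signature: os.ReadDir returns []fs.DirEntry (still sorted by filename) instead of []fs.FileInfo, which compiles unchanged at call sites that only check the error or iterate the entries. Where per-file metadata such as size or mtime is needed, fs.DirEntry.Info() recovers an fs.FileInfo on demand. A hedged sketch of that bridge, reusing the tmpMetaDir and t names from the hunk above purely for illustration:

entries, err := os.ReadDir(tmpMetaDir)
if err != nil {
	t.Fatal(err)
}
for _, entry := range entries {
	// fs.DirEntry defers the stat; Info() fetches the fs.FileInfo only when needed.
	fi, err := entry.Info()
	if err != nil {
		t.Fatal(err)
	}
	t.Logf("leftover %s: %d bytes", fi.Name(), fi.Size())
}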

View File

@ -29,7 +29,6 @@ import (
"hash"
"hash/crc32"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
@ -540,7 +539,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
t.Fatalf("Case %d: Expected the response status to be `%d`, but instead found `%d`", i+1, testCase.expectedRespStatus, rec.Code)
}
// read the response body.
actualContent, err := ioutil.ReadAll(rec.Body)
actualContent, err := io.ReadAll(rec.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed reading response body: <ERROR> %v", i+1, instanceType, err)
}
@ -584,7 +583,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
}
// read the response body.
actualContent, err = ioutil.ReadAll(recV2.Body)
actualContent, err = io.ReadAll(recV2.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed to read response body: <ERROR> %v", i+1, instanceType, err)
}
@ -739,7 +738,7 @@ func testAPIGetObjectWithMPHandler(obj ObjectLayer, instanceType, bucketName str
// Check response code (we make only valid requests in
// this test)
if rec.Code != http.StatusPartialContent && rec.Code != http.StatusOK {
bd, err1 := ioutil.ReadAll(rec.Body)
bd, err1 := io.ReadAll(rec.Body)
t.Fatalf("%s Object: %s Case %d ByteRange: %s: Got response status `%d` and body: %s,%v",
instanceType, object, i+1, byteRange, rec.Code, string(bd), err1)
}
@ -922,7 +921,7 @@ func testAPIGetObjectWithPartNumberHandler(obj ObjectLayer, instanceType, bucket
// Check response code (we make only valid requests in this test)
if rec.Code != http.StatusPartialContent && rec.Code != http.StatusOK {
bd, err1 := ioutil.ReadAll(rec.Body)
bd, err1 := io.ReadAll(rec.Body)
t.Fatalf("%s Object: %s ObjectIndex %d PartNumber: %d: Got response status `%d` and body: %s,%v",
instanceType, object, oindex, partNumber, rec.Code, string(bd), err1)
}
@ -1245,7 +1244,7 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam
i+1, instanceType, testCase.expectedRespStatus, rec.Code, testCase.fault)
}
// read the response body.
actualContent, err := ioutil.ReadAll(rec.Body)
actualContent, err := io.ReadAll(rec.Body)
if err != nil {
t.Fatalf("Test %d: %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}
@ -3078,7 +3077,7 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s
}
// read the response body.
actualContent, err = ioutil.ReadAll(rec.Body)
actualContent, err = io.ReadAll(rec.Body)
if err != nil {
t.Fatalf("Test %d : MinIO %s: Failed parsing response body: <ERROR> %v", i+1, instanceType, err)
}
@ -3490,7 +3489,7 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN
// Get uploadID of the mulitpart upload initiated.
var mpartResp InitiateMultipartUploadResponse
mpartRespBytes, err := ioutil.ReadAll(rec.Result().Body)
mpartRespBytes, err := io.ReadAll(rec.Result().Body)
if err != nil {
t.Fatalf("[%s] Failed to read NewMultipartUpload response <ERROR> %v", instanceType, err)
}
@ -3531,7 +3530,7 @@ func testAPIPutObjectPartHandlerStreaming(obj ObjectLayer, instanceType, bucketN
apiRouter.ServeHTTP(rec, req)
if test.expectedErr != noAPIErr {
errBytes, err := ioutil.ReadAll(rec.Result().Body)
errBytes, err := io.ReadAll(rec.Result().Body)
if err != nil {
t.Fatalf("Test %d %s Failed to read error response from upload part request %s/%s: <ERROR> %v",
i+1, instanceType, bucketName, testObject, err)
@ -3796,7 +3795,7 @@ func testAPIPutObjectPartHandler(obj ObjectLayer, instanceType, bucketName strin
if rec.Code != http.StatusOK {
var errBytes []byte
// read the response body.
errBytes, err = ioutil.ReadAll(rec.Result().Body)
errBytes, err = io.ReadAll(rec.Result().Body)
if err != nil {
t.Fatalf("%s, Failed to read error response from upload part request \"%s\"/\"%s\": <ERROR> %v.",
reqType, bucketName, test.objectName, err)
@ -3878,7 +3877,7 @@ func testAPIListObjectPartsHandlerPreSign(obj ObjectLayer, instanceType, bucketN
// Get uploadID of the mulitpart upload initiated.
var mpartResp InitiateMultipartUploadResponse
mpartRespBytes, err := ioutil.ReadAll(rec.Result().Body)
mpartRespBytes, err := io.ReadAll(rec.Result().Body)
if err != nil {
t.Fatalf("[%s] Failed to read NewMultipartUpload response <ERROR> %v", instanceType, err)
}
@ -4103,7 +4102,7 @@ func testAPIListObjectPartsHandler(obj ObjectLayer, instanceType, bucketName str
var errBytes []byte
// read the response body.
errBytes, err = ioutil.ReadAll(rec.Result().Body)
errBytes, err = io.ReadAll(rec.Result().Body)
if err != nil {
t.Fatalf("%s,Failed to read error response list object parts request %s/%s: <ERROR> %v", reqType, bucketName, testObject, err)
}

View File

@ -19,7 +19,6 @@ package cmd
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
@ -36,7 +35,7 @@ func TestReadDirFail(t *testing.T) {
}
file := path.Join(os.TempDir(), "issue")
if err := ioutil.WriteFile(file, []byte(""), 0o644); err != nil {
if err := os.WriteFile(file, []byte(""), 0o644); err != nil {
t.Fatal(err)
}
defer os.RemoveAll(file)
@ -80,7 +79,7 @@ func setupTestReadDirFiles(t *testing.T) (testResults []result) {
entries := []string{}
for i := 0; i < 10; i++ {
name := fmt.Sprintf("file-%d", i)
if err := ioutil.WriteFile(filepath.Join(dir, name), []byte{}, os.ModePerm); err != nil {
if err := os.WriteFile(filepath.Join(dir, name), []byte{}, os.ModePerm); err != nil {
// For cleanup, its required to add these entries into test results.
testResults = append(testResults, result{dir, entries})
t.Fatalf("Unable to create file, %s", err)
@ -105,7 +104,7 @@ func setupTestReadDirGeneric(t *testing.T) (testResults []result) {
entries := []string{"mydir/"}
for i := 0; i < 10; i++ {
name := fmt.Sprintf("file-%d", i)
if err := ioutil.WriteFile(filepath.Join(dir, "mydir", name), []byte{}, os.ModePerm); err != nil {
if err := os.WriteFile(filepath.Join(dir, "mydir", name), []byte{}, os.ModePerm); err != nil {
// For cleanup, its required to add these entries into test results.
testResults = append(testResults, result{dir, entries})
t.Fatalf("Unable to write file, %s", err)
@ -130,7 +129,7 @@ func setupTestReadDirSymlink(t *testing.T) (testResults []result) {
for i := 0; i < 10; i++ {
name1 := fmt.Sprintf("file-%d", i)
name2 := fmt.Sprintf("file-%d", i+10)
if err := ioutil.WriteFile(filepath.Join(dir, name1), []byte{}, os.ModePerm); err != nil {
if err := os.WriteFile(filepath.Join(dir, name1), []byte{}, os.ModePerm); err != nil {
// For cleanup, its required to add these entries into test results.
testResults = append(testResults, result{dir, entries})
t.Fatalf("Unable to create a file, %s", err)
@ -235,7 +234,7 @@ func TestReadDirN(t *testing.T) {
dir := t.TempDir()
for c := 1; c <= testCase.numFiles; c++ {
err := ioutil.WriteFile(filepath.Join(dir, fmt.Sprintf("%d", c)), []byte{}, os.ModePerm)
err := os.WriteFile(filepath.Join(dir, fmt.Sprintf("%d", c)), []byte{}, os.ModePerm)
if err != nil {
os.RemoveAll(dir)
t.Fatalf("Unable to create a file, %s", err)

View File

@ -22,7 +22,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"sync"
@ -185,7 +184,7 @@ func selfSpeedTest(ctx context.Context, opts speedTestOpts) (SpeedTestResult, er
fbr := firstByteRecorder{
r: r,
}
n, err := io.Copy(ioutil.Discard, &fbr)
n, err := io.Copy(io.Discard, &fbr)
r.Close()
if err == nil {
response := time.Since(t)

View File

@ -22,7 +22,7 @@ import (
"context"
"encoding/base64"
"fmt"
"io/ioutil"
"io"
"mime/multipart"
"net/http"
"net/http/httptest"
@ -222,7 +222,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
}
if testCase.malformedBody {
// Change the request body.
req.Body = ioutil.NopCloser(bytes.NewReader([]byte("Hello,")))
req.Body = io.NopCloser(bytes.NewReader([]byte("Hello,")))
}
// Since `apiRouter` satisfies `http.Handler` it has a ServeHTTP to execute the logic ofthe handler.
// Call the ServeHTTP to execute the handler.

View File

@ -24,7 +24,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"sort"
"strings"
@ -184,7 +183,7 @@ func (api objectAPIHandlers) getObjectInArchiveFileHandler(ctx context.Context,
return
}
} else {
rc = ioutil.NopCloser(bytes.NewReader([]byte{}))
rc = io.NopCloser(bytes.NewReader([]byte{}))
}
defer rc.Close()
@ -317,7 +316,7 @@ func getFilesListFromZIPObject(ctx context.Context, objectAPI ObjectLayer, bucke
if err != nil {
return nil, ObjectInfo{}, err
}
b, err := ioutil.ReadAll(gr)
b, err := io.ReadAll(gr)
gr.Close()
if err != nil {
return nil, ObjectInfo{}, err

View File

@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"math/rand"
"os"
@ -35,7 +34,7 @@ import (
"time"
"github.com/minio/cli"
minio "github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7"
"github.com/minio/minio-go/v7/pkg/credentials"
"github.com/minio/minio/internal/auth"
"github.com/minio/minio/internal/bucket/bandwidth"
@ -516,7 +515,7 @@ func serverMain(ctx *cli.Context) {
UseIdleTimeout(ctx.Duration("idle-timeout")).
UseReadHeaderTimeout(ctx.Duration("read-header-timeout")).
UseBaseContext(GlobalContext).
UseCustomLogger(log.New(ioutil.Discard, "", 0)) // Turn-off random logging by Go stdlib
UseCustomLogger(log.New(io.Discard, "", 0)) // Turn-off random logging by Go stdlib
go func() {
globalHTTPServerErrorCh <- httpServer.Start(GlobalContext)

View File

@ -23,7 +23,6 @@ import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
@ -34,7 +33,7 @@ import (
"testing"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/dustin/go-humanize"
"github.com/minio/minio-go/v7/pkg/set"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/pkg/bucket/policy"
@ -65,7 +64,7 @@ func (c *check) Assert(gotValue interface{}, expectedValue interface{}) {
}
func verifyError(c *check, response *http.Response, code, description string, statusCode int) {
data, err := ioutil.ReadAll(response.Body)
data, err := io.ReadAll(response.Body)
c.Assert(err, nil)
errorResponse := APIErrorResponse{}
err = xml.Unmarshal(data, &errorResponse)
@ -384,7 +383,7 @@ func (s *TestSuiteCommon) TestBucketPolicy(c *check) {
c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusOK)
bucketPolicyReadBuf, err := ioutil.ReadAll(response.Body)
bucketPolicyReadBuf, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// Verify if downloaded policy matches with previously uploaded.
expectedPolicy, err := policy.ParseConfig(strings.NewReader(bucketPolicyStr), bucketName)
@ -593,7 +592,7 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) {
c.Assert(response.StatusCode, http.StatusOK)
deleteResp := DeleteObjectsResponse{}
delRespBytes, err := ioutil.ReadAll(response.Body)
delRespBytes, err := io.ReadAll(response.Body)
c.Assert(err, nil)
err = xml.Unmarshal(delRespBytes, &deleteResp)
c.Assert(err, nil)
@ -616,7 +615,7 @@ func (s *TestSuiteCommon) TestDeleteMultipleObjects(c *check) {
c.Assert(response.StatusCode, http.StatusOK)
deleteResp = DeleteObjectsResponse{}
delRespBytes, err = ioutil.ReadAll(response.Body)
delRespBytes, err = io.ReadAll(response.Body)
c.Assert(err, nil)
err = xml.Unmarshal(delRespBytes, &deleteResp)
c.Assert(err, nil)
@ -756,7 +755,7 @@ func (s *TestSuiteCommon) TestEmptyObject(c *check) {
var buffer bytes.Buffer
// extract the body of the response.
responseBody, err := ioutil.ReadAll(response.Body)
responseBody, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// assert the http response body content.
c.Assert(true, bytes.Equal(responseBody, buffer.Bytes()))
@ -877,7 +876,7 @@ func (s *TestSuiteCommon) TestMultipleObjects(c *check) {
c.Assert(response.StatusCode, http.StatusOK)
// extract the response body.
responseBody, err := ioutil.ReadAll(response.Body)
responseBody, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// assert the content body for the expected object data.
c.Assert(true, bytes.Equal(responseBody, []byte("hello one")))
@ -906,7 +905,7 @@ func (s *TestSuiteCommon) TestMultipleObjects(c *check) {
c.Assert(response.StatusCode, http.StatusOK)
// verify response data
responseBody, err = ioutil.ReadAll(response.Body)
responseBody, err = io.ReadAll(response.Body)
c.Assert(err, nil)
c.Assert(true, bytes.Equal(responseBody, []byte("hello two")))
@ -933,7 +932,7 @@ func (s *TestSuiteCommon) TestMultipleObjects(c *check) {
c.Assert(response.StatusCode, http.StatusOK)
// verify object.
responseBody, err = ioutil.ReadAll(response.Body)
responseBody, err = io.ReadAll(response.Body)
c.Assert(err, nil)
c.Assert(true, bytes.Equal(responseBody, []byte("hello three")))
}
@ -1059,7 +1058,7 @@ func (s *TestSuiteCommon) TestCopyObject(c *check) {
c.Assert(response.StatusCode, http.StatusOK)
// reading the response body.
// response body is expected to have the copied content of the first uploaded object.
object, err := ioutil.ReadAll(response.Body)
object, err := io.ReadAll(response.Body)
c.Assert(err, nil)
c.Assert(string(object), "hello world")
}
@ -1233,7 +1232,7 @@ func (s *TestSuiteCommon) TestSHA256Mismatch(c *check) {
// Set the body to generate signature mismatch.
helloReader := bytes.NewReader([]byte("Hello, World"))
request.ContentLength = helloReader.Size()
request.Body = ioutil.NopCloser(helloReader)
request.Body = io.NopCloser(helloReader)
c.Assert(err, nil)
// execute the HTTP request.
@ -1562,7 +1561,7 @@ func (s *TestSuiteCommon) TestPartialContent(c *check) {
response, err = s.client.Do(request)
c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusPartialContent)
partialObject, err := ioutil.ReadAll(response.Body)
partialObject, err := io.ReadAll(response.Body)
c.Assert(err, nil)
c.Assert(string(partialObject), "Wo")
@ -1628,7 +1627,7 @@ func (s *TestSuiteCommon) TestListObjectsHandler(c *check) {
c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusOK)
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
for _, expectedStr := range testCase.expectedStrings {
@ -1703,7 +1702,7 @@ func (s *TestSuiteCommon) TestListObjectsSpecialCharactersHandler(c *check) {
c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusOK)
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
for _, expectedStr := range testCase.expectedStrings {
@ -1837,7 +1836,7 @@ func (s *TestSuiteCommon) TestGetObjectLarge10MiB(c *check) {
c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusOK)
// extract the content from response body.
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// Compare putContent and getContent.
@ -1898,7 +1897,7 @@ func (s *TestSuiteCommon) TestGetObjectLarge11MiB(c *check) {
c.Assert(err, nil)
c.Assert(response.StatusCode, http.StatusOK)
// fetch the content from response body.
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// Get etag of the response content.
@ -1986,7 +1985,7 @@ func (s *TestSuiteCommon) TestGetPartialObjectMisAligned(c *check) {
// Since only part of the object is requested, expecting response status to be http.StatusPartialContent .
c.Assert(response.StatusCode, http.StatusPartialContent)
// parse the HTTP response body.
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// Compare putContent and getContent.
@ -2052,7 +2051,7 @@ func (s *TestSuiteCommon) TestGetPartialObjectLarge11MiB(c *check) {
// Since only part of the object is requested, expecting response status to be http.StatusPartialContent .
c.Assert(response.StatusCode, http.StatusPartialContent)
// read the downloaded content from the response body.
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// Compare putContent and getContent.
@ -2119,7 +2118,7 @@ func (s *TestSuiteCommon) TestGetPartialObjectLarge10MiB(c *check) {
// Since only part of the object is requested, expecting response status to be http.StatusPartialContent .
c.Assert(response.StatusCode, http.StatusPartialContent)
// read the downloaded content from the response body.
getContent, err := ioutil.ReadAll(response.Body)
getContent, err := io.ReadAll(response.Body)
c.Assert(err, nil)
// Compare putContent and getContent.

View File

@ -22,7 +22,6 @@ import (
"crypto/hmac"
"encoding/hex"
"io"
"io/ioutil"
"net/http"
"strconv"
"strings"
@ -83,12 +82,12 @@ func skipContentSha256Cksum(r *http.Request) bool {
// Returns SHA256 for calculating canonical-request.
func getContentSha256Cksum(r *http.Request, stype serviceType) string {
if stype == serviceSTS {
payload, err := ioutil.ReadAll(io.LimitReader(r.Body, stsRequestBodyLimit))
payload, err := io.ReadAll(io.LimitReader(r.Body, stsRequestBodyLimit))
if err != nil {
logger.CriticalIf(GlobalContext, err)
}
sum256 := sha256.Sum256(payload)
r.Body = ioutil.NopCloser(bytes.NewReader(payload))
r.Body = io.NopCloser(bytes.NewReader(payload))
return hex.EncodeToString(sum256[:])
}

View File

@ -20,7 +20,7 @@ package cmd
import (
"bytes"
"encoding/gob"
"io/ioutil"
"io"
"testing"
"time"
@ -125,7 +125,7 @@ func BenchmarkEncodeDiskInfoMsgp(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := msgp.Encode(ioutil.Discard, &v)
err := msgp.Encode(io.Discard, &v)
if err != nil {
b.Fatal(err)
}
@ -146,7 +146,7 @@ func BenchmarkEncodeDiskInfoGOB(b *testing.B) {
Error: "",
}
enc := gob.NewEncoder(ioutil.Discard)
enc := gob.NewEncoder(io.Discard)
b.SetBytes(1)
b.ReportAllocs()
b.ResetTimer()
@ -200,7 +200,7 @@ func BenchmarkEncodeFileInfoMsgp(b *testing.B) {
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := msgp.Encode(ioutil.Discard, &v)
err := msgp.Encode(io.Discard, &v)
if err != nil {
b.Fatal(err)
}
@ -209,7 +209,7 @@ func BenchmarkEncodeFileInfoMsgp(b *testing.B) {
func BenchmarkEncodeFileInfoGOB(b *testing.B) {
v := FileInfo{Volume: "testbucket", Name: "src/compress/zlib/reader_test.go", VersionID: "", IsLatest: true, Deleted: false, DataDir: "5e0153cc-621a-4267-8cb6-4919140d53b3", XLV1: false, ModTime: UTCNow(), Size: 3430, Mode: 0x0, Metadata: map[string]string{"X-Minio-Internal-Server-Side-Encryption-Iv": "jIJPsrkkVYYMvc7edBrNl+7zcM7+ZwXqMb/YAjBO/ck=", "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Key-Id": "my-minio-key", "X-Minio-Internal-Server-Side-Encryption-S3-Kms-Sealed-Key": "IAAfAP2p7ZLv3UpLwBnsKkF2mtWba0qoY42tymK0szRgGvAxBNcXyHXYooe9dQpeeEJWgKUa/8R61oCy1mFwIg==", "X-Minio-Internal-Server-Side-Encryption-S3-Sealed-Key": "IAAfAPFYRDkHVirJBJxBixNj3PLWt78dFuUTyTLIdLG820J7XqLPBO4gpEEEWw/DoTsJIb+apnaem+rKtQ1h3Q==", "X-Minio-Internal-Server-Side-Encryption-Seal-Algorithm": "DAREv2-HMAC-SHA256", "content-type": "application/octet-stream", "etag": "20000f00e2c3709dc94905c6ce31e1cadbd1c064e14acdcd44cf0ac2db777eeedd88d639fcd64de16851ade8b21a9a1a"}, Parts: []ObjectPartInfo{{ETag: "", Number: 1, Size: 3430, ActualSize: 3398}}, Erasure: ErasureInfo{Algorithm: "reedsolomon", DataBlocks: 2, ParityBlocks: 2, BlockSize: 10485760, Index: 3, Distribution: []int{3, 4, 1, 2}, Checksums: []ChecksumInfo{{PartNumber: 1, Algorithm: 0x3, Hash: []uint8{}}}}}
enc := gob.NewEncoder(ioutil.Discard)
enc := gob.NewEncoder(io.Discard)
b.SetBytes(1)
b.ReportAllocs()
b.ResetTimer()

View File

@ -25,7 +25,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/url"
"path"
"strconv"
@ -363,7 +362,7 @@ func (client *storageRESTClient) CreateFile(ctx context.Context, volume, path st
values.Set(storageRESTVolume, volume)
values.Set(storageRESTFilePath, path)
values.Set(storageRESTLength, strconv.Itoa(int(size)))
respBody, err := client.call(ctx, storageRESTMethodCreateFile, values, ioutil.NopCloser(reader), size)
respBody, err := client.call(ctx, storageRESTMethodCreateFile, values, io.NopCloser(reader), size)
defer xhttp.DrainBody(respBody)
if err != nil {
return err
@ -541,7 +540,7 @@ func (client *storageRESTClient) ReadAll(ctx context.Context, volume string, pat
return nil, err
}
defer xhttp.DrainBody(respBody)
return ioutil.ReadAll(respBody)
return io.ReadAll(respBody)
}
// ReadFileStream - returns a reader for the requested file.

View File

@ -26,7 +26,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os/user"
"path"
@ -943,7 +942,7 @@ func waitForHTTPResponse(respBody io.Reader) (io.Reader, error) {
case 0:
return reader, nil
case 1:
errorText, err := ioutil.ReadAll(reader)
errorText, err := io.ReadAll(reader)
if err != nil {
return nil, err
}
@ -1077,7 +1076,7 @@ func waitForHTTPStream(respBody io.ReadCloser, w io.Writer) error {
}
return err
case 1:
errorText, err := ioutil.ReadAll(respBody)
errorText, err := io.ReadAll(respBody)
if err != nil {
return err
}

View File

@ -38,7 +38,6 @@ import (
"flag"
"fmt"
"io"
"io/ioutil"
"math/big"
"math/rand"
"net"
@ -100,7 +99,7 @@ func TestMain(m *testing.M) {
globalIsDistErasure = false
// Disable printing console messages during tests.
color.Output = ioutil.Discard
color.Output = io.Discard
// Minimum is error logs for testing
logger.MinimumLogLevel = logger.ErrorLvl
@ -530,12 +529,12 @@ func truncateChunkByHalfSigv4(req *http.Request) (*http.Request, error) {
newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n",
hexChunkSize, chunkSignature))
newChunk, err := ioutil.ReadAll(bufReader)
newChunk, err := io.ReadAll(bufReader)
if err != nil {
return nil, err
}
newReq := req
newReq.Body = ioutil.NopCloser(
newReq.Body = io.NopCloser(
bytes.NewReader(bytes.Join([][]byte{newChunkHdr, newChunk[:len(newChunk)/2]},
[]byte(""))),
)
@ -552,14 +551,14 @@ func malformDataSigV4(req *http.Request, newByte byte) (*http.Request, error) {
newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n",
hexChunkSize, chunkSignature))
newChunk, err := ioutil.ReadAll(bufReader)
newChunk, err := io.ReadAll(bufReader)
if err != nil {
return nil, err
}
newChunk[0] = newByte
newReq := req
newReq.Body = ioutil.NopCloser(
newReq.Body = io.NopCloser(
bytes.NewReader(bytes.Join([][]byte{newChunkHdr, newChunk},
[]byte(""))),
)
@ -579,13 +578,13 @@ func malformChunkSizeSigV4(req *http.Request, badSize int64) (*http.Request, err
newHexChunkSize := []byte(fmt.Sprintf("%x", n))
newChunkHdr := []byte(fmt.Sprintf("%s"+s3ChunkSignatureStr+"%s\r\n",
newHexChunkSize, chunkSignature))
newChunk, err := ioutil.ReadAll(bufReader)
newChunk, err := io.ReadAll(bufReader)
if err != nil {
return nil, err
}
newReq := req
newReq.Body = ioutil.NopCloser(
newReq.Body = io.NopCloser(
bytes.NewReader(bytes.Join([][]byte{newChunkHdr, newChunk},
[]byte(""))),
)
@ -711,10 +710,10 @@ func newTestStreamingRequest(method, urlStr string, dataLength, chunkSize int64,
}
if body == nil {
// this is added to avoid panic during ioutil.ReadAll(req.Body).
// this is added to avoid panic during io.ReadAll(req.Body).
// th stack trace can be found here https://github.com/minio/minio/pull/2074 .
// This is very similar to https://github.com/golang/go/issues/7527.
req.Body = ioutil.NopCloser(bytes.NewReader([]byte("")))
req.Body = io.NopCloser(bytes.NewReader([]byte("")))
}
contentLength := calculateStreamContentLength(dataLength, chunkSize)
@ -728,7 +727,7 @@ func newTestStreamingRequest(method, urlStr string, dataLength, chunkSize int64,
body.Seek(0, 0)
// Add body
req.Body = ioutil.NopCloser(body)
req.Body = io.NopCloser(body)
req.ContentLength = contentLength
return req, nil
@ -779,7 +778,7 @@ func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize in
}
}
req.Body = ioutil.NopCloser(bytes.NewReader(stream))
req.Body = io.NopCloser(bytes.NewReader(stream))
return req, nil
}
@ -1077,7 +1076,7 @@ func newTestRequest(method, urlStr string, contentLength int64, body io.ReadSeek
case body == nil:
hashedPayload = getSHA256Hash([]byte{})
default:
payloadBytes, err := ioutil.ReadAll(body)
payloadBytes, err := io.ReadAll(body)
if err != nil {
return nil, err
}
@ -1455,7 +1454,7 @@ func getListenNotificationURL(endPoint, bucketName string, prefixes, suffixes, e
func getRandomDisks(N int) ([]string, error) {
var erasureDisks []string
for i := 0; i < N; i++ {
path, err := ioutil.TempDir(globalTestTmpDir, "minio-")
path, err := os.MkdirTemp(globalTestTmpDir, "minio-")
if err != nil {
// Remove directories created so far.
removeRoots(erasureDisks)
@ -1586,14 +1585,14 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN
rec := httptest.NewRecorder()
// reading the body to preserve it so that it can be used again for second attempt of sending unsigned HTTP request.
// If the body is read in the handler the same request cannot be made use of.
buf, err := ioutil.ReadAll(anonReq.Body)
buf, err := io.ReadAll(anonReq.Body)
if err != nil {
t.Fatal(failTestStr(anonTestStr, err.Error()))
}
// creating 2 read closer (to set as request body) from the body content.
readerOne := ioutil.NopCloser(bytes.NewBuffer(buf))
readerTwo := ioutil.NopCloser(bytes.NewBuffer(buf))
readerOne := io.NopCloser(bytes.NewBuffer(buf))
readerTwo := io.NopCloser(bytes.NewBuffer(buf))
anonReq.Body = readerOne
@ -1610,7 +1609,7 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN
if anonReq.Method != http.MethodHead {
// read the response body.
var actualContent []byte
actualContent, err = ioutil.ReadAll(rec.Body)
actualContent, err = io.ReadAll(rec.Body)
if err != nil {
t.Fatal(failTestStr(anonTestStr, fmt.Sprintf("Failed parsing response body: <ERROR> %v", err)))
}
@ -1640,7 +1639,7 @@ func ExecObjectLayerAPIAnonTest(t *testing.T, obj ObjectLayer, testName, bucketN
// verify the response body for `ErrAccessDenied` message =.
if anonReq.Method != http.MethodHead {
// read the response body.
actualContent, err := ioutil.ReadAll(rec.Body)
actualContent, err := io.ReadAll(rec.Body)
if err != nil {
t.Fatal(failTestStr(unknownSignTestStr, fmt.Sprintf("Failed parsing response body: <ERROR> %v", err)))
}
@ -1692,7 +1691,7 @@ func ExecObjectLayerAPINilTest(t TestErrHandler, bucketName, objectName, instanc
// for other type of HTTP requests compare the response body content with the expected one.
if req.Method != http.MethodHead {
// read the response body.
actualContent, err := ioutil.ReadAll(rec.Body)
actualContent, err := io.ReadAll(rec.Body)
if err != nil {
t.Fatalf("MinIO %s: Failed parsing response body: <ERROR> %v", instanceType, err)
}
@ -2234,7 +2233,7 @@ func uploadTestObject(t *testing.T, apiRouter http.Handler, creds auth.Credentia
checkRespErr := func(rec *httptest.ResponseRecorder, exp int) {
if rec.Code != exp {
b, err := ioutil.ReadAll(rec.Body)
b, err := io.ReadAll(rec.Body)
t.Fatalf("Expected: %v, Got: %v, Body: %s, err: %v", exp, rec.Code, string(b), err)
}
}

View File

@ -26,7 +26,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
@ -334,7 +333,7 @@ func downloadReleaseURL(u *url.URL, timeout time.Duration, mode string) (content
}
}
contentBytes, err := ioutil.ReadAll(resp.Body)
contentBytes, err := io.ReadAll(resp.Body)
if err != nil {
return content, AdminError{
Code: AdminUpdateUnexpectedFailure,
@ -522,7 +521,7 @@ func downloadBinary(u *url.URL, mode string) (readerReturn []byte, err error) {
}
// convert a Reader to bytes
binaryFile, err := ioutil.ReadAll(reader)
binaryFile, err := io.ReadAll(reader)
if err != nil {
return nil, err
}

View File

@ -20,7 +20,6 @@ package cmd
import (
"encoding/hex"
"fmt"
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
@ -233,7 +232,7 @@ func TestIsKubernetes(t *testing.T) {
// Tests if the environment we are running is Helm chart.
func TestGetHelmVersion(t *testing.T) {
createTempFile := func(content string) string {
tmpfile, err := ioutil.TempFile("", "helm-testfile-")
tmpfile, err := os.CreateTemp("", "helm-testfile-")
if err != nil {
t.Fatalf("Unable to create temporary file. %s", err)
}

View File

@ -26,7 +26,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/url"
@ -301,7 +300,7 @@ func startProfiler(profilerType string) (minioProfiler, error) {
// library creates to store profiling data.
switch madmin.ProfilerType(profilerType) {
case madmin.ProfilerCPU:
dirPath, err := ioutil.TempDir("", "profile")
dirPath, err := os.MkdirTemp("", "profile")
if err != nil {
return nil, err
}
@ -331,7 +330,7 @@ func startProfiler(profilerType string) (minioProfiler, error) {
if n := runtime.NumGoroutine(); n > 10000 && !globalIsCICD {
return nil, fmt.Errorf("unable to perform CPU IO profile with %d goroutines", n)
}
dirPath, err := ioutil.TempDir("", "profile")
dirPath, err := os.MkdirTemp("", "profile")
if err != nil {
return nil, err
}
@ -396,7 +395,7 @@ func startProfiler(profilerType string) (minioProfiler, error) {
return buf.Bytes(), err
}
case madmin.ProfilerTrace:
dirPath, err := ioutil.TempDir("", "profile")
dirPath, err := os.MkdirTemp("", "profile")
if err != nil {
return nil, err
}
@ -1184,7 +1183,7 @@ func MockOpenIDTestUserInteraction(ctx context.Context, pro OpenIDClientAppParam
return "", fmt.Errorf("request err: %v", err)
}
// {
// bodyBuf, err := ioutil.ReadAll(resp.Body)
// bodyBuf, err := io.ReadAll(resp.Body)
// if err != nil {
// return "", fmt.Errorf("Error reading body: %v", err)
// }
@ -1206,7 +1205,7 @@ func MockOpenIDTestUserInteraction(ctx context.Context, pro OpenIDClientAppParam
return "", fmt.Errorf("post form err: %v", err)
}
// fmt.Printf("resp: %#v %#v\n", resp.StatusCode, resp.Header)
// bodyBuf, err := ioutil.ReadAll(resp.Body)
// bodyBuf, err := io.ReadAll(resp.Body)
// if err != nil {
// return "", fmt.Errorf("Error reading body: %v", err)
// }

View File

@ -22,7 +22,6 @@ import (
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"os"
@ -343,7 +342,7 @@ func TestJSONLoad(t *testing.T) {
// Test jsonSave.
func TestJSONSave(t *testing.T) {
f, err := ioutil.TempFile("", "")
f, err := os.CreateTemp("", "")
if err != nil {
t.Fatal(err)
}

View File

@ -37,7 +37,6 @@ import (
"github.com/klauspost/compress/zstd"
"github.com/minio/minio/internal/bucket/lifecycle"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/ioutil"
)
func TestReadXLMetaNoData(t *testing.T) {
@ -405,7 +404,7 @@ func TestDeleteVersionWithSharedDataDir(t *testing.T) {
}
func Benchmark_mergeXLV2Versions(b *testing.B) {
data, err := ioutil.ReadFile("testdata/xl.meta-v1.2.zst")
data, err := os.ReadFile("testdata/xl.meta-v1.2.zst")
if err != nil {
b.Fatal(err)
}
@ -454,7 +453,7 @@ func Benchmark_mergeXLV2Versions(b *testing.B) {
}
func Benchmark_xlMetaV2Shallow_Load(b *testing.B) {
data, err := ioutil.ReadFile("testdata/xl.meta-v1.2.zst")
data, err := os.ReadFile("testdata/xl.meta-v1.2.zst")
if err != nil {
b.Fatal(err)
}
@ -501,7 +500,7 @@ func Benchmark_xlMetaV2Shallow_Load(b *testing.B) {
func Test_xlMetaV2Shallow_Load(t *testing.T) {
// Load Legacy
data, err := ioutil.ReadFile("testdata/xl.meta-v1.2.zst")
data, err := os.ReadFile("testdata/xl.meta-v1.2.zst")
if err != nil {
t.Fatal(err)
}
@ -561,7 +560,7 @@ func Test_xlMetaV2Shallow_Load(t *testing.T) {
}
func Test_mergeXLV2Versions(t *testing.T) {
dataZ, err := ioutil.ReadFile("testdata/xl-meta-consist.zip")
dataZ, err := os.ReadFile("testdata/xl-meta-consist.zip")
if err != nil {
t.Fatal(err)
}

View File

@ -24,7 +24,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/url"
"os"
pathutil "path"
@ -379,7 +378,7 @@ func (s *xlStorage) SetDiskLoc(poolIdx, setIdx, diskIdx int) {
func (s *xlStorage) Healing() *healingTracker {
healingFile := pathJoin(s.diskPath, minioMetaBucket,
bucketMetaPrefix, healingTrackerFilename)
b, err := ioutil.ReadFile(healingFile)
b, err := os.ReadFile(healingFile)
if err != nil {
return nil
}
@ -676,7 +675,7 @@ func (s *xlStorage) GetDiskID() (string, error) {
}
formatFile := pathJoin(s.diskPath, minioMetaBucket, formatConfigFile)
b, err := ioutil.ReadFile(formatFile)
b, err := os.ReadFile(formatFile)
if err != nil {
// If the disk is still not initialized.
if osIsNotExist(err) {
@ -1471,7 +1470,7 @@ func (s *xlStorage) readAllData(ctx context.Context, volumeDir string, filePath
// Get size for precise allocation.
stat, err := f.Stat()
if err != nil {
buf, err = ioutil.ReadAll(r)
buf, err = io.ReadAll(r)
return buf, dmTime, osErrToFileErr(err)
}
if stat.IsDir() {

View File

@ -23,7 +23,6 @@ import (
"crypto/rand"
"fmt"
"io"
"io/ioutil"
"os"
slashpath "path"
"runtime"
@ -145,7 +144,7 @@ func createPermDeniedFile(t *testing.T) (permDeniedDir string) {
t.Fatalf("Unable to create temporary directory %v. %v", slashpath.Join(permDeniedDir, "mybucket"), err)
}
if err = ioutil.WriteFile(slashpath.Join(permDeniedDir, "mybucket", "myobject"), []byte(""), 0o400); err != nil {
if err = os.WriteFile(slashpath.Join(permDeniedDir, "mybucket", "myobject"), []byte(""), 0o400); err != nil {
t.Fatalf("Unable to create file %v. %v", slashpath.Join(permDeniedDir, "mybucket", "myobject"), err)
}
@ -197,7 +196,7 @@ func TestXLStorageIsDirEmpty(t *testing.T) {
// Should give false for not-a-directory.
dir2 := slashpath.Join(tmp, "file")
err := ioutil.WriteFile(dir2, []byte("hello"), 0o777)
err := os.WriteFile(dir2, []byte("hello"), 0o777)
if err != nil {
t.Fatal(err)
}
@ -254,7 +253,7 @@ func TestXLStorageReadVersion(t *testing.T) {
t.Fatalf("Unable to cfgreate xlStorage test setup, %s", err)
}
xlMeta, _ := ioutil.ReadFile("testdata/xl.meta")
xlMeta, _ := os.ReadFile("testdata/xl.meta")
// Create files for the test cases.
if err = xlStorage.MakeVol(context.Background(), "exists"); err != nil {
@ -472,7 +471,7 @@ func TestXLStorageMakeVol(t *testing.T) {
// Setup test environment.
// Create a file.
if err := ioutil.WriteFile(slashpath.Join(path, "vol-as-file"), []byte{}, os.ModePerm); err != nil {
if err := os.WriteFile(slashpath.Join(path, "vol-as-file"), []byte{}, os.ModePerm); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
// Create a directory.
@ -566,7 +565,7 @@ func TestXLStorageDeleteVol(t *testing.T) {
if err = os.Mkdir(vol, 0o777); err != nil {
t.Fatalf("Unable to create directory, %s", err)
}
if err = ioutil.WriteFile(slashpath.Join(vol, "test-file"), []byte{}, os.ModePerm); err != nil {
if err = os.WriteFile(slashpath.Join(vol, "test-file"), []byte{}, os.ModePerm); err != nil {
t.Fatalf("Unable to create file, %s", err)
}
@ -1328,7 +1327,7 @@ func TestXLStorageFormatFileChange(t *testing.T) {
}
// Change the format.json such that "this" is changed to "randomid".
if err = ioutil.WriteFile(pathJoin(xlStorage.String(), minioMetaBucket, formatConfigFile), []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"randomid","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","randomid","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`), 0o644); err != nil {
if err = os.WriteFile(pathJoin(xlStorage.String(), minioMetaBucket, formatConfigFile), []byte(`{"version":"1","format":"xl","id":"592a41c2-b7cc-4130-b883-c4b5cb15965b","xl":{"version":"3","this":"randomid","sets":[["e07285a6-8c73-4962-89c6-047fb939f803","33b8d431-482d-4376-b63c-626d229f0a29","cff6513a-4439-4dc1-bcaa-56c9e880c352","randomid","9c9f21d5-1f15-4737-bce6-835faa0d9626","0a59b346-1424-4fc2-9fa2-a2e80541d0c1","7924a3dc-b69a-4971-9a2e-014966d6aebb","4d2b8dd9-4e48-444b-bdca-c89194b26042"]],"distributionAlgo":"CRCMOD"}}`), 0o644); err != nil {
t.Fatalf("ioutil.WriteFile failed with %s", err)
}

View File

@ -24,7 +24,6 @@ import (
"flag"
"fmt"
"hash/crc32"
"io/ioutil"
"log"
"os"
"strings"
@ -97,7 +96,7 @@ func main() {
if file != "" {
distrib := make([][]string, setCount)
b, err := ioutil.ReadFile(file)
b, err := os.ReadFile(file)
if err != nil {
log.Fatalln(err)
}

View File

@ -22,7 +22,7 @@ import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"log"
"os"
"strings"
@ -79,7 +79,7 @@ FLAGS:
return err
}
b, err := ioutil.ReadAll(r)
b, err := io.ReadAll(r)
if err != nil {
return err
}
@ -105,7 +105,7 @@ FLAGS:
fmt.Println(string(b))
return nil
}
b, err := ioutil.ReadFile(file)
b, err := os.ReadFile(file)
if err != nil {
return err
}

View File

@ -27,7 +27,6 @@ import (
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"log"
"os"
"strings"
@ -49,7 +48,7 @@ func main() {
File string `json:"file"`
Key string `json:"key"`
}{}
got, err := ioutil.ReadAll(os.Stdin)
got, err := io.ReadAll(os.Stdin)
if err != nil {
fatalErr(err)
}

View File

@ -25,7 +25,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"log"
"os"
"strings"
@ -80,7 +79,7 @@ FLAGS:
app.Action = func(c *cli.Context) error {
ndjson := c.Bool("ndjson")
decode := func(r io.Reader, file string) ([]byte, error) {
b, err := ioutil.ReadAll(r)
b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
@ -189,7 +188,7 @@ FLAGS:
}
}, file)
err := data.files(func(name string, data []byte) {
err = ioutil.WriteFile(fmt.Sprintf("%s-%s.data", file, name), data, os.ModePerm)
err = os.WriteFile(fmt.Sprintf("%s-%s.data", file, name), data, os.ModePerm)
if err != nil {
fmt.Println(err)
}

View File

@ -23,7 +23,7 @@ package main
import (
"encoding/json"
"fmt"
"io/ioutil"
"io"
"log"
"net/http"
"strings"
@ -41,7 +41,7 @@ type Result struct {
}
func mainHandler(w http.ResponseWriter, r *http.Request) {
body, err := ioutil.ReadAll(r.Body)
body, err := io.ReadAll(r.Body)
if err != nil {
writeErrorResponse(w, err)
return

View File

@ -24,7 +24,7 @@ import (
"context"
"flag"
"fmt"
"io/ioutil"
"io"
"log"
"net/url"
"os"
@ -85,7 +85,7 @@ func main() {
if f, err := os.Open(sessionPolicyFile); err != nil {
log.Fatalf("Unable to open session policy file: %v", err)
} else {
bs, err := ioutil.ReadAll(f)
bs, err := io.ReadAll(f)
if err != nil {
log.Fatalf("Error reading session policy file: %v", err)
}

View File

@ -24,7 +24,7 @@ import (
"context"
"flag"
"fmt"
"io/ioutil"
"io"
"log"
"net/url"
"os"
@ -82,7 +82,7 @@ func main() {
if f, err := os.Open(sessionPolicyFile); err != nil {
log.Fatalf("Unable to open session policy file: %v", sessionPolicyFile, err)
} else {
bs, err := ioutil.ReadAll(f)
bs, err := io.ReadAll(f)
if err != nil {
log.Fatalf("Error reading session policy file: %v", err)
}

View File

@ -23,7 +23,7 @@ import (
"crypto/x509"
"encoding/pem"
"errors"
"io/ioutil"
"os"
"github.com/minio/pkg/env"
)
@ -37,7 +37,7 @@ const EnvCertPassword = "MINIO_CERT_PASSWD"
func ParsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err error) {
// Read certificate file.
var data []byte
if data, err = ioutil.ReadFile(certFile); err != nil {
if data, err = os.ReadFile(certFile); err != nil {
return nil, err
}
@ -71,11 +71,11 @@ func ParsePublicCertFile(certFile string) (x509Certs []*x509.Certificate, err er
// from the provided paths. The private key may be encrypted and is
// decrypted using the ENV_VAR: MINIO_CERT_PASSWD.
func LoadX509KeyPair(certFile, keyFile string) (tls.Certificate, error) {
certPEMBlock, err := ioutil.ReadFile(certFile)
certPEMBlock, err := os.ReadFile(certFile)
if err != nil {
return tls.Certificate{}, ErrSSLUnexpectedError(err)
}
keyPEMBlock, err := ioutil.ReadFile(keyFile)
keyPEMBlock, err := os.ReadFile(keyFile)
if err != nil {
return tls.Certificate{}, ErrSSLUnexpectedError(err)
}
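
A minimal sketch of the same load path using the standard library directly, assuming illustrative file names; the real LoadX509KeyPair above additionally handles encrypted private keys via MINIO_CERT_PASSWD:

// Sketch: loading a TLS key pair with os.ReadFile (no io/ioutil).
package main

import (
	"crypto/tls"
	"fmt"
	"os"
)

func loadKeyPair(certFile, keyFile string) (tls.Certificate, error) {
	certPEM, err := os.ReadFile(certFile) // was ioutil.ReadFile
	if err != nil {
		return tls.Certificate{}, err
	}
	keyPEM, err := os.ReadFile(keyFile)
	if err != nil {
		return tls.Certificate{}, err
	}
	return tls.X509KeyPair(certPEM, keyPEM)
}

func main() {
	cert, err := loadKeyPair("public.crt", "private.key") // illustrative paths
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(len(cert.Certificate), "certificate(s) loaded")
}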

View File

@ -18,7 +18,6 @@
package config
import (
"io/ioutil"
"os"
"testing"
)
@ -26,7 +25,7 @@ import (
func createTempFile(prefix, content string) (tempFile string, err error) {
var tmpfile *os.File
if tmpfile, err = ioutil.TempFile("", prefix); err != nil {
if tmpfile, err = os.CreateTemp("", prefix); err != nil {
return tempFile, err
}

View File

@ -21,7 +21,6 @@ import (
"bytes"
"encoding/hex"
"io"
"io/ioutil"
"testing"
"github.com/minio/minio/internal/kms"
@ -64,7 +63,7 @@ func TestEncryptDecrypt(t *testing.T) {
if err != nil {
t.Fatalf("Test %d: failed to encrypt stream: %v", i, err)
}
data, err := ioutil.ReadAll(ciphertext)
data, err := io.ReadAll(ciphertext)
if err != nil {
t.Fatalf("Test %d: failed to encrypt stream: %v", i, err)
}
@ -73,7 +72,7 @@ func TestEncryptDecrypt(t *testing.T) {
if err != nil {
t.Fatalf("Test %d: failed to decrypt stream: %v", i, err)
}
data, err = ioutil.ReadAll(plaintext)
data, err = io.ReadAll(plaintext)
if err != nil {
t.Fatalf("Test %d: failed to decrypt stream: %v", i, err)
}
@ -106,7 +105,7 @@ func BenchmarkEncrypt(b *testing.B) {
if err != nil {
b.Fatal(err)
}
if _, err = io.Copy(ioutil.Discard, ciphertext); err != nil {
if _, err = io.Copy(io.Discard, ciphertext); err != nil {
b.Fatal(err)
}
plaintext.Reset(data)

View File

@ -21,7 +21,6 @@ import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"github.com/minio/minio/internal/config"
@ -194,7 +193,7 @@ func (o *Opa) IsAllowed(args iampolicy.Args) (bool, error) {
defer o.args.CloseRespFn(resp.Body)
// Read the body to be saved later.
opaRespBytes, err := ioutil.ReadAll(resp.Body)
opaRespBytes, err := io.ReadAll(resp.Body)
if err != nil {
return false, err
}

View File

@ -21,7 +21,6 @@ import (
"bytes"
"encoding/json"
"io"
"io/ioutil"
"net/http"
"github.com/minio/minio/internal/config"
@ -188,7 +187,7 @@ func (o *AuthZPlugin) IsAllowed(args iampolicy.Args) (bool, error) {
defer o.args.CloseRespFn(resp.Body)
// Read the body to be saved later.
opaRespBytes, err := ioutil.ReadAll(resp.Body)
opaRespBytes, err := io.ReadAll(resp.Body)
if err != nil {
return false, err
}

View File

@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"time"
@ -62,7 +61,7 @@ func (c Config) Post(reqURL string, payload interface{}) (string, error) {
}
defer xhttp.DrainBody(resp.Body)
respBytes, err := ioutil.ReadAll(io.LimitReader(resp.Body, respBodyLimit))
respBytes, err := io.ReadAll(io.LimitReader(resp.Body, respBodyLimit))
if err != nil {
return "", err
}

View File

@ -19,7 +19,7 @@ package dsync
import (
"fmt"
"io/ioutil"
"io"
"net/http"
"net/http/httptest"
"sync"
@ -38,7 +38,7 @@ var (
)
func getLockArgs(r *http.Request) (args LockArgs, err error) {
buf, err := ioutil.ReadAll(r.Body)
buf, err := io.ReadAll(r.Body)
if err != nil {
return args, err
}

View File

@ -19,7 +19,6 @@ package etag
import (
"io"
"io/ioutil"
"net/http"
"strings"
"testing"
@ -139,7 +138,7 @@ var readerTests = []struct { // Reference values computed by: echo <content> | m
func TestReader(t *testing.T) {
for i, test := range readerTests {
reader := NewReader(strings.NewReader(test.Content), test.ETag)
if _, err := io.Copy(ioutil.Discard, reader); err != nil {
if _, err := io.Copy(io.Discard, reader); err != nil {
t.Fatalf("Test %d: read failed: %v", i, err)
}
if ETag := reader.ETag(); !Equal(ETag, test.ETag) {

View File

@ -24,7 +24,6 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
@ -449,7 +448,7 @@ func (c *esClientV7) createIndex(args ElasticsearchArgs) error {
err := fmt.Errorf("Create index err: %s", res.String())
return err
}
io.Copy(ioutil.Discard, resp.Body)
io.Copy(io.Discard, resp.Body)
return nil
}
return nil
@ -462,7 +461,7 @@ func (c *esClientV7) ping(ctx context.Context, _ ElasticsearchArgs) (bool, error
if err != nil {
return false, errNotConnected
}
io.Copy(ioutil.Discard, resp.Body)
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
return !resp.IsError(), nil
}
@ -476,7 +475,7 @@ func (c *esClientV7) entryExists(ctx context.Context, index string, key string)
if err != nil {
return false, err
}
io.Copy(ioutil.Discard, res.Body)
io.Copy(io.Discard, res.Body)
res.Body.Close()
return !res.IsError(), nil
}
@ -493,7 +492,7 @@ func (c *esClientV7) removeEntry(ctx context.Context, index string, key string)
return err
}
defer res.Body.Close()
defer io.Copy(ioutil.Discard, res.Body)
defer io.Copy(io.Discard, res.Body)
if res.IsError() {
return fmt.Errorf("Delete err: %s", res.String())
}
@ -522,7 +521,7 @@ func (c *esClientV7) updateEntry(ctx context.Context, index string, key string,
return err
}
defer res.Body.Close()
defer io.Copy(ioutil.Discard, res.Body)
defer io.Copy(io.Discard, res.Body)
if res.IsError() {
return fmt.Errorf("Update err: %s", res.String())
}
@ -549,7 +548,7 @@ func (c *esClientV7) addEntry(ctx context.Context, index string, eventData event
return err
}
defer res.Body.Close()
defer io.Copy(ioutil.Discard, res.Body)
defer io.Copy(io.Discard, res.Body)
if res.IsError() {
return fmt.Errorf("Add err: %s", res.String())
}

View File

@ -19,7 +19,6 @@ package target
import (
"encoding/json"
"io/ioutil"
"math"
"os"
"path/filepath"
@ -87,7 +86,7 @@ func (store *QueueStore) write(key string, e event.Event) error {
}
path := filepath.Join(store.directory, key+eventExt)
if err := ioutil.WriteFile(path, eventData, os.FileMode(0o770)); err != nil {
if err := os.WriteFile(path, eventData, os.FileMode(0o770)); err != nil {
return err
}
@ -124,7 +123,7 @@ func (store *QueueStore) Get(key string) (event event.Event, err error) {
}(store)
var eventData []byte
eventData, err = ioutil.ReadFile(filepath.Join(store.directory, key+eventExt))
eventData, err = os.ReadFile(filepath.Join(store.directory, key+eventExt))
if err != nil {
return event, err
}
@ -179,14 +178,22 @@ func (store *QueueStore) List() ([]string, error) {
// list does the listing without holding the lock.
func (store *QueueStore) list() ([]string, error) {
var names []string
files, err := ioutil.ReadDir(store.directory)
files, err := os.ReadDir(store.directory)
if err != nil {
return names, err
}
// Sort the dentries.
sort.Slice(files, func(i, j int) bool {
return files[i].ModTime().Before(files[j].ModTime())
ii, err := files[i].Info()
if err != nil {
return false
}
ji, err := files[j].Info()
if err != nil {
return true
}
return ii.ModTime().Before(ji.ModTime())
})
for _, file := range files {
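
A small standalone sketch (the directory path is illustrative) of the os.ReadDir-based sort above; unlike ioutil.ReadDir, os.ReadDir returns fs.DirEntry values, so the modification time now comes from DirEntry.Info(), and entries whose Info() call fails sort toward the end:

// Sketch: sorting directory entries by modification time with os.ReadDir.
package main

import (
	"fmt"
	"os"
	"sort"
)

func main() {
	entries, err := os.ReadDir("/tmp/queue") // illustrative path
	if err != nil {
		fmt.Println(err)
		return
	}
	sort.Slice(entries, func(i, j int) bool {
		ii, err := entries[i].Info()
		if err != nil {
			return false // entries with unreadable info sort last
		}
		ji, err := entries[j].Info()
		if err != nil {
			return true
		}
		return ii.ModTime().Before(ji.ModTime())
	})
	for _, e := range entries {
		fmt.Println(e.Name())
	}
}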

View File

@ -25,7 +25,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
@ -135,7 +134,7 @@ func (target *WebhookTarget) IsActive() (bool, error) {
}
return false, err
}
io.Copy(ioutil.Discard, resp.Body)
io.Copy(io.Discard, resp.Body)
resp.Body.Close()
// No network failure, i.e. a response from the target means it's up
return true, nil
@ -194,7 +193,7 @@ func (target *WebhookTarget) send(eventData event.Event) error {
return err
}
defer resp.Body.Close()
io.Copy(ioutil.Discard, resp.Body)
io.Copy(io.Discard, resp.Body)
if resp.StatusCode < 200 || resp.StatusCode > 299 {
target.Close()

View File

@ -22,7 +22,6 @@ import (
"encoding/hex"
"fmt"
"io"
"io/ioutil"
"testing"
)
@ -32,7 +31,7 @@ func TestHashReaderHelperMethods(t *testing.T) {
if err != nil {
t.Fatal(err)
}
_, err = io.Copy(ioutil.Discard, r)
_, err = io.Copy(io.Discard, r)
if err != nil {
t.Fatal(err)
}
@ -193,7 +192,7 @@ func TestHashReaderVerification(t *testing.T) {
if err != nil {
t.Fatalf("Test %q: Initializing reader failed %s", testCase.desc, err)
}
_, err = io.Copy(ioutil.Discard, r)
_, err = io.Copy(io.Discard, r)
if err != nil {
if err.Error() != testCase.err.Error() {
t.Errorf("Test %q: Expected error %s, got error %s", testCase.desc, testCase.err, err)

View File

@ -19,7 +19,6 @@ package http
import (
"io"
"io/ioutil"
)
// DrainBody drains and closes a non-nil response body.
@ -38,6 +37,6 @@ func DrainBody(respBody io.ReadCloser) {
// the same connection for future uses.
// - http://stackoverflow.com/a/17961593/4465767
defer respBody.Close()
io.Copy(ioutil.Discard, respBody)
io.Copy(io.Discard, respBody)
}
}
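
A hedged standalone sketch of the drain-then-close pattern DrainBody implements, so the underlying keep-alive connection can be reused; the endpoint URL is illustrative:

// Sketch: drain and close an HTTP response body so the connection can be reused.
package main

import (
	"io"
	"net/http"
)

func drainBody(respBody io.ReadCloser) {
	if respBody != nil {
		defer respBody.Close()
		// Discarding any unread bytes lets the Transport reuse the connection.
		io.Copy(io.Discard, respBody)
	}
}

func main() {
	resp, err := http.Get("http://localhost:9000/minio/health/live") // illustrative endpoint
	if err != nil {
		return
	}
	drainBody(resp.Body)
}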

View File

@ -21,16 +21,16 @@ import (
"context"
"crypto/tls"
"errors"
"io/ioutil"
"log"
"net"
"net/http"
"os"
"runtime/pprof"
"sync"
"sync/atomic"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/dustin/go-humanize"
)
var (
@ -154,7 +154,7 @@ func (srv *Server) Shutdown() error {
select {
case <-shutdownTimer.C:
// Write all running goroutines.
tmp, err := ioutil.TempFile("", "minio-goroutines-*.txt")
tmp, err := os.CreateTemp("", "minio-goroutines-*.txt")
if err == nil {
_ = pprof.Lookup("goroutine").WriteTo(tmp, 1)
tmp.Close()

View File

@ -21,7 +21,6 @@ import (
"bytes"
"context"
"io"
goioutil "io/ioutil"
"os"
"testing"
"time"
@ -63,7 +62,7 @@ func TestDeadlineWriter(t *testing.T) {
}
func TestCloseOnWriter(t *testing.T) {
writer := WriteOnClose(goioutil.Discard)
writer := WriteOnClose(io.Discard)
if writer.HasWritten() {
t.Error("WriteOnCloser must not be marked as HasWritten")
}
@ -72,7 +71,7 @@ func TestCloseOnWriter(t *testing.T) {
t.Error("WriteOnCloser must be marked as HasWritten")
}
writer = WriteOnClose(goioutil.Discard)
writer = WriteOnClose(io.Discard)
writer.Close()
if !writer.HasWritten() {
t.Error("WriteOnCloser must be marked as HasWritten")
@ -81,7 +80,7 @@ func TestCloseOnWriter(t *testing.T) {
// Test for AppendFile.
func TestAppendFile(t *testing.T) {
f, err := goioutil.TempFile("", "")
f, err := os.CreateTemp("", "")
if err != nil {
t.Fatal(err)
}
@ -90,7 +89,7 @@ func TestAppendFile(t *testing.T) {
f.WriteString("aaaaaaaaaa")
f.Close()
f, err = goioutil.TempFile("", "")
f, err = os.CreateTemp("", "")
if err != nil {
t.Fatal(err)
}
@ -103,7 +102,7 @@ func TestAppendFile(t *testing.T) {
t.Error(err)
}
b, err := goioutil.ReadFile(name1)
b, err := os.ReadFile(name1)
if err != nil {
t.Error(err)
}
@ -130,7 +129,7 @@ func TestSkipReader(t *testing.T) {
}
for i, testCase := range testCases {
r := NewSkipReader(testCase.src, testCase.skipLen)
b, err := goioutil.ReadAll(r)
b, err := io.ReadAll(r)
if err != nil {
t.Errorf("Case %d: Unexpected err %v", i, err)
}
@ -141,7 +140,7 @@ func TestSkipReader(t *testing.T) {
}
func TestSameFile(t *testing.T) {
f, err := goioutil.TempFile("", "")
f, err := os.CreateTemp("", "")
if err != nil {
t.Errorf("Error creating tmp file: %v", err)
}
@ -159,7 +158,7 @@ func TestSameFile(t *testing.T) {
if !SameFile(fi1, fi2) {
t.Fatal("Expected the files to be same")
}
if err = goioutil.WriteFile(tmpFile, []byte("aaa"), 0o644); err != nil {
if err = os.WriteFile(tmpFile, []byte("aaa"), 0o644); err != nil {
t.Fatal(err)
}
fi2, err = os.Stat(tmpFile)

View File

@ -18,7 +18,6 @@
package lock
import (
"io/ioutil"
"os"
"testing"
"time"
@ -26,7 +25,7 @@ import (
// Test lock fails.
func TestLockFail(t *testing.T) {
f, err := ioutil.TempFile("", "lock")
f, err := os.CreateTemp("", "lock")
if err != nil {
t.Fatal(err)
}
@ -56,7 +55,7 @@ func TestLockDirFail(t *testing.T) {
// Tests rwlock methods.
func TestRWLockedFile(t *testing.T) {
f, err := ioutil.TempFile("", "lock")
f, err := os.CreateTemp("", "lock")
if err != nil {
t.Fatal(err)
}
@ -119,7 +118,7 @@ func TestRWLockedFile(t *testing.T) {
// Tests lock and unlock semantics.
func TestLockAndUnlock(t *testing.T) {
f, err := ioutil.TempFile("", "lock")
f, err := os.CreateTemp("", "lock")
if err != nil {
t.Fatal(err)
}

View File

@ -22,7 +22,6 @@ package mountinfo
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
@ -40,7 +39,7 @@ func TestCrossDeviceMountPaths(t *testing.T) {
var err error
dir := t.TempDir()
mountsPath := filepath.Join(dir, "mounts")
if err = ioutil.WriteFile(mountsPath, []byte(successCase), 0o666); err != nil {
if err = os.WriteFile(mountsPath, []byte(successCase), 0o666); err != nil {
t.Fatal(err)
}
// Failure case where we detected successfully cross device mounts.
@ -89,7 +88,7 @@ func TestCrossDeviceMount(t *testing.T) {
var err error
dir := t.TempDir()
mountsPath := filepath.Join(dir, "mounts")
if err = ioutil.WriteFile(mountsPath, []byte(successCase), 0o666); err != nil {
if err = os.WriteFile(mountsPath, []byte(successCase), 0o666); err != nil {
t.Fatal(err)
}
mounts, err := readProcMounts(mountsPath)
@ -138,7 +137,7 @@ func TestReadProcmountInfos(t *testing.T) {
dir := t.TempDir()
mountsPath := filepath.Join(dir, "mounts")
if err = ioutil.WriteFile(mountsPath, []byte(successCase), 0o666); err != nil {
if err = os.WriteFile(mountsPath, []byte(successCase), 0o666); err != nil {
t.Fatal(err)
}
// Verifies if reading each line worked properly.

View File

@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"math/rand"
"net/http"
"net/url"
@ -289,7 +288,7 @@ func (c *Client) Call(ctx context.Context, method string, values url.Values, bod
}
defer xhttp.DrainBody(resp.Body)
// Limit the ReadAll() in case the server responds with unexpectedly large data because of a bug.
b, err := ioutil.ReadAll(io.LimitReader(resp.Body, c.MaxErrResponseSize))
b, err := io.ReadAll(io.LimitReader(resp.Body, c.MaxErrResponseSize))
if err != nil {
if xnet.IsNetworkOrHostDown(err, c.ExpectTimeouts) {
if !c.NoMetrics {

View File

@ -21,7 +21,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"reflect"
"strings"
"testing"
@ -46,7 +46,7 @@ func TestRead(t *testing.T) {
var record sql.Record
var result bytes.Buffer
r, _ := NewReader(ioutil.NopCloser(strings.NewReader(c.content)), &ReaderArgs{
r, _ := NewReader(io.NopCloser(strings.NewReader(c.content)), &ReaderArgs{
FileHeaderInfo: none,
RecordDelimiter: c.recordDelimiter,
FieldDelimiter: c.fieldDelimiter,
@ -88,7 +88,7 @@ type tester interface {
}
func openTestFile(t tester, file string) []byte {
f, err := ioutil.ReadFile("testdata/testdata.zip")
f, err := os.ReadFile("testdata/testdata.zip")
if err != nil {
t.Fatal(err)
}
@ -103,7 +103,7 @@ func openTestFile(t tester, file string) []byte {
t.Fatal(err)
}
defer rc.Close()
b, err := ioutil.ReadAll(rc)
b, err := io.ReadAll(rc)
if err != nil {
t.Fatal(err)
}
@ -238,7 +238,7 @@ func TestReadExtended(t *testing.T) {
if !c.header {
args.FileHeaderInfo = none
}
r, _ := NewReader(ioutil.NopCloser(bytes.NewReader(input)), &args)
r, _ := NewReader(io.NopCloser(bytes.NewReader(input)), &args)
fields := 0
for {
record, err = r.Read(record)
@ -455,7 +455,7 @@ func TestReadFailures(t *testing.T) {
if c.sendErr != nil {
inr = io.MultiReader(inr, errReader{c.sendErr})
}
r, _ := NewReader(ioutil.NopCloser(inr), &args)
r, _ := NewReader(io.NopCloser(inr), &args)
fields := 0
for {
record, err = r.Read(record)
@ -502,7 +502,7 @@ func BenchmarkReaderBasic(b *testing.B) {
unmarshaled: true,
}
f := openTestFile(b, "nyc-taxi-data-100k.csv")
r, err := NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args)
r, err := NewReader(io.NopCloser(bytes.NewBuffer(f)), &args)
if err != nil {
b.Fatalf("Reading init failed with %s", err)
}
@ -512,7 +512,7 @@ func BenchmarkReaderBasic(b *testing.B) {
b.SetBytes(int64(len(f)))
var record sql.Record
for i := 0; i < b.N; i++ {
r, err = NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args)
r, err = NewReader(io.NopCloser(bytes.NewBuffer(f)), &args)
if err != nil {
b.Fatalf("Reading init failed with %s", err)
}
@ -550,7 +550,7 @@ func BenchmarkReaderHuge(b *testing.B) {
b.ResetTimer()
var record sql.Record
for i := 0; i < b.N; i++ {
r, err := NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args)
r, err := NewReader(io.NopCloser(bytes.NewBuffer(f)), &args)
if err != nil {
b.Fatalf("Reading init failed with %s", err)
}
@ -584,7 +584,7 @@ func BenchmarkReaderReplace(b *testing.B) {
unmarshaled: true,
}
f := openTestFile(b, "nyc-taxi-data-100k-single-delim.csv")
r, err := NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args)
r, err := NewReader(io.NopCloser(bytes.NewBuffer(f)), &args)
if err != nil {
b.Fatalf("Reading init failed with %s", err)
}
@ -594,7 +594,7 @@ func BenchmarkReaderReplace(b *testing.B) {
b.SetBytes(int64(len(f)))
var record sql.Record
for i := 0; i < b.N; i++ {
r, err = NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args)
r, err = NewReader(io.NopCloser(bytes.NewBuffer(f)), &args)
if err != nil {
b.Fatalf("Reading init failed with %s", err)
}
@ -621,7 +621,7 @@ func BenchmarkReaderReplaceTwo(b *testing.B) {
unmarshaled: true,
}
f := openTestFile(b, "nyc-taxi-data-100k-multi-delim.csv")
r, err := NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args)
r, err := NewReader(io.NopCloser(bytes.NewBuffer(f)), &args)
if err != nil {
b.Fatalf("Reading init failed with %s", err)
}
@ -631,7 +631,7 @@ func BenchmarkReaderReplaceTwo(b *testing.B) {
b.SetBytes(int64(len(f)))
var record sql.Record
for i := 0; i < b.N; i++ {
r, err = NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &args)
r, err = NewReader(io.NopCloser(bytes.NewBuffer(f)), &args)
if err != nil {
b.Fatalf("Reading init failed with %s", err)
}

View File

@ -20,7 +20,6 @@ package json
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -29,7 +28,7 @@ import (
)
func TestNewPReader(t *testing.T) {
files, err := ioutil.ReadDir("testdata")
files, err := os.ReadDir("testdata")
if err != nil {
t.Fatal(err)
}
@ -75,13 +74,13 @@ func TestNewPReader(t *testing.T) {
}
func BenchmarkPReader(b *testing.B) {
files, err := ioutil.ReadDir("testdata")
files, err := os.ReadDir("testdata")
if err != nil {
b.Fatal(err)
}
for _, file := range files {
b.Run(file.Name(), func(b *testing.B) {
f, err := ioutil.ReadFile(filepath.Join("testdata", file.Name()))
f, err := os.ReadFile(filepath.Join("testdata", file.Name()))
if err != nil {
b.Fatal(err)
}
@ -90,7 +89,7 @@ func BenchmarkPReader(b *testing.B) {
b.ResetTimer()
var record sql.Record
for i := 0; i < b.N; i++ {
r := NewPReader(ioutil.NopCloser(bytes.NewBuffer(f)), &ReaderArgs{})
r := NewPReader(io.NopCloser(bytes.NewBuffer(f)), &ReaderArgs{})
for {
record, err = r.Read(record)
if err != nil {

View File

@ -20,7 +20,6 @@ package json
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -29,7 +28,7 @@ import (
)
func TestNewReader(t *testing.T) {
files, err := ioutil.ReadDir("testdata")
files, err := os.ReadDir("testdata")
if err != nil {
t.Fatal(err)
}
@ -75,13 +74,13 @@ func TestNewReader(t *testing.T) {
}
func BenchmarkReader(b *testing.B) {
files, err := ioutil.ReadDir("testdata")
files, err := os.ReadDir("testdata")
if err != nil {
b.Fatal(err)
}
for _, file := range files {
b.Run(file.Name(), func(b *testing.B) {
f, err := ioutil.ReadFile(filepath.Join("testdata", file.Name()))
f, err := os.ReadFile(filepath.Join("testdata", file.Name()))
if err != nil {
b.Fatal(err)
}
@ -90,7 +89,7 @@ func BenchmarkReader(b *testing.B) {
b.ResetTimer()
var record sql.Record
for i := 0; i < b.N; i++ {
r := NewReader(ioutil.NopCloser(bytes.NewBuffer(f)), &ReaderArgs{})
r := NewReader(io.NopCloser(bytes.NewBuffer(f)), &ReaderArgs{})
for {
record, err = r.Read(record)
if err != nil {

View File

@ -25,7 +25,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"strings"
@ -83,9 +82,9 @@ var bufPool = sync.Pool{
var bufioWriterPool = sync.Pool{
New: func() interface{} {
// ioutil.Discard is just used to create the writer. Actual destination
// io.Discard is just used to create the writer. Actual destination
// writer is set later by Reset() before using it.
return bufio.NewWriter(ioutil.Discard)
return bufio.NewWriter(io.Discard)
},
}
@ -461,7 +460,7 @@ func (s3Select *S3Select) marshal(buf *bytes.Buffer, record sql.Record) error {
// Use bufio Writer to prevent csv.Writer from allocating a new buffer.
bufioWriter := bufioWriterPool.Get().(*bufio.Writer)
defer func() {
bufioWriter.Reset(ioutil.Discard)
bufioWriter.Reset(io.Discard)
bufioWriterPool.Put(bufioWriter)
}()

View File

@ -22,7 +22,6 @@ import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"reflect"
@ -636,7 +635,7 @@ func TestJSONQueries(t *testing.T) {
s3Select.Close()
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
Body: io.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
@ -644,7 +643,7 @@ func TestJSONQueries(t *testing.T) {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
got, err := io.ReadAll(res)
if err != nil {
t.Error(err)
return
@ -682,7 +681,7 @@ func TestJSONQueries(t *testing.T) {
s3Select.Close()
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
Body: io.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
@ -690,7 +689,7 @@ func TestJSONQueries(t *testing.T) {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
got, err := io.ReadAll(res)
if err != nil {
t.Error(err)
return
@ -763,7 +762,7 @@ func TestCSVQueries(t *testing.T) {
s3Select.Close()
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
Body: io.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
@ -771,7 +770,7 @@ func TestCSVQueries(t *testing.T) {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
got, err := io.ReadAll(res)
if err != nil {
t.Error(err)
return
@ -946,7 +945,7 @@ func TestCSVQueries2(t *testing.T) {
s3Select.Close()
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
Body: io.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
@ -954,7 +953,7 @@ func TestCSVQueries2(t *testing.T) {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
got, err := io.ReadAll(res)
if err != nil {
t.Error(err)
return
@ -1090,7 +1089,7 @@ true`,
s3Select.Close()
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
Body: io.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
@ -1098,7 +1097,7 @@ true`,
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
got, err := io.ReadAll(res)
if err != nil {
t.Error(err)
return
@ -1236,7 +1235,7 @@ func TestCSVInput(t *testing.T) {
if !reflect.DeepEqual(w.response, testCase.expectedResult) {
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
Body: io.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
@ -1244,7 +1243,7 @@ func TestCSVInput(t *testing.T) {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
got, err := io.ReadAll(res)
if err != nil {
t.Error(err)
return
@ -1356,7 +1355,7 @@ func TestJSONInput(t *testing.T) {
if !reflect.DeepEqual(w.response, testCase.expectedResult) {
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
Body: io.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
@ -1364,7 +1363,7 @@ func TestJSONInput(t *testing.T) {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
got, err := io.ReadAll(res)
if err != nil {
t.Error(err)
return
@ -1663,7 +1662,7 @@ func TestCSVRanges(t *testing.T) {
s3Select.Close()
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
Body: io.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
@ -1671,7 +1670,7 @@ func TestCSVRanges(t *testing.T) {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
got, err := io.ReadAll(res)
if err != nil {
t.Error(err)
return
@ -1765,7 +1764,7 @@ func TestParquetInput(t *testing.T) {
if !reflect.DeepEqual(w.response, testCase.expectedResult) {
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
Body: io.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
@ -1773,7 +1772,7 @@ func TestParquetInput(t *testing.T) {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
got, err := io.ReadAll(res)
if err != nil {
t.Error(err)
return
@ -1866,7 +1865,7 @@ func TestParquetInputSchema(t *testing.T) {
s3Select.Close()
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
Body: io.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
@ -1874,7 +1873,7 @@ func TestParquetInputSchema(t *testing.T) {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
got, err := io.ReadAll(res)
if err != nil {
t.Error(err)
return
@ -1966,7 +1965,7 @@ func TestParquetInputSchemaCSV(t *testing.T) {
s3Select.Close()
resp := http.Response{
StatusCode: http.StatusOK,
Body: ioutil.NopCloser(bytes.NewReader(w.response)),
Body: io.NopCloser(bytes.NewReader(w.response)),
ContentLength: int64(len(w.response)),
}
res, err := minio.NewSelectResults(&resp, "testbucket")
@ -1974,7 +1973,7 @@ func TestParquetInputSchemaCSV(t *testing.T) {
t.Error(err)
return
}
got, err := ioutil.ReadAll(res)
got, err := io.ReadAll(res)
if err != nil {
t.Error(err)
return

View File

@ -20,7 +20,7 @@ package simdj
import (
"bytes"
"io"
"io/ioutil"
"os"
"path/filepath"
"testing"
@ -40,7 +40,7 @@ func loadCompressed(t tester, file string) (js []byte) {
t.Fatal(err)
}
defer dec.Close()
js, err = ioutil.ReadFile(filepath.Join("testdata", file+".json.zst"))
js, err = os.ReadFile(filepath.Join("testdata", file+".json.zst"))
if err != nil {
t.Fatal(err)
}
@ -86,7 +86,7 @@ func TestNDJSON(t *testing.T) {
if false {
t.Log(string(b))
}
// _ = ioutil.WriteFile(filepath.Join("testdata", tt.name+".json"), b, os.ModePerm)
// _ = os.WriteFile(filepath.Join("testdata", tt.name+".json"), b, os.ModePerm)
parser:
for {
@ -121,7 +121,7 @@ func TestNDJSON(t *testing.T) {
t.Fatal("unexpected type:", typ.String())
}
}
refDec := json.NewReader(ioutil.NopCloser(bytes.NewBuffer(ref)), &json.ReaderArgs{ContentType: "json"})
refDec := json.NewReader(io.NopCloser(bytes.NewBuffer(ref)), &json.ReaderArgs{ContentType: "json"})
for {
rec, err := dec.Read(nil)

View File

@ -20,7 +20,7 @@ package sql
import (
"bytes"
"fmt"
"io/ioutil"
"io"
"os"
"path/filepath"
"reflect"
@ -48,7 +48,7 @@ func TestJsonpathEval(t *testing.T) {
t.Fatal(err)
}
b, err := ioutil.ReadAll(f)
b, err := io.ReadAll(f)
if err != nil {
t.Fatal(err)
}

View File

@ -27,7 +27,6 @@ import (
"compress/gzip"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@ -169,11 +168,13 @@ var _bindata = map[string]func() (*asset, error){
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
//
// data/
// foo.txt
// img/
// a.png
// b.png
//
// then AssetDir("data") would return []string{"foo.txt", "img"}
// AssetDir("data/img") would return []string{"a.png", "b.png"}
// AssetDir("foo.txt") and AssetDir("notexist") would return an error
@ -223,7 +224,7 @@ func RestoreAsset(dir, name string) error {
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
err = os.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}

View File

@ -1,5 +1,4 @@
//go:build testrunmain
// +build testrunmain
/*
* MinIO Object Storage (c) 2021 MinIO, Inc.
@ -39,8 +38,9 @@ import (
// 1. As an alternative you can also run the system under test by just calling "go test"
// $ APP_ARGS="server /tmp/test" go test -cover -tags testrunmain -covermode count -coverpkg="./..." -coverprofile=coverage.cov
//
// 2. Run System-Tests (when using GitBash prefix this line with MSYS_NO_PATHCONV=1)
// Note the SERVER_ENDPOINT must be reachable from inside the docker container (so don't use localhost!)
// 2. Run System-Tests (when using GitBash prefix this line with MSYS_NO_PATHCONV=1)
// Note the SERVER_ENDPOINT must be reachable from inside the docker container (so don't use localhost!)
//
// $ podman run -e MINT_MODE=full -e SERVER_ENDPOINT=192.168.47.11:9000 -e ACCESS_KEY=minioadmin -e SECRET_KEY=minioadmin -v /tmp/mint/log:/mint/log minio/mint
//
// 3. Stop system under test by sending SIGTERM