Add sufficient deadlines and countermeasures to handle hung node scenarios

Signed-off-by: Shubhendu Ram Tripathi <shubhendu@minio.io>
Signed-off-by: Harshavardhana <harsha@minio.io>
Harshavardhana 2024-05-16 00:58:35 -07:00
parent 1fd90c93ff
commit b6ced9cdf9
23 changed files with 245 additions and 120 deletions

View File

@ -56,6 +56,10 @@ jobs:
run: |
${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "erasure" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
- name: resiliency
run: |
${GITHUB_WORKSPACE}/.github/workflows/run-mint.sh "resiliency" "minio" "minio123" "${{ steps.vars.outputs.sha_short }}"
- name: The job must cleanup
if: ${{ always() }}
run: |

View File

@ -0,0 +1,78 @@
version: '3.7'
# Settings and configurations that are common for all containers
x-minio-common: &minio-common
image: quay.io/minio/minio:${JOB_NAME}
command: server --console-address ":9001" http://minio{1...4}/rdata{1...2}
expose:
- "9000"
- "9001"
environment:
MINIO_CI_CD: "on"
MINIO_ROOT_USER: "minio"
MINIO_ROOT_PASSWORD: "minio123"
MINIO_KMS_SECRET_KEY: "my-minio-key:OSMM+vkKUTCvQs9YL/CVMIMt43HFhkUpqJxTmGl6rYw="
MINIO_DRIVE_MAX_TIMEOUT: "5s"
healthcheck:
test: ["CMD", "mc", "ready", "local"]
interval: 5s
timeout: 5s
retries: 5
# starts 4 docker containers running minio server instances.
# using nginx reverse proxy, load balancing, you can access
# it through port 9000.
services:
minio1:
<<: *minio-common
hostname: minio1
volumes:
- rdata1-1:/rdata1
- rdata1-2:/rdata2
minio2:
<<: *minio-common
hostname: minio2
volumes:
- rdata2-1:/rdata1
- rdata2-2:/rdata2
minio3:
<<: *minio-common
hostname: minio3
volumes:
- rdata3-1:/rdata1
- rdata3-2:/rdata2
minio4:
<<: *minio-common
hostname: minio4
volumes:
- rdata4-1:/rdata1
- rdata4-2:/rdata2
nginx:
image: nginx:1.19.2-alpine
hostname: nginx
volumes:
- ./nginx-4-node.conf:/etc/nginx/nginx.conf:ro
ports:
- "9000:9000"
- "9001:9001"
depends_on:
- minio1
- minio2
- minio3
- minio4
## By default this config uses default local driver,
## For custom volumes replace with volume driver configuration.
volumes:
rdata1-1:
rdata1-2:
rdata2-1:
rdata2-2:
rdata3-1:
rdata3-2:
rdata4-1:
rdata4-2:

View File

@ -16,7 +16,7 @@ docker volume rm $(docker volume ls -f dangling=true) || true
cd .github/workflows/mint
docker-compose -f minio-${MODE}.yaml up -d
sleep 30s
sleep 1m
docker system prune -f || true
docker volume prune -f || true
@ -26,6 +26,9 @@ docker volume rm $(docker volume ls -q -f dangling=true) || true
[ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio2
[ "${MODE}" == "pools" ] && docker-compose -f minio-${MODE}.yaml stop minio6
# Pause one node, to check that all S3 calls work while one node goes wrong
[ "${MODE}" == "resiliency" ] && docker-compose -f minio-${MODE}.yaml pause minio4
docker run --rm --net=mint_default \
--name="mint-${MODE}-${JOB_NAME}" \
-e SERVER_ENDPOINT="nginx:9000" \

View File

@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@ -18,7 +18,6 @@
package cmd
import (
"context"
"math"
"net/http"
"os"
@ -31,6 +30,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/config"
"github.com/minio/minio/internal/kms"
xnet "github.com/minio/pkg/v2/net"
)
// getLocalServerProperty - returns madmin.ServerProperties for only the
@ -64,9 +64,11 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
if err := isServerResolvable(endpoint, 5*time.Second); err == nil {
network[nodeName] = string(madmin.ItemOnline)
} else {
network[nodeName] = string(madmin.ItemOffline)
// log once the error
peersLogOnceIf(context.Background(), err, nodeName)
if xnet.IsNetworkOrHostDown(err, false) {
network[nodeName] = string(madmin.ItemOffline)
} else if xnet.IsNetworkOrHostDown(err, true) {
network[nodeName] = "connection attempt timedout"
}
}
}
}
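
The status mapping above now separates two failure modes when probing a peer: an error the helper reports as network/host-down is marked offline, while one that only trips the timeout-aware check is labelled "connection attempt timedout". A rough, self-contained sketch of the same branch structure; peerStatus, the bounded net.DialTimeout probe and the TEST-NET address are stand-ins for isServerResolvable, not MinIO code:

package main

import (
	"fmt"
	"net"
	"time"

	xnet "github.com/minio/pkg/v2/net"
)

func peerStatus(addr string) string {
	// Stand-in for isServerResolvable: a probe bounded to 5 seconds.
	conn, err := net.DialTimeout("tcp", addr, 5*time.Second)
	if err == nil {
		conn.Close()
		return "online"
	}
	// Same branch structure as the hunk above; how the second argument
	// treats timeouts is what separates the two labels.
	if xnet.IsNetworkOrHostDown(err, false) {
		return "offline"
	} else if xnet.IsNetworkOrHostDown(err, true) {
		return "connection attempt timedout"
	}
	return "unknown"
}

func main() {
	// TEST-NET address; expected to fail, demonstrating the classification.
	fmt.Println(peerStatus("203.0.113.1:9000"))
}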

View File

@ -404,18 +404,14 @@ func buildServerCtxt(ctx *cli.Context, ctxt *serverCtxt) (err error) {
ctxt.FTP = ctx.StringSlice("ftp")
ctxt.SFTP = ctx.StringSlice("sftp")
ctxt.Interface = ctx.String("interface")
ctxt.UserTimeout = ctx.Duration("conn-user-timeout")
ctxt.ConnReadDeadline = ctx.Duration("conn-read-deadline")
ctxt.ConnWriteDeadline = ctx.Duration("conn-write-deadline")
ctxt.ConnClientReadDeadline = ctx.Duration("conn-client-read-deadline")
ctxt.ConnClientWriteDeadline = ctx.Duration("conn-client-write-deadline")
ctxt.SendBufSize = ctx.Int("send-buf-size")
ctxt.RecvBufSize = ctx.Int("recv-buf-size")
ctxt.ShutdownTimeout = ctx.Duration("shutdown-timeout")
ctxt.IdleTimeout = ctx.Duration("idle-timeout")
ctxt.UserTimeout = ctx.Duration("conn-user-timeout")
ctxt.ShutdownTimeout = ctx.Duration("shutdown-timeout")
ctxt.ConnReadDeadline = ctx.Duration("conn-read-deadline")
ctxt.ConnWriteDeadline = ctx.Duration("conn-write-deadline")
ctxt.ReadHeaderTimeout = ctx.Duration("read-header-timeout")
ctxt.MaxIdleConnsPerHost = ctx.Int("max-idle-conns-per-host")

View File

@ -657,7 +657,8 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
if err != nil {
return PartInfo{}, err
}
pctx := plkctx.Context()
ctx = plkctx.Context()
defer partIDLock.Unlock(plkctx)
onlineDisks := er.getDisks()
@ -689,7 +690,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
}
}()
erasure, err := NewErasure(pctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
erasure, err := NewErasure(ctx, fi.Erasure.DataBlocks, fi.Erasure.ParityBlocks, fi.Erasure.BlockSize)
if err != nil {
return pi, toObjectErr(err, bucket, object)
}
@ -742,7 +743,7 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
}
}
n, err := erasure.Encode(pctx, toEncode, writers, buffer, writeQuorum)
n, err := erasure.Encode(ctx, toEncode, writers, buffer, writeQuorum)
closeBitrotWriters(writers)
if err != nil {
return pi, toObjectErr(err, bucket, object)

View File

@ -36,8 +36,12 @@ var globalGridStart = make(chan struct{})
func initGlobalGrid(ctx context.Context, eps EndpointServerPools) error {
hosts, local := eps.GridHosts()
lookupHost := globalDNSCache.LookupHost
g, err := grid.NewManager(ctx, grid.ManagerOptions{
Dialer: grid.ContextDialer(xhttp.DialContextWithLookupHost(globalDNSCache.LookupHost, xhttp.NewInternodeDialContext(rest.DefaultTimeout, globalTCPOptions))),
// Pass Dialer for websocket grid, make sure we do not
// provide any DriveOPTimeout() function, as that is not
// useful over persistent connections.
Dialer: grid.ContextDialer(xhttp.DialContextWithLookupHost(lookupHost, xhttp.NewInternodeDialContext(rest.DefaultTimeout, globalTCPOptions.ForWebsocket()))),
Local: local,
Hosts: hosts,
AddAuth: newCachedAuthToken(),

View File

@ -320,6 +320,9 @@ func (sys *S3PeerSys) GetBucketInfo(ctx context.Context, bucket string, opts Buc
}
func (client *remotePeerS3Client) ListBuckets(ctx context.Context, opts BucketOptions) ([]BucketInfo, error) {
ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout())
defer cancel()
bi, err := listBucketsRPC.Call(ctx, client.gridConn(), &opts)
if err != nil {
return nil, toStorageErr(err)
@ -345,6 +348,9 @@ func (client *remotePeerS3Client) HealBucket(ctx context.Context, bucket string,
peerS3BucketDeleted: strconv.FormatBool(opts.Remove),
})
ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout())
defer cancel()
_, err := healBucketRPC.Call(ctx, conn, mss)
// Initialize heal result info
@ -367,6 +373,9 @@ func (client *remotePeerS3Client) GetBucketInfo(ctx context.Context, bucket stri
peerS3BucketDeleted: strconv.FormatBool(opts.Deleted),
})
ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout())
defer cancel()
volInfo, err := headBucketRPC.Call(ctx, conn, mss)
if err != nil {
return BucketInfo{}, toStorageErr(err)
@ -418,6 +427,9 @@ func (client *remotePeerS3Client) MakeBucket(ctx context.Context, bucket string,
peerS3BucketForceCreate: strconv.FormatBool(opts.ForceCreate),
})
ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout())
defer cancel()
_, err := makeBucketRPC.Call(ctx, conn, mss)
return toStorageErr(err)
}
@ -467,6 +479,9 @@ func (client *remotePeerS3Client) DeleteBucket(ctx context.Context, bucket strin
peerS3BucketForceDelete: strconv.FormatBool(opts.Force),
})
ctx, cancel := context.WithTimeout(ctx, globalDriveConfig.GetMaxTimeout())
defer cancel()
_, err := deleteBucketRPC.Call(ctx, conn, mss)
return toStorageErr(err)
}
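
Every remote peer call above now runs under a context bounded by globalDriveConfig.GetMaxTimeout(), so a hung node can stall the caller only for that long. A self-contained sketch of the same pattern; callPeer and the short 2s bound are illustrative values, not MinIO APIs:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// callPeer simulates an RPC against a peer that has stopped responding.
func callPeer(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Second): // the peer never answers in time
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	// Stand-in for globalDriveConfig.GetMaxTimeout(); the server default is 30s.
	maxTimeout := 2 * time.Second

	ctx, cancel := context.WithTimeout(context.Background(), maxTimeout)
	defer cancel()

	if err := callPeer(ctx); errors.Is(err, context.DeadlineExceeded) {
		fmt.Println("peer call timed out instead of hanging forever")
	}
}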

View File

@ -234,7 +234,7 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr
// Call the ServeHTTP to execute the handler.
apiRouter.ServeHTTP(rec, req)
if rec.Code != test.expectedStatus {
t.Fatalf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`", i+1, instanceType, test.expectedStatus, rec.Code)
t.Fatalf("Test %d: %s: Expected the response status to be `%d`, but instead found `%d`, Resp: %s", i+1, instanceType, test.expectedStatus, rec.Code, rec.Body)
}
}

View File

@ -103,18 +103,6 @@ var ServerFlags = []cli.Flag{
EnvVar: "MINIO_READ_HEADER_TIMEOUT",
Hidden: true,
},
cli.DurationFlag{
Name: "conn-client-read-deadline",
Usage: "custom connection READ deadline for incoming requests",
Hidden: true,
EnvVar: "MINIO_CONN_CLIENT_READ_DEADLINE",
},
cli.DurationFlag{
Name: "conn-client-write-deadline",
Usage: "custom connection WRITE deadline for outgoing requests",
Hidden: true,
EnvVar: "MINIO_CONN_CLIENT_WRITE_DEADLINE",
},
cli.DurationFlag{
Name: "conn-read-deadline",
Usage: "custom connection READ deadline",
@ -441,8 +429,9 @@ func serverHandleCmdArgs(ctxt serverCtxt) {
globalTCPOptions = xhttp.TCPOptions{
UserTimeout: int(ctxt.UserTimeout.Milliseconds()),
ClientReadTimeout: ctxt.ConnClientReadDeadline,
ClientWriteTimeout: ctxt.ConnClientWriteDeadline,
ClientReadTimeout: ctxt.ConnReadDeadline,
ClientWriteTimeout: ctxt.ConnWriteDeadline,
DriveOPTimeout: globalDriveConfig.GetOPTimeout,
Interface: ctxt.Interface,
SendBufSize: ctxt.SendBufSize,
RecvBufSize: ctxt.RecvBufSize,
@ -844,6 +833,11 @@ func serverMain(ctx *cli.Context) {
}
}
var getCert certs.GetCertificateFunc
if globalTLSCerts != nil {
getCert = globalTLSCerts.GetCertificate
}
// Check for updates in non-blocking manner.
go func() {
if !globalServerCtxt.Quiet && !globalInplaceUpdateDisabled {
@ -870,12 +864,7 @@ func serverMain(ctx *cli.Context) {
warnings = append(warnings, color.YellowBold("- Detected GOMAXPROCS(%d) < NumCPU(%d), please make sure to provide all PROCS to MinIO for optimal performance", maxProcs, cpuProcs))
}
var getCert certs.GetCertificateFunc
if globalTLSCerts != nil {
getCert = globalTLSCerts.GetCertificate
}
// Initialize gridn
// Initialize grid
bootstrapTrace("initGrid", func() {
logger.FatalIf(initGlobalGrid(GlobalContext, globalEndpoints), "Unable to configure server grid RPC services")
})
@ -937,9 +926,6 @@ func serverMain(ctx *cli.Context) {
}
})
xhttp.SetDeploymentID(globalDeploymentID())
xhttp.SetMinIOVersion(Version)
for _, n := range globalNodes {
nodeName := n.Host
if n.IsLocal {

View File

@ -25,15 +25,18 @@ import (
"github.com/minio/pkg/v2/env"
)
// Drive specific timeout environment variables
const (
envMaxDriveTimeout = "MINIO_DRIVE_MAX_TIMEOUT"
EnvMaxDriveTimeout = "MINIO_DRIVE_MAX_TIMEOUT"
EnvMaxDriveTimeoutLegacy = "_MINIO_DRIVE_MAX_TIMEOUT"
EnvMaxDiskTimeoutLegacy = "_MINIO_DISK_MAX_TIMEOUT"
)
// DefaultKVS - default KVS for drive
var DefaultKVS = config.KVS{
config.KV{
Key: MaxTimeout,
Value: "",
Value: "30s",
},
}
@ -53,8 +56,13 @@ func (c *Config) Update(new Config) error {
return nil
}
// GetMaxTimeout - returns the max timeout value.
// GetMaxTimeout - returns the per call drive operation timeout
func (c *Config) GetMaxTimeout() time.Duration {
return c.GetOPTimeout()
}
// GetOPTimeout - returns the per call drive operation timeout
func (c *Config) GetOPTimeout() time.Duration {
configLk.RLock()
defer configLk.RUnlock()
@ -71,14 +79,7 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
}
// if not set. Get default value from environment
d := env.Get(envMaxDriveTimeout, kvs.GetWithDefault(MaxTimeout, DefaultKVS))
if d == "" {
d = env.Get("_MINIO_DRIVE_MAX_TIMEOUT", "")
if d == "" {
d = env.Get("_MINIO_DISK_MAX_TIMEOUT", "")
}
}
d := env.Get(EnvMaxDriveTimeout, env.Get(EnvMaxDriveTimeoutLegacy, env.Get(EnvMaxDiskTimeoutLegacy, kvs.GetWithDefault(MaxTimeout, DefaultKVS))))
dur, _ := time.ParseDuration(d)
if dur < time.Second {
cfg.MaxTimeout = 30 * time.Second
@ -91,10 +92,7 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
func getMaxTimeout(t time.Duration) time.Duration {
if t < time.Second {
// get default value
d := env.Get("_MINIO_DRIVE_MAX_TIMEOUT", "")
if d == "" {
d = env.Get("_MINIO_DISK_MAX_TIMEOUT", "")
}
d := env.Get(EnvMaxDriveTimeoutLegacy, env.Get(EnvMaxDiskTimeoutLegacy, ""))
dur, _ := time.ParseDuration(d)
if dur < time.Second {
return 30 * time.Second
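
The lookup above replaces the nested fallbacks with a single env.Get chain. A standalone sketch of the resolution order it implements, using only the standard library; resolveDriveTimeout is illustrative and ignores the live config subsystem:

package main

import (
	"fmt"
	"os"
	"time"
)

// resolveDriveTimeout mirrors the precedence used in LookupConfig above:
// MINIO_DRIVE_MAX_TIMEOUT, then the legacy underscore-prefixed variables,
// then the configured default, with anything below one second clamped to 30s.
func resolveDriveTimeout(configured string) time.Duration {
	candidates := []string{
		os.Getenv("MINIO_DRIVE_MAX_TIMEOUT"),
		os.Getenv("_MINIO_DRIVE_MAX_TIMEOUT"),
		os.Getenv("_MINIO_DISK_MAX_TIMEOUT"),
		configured,
	}
	var d string
	for _, c := range candidates {
		if c != "" {
			d = c
			break
		}
	}
	dur, _ := time.ParseDuration(d)
	if dur < time.Second {
		return 30 * time.Second
	}
	return dur
}

func main() {
	fmt.Println(resolveDriveTimeout("30s")) // 30s unless an env override is set
}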

View File

@ -22,12 +22,13 @@ import "github.com/minio/minio/internal/config"
var (
// MaxTimeout is the max timeout for drive
MaxTimeout = "max_timeout"
// HelpDrive is help for drive
HelpDrive = config.HelpKVS{
config.HelpKV{
Key: MaxTimeout,
Type: "string",
Description: "set per call max_timeout for the drive, defaults to 2 minutes",
Description: "set per call max_timeout for the drive, defaults to 30 seconds",
Optional: true,
},
}

View File

@ -68,7 +68,7 @@ func (c Config) submitPost(r *http.Request) (string, error) {
configLock.RLock()
r.Header.Set(xhttp.SubnetAPIKey, c.APIKey)
configLock.RUnlock()
r.Header.Set(xhttp.MinioDeploymentID, xhttp.GlobalDeploymentID)
r.Header.Set(xhttp.MinioDeploymentID, xhttp.GlobalDeploymentID.Load().(string))
client := &http.Client{
Timeout: 10 * time.Second,

View File

@ -33,13 +33,13 @@ type DeadlineConn struct {
// Sets read deadline
func (c *DeadlineConn) setReadDeadline() {
if c.readDeadline > 0 {
c.SetReadDeadline(time.Now().UTC().Add(c.readDeadline))
c.Conn.SetReadDeadline(time.Now().UTC().Add(c.readDeadline))
}
}
func (c *DeadlineConn) setWriteDeadline() {
if c.writeDeadline > 0 {
c.SetWriteDeadline(time.Now().UTC().Add(c.writeDeadline))
c.Conn.SetWriteDeadline(time.Now().UTC().Add(c.writeDeadline))
}
}
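
internal/deadlineconn wraps a net.Conn so each I/O call re-arms an absolute deadline on the underlying connection; the change above simply targets the embedded Conn explicitly. A generic, self-contained illustration of the idea (not the MinIO implementation):

package main

import (
	"fmt"
	"net"
	"time"
)

// readDeadlineConn refreshes the read deadline on the wrapped Conn before
// every Read, so a peer that goes silent unblocks the reader after
// readDeadline rather than hanging forever.
type readDeadlineConn struct {
	net.Conn
	readDeadline time.Duration
}

func (c *readDeadlineConn) Read(b []byte) (int, error) {
	if c.readDeadline > 0 {
		// Address the embedded Conn directly, as in the diff above.
		if err := c.Conn.SetReadDeadline(time.Now().UTC().Add(c.readDeadline)); err != nil {
			return 0, err
		}
	}
	return c.Conn.Read(b)
}

func main() {
	a, b := net.Pipe()
	defer a.Close()
	defer b.Close()

	wrapped := &readDeadlineConn{Conn: a, readDeadline: 100 * time.Millisecond}
	buf := make([]byte, 1)
	_, err := wrapped.Read(buf) // nothing is ever written on b
	fmt.Println(err)            // deadline error instead of a permanent hang
}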

View File

@ -42,6 +42,7 @@ import (
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/pubsub"
xnet "github.com/minio/pkg/v2/net"
"github.com/puzpuzpuz/xsync/v3"
"github.com/tinylib/msgp/msgp"
"github.com/zeebo/xxh3"
@ -615,10 +616,6 @@ func (c *Connection) sendMsg(conn net.Conn, msg message, payload msgp.MarshalSiz
if c.outgoingBytes != nil {
c.outgoingBytes(int64(len(dst)))
}
err = conn.SetWriteDeadline(time.Now().Add(connWriteTimeout))
if err != nil {
return err
}
return wsutil.WriteMessage(conn, c.side, ws.OpBinary, dst)
}
@ -677,9 +674,8 @@ func (c *Connection) connect() {
return
}
if gotState != StateConnecting {
// Don't print error on first attempt,
// and after that only once per hour.
gridLogOnceIf(c.ctx, fmt.Errorf("grid: %s connecting to %s: %w (%T) Sleeping %v (%v)", c.Local, toDial, err, err, sleep, gotState), toDial)
// Don't print error on first attempt, and after that only once per hour.
gridLogOnceIf(c.ctx, fmt.Errorf("grid: %s re-connecting to %s: %w (%T) Sleeping %v (%v)", c.Local, toDial, err, err, sleep, gotState), toDial)
}
c.updateState(StateConnectionError)
time.Sleep(sleep)
@ -972,7 +968,9 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
msg, err = readDataInto(msg, conn, c.side, ws.OpBinary)
if err != nil {
cancel(ErrDisconnected)
gridLogIfNot(ctx, fmt.Errorf("ws read: %w", err), net.ErrClosed, io.EOF)
if !xnet.IsNetworkOrHostDown(err, true) {
gridLogIfNot(ctx, fmt.Errorf("ws read: %w", err), net.ErrClosed, io.EOF)
}
return
}
if c.incomingBytes != nil {
@ -983,7 +981,9 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
var m message
subID, remain, err := m.parse(msg)
if err != nil {
gridLogIf(ctx, fmt.Errorf("ws parse package: %w", err))
if !xnet.IsNetworkOrHostDown(err, true) {
gridLogIf(ctx, fmt.Errorf("ws parse package: %w", err))
}
cancel(ErrDisconnected)
return
}
@ -1004,7 +1004,9 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
var next []byte
next, remain, err = msgp.ReadBytesZC(remain)
if err != nil {
gridLogIf(ctx, fmt.Errorf("ws read merged: %w", err))
if !xnet.IsNetworkOrHostDown(err, true) {
gridLogIf(ctx, fmt.Errorf("ws read merged: %w", err))
}
cancel(ErrDisconnected)
return
}
@ -1012,7 +1014,9 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
m.Payload = nil
subID, _, err = m.parse(next)
if err != nil {
gridLogIf(ctx, fmt.Errorf("ws parse merged: %w", err))
if !xnet.IsNetworkOrHostDown(err, true) {
gridLogIf(ctx, fmt.Errorf("ws parse merged: %w", err))
}
cancel(ErrDisconnected)
return
}
@ -1119,18 +1123,17 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
buf.Reset()
err := wsw.writeMessage(&buf, c.side, ws.OpBinary, toSend)
if err != nil {
gridLogIf(ctx, fmt.Errorf("ws writeMessage: %w", err))
if !xnet.IsNetworkOrHostDown(err, true) {
gridLogIf(ctx, fmt.Errorf("ws writeMessage: %w", err))
}
return
}
PutByteBuffer(toSend)
err = conn.SetWriteDeadline(time.Now().Add(connWriteTimeout))
if err != nil {
gridLogIf(ctx, fmt.Errorf("conn.SetWriteDeadline: %w", err))
return
}
_, err = buf.WriteTo(conn)
if err != nil {
gridLogIf(ctx, fmt.Errorf("ws write: %w", err))
if !xnet.IsNetworkOrHostDown(err, true) {
gridLogIf(ctx, fmt.Errorf("ws write: %w", err))
}
return
}
continue
@ -1163,18 +1166,17 @@ func (c *Connection) handleMessages(ctx context.Context, conn net.Conn) {
buf.Reset()
err = wsw.writeMessage(&buf, c.side, ws.OpBinary, toSend)
if err != nil {
gridLogIf(ctx, fmt.Errorf("ws writeMessage: %w", err))
if !xnet.IsNetworkOrHostDown(err, true) {
gridLogIf(ctx, fmt.Errorf("ws writeMessage: %w", err))
}
return
}
// buf is our local buffer, so we can reuse it.
err = conn.SetWriteDeadline(time.Now().Add(connWriteTimeout))
if err != nil {
gridLogIf(ctx, fmt.Errorf("conn.SetWriteDeadline: %w", err))
return
}
_, err = buf.WriteTo(conn)
if err != nil {
gridLogIf(ctx, fmt.Errorf("ws write: %w", err))
if !xnet.IsNetworkOrHostDown(err, true) {
gridLogIf(ctx, fmt.Errorf("ws write: %w", err))
}
return
}
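
Several hunks above wrap grid log calls in !xnet.IsNetworkOrHostDown(err, true), so read/write/parse failures caused by a down or hanging peer are no longer logged now that deadlines make them routine. A minimal sketch of that guard pulled into a helper; logUnlessPeerDown is illustrative, not a MinIO function:

package main

import (
	"fmt"
	"log"

	xnet "github.com/minio/pkg/v2/net"
)

// logUnlessPeerDown surfaces an error only when it is not reported as a
// network/host-down condition, which is expected when a node hangs.
func logUnlessPeerDown(err error) {
	if err == nil || xnet.IsNetworkOrHostDown(err, true) {
		return // avoid log noise for hung or unreachable peers
	}
	log.Printf("grid: %v", err)
}

func main() {
	logUnlessPeerDown(nil)
	logUnlessPeerDown(fmt.Errorf("ws parse package: unexpected framing"))
}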

View File

@ -26,6 +26,7 @@ import (
"syscall"
"time"
"github.com/minio/minio/internal/deadlineconn"
"golang.org/x/sys/unix"
)
@ -39,10 +40,12 @@ func setTCPParametersFn(opts TCPOptions) func(network, address string, c syscall
_ = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_REUSEPORT, 1)
{
// Enable big buffers
// Enable custom socket send/recv buffers.
if opts.SendBufSize > 0 {
_ = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_SNDBUF, opts.SendBufSize)
}
if opts.RecvBufSize > 0 {
_ = unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RCVBUF, opts.RecvBufSize)
}
@ -82,7 +85,9 @@ func setTCPParametersFn(opts TCPOptions) func(network, address string, c syscall
// https://blog.cloudflare.com/when-tcp-sockets-refuse-to-die/
// This is a sensitive configuration, it is better to set it to high values, > 60 secs since it can
// affect clients reading data with a very slow pace (disappropriate with socket buffer sizes)
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, opts.UserTimeout)
if opts.UserTimeout > 0 {
_ = syscall.SetsockoptInt(fd, syscall.IPPROTO_TCP, unix.TCP_USER_TIMEOUT, opts.UserTimeout)
}
if opts.Interface != "" {
if h, _, err := net.SplitHostPort(address); err == nil {
@ -109,6 +114,16 @@ func NewInternodeDialContext(dialTimeout time.Duration, opts TCPOptions) DialCon
Timeout: dialTimeout,
Control: setTCPParametersFn(opts),
}
return dialer.DialContext(ctx, network, addr)
conn, err := dialer.DialContext(ctx, network, addr)
if err != nil {
return nil, err
}
if opts.DriveOPTimeout != nil {
// Read deadlines are sufficient for now as per various
// scenarios of hung node detection, we may add Write deadlines
// if needed later on.
return deadlineconn.New(conn).WithReadDeadline(opts.DriveOPTimeout()), nil
}
return conn, nil
}
}

View File

@ -128,12 +128,23 @@ type TCPOptions struct {
// When the net.Conn is idle for more than WriteTimeout duration, we close the connection on the client proactively.
ClientWriteTimeout time.Duration
// When the net.Conn is a remote drive this value is honored, we close the connection to remote peer proactively.
DriveOPTimeout func() time.Duration
SendBufSize int // SO_SNDBUF size for the socket connection, NOTE: this sets server and client connection
RecvBufSize int // SO_RECVBUF size for the socket connection, NOTE: this sets server and client connection
Interface string // This is a VRF device passed via `--interface` flag
Trace func(msg string) // Trace when starting.
}
// ForWebsocket returns TCPOptions valid for websocket net.Conn
func (t TCPOptions) ForWebsocket() TCPOptions {
return TCPOptions{
UserTimeout: t.UserTimeout,
Interface: t.Interface,
}
}
// newHTTPListener - creates new httpListener object which is interface compatible to net.Listener.
// httpListener is capable to
// * listen to multiple addresses

View File

@ -36,10 +36,10 @@ import (
var (
// GlobalMinIOVersion - is sent in the header to all http targets
GlobalMinIOVersion string
GlobalMinIOVersion atomic.Value
// GlobalDeploymentID - is sent in the header to all http targets
GlobalDeploymentID string
GlobalDeploymentID atomic.Value
)
const (
@ -271,10 +271,10 @@ func NewServer(addrs []string) *Server {
// SetMinIOVersion -- MinIO version from the main package is set here
func SetMinIOVersion(version string) {
GlobalMinIOVersion = version
GlobalMinIOVersion.Store(version)
}
// SetDeploymentID -- Deployment Id from the main package is set here
func SetDeploymentID(deploymentID string) {
GlobalDeploymentID = deploymentID
GlobalDeploymentID.Store(deploymentID)
}
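
GlobalMinIOVersion and GlobalDeploymentID switch from plain strings to atomic.Value, so the header and audit paths further down can Load them concurrently with the one-time Store at startup without a data race. A self-contained sketch of the pattern; the names below are local to the example:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var deploymentID atomic.Value // always stores a string

func setDeploymentID(id string) { deploymentID.Store(id) }

func getDeploymentID() string {
	if v, ok := deploymentID.Load().(string); ok {
		return v
	}
	return "" // Store has not run yet
}

func main() {
	setDeploymentID("example-deployment-id")

	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println(getDeploymentID()) // safe concurrent reads
		}()
	}
	wg.Wait()
}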

View File

@ -97,13 +97,6 @@ type ioret[V any] struct {
err error
}
// DeadlineWriter deadline writer with timeout
type DeadlineWriter struct {
io.WriteCloser
timeout time.Duration
err error
}
// WithDeadline will execute a function with a deadline and return a value of a given type.
// If the deadline/context passes before the function finishes executing,
// the zero value and the context error is returned.
@ -145,21 +138,17 @@ func NewDeadlineWorker(timeout time.Duration) *DeadlineWorker {
// channel so that the work function can attempt to exit gracefully.
// Multiple calls to Run will run independently of each other.
func (d *DeadlineWorker) Run(work func() error) error {
c := make(chan ioret[struct{}], 1)
t := time.NewTimer(d.timeout)
go func() {
c <- ioret[struct{}]{val: struct{}{}, err: work()}
}()
_, err := WithDeadline[struct{}](context.Background(), d.timeout, func(ctx context.Context) (struct{}, error) {
return struct{}{}, work()
})
return err
}
select {
case r := <-c:
if !t.Stop() {
<-t.C
}
return r.err
case <-t.C:
return context.DeadlineExceeded
}
// DeadlineWriter deadline writer with timeout
type DeadlineWriter struct {
io.WriteCloser
timeout time.Duration
err error
}
// NewDeadlineWriter wraps a writer to make it respect given deadline
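
DeadlineWorker.Run is reduced to a call into the package's generic WithDeadline helper. A self-contained sketch of how such a helper can work, matching the call shape used above; withDeadline and the local ioret type are a re-implementation under that assumption, not the MinIO code:

package main

import (
	"context"
	"fmt"
	"time"
)

type ioret[V any] struct {
	val V
	err error
}

// withDeadline runs work in a goroutine and returns either its result or
// context.DeadlineExceeded, whichever happens first.
func withDeadline[V any](ctx context.Context, timeout time.Duration, work func(context.Context) (V, error)) (V, error) {
	ctx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()

	ch := make(chan ioret[V], 1)
	go func() {
		v, err := work(ctx)
		ch <- ioret[V]{val: v, err: err}
	}()

	select {
	case r := <-ch:
		return r.val, r.err
	case <-ctx.Done():
		var zero V
		return zero, context.DeadlineExceeded
	}
}

func main() {
	_, err := withDeadline[struct{}](context.Background(), 100*time.Millisecond,
		func(context.Context) (struct{}, error) {
			time.Sleep(time.Second) // a hung drive operation
			return struct{}{}, nil
		})
	fmt.Println(err) // context deadline exceeded
}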

View File

@ -41,6 +41,26 @@ func (w *sleepWriter) Close() error {
return nil
}
func TestDeadlineWorker(t *testing.T) {
work := NewDeadlineWorker(500 * time.Millisecond)
err := work.Run(func() error {
time.Sleep(600 * time.Millisecond)
return nil
})
if err != context.DeadlineExceeded {
t.Error("DeadlineWorker shouldn't be successful - should return context.DeadlineExceeded")
}
err = work.Run(func() error {
time.Sleep(450 * time.Millisecond)
return nil
})
if err != nil {
t.Error("DeadlineWorker should succeed")
}
}
func TestDeadlineWriter(t *testing.T) {
w := NewDeadlineWriter(&sleepWriter{timeout: 500 * time.Millisecond}, 450*time.Millisecond)
_, err := w.Write([]byte("1"))

View File

@ -51,7 +51,7 @@ func GetAuditEntry(ctx context.Context) *audit.Entry {
}
r = &audit.Entry{
Version: internalAudit.Version,
DeploymentID: xhttp.GlobalDeploymentID,
DeploymentID: xhttp.GlobalDeploymentID.Load().(string),
Time: time.Now().UTC(),
}
return r
@ -75,7 +75,7 @@ func AuditLog(ctx context.Context, w http.ResponseWriter, r *http.Request, reqCl
reqInfo.RLock()
defer reqInfo.RUnlock()
entry = internalAudit.ToEntry(w, r, reqClaims, xhttp.GlobalDeploymentID)
entry = internalAudit.ToEntry(w, r, reqClaims, xhttp.GlobalDeploymentID.Load().(string))
// indicates all requests for this API call are inbound
entry.Trigger = "incoming"

View File

@ -331,7 +331,7 @@ func buildLogEntry(ctx context.Context, subsystem, message string, trace []strin
// Get the cause for the Error
deploymentID := req.DeploymentID
if req.DeploymentID == "" {
deploymentID = xhttp.GlobalDeploymentID
deploymentID = xhttp.GlobalDeploymentID.Load().(string)
}
objects := make([]log.ObjectVersion, 0, len(req.Objects))

View File

@ -224,8 +224,8 @@ func (h *Target) send(ctx context.Context, payload []byte, payloadType string, t
if payloadType != "" {
req.Header.Set(xhttp.ContentType, payloadType)
}
req.Header.Set(xhttp.MinIOVersion, xhttp.GlobalMinIOVersion)
req.Header.Set(xhttp.MinioDeploymentID, xhttp.GlobalDeploymentID)
req.Header.Set(xhttp.MinIOVersion, xhttp.GlobalMinIOVersion.Load().(string))
req.Header.Set(xhttp.MinioDeploymentID, xhttp.GlobalDeploymentID.Load().(string))
// Set user-agent to indicate MinIO release
// version to the configured log endpoint