avoid using 10MiB EC buffers in maxAPI calculations (#19665)

the computed max requests per node is more conservative than
necessary, causing premature serialization of calls; avoid counting
the 10MiB v1 erasure block buffers for newer deployments.
Harshavardhana 2024-05-03 13:08:20 -07:00 committed by GitHub
parent 4afb59e63f
commit da3e7747ca
5 changed files with 38 additions and 23 deletions
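
For a sense of scale, here is a self-contained sketch of the two sizing formulas described in the apiConfig.init hunks below. The block-size constants follow the comments in the diff ((1MiB+32KiB) per drive, 10MiB v1 and 1MiB v2 erasure blocks); the 16-drive set and 24GiB of available memory (75% of 32GiB, per availableMemory()) are made-up example inputs, not values from this commit.

package main

import "fmt"

const (
	miB = 1 << 20
	kiB = 1 << 10

	// Assumed values, taken from the comments in the diff below:
	// the per-drive buffer is LargeBlock+SmallBlock = 1MiB+32KiB, and
	// the default erasure block sizes are 10MiB (v1) and 1MiB (v2).
	largeBlock  = 1 * miB
	smallBlock  = 32 * kiB
	blockSizeV1 = 10 * miB
	blockSizeV2 = 1 * miB
)

func main() {
	// Hypothetical inputs: a 16-drive erasure set, and 24GiB of
	// "available" memory (75% of 32GiB of RAM).
	maxSetDrives := 16
	maxMem := uint64(24) << 30

	blockSize := largeBlock + smallBlock

	// Legacy (CRCMOD) sizing: charge every request for two 10MiB v1
	// blocks on top of the two 1MiB v2 blocks.
	legacy := maxMem / uint64(maxSetDrives*blockSize+2*blockSizeV1+2*blockSizeV2)

	// New sizing for modern deployments: v1 buffers are never
	// allocated, so only the v2 blocks are charged.
	current := maxMem / uint64(maxSetDrives*blockSize+2*blockSizeV2)

	fmt.Println(legacy, current) // 638 1328
}

Under those assumptions the per-node cap roughly doubles (638 to 1328), which is the intent of the change: modern deployments never allocate the 10MiB v1 buffers, so charging requests for them only made the limit needlessly conservative and serialized calls earlier than necessary.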


@@ -575,7 +575,7 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
 			configLogIf(ctx, fmt.Errorf("Invalid api configuration: %w", err))
 		}
-		globalAPIConfig.init(apiConfig, setDriveCounts)
+		globalAPIConfig.init(apiConfig, setDriveCounts, objAPI.Legacy())
 		autoGenerateRootCredentials() // Generate the KMS root credentials here since we don't know whether API root access is disabled until now.
 		setRemoteInstanceTransport(NewHTTPTransportWithTimeout(apiConfig.RemoteTransportDeadline))
 	case config.CompressionSubSys:


@@ -1,4 +1,4 @@
-// Copyright (c) 2015-2021 MinIO, Inc.
+// Copyright (c) 2015-2024 MinIO, Inc.
 //
 // This file is part of MinIO Object Storage stack
 //
@@ -644,6 +644,15 @@ func (z *erasureServerPools) Shutdown(ctx context.Context) error {
 	return nil
 }

+// Legacy returns 'true' if distribution algo is CRCMOD
+func (z *erasureServerPools) Legacy() (ok bool) {
+	ok = true
+	for _, set := range z.serverPools {
+		ok = ok && set.Legacy()
+	}
+	return ok
+}
+
 func (z *erasureServerPools) BackendInfo() (b madmin.BackendInfo) {
 	b.Type = madmin.Erasure


@@ -194,6 +194,11 @@ func findDiskIndex(refFormat, format *formatErasureV3) (int, int, error) {
 	return -1, -1, fmt.Errorf("DriveID: %s not found", format.Erasure.This)
 }

+// Legacy returns 'true' if distribution algo is CRCMOD
+func (s *erasureSets) Legacy() (ok bool) {
+	return s.distributionAlgo == formatErasureVersionV2DistributionAlgoV1
+}
+
 // connectDisks - attempt to connect all the endpoints, loads format
 // and re-arranges the disks in proper position.
 func (s *erasureSets) connectDisks() {
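
Read together with the pool-level method above, the legacy check composes as an all-of: a set is legacy only if it was formatted with the v1 (CRCMOD) distribution algorithm, and the deployment is legacy only if every set in every pool is. A minimal sketch of the resulting semantics (the function name is illustrative, not from the codebase):

// legacyDeployment mirrors the AND-fold in erasureServerPools.Legacy:
// a single pool formatted with the modern distribution algorithm is
// enough to opt the whole deployment into the smaller memory estimate.
func legacyDeployment(poolLegacy []bool) bool {
	ok := true
	for _, l := range poolLegacy {
		ok = ok && l
	}
	return ok
}

For example, legacyDeployment([]bool{true, false}) is false, so a mixed deployment is sized with the v2-only formula.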


@@ -22,6 +22,7 @@ import (
 	"net/http"
 	"os"
 	"runtime"
+	"slices"
 	"strconv"
 	"strings"
 	"sync"
@@ -38,13 +39,11 @@ import (
 type apiConfig struct {
 	mu sync.RWMutex

-	requestsDeadline time.Duration
-	requestsPool     chan struct{}
-	clusterDeadline  time.Duration
-	listQuorum       string
-	corsAllowOrigins []string
-	// total drives per erasure set across pools.
-	totalDriveCount int
+	requestsDeadline time.Duration
+	requestsPool     chan struct{}
+	clusterDeadline  time.Duration
+	listQuorum       string
+	corsAllowOrigins []string
 	replicationPriority   string
 	replicationMaxWorkers int
 	transitionWorkers     int
@@ -110,7 +109,7 @@ func availableMemory() (available uint64) {
 	return
 }

-func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
+func (t *apiConfig) init(cfg api.Config, setDriveCounts []int, legacy bool) {
 	t.mu.Lock()
 	defer t.mu.Unlock()
@@ -125,27 +124,24 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
 	}
 	t.corsAllowOrigins = corsAllowOrigin

-	maxSetDrives := 0
-	for _, setDriveCount := range setDriveCounts {
-		t.totalDriveCount += setDriveCount
-		if setDriveCount > maxSetDrives {
-			maxSetDrives = setDriveCount
-		}
-	}
-
 	var apiRequestsMaxPerNode int
 	if cfg.RequestsMax <= 0 {
+		maxSetDrives := slices.Max(setDriveCounts)
+
 		// Returns 75% of max memory allowed
 		maxMem := availableMemory()

 		// max requests per node is calculated as
 		// total_ram / ram_per_request
-		// ram_per_request is (2MiB+128KiB) * driveCount \
-		//    + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2)
 		blockSize := xioutil.LargeBlock + xioutil.SmallBlock
-		apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV1*2+blockSizeV2*2)))
-		if globalIsDistErasure {
-			logger.Info("Automatically configured API requests per node based on available memory on the system: %d", apiRequestsMaxPerNode)
+		if legacy {
+			// ram_per_request is (1MiB+32KiB) * driveCount \
+			//    + 2 * 10MiB (default erasure block size v1) + 2 * 1MiB (default erasure block size v2)
+			apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV1*2+blockSizeV2*2)))
+		} else {
+			// ram_per_request is (1MiB+32KiB) * driveCount \
+			//    + 2 * 1MiB (default erasure block size v2)
+			apiRequestsMaxPerNode = int(maxMem / uint64(maxSetDrives*blockSize+int(blockSizeV2*2)))
+		}
 	} else {
 		apiRequestsMaxPerNode = cfg.RequestsMax
@@ -154,6 +150,10 @@ func (t *apiConfig) init(cfg api.Config, setDriveCounts []int) {
 		}
 	}

+	if globalIsDistErasure {
+		logger.Info("Configured max API requests per node based on available memory: %d", apiRequestsMaxPerNode)
+	}
+
 	if cap(t.requestsPool) != apiRequestsMaxPerNode {
 		// Only replace if needed.
 		// Existing requests will use the previous limit,


@@ -244,6 +244,7 @@ type ObjectLayer interface {
 	Shutdown(context.Context) error
 	NSScanner(ctx context.Context, updates chan<- DataUsageInfo, wantCycle uint32, scanMode madmin.HealScanMode) error
 	BackendInfo() madmin.BackendInfo
+	Legacy() bool // Only returns true for deployments which use CRCMOD as its object distribution algorithm.
 	StorageInfo(ctx context.Context, metrics bool) StorageInfo
 	LocalStorageInfo(ctx context.Context, metrics bool) StorageInfo
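
Since Legacy() is now part of the ObjectLayer interface, every implementation, including test doubles, must provide it. A minimal hypothetical stub (not part of this commit) that satisfies the new contract:

// stubObjectLayer is a hypothetical test double; embedding the
// interface satisfies all other methods without implementing them.
type stubObjectLayer struct {
	ObjectLayer
}

// Legacy models a modern (non-CRCMOD) deployment, which selects the
// smaller per-request memory estimate in apiConfig.init.
func (s stubObjectLayer) Legacy() bool { return false }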