Fix ilm config at startup (#19189)

Remove the api.expiration_workers config setting, which was inadvertently left behind. Per a review comment on

https://github.com/minio/minio/pull/18926, expiration_workers can be configured via ilm.expiration_workers.
This commit is contained in:
Krishnan Parthasarathi 2024-03-04 18:50:24 -08:00 committed by GitHub
parent e385f54185
commit b69bcdcdc4
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 64 additions and 36 deletions

View File

@ -1,4 +1,4 @@
// Copyright (c) 2015-2021 MinIO, Inc.
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
@ -374,7 +374,7 @@ func (es *expiryState) Worker(input <-chan expiryOp) {
}
func initBackgroundExpiry(ctx context.Context, objectAPI ObjectLayer) {
globalExpiryState = newExpiryState(ctx, objectAPI, globalAPIConfig.getExpiryWorkers())
globalExpiryState = newExpiryState(ctx, objectAPI, globalILMConfig.getExpirationWorkers())
}
// newerNoncurrentTask encapsulates arguments required by worker to expire objects
@ -438,6 +438,10 @@ func newTransitionState(ctx context.Context) *transitionState {
// of transition workers.
func (t *transitionState) Init(objAPI ObjectLayer) {
n := globalAPIConfig.getTransitionWorkers()
// Prefer ilm.transition_workers over now deprecated api.transition_workers
if tw := globalILMConfig.getTransitionWorkers(); tw > 0 {
n = tw
}
t.mu.Lock()
defer t.mu.Unlock()

View File

@ -725,15 +725,11 @@ func applyDynamicConfigForSubSys(ctx context.Context, objAPI ObjectLayer, s conf
}
if globalTransitionState != nil {
globalTransitionState.UpdateWorkers(ilmCfg.TransitionWorkers)
} else {
logger.LogIf(ctx, fmt.Errorf("ILM transition subsystem not initialized"))
}
if globalExpiryState != nil {
globalExpiryState.ResizeWorkers(ilmCfg.ExpirationWorkers)
} else {
logger.LogIf(ctx, fmt.Errorf("ILM expiration subsystem not initialized"))
}
globalILMConfig.update(ilmCfg)
}
globalServerConfigMu.Lock()
defer globalServerConfigMu.Unlock()

View File

@ -47,7 +47,6 @@ type apiConfig struct {
replicationPriority string
replicationMaxWorkers int
transitionWorkers int
expiryWorkers int
staleUploadsExpiry time.Duration
staleUploadsCleanupInterval time.Duration
@ -368,13 +367,6 @@ func (t *apiConfig) getReplicationOpts() replicationPoolOpts {
}
}
// getExpiryWorkers returns the expiry worker count stored on apiConfig,
// under a read lock. NOTE(review): this commit removes the api-level
// setting; the value now comes from ilm.expiration_workers instead.
func (t *apiConfig) getExpiryWorkers() int {
	t.mu.RLock()
	defer t.mu.RUnlock()
	return t.expiryWorkers
}
func (t *apiConfig) getTransitionWorkers() int {
t.mu.RLock()
defer t.mu.RUnlock()

57
cmd/ilm-config.go Normal file
View File

@ -0,0 +1,57 @@
// Copyright (c) 2015-2024 MinIO, Inc.
//
// This file is part of MinIO Object Storage stack
//
// This program is free software: you can redistribute it and/or modify
// it under the terms of the GNU Affero General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Affero General Public License for more details.
//
// You should have received a copy of the GNU Affero General Public License
// along with this program. If not, see <http://www.gnu.org/licenses/>.
package cmd
import (
"sync"
"github.com/minio/minio/internal/config/ilm"
)
// globalILMConfig holds the process-wide ILM configuration, seeded with
// defaults of 100 expiration workers and 100 transition workers. It is
// replaced wholesale via ilmConfig.update when the ilm config subsystem
// is (re)loaded.
var globalILMConfig = ilmConfig{
	cfg: ilm.Config{
		ExpirationWorkers: 100,
		TransitionWorkers: 100,
	},
}
// ilmConfig pairs the dynamic ILM configuration with a read-write mutex,
// since worker pools read the values while config reloads overwrite them
// concurrently. Must not be copied once in use (contains a sync.RWMutex);
// all methods use pointer receivers.
type ilmConfig struct {
	mu  sync.RWMutex
	cfg ilm.Config
}
// getExpirationWorkers returns the currently configured number of ILM
// expiration workers, read under the shared lock.
func (c *ilmConfig) getExpirationWorkers() int {
	c.mu.RLock()
	n := c.cfg.ExpirationWorkers
	c.mu.RUnlock()
	return n
}
// getTransitionWorkers returns the currently configured number of ILM
// transition workers, read under the shared lock.
func (c *ilmConfig) getTransitionWorkers() int {
	c.mu.RLock()
	n := c.cfg.TransitionWorkers
	c.mu.RUnlock()
	return n
}
// update replaces the stored ILM configuration with cfg while holding
// the write lock, so in-flight readers never observe a partial update.
func (c *ilmConfig) update(cfg ilm.Config) {
	c.mu.Lock()
	c.cfg = cfg
	c.mu.Unlock()
}

View File

@ -41,7 +41,6 @@ const (
apiReplicationMaxWorkers = "replication_max_workers"
apiTransitionWorkers = "transition_workers"
apiExpiryWorkers = "expiry_workers"
apiStaleUploadsCleanupInterval = "stale_uploads_cleanup_interval"
apiStaleUploadsExpiry = "stale_uploads_expiry"
apiDeleteCleanupInterval = "delete_cleanup_interval"
@ -57,7 +56,6 @@ const (
EnvAPICorsAllowOrigin = "MINIO_API_CORS_ALLOW_ORIGIN"
EnvAPIRemoteTransportDeadline = "MINIO_API_REMOTE_TRANSPORT_DEADLINE"
EnvAPITransitionWorkers = "MINIO_API_TRANSITION_WORKERS"
EnvAPIExpiryWorkers = "MINIO_API_EXPIRY_WORKERS"
EnvAPIListQuorum = "MINIO_API_LIST_QUORUM"
EnvAPISecureCiphers = "MINIO_API_SECURE_CIPHERS" // default config.EnableOn
EnvAPIReplicationPriority = "MINIO_API_REPLICATION_PRIORITY"
@ -119,10 +117,6 @@ var (
Key: apiTransitionWorkers,
Value: "100",
},
config.KV{
Key: apiExpiryWorkers,
Value: "100",
},
config.KV{
Key: apiStaleUploadsCleanupInterval,
Value: "6h",
@ -288,15 +282,6 @@ func LookupConfig(kvs config.KVS) (cfg Config, err error) {
}
cfg.TransitionWorkers = transitionWorkers
expiryWorkers, err := strconv.Atoi(env.Get(EnvAPIExpiryWorkers, kvs.GetWithDefault(apiExpiryWorkers, DefaultKVS)))
if err != nil {
return cfg, err
}
if expiryWorkers <= 0 || expiryWorkers > 500 {
return cfg, config.ErrInvalidExpiryWorkersValue(nil).Msg("Number of expiry workers should be between 1 and 500")
}
cfg.ExpiryWorkers = expiryWorkers
v := env.Get(EnvAPIDeleteCleanupInterval, kvs.Get(apiDeleteCleanupInterval))
if v == "" {
v = env.Get(EnvDeleteCleanupInterval, kvs.GetWithDefault(apiDeleteCleanupInterval, DefaultKVS))

View File

@ -80,12 +80,6 @@ var (
Optional: true,
Type: "number",
},
config.HelpKV{
Key: apiExpiryWorkers,
Description: `set the number of expiry workers` + defaultHelpPostfix(apiExpiryWorkers),
Optional: true,
Type: "number",
},
config.HelpKV{
Key: apiStaleUploadsExpiry,
Description: `set to expire stale multipart uploads older than this values` + defaultHelpPostfix(apiStaleUploadsExpiry),