Port ListBuckets to websockets layer & some cleanup (#19199)

Klaus Post 2024-03-08 20:08:18 +01:00 committed by GitHub
parent 650efc2e96
commit 51f62a8da3
15 changed files with 2783 additions and 184 deletions
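For orientation: the port from the old peer REST client to the grid (websocket) layer follows the same three-step pattern as the other grid handlers, condensed below from the pieces that appear later in this diff. Every identifier here (listBucketsRPC, grid.HandlerListBuckets, BucketOptions, aoBucketInfo, gm, client.gridConn) is taken from the diff itself; grid is an internal MinIO package, so treat this as an illustrative sketch rather than a standalone program.

// 1. Declare the typed RPC once: request *BucketOptions, response *grid.Array[*BucketInfo].
listBucketsRPC = grid.NewSingleHandler[*BucketOptions, *grid.Array[*BucketInfo]](
	grid.HandlerListBuckets,
	func() *BucketOptions { return &BucketOptions{} },
	aoBucketInfo.New,
)

// 2. Register the server-side implementation on the grid manager during startup.
logger.FatalIf(listBucketsRPC.Register(gm, server.ListBucketsHandler), "unable to register handler")

// 3. Call it from the remote peer client over the already-established grid connection.
bi, err := listBucketsRPC.Call(ctx, client.gridConn(), &opts)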


@@ -366,12 +366,6 @@ func errorResponseHandler(w http.ResponseWriter, r *http.Request) {
}
desc := "Do not upgrade one server at a time - please follow the recommended guidelines mentioned here https://github.com/minio/minio#upgrading-minio for your environment"
switch {
case strings.HasPrefix(r.URL.Path, peerS3Prefix):
writeErrorResponseString(r.Context(), w, APIError{
Code: "XMinioPeerS3VersionMismatch",
Description: desc,
HTTPStatusCode: http.StatusUpgradeRequired,
}, r.URL)
case strings.HasPrefix(r.URL.Path, peerRESTPrefix):
writeErrorResponseString(r.Context(), w, APIError{
Code: "XMinioPeerVersionMismatch",


@@ -30,6 +30,8 @@ import (
"github.com/minio/minio/internal/logger"
)
//go:generate msgp -file $GOFILE -io=false -tests=false -unexported=false
// BackendType - represents different backend types.
type BackendType int
@@ -187,9 +189,9 @@ type ObjectInfo struct {
Parts []ObjectPartInfo `json:"-"`
// Implements writer and reader used by CopyObject API
Writer io.WriteCloser `json:"-"`
Reader *hash.Reader `json:"-"`
PutObjReader *PutObjReader `json:"-"`
Writer io.WriteCloser `json:"-" msg:"-"`
Reader *hash.Reader `json:"-" msg:"-"`
PutObjReader *PutObjReader `json:"-" msg:"-"`
metadataOnly bool
versionOnly bool // adds a new version, only used by CopyObject
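The added msg:"-" tags mirror the existing json:"-" tags: they tell the msgp generator (enabled by the //go:generate directive above) to leave those fields out of the generated MessagePack code, since writers, readers, and hash state cannot meaningfully cross the wire. A minimal sketch of the tag semantics with a hypothetical Item type, not taken from the MinIO codebase:

//go:generate msgp -file $GOFILE -io=false -tests=false

// Item is a hypothetical example type: Name round-trips through
// MessagePack, while Body is skipped by both encoding/json and msgp.
type Item struct {
	Name string        `json:"name"`
	Body io.ReadCloser `json:"-" msg:"-"`
}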

File diff suppressed because it is too large.


@@ -32,6 +32,10 @@ import (
xioutil "github.com/minio/minio/internal/ioutil"
)
//go:generate msgp -file $GOFILE -io=false -tests=false -unexported=false
//msgp:ignore ObjectOptions TransitionOptions DeleteBucketOptions
// CheckPreconditionFn returns true if precondition check failed.
type CheckPreconditionFn func(o ObjectInfo) bool
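Alongside the field-level msg:"-" tags, the //msgp:ignore directive above excludes whole types (ObjectOptions, TransitionOptions, DeleteBucketOptions) from generation, and the -io=false -tests=false flags limit the output to byte-slice MarshalMsg/UnmarshalMsg/Msgsize methods without generated tests, which matches the new file below; this reading of the flags is my understanding of the msgp tool and worth confirming against the tinylib/msgp docs. A tiny hypothetical sketch:

//go:generate msgp -file $GOFILE -io=false -tests=false
//msgp:ignore scratchState

// scratchState is a hypothetical purely in-memory type; no MessagePack
// methods are generated for it even though it lives in the same file.
type scratchState struct {
	seen map[string]int
}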


@@ -0,0 +1,319 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// MarshalMsg implements msgp.Marshaler
func (z BucketOptions) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "Deleted"
o = append(o, 0x82, 0xa7, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64)
o = msgp.AppendBool(o, z.Deleted)
// string "Cached"
o = append(o, 0xa6, 0x43, 0x61, 0x63, 0x68, 0x65, 0x64)
o = msgp.AppendBool(o, z.Cached)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketOptions) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Deleted":
z.Deleted, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Deleted")
return
}
case "Cached":
z.Cached, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Cached")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BucketOptions) Msgsize() (s int) {
s = 1 + 8 + msgp.BoolSize + 7 + msgp.BoolSize
return
}
// MarshalMsg implements msgp.Marshaler
func (z ExpirationOptions) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "Expire"
o = append(o, 0x81, 0xa6, 0x45, 0x78, 0x70, 0x69, 0x72, 0x65)
o = msgp.AppendBool(o, z.Expire)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ExpirationOptions) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Expire":
z.Expire, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Expire")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z ExpirationOptions) Msgsize() (s int) {
s = 1 + 7 + msgp.BoolSize
return
}
// MarshalMsg implements msgp.Marshaler
func (z *MakeBucketOptions) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 5
// string "LockEnabled"
o = append(o, 0x85, 0xab, 0x4c, 0x6f, 0x63, 0x6b, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
o = msgp.AppendBool(o, z.LockEnabled)
// string "VersioningEnabled"
o = append(o, 0xb1, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x69, 0x6e, 0x67, 0x45, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64)
o = msgp.AppendBool(o, z.VersioningEnabled)
// string "ForceCreate"
o = append(o, 0xab, 0x46, 0x6f, 0x72, 0x63, 0x65, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65)
o = msgp.AppendBool(o, z.ForceCreate)
// string "CreatedAt"
o = append(o, 0xa9, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
o = msgp.AppendTime(o, z.CreatedAt)
// string "NoLock"
o = append(o, 0xa6, 0x4e, 0x6f, 0x4c, 0x6f, 0x63, 0x6b)
o = msgp.AppendBool(o, z.NoLock)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MakeBucketOptions) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "LockEnabled":
z.LockEnabled, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LockEnabled")
return
}
case "VersioningEnabled":
z.VersioningEnabled, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "VersioningEnabled")
return
}
case "ForceCreate":
z.ForceCreate, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ForceCreate")
return
}
case "CreatedAt":
z.CreatedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "CreatedAt")
return
}
case "NoLock":
z.NoLock, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "NoLock")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *MakeBucketOptions) Msgsize() (s int) {
s = 1 + 12 + msgp.BoolSize + 18 + msgp.BoolSize + 12 + msgp.BoolSize + 10 + msgp.TimeSize + 7 + msgp.BoolSize
return
}
// MarshalMsg implements msgp.Marshaler
func (z *WalkOptions) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 4
// string "Marker"
o = append(o, 0x84, 0xa6, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72)
o = msgp.AppendString(o, z.Marker)
// string "LatestOnly"
o = append(o, 0xaa, 0x4c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x4f, 0x6e, 0x6c, 0x79)
o = msgp.AppendBool(o, z.LatestOnly)
// string "AskDisks"
o = append(o, 0xa8, 0x41, 0x73, 0x6b, 0x44, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendString(o, z.AskDisks)
// string "VersionsSort"
o = append(o, 0xac, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x53, 0x6f, 0x72, 0x74)
o = msgp.AppendUint8(o, uint8(z.VersionsSort))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *WalkOptions) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Marker":
z.Marker, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Marker")
return
}
case "LatestOnly":
z.LatestOnly, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "LatestOnly")
return
}
case "AskDisks":
z.AskDisks, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AskDisks")
return
}
case "VersionsSort":
{
var zb0002 uint8
zb0002, bts, err = msgp.ReadUint8Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "VersionsSort")
return
}
z.VersionsSort = WalkVersionsSortOrder(zb0002)
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *WalkOptions) Msgsize() (s int) {
s = 1 + 7 + msgp.StringPrefixSize + len(z.Marker) + 11 + msgp.BoolSize + 9 + msgp.StringPrefixSize + len(z.AskDisks) + 13 + msgp.Uint8Size
return
}
// MarshalMsg implements msgp.Marshaler
func (z WalkVersionsSortOrder) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendUint8(o, uint8(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *WalkVersionsSortOrder) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 uint8
zb0001, bts, err = msgp.ReadUint8Bytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = WalkVersionsSortOrder(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z WalkVersionsSortOrder) Msgsize() (s int) {
s = msgp.Uint8Size
return
}
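A minimal usage sketch for the generated code above, assuming it runs inside package cmd next to these definitions:

// Round-trip BucketOptions through the generated MessagePack methods.
opts := BucketOptions{Deleted: true, Cached: false}

buf, err := opts.MarshalMsg(nil) // passing nil lets msgp.Require allocate a right-sized buffer
if err != nil {
	return err
}

var decoded BucketOptions
if _, err := decoded.UnmarshalMsg(buf); err != nil {
	return err
}
// decoded now equals opts: Deleted=true, Cached=false.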


@@ -25,22 +25,16 @@ const (
)
const (
peerRESTMethodHealth = "/health"
peerRESTMethodVerifyBinary = "/verifybinary"
peerRESTMethodCommitBinary = "/commitbinary"
peerRESTMethodSignalService = "/signalservice"
peerRESTMethodBackgroundHealStatus = "/backgroundhealstatus"
peerRESTMethodGetLocks = "/getlocks"
peerRESTMethodStartProfiling = "/startprofiling"
peerRESTMethodDownloadProfilingData = "/downloadprofilingdata"
peerRESTMethodGetBandwidth = "/bandwidth"
peerRESTMethodSpeedTest = "/speedtest"
peerRESTMethodDriveSpeedTest = "/drivespeedtest"
peerRESTMethodReloadSiteReplicationConfig = "/reloadsitereplicationconfig"
peerRESTMethodGetLastDayTierStats = "/getlastdaytierstats"
peerRESTMethodDevNull = "/devnull"
peerRESTMethodNetperf = "/netperf"
peerRESTMethodGetReplicationMRF = "/getreplicationmrf"
peerRESTMethodHealth = "/health"
peerRESTMethodVerifyBinary = "/verifybinary"
peerRESTMethodCommitBinary = "/commitbinary"
peerRESTMethodStartProfiling = "/startprofiling"
peerRESTMethodDownloadProfilingData = "/downloadprofilingdata"
peerRESTMethodSpeedTest = "/speedtest"
peerRESTMethodDriveSpeedTest = "/drivespeedtest"
peerRESTMethodDevNull = "/devnull"
peerRESTMethodNetperf = "/netperf"
peerRESTMethodGetReplicationMRF = "/getreplicationmrf"
)
const (


@@ -53,6 +53,7 @@ type peerRESTServer struct{}
var (
// Types & Wrappers
aoBucketInfo = grid.NewArrayOf[*BucketInfo](func() *BucketInfo { return &BucketInfo{} })
aoMetricsGroup = grid.NewArrayOf[*Metric](func() *Metric { return &Metric{} })
madminBgHealState = grid.NewJSONPool[madmin.BgHealState]()
madminCPUs = grid.NewJSONPool[madmin.CPUs]()
@@ -97,6 +98,7 @@ var (
getSysServicesRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.SysServices]](grid.HandlerGetSysServices, grid.NewMSS, madminSysServices.NewJSON)
headBucketRPC = grid.NewSingleHandler[*grid.MSS, *VolInfo](grid.HandlerHeadBucket, grid.NewMSS, func() *VolInfo { return &VolInfo{} })
healBucketRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerHealBucket, grid.NewMSS, grid.NewNoPayload)
listBucketsRPC = grid.NewSingleHandler[*BucketOptions, *grid.Array[*BucketInfo]](grid.HandlerListBuckets, func() *BucketOptions { return &BucketOptions{} }, aoBucketInfo.New)
loadBucketMetadataRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadBucketMetadata, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
loadGroupRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadGroup, grid.NewMSS, grid.NewNoPayload)
loadPolicyMappingRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadPolicyMapping, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
@@ -1226,7 +1228,20 @@ func (s *peerRESTServer) HealBucketHandler(mss *grid.MSS) (np grid.NoPayload, ne
return np, nil
}
// HeadBucketHandler implements peer BuckeInfo call, returns bucket create date.
func (s *peerRESTServer) ListBucketsHandler(opts *BucketOptions) (*grid.Array[*BucketInfo], *grid.RemoteErr) {
buckets, err := listBucketsLocal(context.Background(), *opts)
if err != nil {
return nil, grid.NewRemoteErr(err)
}
res := aoBucketInfo.New()
for i := range buckets {
bucket := buckets[i]
res.Append(&bucket)
}
return res, nil
}
// HeadBucketHandler implements peer BucketInfo call, returns bucket create date.
func (s *peerRESTServer) HeadBucketHandler(mss *grid.MSS) (info *VolInfo, nerr *grid.RemoteErr) {
bucket := mss.Get(peerS3Bucket)
if isMinioMetaBucket(bucket) {
@@ -1332,6 +1347,7 @@ func registerPeerRESTHandlers(router *mux.Router, gm *grid.Manager) {
logger.FatalIf(getSysServicesRPC.Register(gm, server.GetSysServicesHandler), "unable to register handler")
logger.FatalIf(headBucketRPC.Register(gm, server.HeadBucketHandler), "unable to register handler")
logger.FatalIf(healBucketRPC.Register(gm, server.HealBucketHandler), "unable to register handler")
logger.FatalIf(listBucketsRPC.Register(gm, server.ListBucketsHandler), "unable to register handler")
logger.FatalIf(listenRPC.RegisterNoInput(gm, server.ListenHandler), "unable to register handler")
logger.FatalIf(loadBucketMetadataRPC.Register(gm, server.LoadBucketMetadataHandler), "unable to register handler")
logger.FatalIf(loadGroupRPC.Register(gm, server.LoadGroupHandler), "unable to register handler")


@@ -19,11 +19,8 @@ package cmd
import (
"context"
"encoding/gob"
"errors"
"fmt"
"io"
"net/url"
"sort"
"strconv"
"sync/atomic"
@@ -31,9 +28,7 @@ import (
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/grid"
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/rest"
"github.com/minio/pkg/v2/sync/errgroup"
"golang.org/x/exp/slices"
)
@@ -92,39 +87,14 @@ func (l localPeerS3Client) DeleteBucket(ctx context.Context, bucket string, opts
// client to talk to peer Nodes.
type remotePeerS3Client struct {
node Node
pools []int
restClient *rest.Client
node Node
pools []int
// Function that returns the grid connection for this peer when initialized.
// Will return nil if the grid connection is not initialized yet.
gridConn func() *grid.Connection
}
// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected
// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *remotePeerS3Client) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
return client.callWithContext(GlobalContext, method, values, body, length)
}
// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected
// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *remotePeerS3Client) callWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
if values == nil {
values = make(url.Values)
}
respBody, err = client.restClient.Call(ctx, method, values, body, length)
if err == nil {
return respBody, nil
}
err = toStorageErr(err)
return nil, err
}
// S3PeerSys - S3 peer call system.
type S3PeerSys struct {
peerClients []peerS3Client // Excludes self
@@ -351,18 +321,18 @@ func (sys *S3PeerSys) GetBucketInfo(ctx context.Context, bucket string, opts Buc
}
func (client *remotePeerS3Client) ListBuckets(ctx context.Context, opts BucketOptions) ([]BucketInfo, error) {
v := url.Values{}
v.Set(peerS3BucketDeleted, strconv.FormatBool(opts.Deleted))
respBody, err := client.call(peerS3MethodListBuckets, v, nil, -1)
bi, err := listBucketsRPC.Call(ctx, client.gridConn(), &opts)
if err != nil {
return nil, err
return nil, toStorageErr(err)
}
defer xhttp.DrainBody(respBody)
var buckets []BucketInfo
err = gob.NewDecoder(respBody).Decode(&buckets)
return buckets, err
buckets := make([]BucketInfo, 0, len(bi.Value()))
for _, b := range bi.Value() {
if b != nil {
buckets = append(buckets, *b)
}
}
bi.Recycle() // BucketInfo has no internal pointers, so it's safe to recycle.
return buckets, nil
}
func (client *remotePeerS3Client) HealBucket(ctx context.Context, bucket string, opts madmin.HealOpts) (madmin.HealResultItem, error) {
@@ -533,35 +503,10 @@ func newPeerS3Clients(endpoints EndpointServerPools) (peers []peerS3Client) {
// Returns a peer S3 client.
func newPeerS3Client(node Node) peerS3Client {
scheme := "http"
if globalIsTLS {
scheme = "https"
}
serverURL := &url.URL{
Scheme: scheme,
Host: node.Host,
Path: peerS3Path,
}
restClient := rest.NewClient(serverURL, globalInternodeTransport, newCachedAuthToken())
// Use a separate client to avoid recursive calls.
healthClient := rest.NewClient(serverURL, globalInternodeTransport, newCachedAuthToken())
healthClient.NoMetrics = true
// Construct a new health function.
restClient.HealthCheckFn = func() bool {
ctx, cancel := context.WithTimeout(context.Background(), restClient.HealthCheckTimeout)
defer cancel()
respBody, err := healthClient.Call(ctx, peerS3MethodHealth, nil, nil, -1)
xhttp.DrainBody(respBody)
return !isNetworkError(err)
}
var gridConn atomic.Pointer[grid.Connection]
return &remotePeerS3Client{
node: node, restClient: restClient,
node: node,
gridConn: func() *grid.Connection {
// Lazy initialization of grid connection.
// When we create this peer client, the grid connection is likely not yet initialized.
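The gridConn field above is a closure over an atomic.Pointer[grid.Connection] that is only populated once the grid manager has connected, so callers get nil until then. A standalone sketch of that lazy-pointer idiom with hypothetical conn and peerClient types (only the sync/atomic usage mirrors the diff):

package main

import (
	"fmt"
	"sync/atomic"
)

type conn struct{ host string }

type peerClient struct {
	// getConn returns nil until the connection has been established elsewhere.
	getConn func() *conn
}

func main() {
	var p atomic.Pointer[conn]
	c := peerClient{getConn: p.Load}

	fmt.Println(c.getConn()) // <nil>: callers must handle the not-yet-connected case

	p.Store(&conn{host: "node-1"}) // later, once the transport is ready
	fmt.Println(c.getConn().host)  // node-1
}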


@@ -19,33 +19,12 @@ package cmd
import (
"context"
"encoding/gob"
"errors"
"net/http"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/logger"
"github.com/minio/mux"
"github.com/minio/pkg/v2/sync/errgroup"
)
const (
peerS3Version = "v1" // First implementation
peerS3VersionPrefix = SlashSeparator + peerS3Version
peerS3Prefix = minioReservedBucketPath + "/peer-s3"
peerS3Path = peerS3Prefix + peerS3VersionPrefix
)
const (
peerS3MethodHealth = "/health"
peerS3MethodMakeBucket = "/make-bucket"
peerS3MethodGetBucketInfo = "/get-bucket-info"
peerS3MethodDeleteBucket = "/delete-bucket"
peerS3MethodListBuckets = "/list-buckets"
peerS3MethodHealBucket = "/heal-bucket"
)
const (
peerS3Bucket = "bucket"
peerS3BucketDeleted = "bucket-deleted"
@@ -53,33 +32,6 @@ const (
peerS3BucketForceDelete = "force-delete"
)
type peerS3Server struct{}
func (s *peerS3Server) writeErrorResponse(w http.ResponseWriter, err error) {
w.WriteHeader(http.StatusForbidden)
w.Write([]byte(err.Error()))
}
// IsValid - To authenticate and verify the time difference.
func (s *peerS3Server) IsValid(w http.ResponseWriter, r *http.Request) bool {
objAPI := newObjectLayerFn()
if objAPI == nil {
s.writeErrorResponse(w, errServerNotInitialized)
return false
}
if err := storageServerRequestValidate(r); err != nil {
s.writeErrorResponse(w, err)
return false
}
return true
}
// HealthHandler - returns true of health
func (s *peerS3Server) HealthHandler(w http.ResponseWriter, r *http.Request) {
s.IsValid(w, r)
}
func healBucketLocal(ctx context.Context, bucket string, opts madmin.HealOpts) (res madmin.HealResultItem, err error) {
globalLocalDrivesMu.RLock()
localDrives := cloneDrives(globalLocalDrives)
@@ -372,34 +324,3 @@ func makeBucketLocal(ctx context.Context, bucket string, opts MakeBucketOptions)
errs := g.Wait()
return reduceWriteQuorumErrs(ctx, errs, bucketOpIgnoredErrs, (len(localDrives)/2)+1)
}
func (s *peerS3Server) ListBucketsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
bucketDeleted := r.Form.Get(peerS3BucketDeleted) == "true"
buckets, err := listBucketsLocal(r.Context(), BucketOptions{
Deleted: bucketDeleted,
})
if err != nil {
s.writeErrorResponse(w, err)
return
}
logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(buckets))
}
// registerPeerS3Handlers - register peer s3 router.
func registerPeerS3Handlers(router *mux.Router) {
server := &peerS3Server{}
subrouter := router.PathPrefix(peerS3Prefix).Subrouter()
h := func(f http.HandlerFunc) http.HandlerFunc {
return collectInternodeStats(httpTraceHdrs(f))
}
subrouter.Methods(http.MethodPost).Path(peerS3VersionPrefix + peerS3MethodHealth).HandlerFunc(h(server.HealthHandler))
subrouter.Methods(http.MethodPost).Path(peerS3VersionPrefix + peerS3MethodListBuckets).HandlerFunc(h(server.ListBucketsHandler))
}


@@ -32,9 +32,6 @@ func registerDistErasureRouters(router *mux.Router, endpointServerPools Endpoint
// Register peer REST router only if its a distributed setup.
registerPeerRESTHandlers(router, globalGrid.Load())
// Register peer S3 router only if its a distributed setup.
registerPeerS3Handlers(router)
// Register bootstrap REST router for distributed setups.
registerBootstrapRESTHandlers(globalGrid.Load())


@@ -22,6 +22,9 @@ package replication
// StatusType of Replication for x-amz-replication-status header
type StatusType string
// Type - replication type enum
type Type int
const (
// Pending - replication is pending.
Pending StatusType = "PENDING"


@@ -57,3 +57,55 @@ func (z StatusType) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
// DecodeMsg implements msgp.Decodable
func (z *Type) DecodeMsg(dc *msgp.Reader) (err error) {
{
var zb0001 int
zb0001, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = Type(zb0001)
}
return
}
// EncodeMsg implements msgp.Encodable
func (z Type) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteInt(int(z))
if err != nil {
err = msgp.WrapError(err)
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z Type) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendInt(o, int(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Type) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 int
zb0001, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = Type(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z Type) Msgsize() (s int) {
s = msgp.IntSize
return
}


@@ -118,9 +118,6 @@ func (c Config) Validate(bucket string, sameTarget bool) error {
return nil
}
// Type - replication type enum
type Type int
// Types of replication
const (
UnsetReplicationType Type = 0 + iota


@@ -110,6 +110,7 @@ const (
HandlerSignalService
HandlerGetBandwidth
HandlerWriteAll
HandlerListBuckets
// Add more above here ^^^
// If all handlers are used, the type of Handler can be changed.
@ -187,6 +188,7 @@ var handlerPrefixes = [handlerLast]string{
HandlerStorageInfo: peerPrefix,
HandlerConsoleLog: peerPrefix,
HandlerListDir: storagePrefix,
HandlerListBuckets: peerPrefixS3,
}
const (


@@ -79,14 +79,15 @@ func _() {
_ = x[HandlerSignalService-68]
_ = x[HandlerGetBandwidth-69]
_ = x[HandlerWriteAll-70]
_ = x[handlerTest-71]
_ = x[handlerTest2-72]
_ = x[handlerLast-73]
_ = x[HandlerListBuckets-71]
_ = x[handlerTest-72]
_ = x[handlerTest2-73]
_ = x[handlerLast-74]
}
const _HandlerID_name = "handlerInvalidLockLockLockRLockLockUnlockLockRUnlockLockRefreshLockForceUnlockWalkDirStatVolDiskInfoNSScannerReadXLReadVersionDeleteFileDeleteVersionUpdateMetadataWriteMetadataCheckPartsRenameDataRenameFileReadAllServerVerifyTraceListenDeleteBucketMetadataLoadBucketMetadataReloadSiteReplicationConfigReloadPoolMetaStopRebalanceLoadRebalanceMetaLoadTransitionTierConfigDeletePolicyLoadPolicyLoadPolicyMappingDeleteServiceAccountLoadServiceAccountDeleteUserLoadUserLoadGroupHealBucketMakeBucketHeadBucketDeleteBucketGetMetricsGetResourceMetricsGetMemInfoGetProcInfoGetOSInfoGetPartitionsGetNetInfoGetCPUsServerInfoGetSysConfigGetSysServicesGetSysErrorsGetAllBucketStatsGetBucketStatsGetSRMetricsGetPeerMetricsGetMetacacheListingUpdateMetacacheListingGetPeerBucketMetricsStorageInfoConsoleLogListDirGetLocksBackgroundHealStatusGetLastDayTierStatsSignalServiceGetBandwidthWriteAllhandlerTesthandlerTest2handlerLast"
const _HandlerID_name = "handlerInvalidLockLockLockRLockLockUnlockLockRUnlockLockRefreshLockForceUnlockWalkDirStatVolDiskInfoNSScannerReadXLReadVersionDeleteFileDeleteVersionUpdateMetadataWriteMetadataCheckPartsRenameDataRenameFileReadAllServerVerifyTraceListenDeleteBucketMetadataLoadBucketMetadataReloadSiteReplicationConfigReloadPoolMetaStopRebalanceLoadRebalanceMetaLoadTransitionTierConfigDeletePolicyLoadPolicyLoadPolicyMappingDeleteServiceAccountLoadServiceAccountDeleteUserLoadUserLoadGroupHealBucketMakeBucketHeadBucketDeleteBucketGetMetricsGetResourceMetricsGetMemInfoGetProcInfoGetOSInfoGetPartitionsGetNetInfoGetCPUsServerInfoGetSysConfigGetSysServicesGetSysErrorsGetAllBucketStatsGetBucketStatsGetSRMetricsGetPeerMetricsGetMetacacheListingUpdateMetacacheListingGetPeerBucketMetricsStorageInfoConsoleLogListDirGetLocksBackgroundHealStatusGetLastDayTierStatsSignalServiceGetBandwidthWriteAllListBucketshandlerTesthandlerTest2handlerLast"
var _HandlerID_index = [...]uint16{0, 14, 22, 31, 41, 52, 63, 78, 85, 92, 100, 109, 115, 126, 136, 149, 163, 176, 186, 196, 206, 213, 225, 230, 236, 256, 274, 301, 315, 328, 345, 369, 381, 391, 408, 428, 446, 456, 464, 473, 483, 493, 503, 515, 525, 543, 553, 564, 573, 586, 596, 603, 613, 625, 639, 651, 668, 682, 694, 708, 727, 749, 769, 780, 790, 797, 805, 825, 844, 857, 869, 877, 888, 900, 911}
var _HandlerID_index = [...]uint16{0, 14, 22, 31, 41, 52, 63, 78, 85, 92, 100, 109, 115, 126, 136, 149, 163, 176, 186, 196, 206, 213, 225, 230, 236, 256, 274, 301, 315, 328, 345, 369, 381, 391, 408, 428, 446, 456, 464, 473, 483, 493, 503, 515, 525, 543, 553, 564, 573, 586, 596, 603, 613, 625, 639, 651, 668, 682, 694, 708, 727, 749, 769, 780, 790, 797, 805, 825, 844, 857, 869, 877, 888, 899, 911, 922}
func (i HandlerID) String() string {
if i >= HandlerID(len(_HandlerID_index)-1) {