Convert more peer <--> peer REST calls (#19004)

* Convert more peer <--> peer REST calls
* Clean up in general.
* Add JSON wrapper.
* Add slice wrapper.
* Add `IgnoreNilConn` option to make a handler return a nil error when no connection is given.
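The JSON wrapper is what lets payload types that only have `encoding/json` support travel over the grid, which otherwise expects msgp-style byte marshaling; the slice wrapper does the same for slices of such types. A minimal sketch of the idea (names and signatures here are illustrative, not the actual wrapper added by this commit):

```
package cmd

import "encoding/json"

// JSONWrap is an illustrative stand-in for the new JSON wrapper: it adapts
// any JSON-serializable value to the byte-oriented contract the grid expects.
type JSONWrap[T any] struct {
	Val T
}

// MarshalMsg appends the JSON encoding of the wrapped value to b.
func (j *JSONWrap[T]) MarshalMsg(b []byte) ([]byte, error) {
	v, err := json.Marshal(j.Val)
	if err != nil {
		return b, err
	}
	return append(b, v...), nil
}

// UnmarshalMsg decodes the wrapped value; JSON consumes the whole payload,
// so no trailing bytes are returned.
func (j *JSONWrap[T]) UnmarshalMsg(bts []byte) ([]byte, error) {
	return nil, json.Unmarshal(bts, &j.Val)
}

// Msgsize is only a hint; the JSON output size is not known up front.
func (j *JSONWrap[T]) Msgsize() int { return 1024 }
```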

Converts the following handlers:

```
+	HandlerGetMetrics
+	HandlerGetResourceMetrics
+	HandlerGetMemInfo
+	HandlerGetProcInfo
+	HandlerGetOSInfo
+	HandlerGetPartitions
+	HandlerGetNetInfo
+	HandlerGetCPUs
+	HandlerServerInfo
+	HandlerGetSysConfig
+	HandlerGetSysServices
+	HandlerGetSysErrors
+	HandlerGetAllBucketStats
+	HandlerGetBucketStats
+	HandlerGetSRMetrics
+	HandlerGetPeerMetrics
+	HandlerGetMetacacheListing
+	HandlerUpdateMetacacheListing
+	HandlerGetPeerBucketMetrics
+	HandlerStorageInfo
+	HandlerGetLocks
+	HandlerBackgroundHealStatus
+	HandlerGetLastDayTierStats
+	HandlerSignalService
+	HandlerGetBandwidth
```
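The conversion pattern for each of these is roughly the same: declare a typed grid call once, register the implementation on the server side, and invoke it over the peer's grid connection on the client side. The sketch below shows the shape of that pattern for `HandlerGetLocks`; the helper names and exact signatures are illustrative rather than lifted from the commit:

```
package cmd

import (
	"context"

	"github.com/minio/minio/internal/grid"
)

// Illustrative declaration: one typed RPC per handler ID. Request and
// response types carry msgp codecs (generated below for localLockMap) or
// use the new JSON/slice wrappers.
var getLocksRPC = grid.NewSingleHandler[*grid.MSS, *localLockMap](
	grid.HandlerGetLocks, grid.NewMSS, func() *localLockMap { return &localLockMap{} },
)

// Server side: register the implementation once at startup.
func registerGetLocks(gm *grid.Manager, l *localLocker) {
	getLocksRPC.Register(gm, func(_ *grid.MSS) (*localLockMap, *grid.RemoteErr) {
		m := l.DupLockMap()
		return &m, nil
	})
}

// Client side: call the peer over its existing grid connection instead of
// issuing a one-off REST request.
func peerGetLocks(ctx context.Context, conn *grid.Connection) (localLockMap, error) {
	resp, err := getLocksRPC.Call(ctx, conn, grid.NewMSS())
	if err != nil {
		return nil, err
	}
	return *resp, nil
}
```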
Klaus Post 2024-02-19 14:54:46 -08:00 committed by GitHub
parent 4c8197a119
commit e06168596f
29 changed files with 4794 additions and 979 deletions

cmd/admin-handlers.go

@@ -1984,7 +1984,6 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
// The ConsoleLogHandler handler sends console logs to the connected HTTP client.
func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
objectAPI, _ := validateAdminReq(ctx, w, r, policy.ConsoleLogAdminAction)
if objectAPI == nil {
return
@@ -2009,44 +2008,65 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
setEventStreamHeaders(w)
-logCh := make(chan log.Info, 4000)
+logCh := make(chan log.Info, 1000)
peers, _ := newPeerRestClients(globalEndpoints)
+encodedCh := make(chan []byte, 1000+len(peers)*1000)
err = globalConsoleSys.Subscribe(logCh, ctx.Done(), node, limitLines, logKind, nil)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}
+// Convert local entries to JSON
+go func() {
+var buf bytes.Buffer
+enc := json.NewEncoder(&buf)
+for {
+select {
+case <-ctx.Done():
+return
+case li := <-logCh:
+if !li.SendLog(node, logKind) {
+continue
+}
+buf.Reset()
+if err := enc.Encode(li); err != nil {
+continue
+}
+select {
+case <-ctx.Done():
+return
+case encodedCh <- append(grid.GetByteBuffer()[:0], buf.Bytes()...):
+}
+}
+}
+}()
+// Collect from matching peers
for _, peer := range peers {
if peer == nil {
continue
}
if node == "" || strings.EqualFold(peer.host.Name, node) {
-peer.ConsoleLog(logCh, ctx.Done())
+peer.ConsoleLog(ctx, logKind, encodedCh)
}
}
-enc := json.NewEncoder(w)
keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()
for {
select {
-case log, ok := <-logCh:
+case log, ok := <-encodedCh:
if !ok {
return
}
-if log.SendLog(node, logKind) {
-if err := enc.Encode(log); err != nil {
-return
-}
-if len(logCh) == 0 {
-// Flush if nothing is queued
-w.(http.Flusher).Flush()
-}
+_, err = w.Write(log)
+if err != nil {
+return
+}
+grid.PutByteBuffer(log)
+if len(logCh) == 0 {
+// Flush if nothing is queued
+w.(http.Flusher).Flush()
+}
case <-keepAliveTicker.C:
if len(logCh) > 0 {
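The rewritten handler funnels local and peer log entries alike through `encodedCh` as pre-encoded byte slices: local entries are JSON-encoded once in the goroutine above, peers push already-encoded bytes, and the write loop only copies bytes out and recycles the buffer. A minimal sketch of that hand-off, assuming only that `grid.GetByteBuffer`/`grid.PutByteBuffer` lend out and reclaim pooled `[]byte` slices:

```
package cmd

import (
	"context"
	"io"

	"github.com/minio/minio/internal/grid"
)

// Producer: copy the payload into a pooled buffer and hand ownership to the
// channel; if nobody takes it, return the buffer ourselves.
func produce(ctx context.Context, payload []byte, out chan<- []byte) {
	buf := append(grid.GetByteBuffer()[:0], payload...)
	select {
	case out <- buf: // consumer now owns buf
	case <-ctx.Done():
		grid.PutByteBuffer(buf) // not delivered; return it ourselves
	}
}

// Consumer: write each buffer and return it to the pool, mirroring the
// w.Write + grid.PutByteBuffer pair in the handler above.
func consume(w io.Writer, in <-chan []byte) error {
	for buf := range in {
		if _, err := w.Write(buf); err != nil {
			grid.PutByteBuffer(buf)
			return err
		}
		grid.PutByteBuffer(buf)
	}
	return nil
}
```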

cmd/local-locker.go

@@ -17,6 +17,8 @@
package cmd
+//go:generate msgp -file=$GOFILE -unexported
import (
"context"
"fmt"
@@ -36,12 +38,9 @@ type lockRequesterInfo struct {
TimeLastRefresh time.Time // Timestamp for last lock refresh.
Source string // Contains line, function and filename requesting the lock.
Group bool // indicates if it was a group lock.
-// Owner represents the UUID of the owner who originally requested the lock
-// useful in expiry.
-Owner string
-// Quorum represents the quorum required for this lock to be active.
-Quorum int
-idx int
+Owner string // Owner represents the UUID of the owner who originally requested the lock.
+Quorum int // Quorum represents the quorum required for this lock to be active.
+idx int `msg:"-"` // index of the lock in the lockMap.
}
// isWriteLock returns whether the lock is a write or read lock.
@@ -50,6 +49,8 @@ func isWriteLock(lri []lockRequesterInfo) bool {
}
// localLocker implements Dsync.NetLocker
+//
+//msgp:ignore localLocker
type localLocker struct {
mutex sync.Mutex
lockMap map[string][]lockRequesterInfo
@@ -238,7 +239,9 @@ func (l *localLocker) stats() lockStats {
return st
}
-func (l *localLocker) DupLockMap() map[string][]lockRequesterInfo {
+type localLockMap map[string][]lockRequesterInfo
+func (l *localLocker) DupLockMap() localLockMap {
l.mutex.Lock()
defer l.mutex.Unlock()
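Three msgp mechanisms carry this file: `-unexported` on the `go:generate` line makes the generator emit codecs for unexported types, `//msgp:ignore` keeps stateful types such as `localLocker` out of generation entirely, and the `msg:"-"` tag drops individual fields like `idx` from the wire format. `DupLockMap` now returns the named `localLockMap` type because only a named type can carry the generated methods. An illustrative summary of the three (hypothetical types, not from the commit):

```
package cmd

import "sync"

//go:generate msgp -file=$GOFILE -unexported

// Keep stateful types out of code generation entirely.
//msgp:ignore exampleHolder
type exampleHolder struct{ mu sync.Mutex }

type example struct {
	Kept    string // serialized under the key "Kept"
	skipped int    `msg:"-"` // dropped from the wire format, like idx above
}
```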

620
cmd/local-locker_gen.go Normal file

@@ -0,0 +1,620 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *localLockMap) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(localLockMap, zb0004)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
for zb0004 > 0 {
zb0004--
var zb0001 string
var zb0002 []lockRequesterInfo
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0005 uint32
zb0005, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
if cap(zb0002) >= int(zb0005) {
zb0002 = (zb0002)[:zb0005]
} else {
zb0002 = make([]lockRequesterInfo, zb0005)
}
for zb0003 := range zb0002 {
err = zb0002[zb0003].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, zb0001, zb0003)
return
}
}
(*z)[zb0001] = zb0002
}
return
}
// EncodeMsg implements msgp.Encodable
func (z localLockMap) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteMapHeader(uint32(len(z)))
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0006, zb0007 := range z {
err = en.WriteString(zb0006)
if err != nil {
err = msgp.WrapError(err)
return
}
err = en.WriteArrayHeader(uint32(len(zb0007)))
if err != nil {
err = msgp.WrapError(err, zb0006)
return
}
for zb0008 := range zb0007 {
err = zb0007[zb0008].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, zb0006, zb0008)
return
}
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z localLockMap) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendMapHeader(o, uint32(len(z)))
for zb0006, zb0007 := range z {
o = msgp.AppendString(o, zb0006)
o = msgp.AppendArrayHeader(o, uint32(len(zb0007)))
for zb0008 := range zb0007 {
o, err = zb0007[zb0008].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, zb0006, zb0008)
return
}
}
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *localLockMap) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(localLockMap, zb0004)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
for zb0004 > 0 {
var zb0001 string
var zb0002 []lockRequesterInfo
zb0004--
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var zb0005 uint32
zb0005, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
if cap(zb0002) >= int(zb0005) {
zb0002 = (zb0002)[:zb0005]
} else {
zb0002 = make([]lockRequesterInfo, zb0005)
}
for zb0003 := range zb0002 {
bts, err = zb0002[zb0003].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, zb0001, zb0003)
return
}
}
(*z)[zb0001] = zb0002
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z localLockMap) Msgsize() (s int) {
s = msgp.MapHeaderSize
if z != nil {
for zb0006, zb0007 := range z {
_ = zb0007
s += msgp.StringPrefixSize + len(zb0006) + msgp.ArrayHeaderSize
for zb0008 := range zb0007 {
s += zb0007[zb0008].Msgsize()
}
}
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *lockRequesterInfo) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Writer":
z.Writer, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Writer")
return
}
case "UID":
z.UID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "UID")
return
}
case "Timestamp":
z.Timestamp, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "Timestamp")
return
}
case "TimeLastRefresh":
z.TimeLastRefresh, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "TimeLastRefresh")
return
}
case "Source":
z.Source, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Source")
return
}
case "Group":
z.Group, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Group")
return
}
case "Owner":
z.Owner, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Owner")
return
}
case "Quorum":
z.Quorum, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Quorum")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *lockRequesterInfo) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 9
// write "Name"
err = en.Append(0x89, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Name)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
// write "Writer"
err = en.Append(0xa6, 0x57, 0x72, 0x69, 0x74, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteBool(z.Writer)
if err != nil {
err = msgp.WrapError(err, "Writer")
return
}
// write "UID"
err = en.Append(0xa3, 0x55, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.UID)
if err != nil {
err = msgp.WrapError(err, "UID")
return
}
// write "Timestamp"
err = en.Append(0xa9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70)
if err != nil {
return
}
err = en.WriteTime(z.Timestamp)
if err != nil {
err = msgp.WrapError(err, "Timestamp")
return
}
// write "TimeLastRefresh"
err = en.Append(0xaf, 0x54, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68)
if err != nil {
return
}
err = en.WriteTime(z.TimeLastRefresh)
if err != nil {
err = msgp.WrapError(err, "TimeLastRefresh")
return
}
// write "Source"
err = en.Append(0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65)
if err != nil {
return
}
err = en.WriteString(z.Source)
if err != nil {
err = msgp.WrapError(err, "Source")
return
}
// write "Group"
err = en.Append(0xa5, 0x47, 0x72, 0x6f, 0x75, 0x70)
if err != nil {
return
}
err = en.WriteBool(z.Group)
if err != nil {
err = msgp.WrapError(err, "Group")
return
}
// write "Owner"
err = en.Append(0xa5, 0x4f, 0x77, 0x6e, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Owner)
if err != nil {
err = msgp.WrapError(err, "Owner")
return
}
// write "Quorum"
err = en.Append(0xa6, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d)
if err != nil {
return
}
err = en.WriteInt(z.Quorum)
if err != nil {
err = msgp.WrapError(err, "Quorum")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *lockRequesterInfo) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 9
// string "Name"
o = append(o, 0x89, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, z.Name)
// string "Writer"
o = append(o, 0xa6, 0x57, 0x72, 0x69, 0x74, 0x65, 0x72)
o = msgp.AppendBool(o, z.Writer)
// string "UID"
o = append(o, 0xa3, 0x55, 0x49, 0x44)
o = msgp.AppendString(o, z.UID)
// string "Timestamp"
o = append(o, 0xa9, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70)
o = msgp.AppendTime(o, z.Timestamp)
// string "TimeLastRefresh"
o = append(o, 0xaf, 0x54, 0x69, 0x6d, 0x65, 0x4c, 0x61, 0x73, 0x74, 0x52, 0x65, 0x66, 0x72, 0x65, 0x73, 0x68)
o = msgp.AppendTime(o, z.TimeLastRefresh)
// string "Source"
o = append(o, 0xa6, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65)
o = msgp.AppendString(o, z.Source)
// string "Group"
o = append(o, 0xa5, 0x47, 0x72, 0x6f, 0x75, 0x70)
o = msgp.AppendBool(o, z.Group)
// string "Owner"
o = append(o, 0xa5, 0x4f, 0x77, 0x6e, 0x65, 0x72)
o = msgp.AppendString(o, z.Owner)
// string "Quorum"
o = append(o, 0xa6, 0x51, 0x75, 0x6f, 0x72, 0x75, 0x6d)
o = msgp.AppendInt(o, z.Quorum)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *lockRequesterInfo) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Name":
z.Name, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
case "Writer":
z.Writer, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Writer")
return
}
case "UID":
z.UID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "UID")
return
}
case "Timestamp":
z.Timestamp, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Timestamp")
return
}
case "TimeLastRefresh":
z.TimeLastRefresh, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "TimeLastRefresh")
return
}
case "Source":
z.Source, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Source")
return
}
case "Group":
z.Group, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Group")
return
}
case "Owner":
z.Owner, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Owner")
return
}
case "Quorum":
z.Quorum, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Quorum")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *lockRequesterInfo) Msgsize() (s int) {
s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 7 + msgp.BoolSize + 4 + msgp.StringPrefixSize + len(z.UID) + 10 + msgp.TimeSize + 16 + msgp.TimeSize + 7 + msgp.StringPrefixSize + len(z.Source) + 6 + msgp.BoolSize + 6 + msgp.StringPrefixSize + len(z.Owner) + 7 + msgp.IntSize
return
}
// DecodeMsg implements msgp.Decodable
func (z *lockStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Total":
z.Total, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
case "Writes":
z.Writes, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Writes")
return
}
case "Reads":
z.Reads, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Reads")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z lockStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 3
// write "Total"
err = en.Append(0x83, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c)
if err != nil {
return
}
err = en.WriteInt(z.Total)
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
// write "Writes"
err = en.Append(0xa6, 0x57, 0x72, 0x69, 0x74, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.Writes)
if err != nil {
err = msgp.WrapError(err, "Writes")
return
}
// write "Reads"
err = en.Append(0xa5, 0x52, 0x65, 0x61, 0x64, 0x73)
if err != nil {
return
}
err = en.WriteInt(z.Reads)
if err != nil {
err = msgp.WrapError(err, "Reads")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z lockStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 3
// string "Total"
o = append(o, 0x83, 0xa5, 0x54, 0x6f, 0x74, 0x61, 0x6c)
o = msgp.AppendInt(o, z.Total)
// string "Writes"
o = append(o, 0xa6, 0x57, 0x72, 0x69, 0x74, 0x65, 0x73)
o = msgp.AppendInt(o, z.Writes)
// string "Reads"
o = append(o, 0xa5, 0x52, 0x65, 0x61, 0x64, 0x73)
o = msgp.AppendInt(o, z.Reads)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *lockStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Total":
z.Total, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Total")
return
}
case "Writes":
z.Writes, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Writes")
return
}
case "Reads":
z.Reads, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Reads")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z lockStats) Msgsize() (s int) {
s = 1 + 6 + msgp.IntSize + 7 + msgp.IntSize + 6 + msgp.IntSize
return
}
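The generated API above is the usual msgp set: `MarshalMsg`/`UnmarshalMsg` for byte slices, `EncodeMsg`/`DecodeMsg` for streams, and `Msgsize` as an upper-bound size hint. A round trip with it looks like this (a sketch; real callers would also check the leftover bytes):

```
func roundTripLocks(src localLockMap) (localLockMap, error) {
	// Msgsize is an upper bound, so pre-sizing avoids most reallocation.
	buf, err := src.MarshalMsg(make([]byte, 0, src.Msgsize()))
	if err != nil {
		return nil, err
	}
	var dst localLockMap
	if _, err := dst.UnmarshalMsg(buf); err != nil {
		return nil, err
	}
	return dst, nil
}
```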

349
cmd/local-locker_gen_test.go Normal file

@@ -0,0 +1,349 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshallocalLockMap(t *testing.T) {
v := localLockMap{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsglocalLockMap(b *testing.B) {
v := localLockMap{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsglocalLockMap(b *testing.B) {
v := localLockMap{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshallocalLockMap(b *testing.B) {
v := localLockMap{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodelocalLockMap(t *testing.T) {
v := localLockMap{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodelocalLockMap Msgsize() is inaccurate")
}
vn := localLockMap{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodelocalLockMap(b *testing.B) {
v := localLockMap{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodelocalLockMap(b *testing.B) {
v := localLockMap{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshallockRequesterInfo(t *testing.T) {
v := lockRequesterInfo{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsglockRequesterInfo(b *testing.B) {
v := lockRequesterInfo{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsglockRequesterInfo(b *testing.B) {
v := lockRequesterInfo{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshallockRequesterInfo(b *testing.B) {
v := lockRequesterInfo{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodelockRequesterInfo(t *testing.T) {
v := lockRequesterInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodelockRequesterInfo Msgsize() is inaccurate")
}
vn := lockRequesterInfo{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodelockRequesterInfo(b *testing.B) {
v := lockRequesterInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodelockRequesterInfo(b *testing.B) {
v := lockRequesterInfo{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshallockStats(t *testing.T) {
v := lockStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsglockStats(b *testing.B) {
v := lockStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsglockStats(b *testing.B) {
v := lockStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshallockStats(b *testing.B) {
v := lockStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodelockStats(t *testing.T) {
v := lockStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodelockStats Msgsize() is inaccurate")
}
vn := lockStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodelockStats(b *testing.B) {
v := lockStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodelockStats(b *testing.B) {
v := lockStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

cmd/metacache-set.go

@@ -42,6 +42,8 @@ import (
"github.com/minio/pkg/v2/console"
)
+//go:generate msgp -file $GOFILE -unexported
type listPathOptions struct {
// ID of the listing.
// This will be used to persist the list.
@@ -99,18 +101,18 @@ type listPathOptions struct {
// Versioning config is used for if the path
// has versioning enabled.
-Versioning *versioning.Versioning
+Versioning *versioning.Versioning `msg:"-"`
// Lifecycle performs filtering based on lifecycle.
// This will filter out objects if the most recent version should be deleted by lifecycle.
// Is not transferred across request calls.
-Lifecycle *lifecycle.Lifecycle
+Lifecycle *lifecycle.Lifecycle `msg:"-"`
// Retention configuration, needed to be passed along with lifecycle if set.
-Retention lock.Retention
+Retention lock.Retention `msg:"-"`
// Replication configuration
-Replication replicationConfig
+Replication replicationConfig `msg:"-"`
// StopDiskAtLimit will stop listing on each disk when limit number off objects has been returned.
StopDiskAtLimit bool
@@ -767,6 +769,7 @@ func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions, resul
})
}
+//msgp:ignore metaCacheRPC
type metaCacheRPC struct {
o listPathOptions
mu sync.Mutex
@@ -917,6 +920,7 @@ func (er *erasureObjects) saveMetaCacheStream(ctx context.Context, mc *metaCache
return nil
}
+//msgp:ignore listPathRawOptions
type listPathRawOptions struct {
disks []StorageAPI
fallbackDisks []StorageAPI
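Note that listing options crossing the wire lose every `msg:"-"` field: `Versioning`, `Lifecycle`, `Retention` and `Replication` arrive as zero values, matching the "not transferred across request calls" comments above, and the receiving peer re-derives them locally. A small sketch against the generated codecs:

```
func exampleDroppedFields(o listPathOptions) (listPathOptions, error) {
	b, err := o.MarshalMsg(nil)
	if err != nil {
		return o, err
	}
	var got listPathOptions
	if _, err := got.UnmarshalMsg(b); err != nil {
		return o, err
	}
	// got.Versioning, got.Lifecycle, got.Retention and got.Replication are
	// zero values regardless of what o carried; the receiver rebuilds them
	// from its own bucket metadata before filtering.
	return got, nil
}
```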

535
cmd/metacache-set_gen.go Normal file

@@ -0,0 +1,535 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *listPathOptions) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "Bucket":
z.Bucket, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "BaseDir":
z.BaseDir, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "BaseDir")
return
}
case "Prefix":
z.Prefix, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "FilterPrefix":
z.FilterPrefix, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "FilterPrefix")
return
}
case "Marker":
z.Marker, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Marker")
return
}
case "Limit":
z.Limit, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "Limit")
return
}
case "AskDisks":
z.AskDisks, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "AskDisks")
return
}
case "InclDeleted":
z.InclDeleted, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "InclDeleted")
return
}
case "Recursive":
z.Recursive, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Recursive")
return
}
case "Separator":
z.Separator, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Separator")
return
}
case "Create":
z.Create, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Create")
return
}
case "IncludeDirectories":
z.IncludeDirectories, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "IncludeDirectories")
return
}
case "Transient":
z.Transient, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Transient")
return
}
case "Versioned":
z.Versioned, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "Versioned")
return
}
case "StopDiskAtLimit":
z.StopDiskAtLimit, err = dc.ReadBool()
if err != nil {
err = msgp.WrapError(err, "StopDiskAtLimit")
return
}
case "pool":
z.pool, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "pool")
return
}
case "set":
z.set, err = dc.ReadInt()
if err != nil {
err = msgp.WrapError(err, "set")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *listPathOptions) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 18
// write "ID"
err = en.Append(0xde, 0x0, 0x12, 0xa2, 0x49, 0x44)
if err != nil {
return
}
err = en.WriteString(z.ID)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
// write "Bucket"
err = en.Append(0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteString(z.Bucket)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
// write "BaseDir"
err = en.Append(0xa7, 0x42, 0x61, 0x73, 0x65, 0x44, 0x69, 0x72)
if err != nil {
return
}
err = en.WriteString(z.BaseDir)
if err != nil {
err = msgp.WrapError(err, "BaseDir")
return
}
// write "Prefix"
err = en.Append(0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
if err != nil {
return
}
err = en.WriteString(z.Prefix)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
// write "FilterPrefix"
err = en.Append(0xac, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
if err != nil {
return
}
err = en.WriteString(z.FilterPrefix)
if err != nil {
err = msgp.WrapError(err, "FilterPrefix")
return
}
// write "Marker"
err = en.Append(0xa6, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Marker)
if err != nil {
err = msgp.WrapError(err, "Marker")
return
}
// write "Limit"
err = en.Append(0xa5, 0x4c, 0x69, 0x6d, 0x69, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.Limit)
if err != nil {
err = msgp.WrapError(err, "Limit")
return
}
// write "AskDisks"
err = en.Append(0xa8, 0x41, 0x73, 0x6b, 0x44, 0x69, 0x73, 0x6b, 0x73)
if err != nil {
return
}
err = en.WriteString(z.AskDisks)
if err != nil {
err = msgp.WrapError(err, "AskDisks")
return
}
// write "InclDeleted"
err = en.Append(0xab, 0x49, 0x6e, 0x63, 0x6c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.InclDeleted)
if err != nil {
err = msgp.WrapError(err, "InclDeleted")
return
}
// write "Recursive"
err = en.Append(0xa9, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.Recursive)
if err != nil {
err = msgp.WrapError(err, "Recursive")
return
}
// write "Separator"
err = en.Append(0xa9, 0x53, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72)
if err != nil {
return
}
err = en.WriteString(z.Separator)
if err != nil {
err = msgp.WrapError(err, "Separator")
return
}
// write "Create"
err = en.Append(0xa6, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65)
if err != nil {
return
}
err = en.WriteBool(z.Create)
if err != nil {
err = msgp.WrapError(err, "Create")
return
}
// write "IncludeDirectories"
err = en.Append(0xb2, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73)
if err != nil {
return
}
err = en.WriteBool(z.IncludeDirectories)
if err != nil {
err = msgp.WrapError(err, "IncludeDirectories")
return
}
// write "Transient"
err = en.Append(0xa9, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74)
if err != nil {
return
}
err = en.WriteBool(z.Transient)
if err != nil {
err = msgp.WrapError(err, "Transient")
return
}
// write "Versioned"
err = en.Append(0xa9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64)
if err != nil {
return
}
err = en.WriteBool(z.Versioned)
if err != nil {
err = msgp.WrapError(err, "Versioned")
return
}
// write "StopDiskAtLimit"
err = en.Append(0xaf, 0x53, 0x74, 0x6f, 0x70, 0x44, 0x69, 0x73, 0x6b, 0x41, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74)
if err != nil {
return
}
err = en.WriteBool(z.StopDiskAtLimit)
if err != nil {
err = msgp.WrapError(err, "StopDiskAtLimit")
return
}
// write "pool"
err = en.Append(0xa4, 0x70, 0x6f, 0x6f, 0x6c)
if err != nil {
return
}
err = en.WriteInt(z.pool)
if err != nil {
err = msgp.WrapError(err, "pool")
return
}
// write "set"
err = en.Append(0xa3, 0x73, 0x65, 0x74)
if err != nil {
return
}
err = en.WriteInt(z.set)
if err != nil {
err = msgp.WrapError(err, "set")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *listPathOptions) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 18
// string "ID"
o = append(o, 0xde, 0x0, 0x12, 0xa2, 0x49, 0x44)
o = msgp.AppendString(o, z.ID)
// string "Bucket"
o = append(o, 0xa6, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74)
o = msgp.AppendString(o, z.Bucket)
// string "BaseDir"
o = append(o, 0xa7, 0x42, 0x61, 0x73, 0x65, 0x44, 0x69, 0x72)
o = msgp.AppendString(o, z.BaseDir)
// string "Prefix"
o = append(o, 0xa6, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o = msgp.AppendString(o, z.Prefix)
// string "FilterPrefix"
o = append(o, 0xac, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78)
o = msgp.AppendString(o, z.FilterPrefix)
// string "Marker"
o = append(o, 0xa6, 0x4d, 0x61, 0x72, 0x6b, 0x65, 0x72)
o = msgp.AppendString(o, z.Marker)
// string "Limit"
o = append(o, 0xa5, 0x4c, 0x69, 0x6d, 0x69, 0x74)
o = msgp.AppendInt(o, z.Limit)
// string "AskDisks"
o = append(o, 0xa8, 0x41, 0x73, 0x6b, 0x44, 0x69, 0x73, 0x6b, 0x73)
o = msgp.AppendString(o, z.AskDisks)
// string "InclDeleted"
o = append(o, 0xab, 0x49, 0x6e, 0x63, 0x6c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64)
o = msgp.AppendBool(o, z.InclDeleted)
// string "Recursive"
o = append(o, 0xa9, 0x52, 0x65, 0x63, 0x75, 0x72, 0x73, 0x69, 0x76, 0x65)
o = msgp.AppendBool(o, z.Recursive)
// string "Separator"
o = append(o, 0xa9, 0x53, 0x65, 0x70, 0x61, 0x72, 0x61, 0x74, 0x6f, 0x72)
o = msgp.AppendString(o, z.Separator)
// string "Create"
o = append(o, 0xa6, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65)
o = msgp.AppendBool(o, z.Create)
// string "IncludeDirectories"
o = append(o, 0xb2, 0x49, 0x6e, 0x63, 0x6c, 0x75, 0x64, 0x65, 0x44, 0x69, 0x72, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x69, 0x65, 0x73)
o = msgp.AppendBool(o, z.IncludeDirectories)
// string "Transient"
o = append(o, 0xa9, 0x54, 0x72, 0x61, 0x6e, 0x73, 0x69, 0x65, 0x6e, 0x74)
o = msgp.AppendBool(o, z.Transient)
// string "Versioned"
o = append(o, 0xa9, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x65, 0x64)
o = msgp.AppendBool(o, z.Versioned)
// string "StopDiskAtLimit"
o = append(o, 0xaf, 0x53, 0x74, 0x6f, 0x70, 0x44, 0x69, 0x73, 0x6b, 0x41, 0x74, 0x4c, 0x69, 0x6d, 0x69, 0x74)
o = msgp.AppendBool(o, z.StopDiskAtLimit)
// string "pool"
o = append(o, 0xa4, 0x70, 0x6f, 0x6f, 0x6c)
o = msgp.AppendInt(o, z.pool)
// string "set"
o = append(o, 0xa3, 0x73, 0x65, 0x74)
o = msgp.AppendInt(o, z.set)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *listPathOptions) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "ID":
z.ID, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "ID")
return
}
case "Bucket":
z.Bucket, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bucket")
return
}
case "BaseDir":
z.BaseDir, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "BaseDir")
return
}
case "Prefix":
z.Prefix, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Prefix")
return
}
case "FilterPrefix":
z.FilterPrefix, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "FilterPrefix")
return
}
case "Marker":
z.Marker, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Marker")
return
}
case "Limit":
z.Limit, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Limit")
return
}
case "AskDisks":
z.AskDisks, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "AskDisks")
return
}
case "InclDeleted":
z.InclDeleted, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "InclDeleted")
return
}
case "Recursive":
z.Recursive, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Recursive")
return
}
case "Separator":
z.Separator, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Separator")
return
}
case "Create":
z.Create, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Create")
return
}
case "IncludeDirectories":
z.IncludeDirectories, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "IncludeDirectories")
return
}
case "Transient":
z.Transient, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Transient")
return
}
case "Versioned":
z.Versioned, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Versioned")
return
}
case "StopDiskAtLimit":
z.StopDiskAtLimit, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StopDiskAtLimit")
return
}
case "pool":
z.pool, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "pool")
return
}
case "set":
z.set, bts, err = msgp.ReadIntBytes(bts)
if err != nil {
err = msgp.WrapError(err, "set")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *listPathOptions) Msgsize() (s int) {
s = 3 + 3 + msgp.StringPrefixSize + len(z.ID) + 7 + msgp.StringPrefixSize + len(z.Bucket) + 8 + msgp.StringPrefixSize + len(z.BaseDir) + 7 + msgp.StringPrefixSize + len(z.Prefix) + 13 + msgp.StringPrefixSize + len(z.FilterPrefix) + 7 + msgp.StringPrefixSize + len(z.Marker) + 6 + msgp.IntSize + 9 + msgp.StringPrefixSize + len(z.AskDisks) + 12 + msgp.BoolSize + 10 + msgp.BoolSize + 10 + msgp.StringPrefixSize + len(z.Separator) + 7 + msgp.BoolSize + 19 + msgp.BoolSize + 10 + msgp.BoolSize + 10 + msgp.BoolSize + 16 + msgp.BoolSize + 5 + msgp.IntSize + 4 + msgp.IntSize
return
}

123
cmd/metacache-set_gen_test.go Normal file

@@ -0,0 +1,123 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshallistPathOptions(t *testing.T) {
v := listPathOptions{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsglistPathOptions(b *testing.B) {
v := listPathOptions{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsglistPathOptions(b *testing.B) {
v := listPathOptions{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshallistPathOptions(b *testing.B) {
v := listPathOptions{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodelistPathOptions(t *testing.T) {
v := listPathOptions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodelistPathOptions Msgsize() is inaccurate")
}
vn := listPathOptions{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodelistPathOptions(b *testing.B) {
v := listPathOptions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodelistPathOptions(b *testing.B) {
v := listPathOptions{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}

cmd/metrics-v2.go

@@ -42,6 +42,8 @@ import (
"github.com/prometheus/procfs"
)
+//go:generate msgp -file=$GOFILE -unexported -io=false
var (
nodeCollector *minioNodeCollector
clusterCollector *minioClusterCollector
@@ -328,7 +330,7 @@ type Metric struct {
// MetricsGroup are a group of metrics that are initialized together.
type MetricsGroup struct {
-metricsCache timedValue
+metricsCache timedValue `msg:"-"`
cacheInterval time.Duration
metricsGroupOpts MetricsGroupOpts
}
@@ -3989,6 +3991,7 @@ func collectMetric(metric Metric, labels []string, values []string, metricName s
}
}
+//msgp:ignore minioBucketCollector
type minioBucketCollector struct {
metricsGroups []*MetricsGroup
desc *prometheus.Desc
@@ -4024,6 +4027,7 @@ func (c *minioBucketCollector) Collect(out chan<- prometheus.Metric) {
wg.Wait()
}
+//msgp:ignore minioClusterCollector
type minioClusterCollector struct {
metricsGroups []*MetricsGroup
desc *prometheus.Desc
@@ -4083,6 +4087,8 @@ func ReportMetrics(ctx context.Context, metricsGroups []*MetricsGroup) <-chan Me
}
// minioNodeCollector is the Custom Collector
+//
+//msgp:ignore minioNodeCollector
type minioNodeCollector struct {
metricsGroups []*MetricsGroup
desc *prometheus.Desc
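The metrics file uses one extra generator flag compared to the others: `-io=false` skips the stream-based `EncodeMsg`/`DecodeMsg` methods, which is why `cmd/metrics-v2_gen.go` below contains only the byte-slice codecs (`MarshalMsg`/`UnmarshalMsg`/`Msgsize`). Side by side:

```
//go:generate msgp -file=$GOFILE -unexported            // streams + bytes: EncodeMsg/DecodeMsg and MarshalMsg/UnmarshalMsg
//go:generate msgp -file=$GOFILE -unexported -io=false  // bytes only: MarshalMsg/UnmarshalMsg/Msgsize
```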

644
cmd/metrics-v2_gen.go Normal file

@@ -0,0 +1,644 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// MarshalMsg implements msgp.Marshaler
func (z *Metric) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 6
// string "Description"
o = append(o, 0x86, 0xab, 0x44, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e)
o, err = z.Description.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Description")
return
}
// string "StaticLabels"
o = append(o, 0xac, 0x53, 0x74, 0x61, 0x74, 0x69, 0x63, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.StaticLabels)))
for za0001, za0002 := range z.StaticLabels {
o = msgp.AppendString(o, za0001)
o = msgp.AppendString(o, za0002)
}
// string "Value"
o = append(o, 0xa5, 0x56, 0x61, 0x6c, 0x75, 0x65)
o = msgp.AppendFloat64(o, z.Value)
// string "VariableLabels"
o = append(o, 0xae, 0x56, 0x61, 0x72, 0x69, 0x61, 0x62, 0x6c, 0x65, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73)
o = msgp.AppendMapHeader(o, uint32(len(z.VariableLabels)))
for za0003, za0004 := range z.VariableLabels {
o = msgp.AppendString(o, za0003)
o = msgp.AppendString(o, za0004)
}
// string "HistogramBucketLabel"
o = append(o, 0xb4, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4c, 0x61, 0x62, 0x65, 0x6c)
o = msgp.AppendString(o, z.HistogramBucketLabel)
// string "Histogram"
o = append(o, 0xa9, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d)
o = msgp.AppendMapHeader(o, uint32(len(z.Histogram)))
for za0005, za0006 := range z.Histogram {
o = msgp.AppendString(o, za0005)
o = msgp.AppendUint64(o, za0006)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Metric) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Description":
bts, err = z.Description.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Description")
return
}
case "StaticLabels":
var zb0002 uint32
zb0002, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StaticLabels")
return
}
if z.StaticLabels == nil {
z.StaticLabels = make(map[string]string, zb0002)
} else if len(z.StaticLabels) > 0 {
for key := range z.StaticLabels {
delete(z.StaticLabels, key)
}
}
for zb0002 > 0 {
var za0001 string
var za0002 string
zb0002--
za0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StaticLabels")
return
}
za0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "StaticLabels", za0001)
return
}
z.StaticLabels[za0001] = za0002
}
case "Value":
z.Value, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Value")
return
}
case "VariableLabels":
var zb0003 uint32
zb0003, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "VariableLabels")
return
}
if z.VariableLabels == nil {
z.VariableLabels = make(map[string]string, zb0003)
} else if len(z.VariableLabels) > 0 {
for key := range z.VariableLabels {
delete(z.VariableLabels, key)
}
}
for zb0003 > 0 {
var za0003 string
var za0004 string
zb0003--
za0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "VariableLabels")
return
}
za0004, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "VariableLabels", za0003)
return
}
z.VariableLabels[za0003] = za0004
}
case "HistogramBucketLabel":
z.HistogramBucketLabel, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "HistogramBucketLabel")
return
}
case "Histogram":
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Histogram")
return
}
if z.Histogram == nil {
z.Histogram = make(map[string]uint64, zb0004)
} else if len(z.Histogram) > 0 {
for key := range z.Histogram {
delete(z.Histogram, key)
}
}
for zb0004 > 0 {
var za0005 string
var za0006 uint64
zb0004--
za0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Histogram")
return
}
za0006, bts, err = msgp.ReadUint64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "Histogram", za0005)
return
}
z.Histogram[za0005] = za0006
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *Metric) Msgsize() (s int) {
s = 1 + 12 + z.Description.Msgsize() + 13 + msgp.MapHeaderSize
if z.StaticLabels != nil {
for za0001, za0002 := range z.StaticLabels {
_ = za0002
s += msgp.StringPrefixSize + len(za0001) + msgp.StringPrefixSize + len(za0002)
}
}
s += 6 + msgp.Float64Size + 15 + msgp.MapHeaderSize
if z.VariableLabels != nil {
for za0003, za0004 := range z.VariableLabels {
_ = za0004
s += msgp.StringPrefixSize + len(za0003) + msgp.StringPrefixSize + len(za0004)
}
}
s += 21 + msgp.StringPrefixSize + len(z.HistogramBucketLabel) + 10 + msgp.MapHeaderSize
if z.Histogram != nil {
for za0005, za0006 := range z.Histogram {
_ = za0006
s += msgp.StringPrefixSize + len(za0005) + msgp.Uint64Size
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *MetricDescription) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 5
// string "Namespace"
o = append(o, 0x85, 0xa9, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65)
o = msgp.AppendString(o, string(z.Namespace))
// string "Subsystem"
o = append(o, 0xa9, 0x53, 0x75, 0x62, 0x73, 0x79, 0x73, 0x74, 0x65, 0x6d)
o = msgp.AppendString(o, string(z.Subsystem))
// string "Name"
o = append(o, 0xa4, 0x4e, 0x61, 0x6d, 0x65)
o = msgp.AppendString(o, string(z.Name))
// string "Help"
o = append(o, 0xa4, 0x48, 0x65, 0x6c, 0x70)
o = msgp.AppendString(o, z.Help)
// string "Type"
o = append(o, 0xa4, 0x54, 0x79, 0x70, 0x65)
o = msgp.AppendString(o, string(z.Type))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricDescription) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Namespace":
{
var zb0002 string
zb0002, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Namespace")
return
}
z.Namespace = MetricNamespace(zb0002)
}
case "Subsystem":
{
var zb0003 string
zb0003, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Subsystem")
return
}
z.Subsystem = MetricSubsystem(zb0003)
}
case "Name":
{
var zb0004 string
zb0004, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Name")
return
}
z.Name = MetricName(zb0004)
}
case "Help":
z.Help, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Help")
return
}
case "Type":
{
var zb0005 string
zb0005, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Type")
return
}
z.Type = MetricType(zb0005)
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *MetricDescription) Msgsize() (s int) {
s = 1 + 10 + msgp.StringPrefixSize + len(string(z.Namespace)) + 10 + msgp.StringPrefixSize + len(string(z.Subsystem)) + 5 + msgp.StringPrefixSize + len(string(z.Name)) + 5 + msgp.StringPrefixSize + len(z.Help) + 5 + msgp.StringPrefixSize + len(string(z.Type))
return
}
// MarshalMsg implements msgp.Marshaler
func (z MetricName) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricName) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = MetricName(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z MetricName) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
// MarshalMsg implements msgp.Marshaler
func (z MetricNamespace) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricNamespace) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = MetricNamespace(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z MetricNamespace) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
// MarshalMsg implements msgp.Marshaler
func (z MetricSubsystem) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricSubsystem) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = MetricSubsystem(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z MetricSubsystem) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
// MarshalMsg implements msgp.Marshaler
func (z MetricType) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendString(o, string(z))
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricType) UnmarshalMsg(bts []byte) (o []byte, err error) {
{
var zb0001 string
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
(*z) = MetricType(zb0001)
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z MetricType) Msgsize() (s int) {
s = msgp.StringPrefixSize + len(string(z))
return
}
// MarshalMsg implements msgp.Marshaler
func (z *MetricsGroup) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "cacheInterval"
o = append(o, 0x82, 0xad, 0x63, 0x61, 0x63, 0x68, 0x65, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c)
o = msgp.AppendDuration(o, z.cacheInterval)
// string "metricsGroupOpts"
o = append(o, 0xb0, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x47, 0x72, 0x6f, 0x75, 0x70, 0x4f, 0x70, 0x74, 0x73)
o, err = z.metricsGroupOpts.MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "metricsGroupOpts")
return
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricsGroup) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "cacheInterval":
z.cacheInterval, bts, err = msgp.ReadDurationBytes(bts)
if err != nil {
err = msgp.WrapError(err, "cacheInterval")
return
}
case "metricsGroupOpts":
bts, err = z.metricsGroupOpts.UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "metricsGroupOpts")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *MetricsGroup) Msgsize() (s int) {
s = 1 + 14 + msgp.DurationSize + 17 + z.metricsGroupOpts.Msgsize()
return
}
// MarshalMsg implements msgp.Marshaler
func (z *MetricsGroupOpts) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 12
// string "dependGlobalObjectAPI"
o = append(o, 0x8c, 0xb5, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x50, 0x49)
o = msgp.AppendBool(o, z.dependGlobalObjectAPI)
// string "dependGlobalAuthNPlugin"
o = append(o, 0xb7, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x41, 0x75, 0x74, 0x68, 0x4e, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e)
o = msgp.AppendBool(o, z.dependGlobalAuthNPlugin)
// string "dependGlobalSiteReplicationSys"
o = append(o, 0xbe, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x53, 0x69, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x73)
o = msgp.AppendBool(o, z.dependGlobalSiteReplicationSys)
// string "dependGlobalNotificationSys"
o = append(o, 0xbb, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4e, 0x6f, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x79, 0x73)
o = msgp.AppendBool(o, z.dependGlobalNotificationSys)
// string "dependGlobalKMS"
o = append(o, 0xaf, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4b, 0x4d, 0x53)
o = msgp.AppendBool(o, z.dependGlobalKMS)
// string "bucketOnly"
o = append(o, 0xaa, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x4f, 0x6e, 0x6c, 0x79)
o = msgp.AppendBool(o, z.bucketOnly)
// string "dependGlobalLambdaTargetList"
o = append(o, 0xbc, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4c, 0x61, 0x6d, 0x62, 0x64, 0x61, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4c, 0x69, 0x73, 0x74)
o = msgp.AppendBool(o, z.dependGlobalLambdaTargetList)
// string "dependGlobalIAMSys"
o = append(o, 0xb2, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x49, 0x41, 0x4d, 0x53, 0x79, 0x73)
o = msgp.AppendBool(o, z.dependGlobalIAMSys)
// string "dependGlobalLockServer"
o = append(o, 0xb6, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x4c, 0x6f, 0x63, 0x6b, 0x53, 0x65, 0x72, 0x76, 0x65, 0x72)
o = msgp.AppendBool(o, z.dependGlobalLockServer)
// string "dependGlobalIsDistErasure"
o = append(o, 0xb9, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x49, 0x73, 0x44, 0x69, 0x73, 0x74, 0x45, 0x72, 0x61, 0x73, 0x75, 0x72, 0x65)
o = msgp.AppendBool(o, z.dependGlobalIsDistErasure)
// string "dependGlobalBackgroundHealState"
o = append(o, 0xbf, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x47, 0x6c, 0x6f, 0x62, 0x61, 0x6c, 0x42, 0x61, 0x63, 0x6b, 0x67, 0x72, 0x6f, 0x75, 0x6e, 0x64, 0x48, 0x65, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65)
o = msgp.AppendBool(o, z.dependGlobalBackgroundHealState)
// string "dependBucketTargetSys"
o = append(o, 0xb5, 0x64, 0x65, 0x70, 0x65, 0x6e, 0x64, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x53, 0x79, 0x73)
o = msgp.AppendBool(o, z.dependBucketTargetSys)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *MetricsGroupOpts) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "dependGlobalObjectAPI":
z.dependGlobalObjectAPI, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalObjectAPI")
return
}
case "dependGlobalAuthNPlugin":
z.dependGlobalAuthNPlugin, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalAuthNPlugin")
return
}
case "dependGlobalSiteReplicationSys":
z.dependGlobalSiteReplicationSys, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalSiteReplicationSys")
return
}
case "dependGlobalNotificationSys":
z.dependGlobalNotificationSys, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalNotificationSys")
return
}
case "dependGlobalKMS":
z.dependGlobalKMS, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalKMS")
return
}
case "bucketOnly":
z.bucketOnly, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "bucketOnly")
return
}
case "dependGlobalLambdaTargetList":
z.dependGlobalLambdaTargetList, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalLambdaTargetList")
return
}
case "dependGlobalIAMSys":
z.dependGlobalIAMSys, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalIAMSys")
return
}
case "dependGlobalLockServer":
z.dependGlobalLockServer, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalLockServer")
return
}
case "dependGlobalIsDistErasure":
z.dependGlobalIsDistErasure, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalIsDistErasure")
return
}
case "dependGlobalBackgroundHealState":
z.dependGlobalBackgroundHealState, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependGlobalBackgroundHealState")
return
}
case "dependBucketTargetSys":
z.dependBucketTargetSys, bts, err = msgp.ReadBoolBytes(bts)
if err != nil {
err = msgp.WrapError(err, "dependBucketTargetSys")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *MetricsGroupOpts) Msgsize() (s int) {
s = 1 + 22 + msgp.BoolSize + 24 + msgp.BoolSize + 31 + msgp.BoolSize + 28 + msgp.BoolSize + 16 + msgp.BoolSize + 11 + msgp.BoolSize + 29 + msgp.BoolSize + 19 + msgp.BoolSize + 23 + msgp.BoolSize + 26 + msgp.BoolSize + 32 + msgp.BoolSize + 22 + msgp.BoolSize
return
}
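
The generated `Msgsize` returns an upper bound on the encoded size, so callers can pre-allocate and marshal without reallocation. A minimal round-trip sketch using only the generated methods above (illustrative, not part of the diff):

```
// Round-trip through the generated msgp code (sketch).
opts := MetricsGroupOpts{dependGlobalObjectAPI: true}

// Msgsize is an upper bound, so this buffer never grows during MarshalMsg.
buf := make([]byte, 0, opts.Msgsize())
buf, err := opts.MarshalMsg(buf)
if err != nil {
	return err
}

var decoded MetricsGroupOpts
if _, err := decoded.UnmarshalMsg(buf); err != nil {
	return err
}
```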

cmd/metrics-v2_gen_test.go (new file, 241 lines)
View File

@ -0,0 +1,241 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalMetric(t *testing.T) {
v := Metric{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMetric(b *testing.B) {
v := Metric{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMetric(b *testing.B) {
v := Metric{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMetric(b *testing.B) {
v := Metric{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMetricDescription(t *testing.T) {
v := MetricDescription{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMetricDescription(b *testing.B) {
v := MetricDescription{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMetricDescription(b *testing.B) {
v := MetricDescription{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMetricDescription(b *testing.B) {
v := MetricDescription{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMetricsGroup(t *testing.T) {
v := MetricsGroup{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMetricsGroup(b *testing.B) {
v := MetricsGroup{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMetricsGroup(b *testing.B) {
v := MetricsGroup{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMetricsGroup(b *testing.B) {
v := MetricsGroup{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalMetricsGroupOpts(t *testing.T) {
v := MetricsGroupOpts{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgMetricsGroupOpts(b *testing.B) {
v := MetricsGroupOpts{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgMetricsGroupOpts(b *testing.B) {
v := MetricsGroupOpts{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalMetricsGroupOpts(b *testing.B) {
v := MetricsGroupOpts{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}

View File

@ -18,7 +18,6 @@
package cmd
import (
"bytes"
"context"
"encoding/gob"
"encoding/hex"
@ -28,7 +27,6 @@ import (
"io"
"net/url"
"strconv"
"strings"
"sync/atomic"
"time"
@ -38,9 +36,7 @@ import (
xhttp "github.com/minio/minio/internal/http"
"github.com/minio/minio/internal/logger"
"github.com/minio/minio/internal/rest"
"github.com/minio/pkg/v2/logger/message/log"
xnet "github.com/minio/pkg/v2/net"
"github.com/tinylib/msgp/msgp"
)
// client to talk to peer Nodes.
@ -154,111 +150,65 @@ func (client *peerRESTClient) Close() error {
// GetLocks - fetch older locks for a remote node.
func (client *peerRESTClient) GetLocks() (lockMap map[string][]lockRequesterInfo, err error) {
respBody, err := client.call(peerRESTMethodGetLocks, nil, nil, -1)
if err != nil {
return
resp, err := getLocksRPC.Call(context.Background(), client.gridConn(), grid.NewMSS())
if err != nil || resp == nil {
return nil, err
}
lockMap = map[string][]lockRequesterInfo{}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&lockMap)
return lockMap, err
return *resp, nil
}
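
Every converted request/response method in this file follows the same shape: build a typed payload, call through the peer's grid connection, and dereference the typed result. A hedged sketch of that pattern (the function name is illustrative; the handler and constants are the ones declared in this change):

```
// Typed call pattern shared by the conversions in this file (sketch).
func exampleBucketStats(ctx context.Context, client *peerRESTClient, bucket string) (BucketStats, error) {
	resp, err := getBucketStatsRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{
		peerRESTBucket: bucket, // parameters travel as string key/value pairs
	}))
	if err != nil || resp == nil {
		return BucketStats{}, err
	}
	return *resp, nil
}
```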
// LocalStorageInfo - fetch server information for a remote node.
func (client *peerRESTClient) LocalStorageInfo(metrics bool) (info StorageInfo, err error) {
values := make(url.Values)
values.Set(peerRESTMetrics, strconv.FormatBool(metrics))
respBody, err := client.call(peerRESTMethodLocalStorageInfo, values, nil, -1)
if err != nil {
return
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
resp, err := localStorageInfoRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
peerRESTMetrics: strconv.FormatBool(metrics),
}))
return resp.ValueOrZero(), err
}
// ServerInfo - fetch server information for a remote node.
func (client *peerRESTClient) ServerInfo(metrics bool) (info madmin.ServerProperties, err error) {
values := make(url.Values)
values.Set(peerRESTMetrics, strconv.FormatBool(metrics))
respBody, err := client.call(peerRESTMethodServerInfo, values, nil, -1)
if err != nil {
return
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
resp, err := serverInfoRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{peerRESTMetrics: strconv.FormatBool(metrics)}))
return resp.ValueOrZero(), err
}
// GetCPUs - fetch CPU information for a remote node.
func (client *peerRESTClient) GetCPUs(ctx context.Context) (info madmin.CPUs, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodCPUInfo, nil, nil, -1)
if err != nil {
return
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
resp, err := getCPUsHandler.Call(ctx, client.gridConn(), grid.NewMSS())
return resp.ValueOrZero(), err
}
// GetNetInfo - fetch network information for a remote node.
func (client *peerRESTClient) GetNetInfo(ctx context.Context) (info madmin.NetInfo, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodNetHwInfo, nil, nil, -1)
if err != nil {
return
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
resp, err := getNetInfoRPC.Call(ctx, client.gridConn(), grid.NewMSS())
return resp.ValueOrZero(), err
}
// GetPartitions - fetch disk partition information for a remote node.
func (client *peerRESTClient) GetPartitions(ctx context.Context) (info madmin.Partitions, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodDiskHwInfo, nil, nil, -1)
if err != nil {
return
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
resp, err := getPartitionsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
return resp.ValueOrZero(), err
}
// GetOSInfo - fetch OS information for a remote node.
func (client *peerRESTClient) GetOSInfo(ctx context.Context) (info madmin.OSInfo, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodOsInfo, nil, nil, -1)
if err != nil {
return
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
resp, err := getOSInfoRPC.Call(ctx, client.gridConn(), grid.NewMSS())
return resp.ValueOrZero(), err
}
// GetSELinuxInfo - fetch SELinux information for a remote node.
func (client *peerRESTClient) GetSELinuxInfo(ctx context.Context) (info madmin.SysServices, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodSysServices, nil, nil, -1)
if err != nil {
return
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
resp, err := getSysServicesRPC.Call(ctx, client.gridConn(), grid.NewMSS())
return resp.ValueOrZero(), err
}
// GetSysConfig - fetch sys config for a remote node.
func (client *peerRESTClient) GetSysConfig(ctx context.Context) (info madmin.SysConfig, err error) {
sent := time.Now()
respBody, err := client.callWithContext(ctx, peerRESTMethodSysConfig, nil, nil, -1)
if err != nil {
return
}
roundtrip := int32(time.Since(sent).Milliseconds())
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
resp, err := getSysConfigRPC.Call(ctx, client.gridConn(), grid.NewMSS())
info = resp.ValueOrZero()
if ti, ok := info.Config["time-info"].(madmin.TimeInfo); ok {
ti.RoundtripDuration = roundtrip
rt := int32(time.Since(sent).Milliseconds())
ti.RoundtripDuration = rt
info.Config["time-info"] = ti
}
return info, err
@ -266,24 +216,14 @@ func (client *peerRESTClient) GetSysConfig(ctx context.Context) (info madmin.Sys
// GetSysErrors - fetch sys errors for a remote node.
func (client *peerRESTClient) GetSysErrors(ctx context.Context) (info madmin.SysErrors, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodSysErrors, nil, nil, -1)
if err != nil {
return
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
resp, err := getSysErrorsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
return resp.ValueOrZero(), err
}
// GetMemInfo - fetch memory information for a remote node.
func (client *peerRESTClient) GetMemInfo(ctx context.Context) (info madmin.MemInfo, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodMemInfo, nil, nil, -1)
if err != nil {
return
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
resp, err := getMemInfoRPC.Call(ctx, client.gridConn(), grid.NewMSS())
return resp.ValueOrZero(), err
}
// GetMetrics - fetch metrics from a remote node.
@ -298,52 +238,34 @@ func (client *peerRESTClient) GetMetrics(ctx context.Context, t madmin.MetricTyp
}
values.Set(peerRESTJobID, opts.jobID)
values.Set(peerRESTDepID, opts.depID)
respBody, err := client.callWithContext(ctx, peerRESTMethodMetrics, values, nil, -1)
if err != nil {
return
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
v, err := getMetricsRPC.Call(ctx, client.gridConn(), grid.NewURLValuesWith(values))
return v.ValueOrZero(), err
}
func (client *peerRESTClient) GetResourceMetrics(ctx context.Context) (<-chan Metric, error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodResourceMetrics, nil, nil, -1)
st, err := getResourceMetricsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
if err != nil {
return nil, err
}
dec := gob.NewDecoder(respBody)
ch := make(chan Metric)
ch := make(chan Metric, 1)
go func(ch chan<- Metric) {
defer func() {
xhttp.DrainBody(respBody)
close(ch)
}()
for {
var metric Metric
if err := dec.Decode(&metric); err != nil {
return
}
defer close(ch)
st.Results(func(metric *Metric) error {
select {
case <-ctx.Done():
return
case ch <- metric:
return ctx.Err()
case ch <- *metric:
return nil
}
}
})
}(ch)
return ch, nil
}
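
For callers, the returned channel behaves like the old gob-decoding loop: it closes when the stream ends or the context is canceled. A short usage sketch, where `process` is a placeholder for the caller's logic:

```
// Consuming the streamed resource metrics (sketch).
ch, err := client.GetResourceMetrics(ctx)
if err != nil {
	return err
}
for m := range ch {
	process(m) // placeholder; the channel closes on stream end or cancellation
}
```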
// GetProcInfo - fetch MinIO process information for a remote node.
func (client *peerRESTClient) GetProcInfo(ctx context.Context) (info madmin.ProcInfo, err error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodProcInfo, nil, nil, -1)
if err != nil {
return
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&info)
return info, err
resp, err := getProcInfoRPC.Call(ctx, client.gridConn(), grid.NewMSS())
return resp.ValueOrZero(), err
}
// StartProfiling - Issues profiling command on the peer node.
@ -371,51 +293,36 @@ func (client *peerRESTClient) DownloadProfileData() (data map[string][]byte, err
// GetBucketStats - load bucket statistics
func (client *peerRESTClient) GetBucketStats(bucket string) (BucketStats, error) {
values := make(url.Values)
values.Set(peerRESTBucket, bucket)
respBody, err := client.call(peerRESTMethodGetBucketStats, values, nil, -1)
if err != nil {
resp, err := getBucketStatsRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
peerRESTBucket: bucket,
}))
if err != nil || resp == nil {
return BucketStats{}, err
}
var bs BucketStats
defer xhttp.DrainBody(respBody)
return bs, msgp.Decode(respBody, &bs)
return *resp, nil
}
// GetSRMetrics- loads site replication metrics, optionally for a specific bucket
// GetSRMetrics loads site replication metrics, optionally for a specific bucket
func (client *peerRESTClient) GetSRMetrics() (SRMetricsSummary, error) {
values := make(url.Values)
respBody, err := client.call(peerRESTMethodGetSRMetrics, values, nil, -1)
if err != nil {
resp, err := getSRMetricsRPC.Call(context.Background(), client.gridConn(), grid.NewMSS())
if err != nil || resp == nil {
return SRMetricsSummary{}, err
}
var sm SRMetricsSummary
defer xhttp.DrainBody(respBody)
return sm, msgp.Decode(respBody, &sm)
return *resp, nil
}
// GetAllBucketStats - load replication stats for all buckets
func (client *peerRESTClient) GetAllBucketStats() (BucketStatsMap, error) {
values := make(url.Values)
respBody, err := client.call(peerRESTMethodGetAllBucketStats, values, nil, -1)
if err != nil {
resp, err := getAllBucketStatsRPC.Call(context.Background(), client.gridConn(), grid.NewMSS())
if err != nil || resp == nil {
return BucketStatsMap{}, err
}
bsMap := BucketStatsMap{}
defer xhttp.DrainBody(respBody)
return bsMap, msgp.Decode(respBody, &bsMap)
return *resp, nil
}
// LoadBucketMetadata - load bucket metadata
func (client *peerRESTClient) LoadBucketMetadata(bucket string) error {
conn := client.gridConn()
if conn == nil {
return nil
}
_, err := loadBucketMetadataHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
_, err := loadBucketMetadataRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
peerRESTBucket: bucket,
}))
return err
@ -423,11 +330,7 @@ func (client *peerRESTClient) LoadBucketMetadata(bucket string) error {
// DeleteBucketMetadata - Delete bucket metadata
func (client *peerRESTClient) DeleteBucketMetadata(bucket string) error {
conn := client.gridConn()
if conn == nil {
return nil
}
_, err := deleteBucketMetadataHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
_, err := deleteBucketMetadataRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
peerRESTBucket: bucket,
}))
return err
@ -435,12 +338,7 @@ func (client *peerRESTClient) DeleteBucketMetadata(bucket string) error {
// DeletePolicy - delete a specific canned policy.
func (client *peerRESTClient) DeletePolicy(policyName string) (err error) {
conn := client.gridConn()
if conn == nil {
return nil
}
_, err = deletePolicyHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
_, err = deletePolicyRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
peerRESTPolicy: policyName,
}))
return err
@ -448,12 +346,7 @@ func (client *peerRESTClient) DeletePolicy(policyName string) (err error) {
// LoadPolicy - reload a specific canned policy.
func (client *peerRESTClient) LoadPolicy(policyName string) (err error) {
conn := client.gridConn()
if conn == nil {
return nil
}
_, err = loadPolicyHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
_, err = loadPolicyRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
peerRESTPolicy: policyName,
}))
return err
@ -461,12 +354,7 @@ func (client *peerRESTClient) LoadPolicy(policyName string) (err error) {
// LoadPolicyMapping - reload a specific policy mapping
func (client *peerRESTClient) LoadPolicyMapping(userOrGroup string, userType IAMUserType, isGroup bool) error {
conn := client.gridConn()
if conn == nil {
return nil
}
_, err := loadPolicyMappingHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
_, err := loadPolicyMappingRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
peerRESTUserOrGroup: userOrGroup,
peerRESTUserType: strconv.Itoa(int(userType)),
peerRESTIsGroup: strconv.FormatBool(isGroup),
@ -476,12 +364,7 @@ func (client *peerRESTClient) LoadPolicyMapping(userOrGroup string, userType IAM
// DeleteUser - delete a specific user.
func (client *peerRESTClient) DeleteUser(accessKey string) (err error) {
conn := client.gridConn()
if conn == nil {
return nil
}
_, err = deleteUserHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
_, err = deleteUserRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
peerRESTUser: accessKey,
}))
return err
@ -489,12 +372,7 @@ func (client *peerRESTClient) DeleteUser(accessKey string) (err error) {
// DeleteServiceAccount - delete a specific service account.
func (client *peerRESTClient) DeleteServiceAccount(accessKey string) (err error) {
conn := client.gridConn()
if conn == nil {
return nil
}
_, err = deleteSvcActHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
_, err = deleteSvcActRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
peerRESTUser: accessKey,
}))
return err
@ -502,12 +380,7 @@ func (client *peerRESTClient) DeleteServiceAccount(accessKey string) (err error)
// LoadUser - reload a specific user.
func (client *peerRESTClient) LoadUser(accessKey string, temp bool) (err error) {
conn := client.gridConn()
if conn == nil {
return nil
}
_, err = loadUserHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
_, err = loadUserRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
peerRESTUser: accessKey,
peerRESTUserTemp: strconv.FormatBool(temp),
}))
@ -516,12 +389,7 @@ func (client *peerRESTClient) LoadUser(accessKey string, temp bool) (err error)
// LoadServiceAccount - reload a specific service account.
func (client *peerRESTClient) LoadServiceAccount(accessKey string) (err error) {
conn := client.gridConn()
if conn == nil {
return nil
}
_, err = loadSvcActHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
_, err = loadSvcActRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
peerRESTUser: accessKey,
}))
return err
@ -529,12 +397,7 @@ func (client *peerRESTClient) LoadServiceAccount(accessKey string) (err error) {
// LoadGroup - send load group command to peers.
func (client *peerRESTClient) LoadGroup(group string) error {
conn := client.gridConn()
if conn == nil {
return nil
}
_, err := loadGroupHandler.Call(context.Background(), conn, grid.NewMSSWith(map[string]string{
_, err := loadGroupRPC.Call(context.Background(), client.gridConn(), grid.NewMSSWith(map[string]string{
peerRESTGroup: group,
}))
return err
@ -546,7 +409,7 @@ func (client *peerRESTClient) ReloadSiteReplicationConfig(ctx context.Context) e
return nil
}
_, err := reloadSiteReplicationConfigHandler.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
_, err := reloadSiteReplicationConfigRPC.Call(ctx, conn, grid.NewMSS())
return err
}
@ -577,28 +440,17 @@ func (client *peerRESTClient) CommitBinary(ctx context.Context) error {
// SignalService - sends signal to peer nodes.
func (client *peerRESTClient) SignalService(sig serviceSignal, subSys string, dryRun bool) error {
values := make(url.Values)
values := grid.NewMSS()
values.Set(peerRESTSignal, strconv.Itoa(int(sig)))
values.Set(peerRESTDryRun, strconv.FormatBool(dryRun))
values.Set(peerRESTSubSys, subSys)
respBody, err := client.call(peerRESTMethodSignalService, values, nil, -1)
if err != nil {
return err
}
defer xhttp.DrainBody(respBody)
return nil
_, err := signalServiceRPC.Call(context.Background(), client.gridConn(), values)
return err
}
func (client *peerRESTClient) BackgroundHealStatus() (madmin.BgHealState, error) {
respBody, err := client.call(peerRESTMethodBackgroundHealStatus, nil, nil, -1)
if err != nil {
return madmin.BgHealState{}, err
}
defer xhttp.DrainBody(respBody)
state := madmin.BgHealState{}
err = gob.NewDecoder(respBody).Decode(&state)
return state, err
resp, err := getBackgroundHealStatusRPC.Call(context.Background(), client.gridConn(), grid.NewMSS())
return resp.ValueOrZero(), err
}
// GetMetacacheListing - get a new or existing metacache.
@ -607,19 +459,7 @@ func (client *peerRESTClient) GetMetacacheListing(ctx context.Context, o listPat
resp := localMetacacheMgr.getBucket(ctx, o.Bucket).findCache(o)
return &resp, nil
}
var reader bytes.Buffer
err := gob.NewEncoder(&reader).Encode(o)
if err != nil {
return nil, err
}
respBody, err := client.callWithContext(ctx, peerRESTMethodGetMetacacheListing, nil, &reader, int64(reader.Len()))
if err != nil {
return nil, err
}
var resp metacache
defer xhttp.DrainBody(respBody)
return &resp, msgp.Decode(respBody, &resp)
return getMetacacheListingRPC.Call(ctx, client.gridConn(), &o)
}
// UpdateMetacacheListing - update an existing metacache; it will unconditionally be updated to the new state.
@ -627,17 +467,11 @@ func (client *peerRESTClient) UpdateMetacacheListing(ctx context.Context, m meta
if client == nil {
return localMetacacheMgr.updateCacheEntry(m)
}
b, err := m.MarshalMsg(nil)
if err != nil {
return m, err
resp, err := updateMetacacheListingRPC.Call(ctx, client.gridConn(), &m)
if err != nil || resp == nil {
return metacache{}, err
}
respBody, err := client.callWithContext(ctx, peerRESTMethodUpdateMetacacheListing, nil, bytes.NewBuffer(b), int64(len(b)))
if err != nil {
return m, err
}
defer xhttp.DrainBody(respBody)
var resp metacache
return resp, msgp.Decode(respBody, &resp)
return *resp, nil
}
func (client *peerRESTClient) ReloadPoolMeta(ctx context.Context) error {
@ -645,7 +479,7 @@ func (client *peerRESTClient) ReloadPoolMeta(ctx context.Context) error {
if conn == nil {
return nil
}
_, err := reloadPoolMetaHandler.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
_, err := reloadPoolMetaRPC.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
return err
}
@ -654,7 +488,7 @@ func (client *peerRESTClient) StopRebalance(ctx context.Context) error {
if conn == nil {
return nil
}
_, err := stopRebalanceHandler.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
_, err := stopRebalanceRPC.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
return err
}
@ -663,7 +497,7 @@ func (client *peerRESTClient) LoadRebalanceMeta(ctx context.Context, startRebala
if conn == nil {
return nil
}
_, err := loadRebalanceMetaHandler.Call(ctx, conn, grid.NewMSSWith(map[string]string{
_, err := loadRebalanceMetaRPC.Call(ctx, conn, grid.NewMSSWith(map[string]string{
peerRESTStartRebalance: strconv.FormatBool(startRebalance),
}))
return err
@ -674,7 +508,7 @@ func (client *peerRESTClient) LoadTransitionTierConfig(ctx context.Context) erro
if conn == nil {
return nil
}
_, err := loadTransitionTierConfigHandler.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
_, err := loadTransitionTierConfigRPC.Call(ctx, conn, grid.NewMSSWith(map[string]string{}))
return err
}
@ -711,7 +545,7 @@ func (client *peerRESTClient) doListen(ctx context.Context, listenCh chan<- []by
if conn == nil {
return
}
st, err := listenHandler.Call(ctx, conn, grid.NewURLValuesWith(v))
st, err := listenRPC.Call(ctx, conn, grid.NewURLValuesWith(v))
if err != nil {
return
}
@ -759,48 +593,31 @@ func (client *peerRESTClient) Trace(ctx context.Context, traceCh chan<- []byte,
}()
}
func (client *peerRESTClient) doConsoleLog(logCh chan log.Info, doneCh <-chan struct{}) {
// To cancel the REST request in case doneCh gets closed.
ctx, cancel := context.WithCancel(GlobalContext)
cancelCh := make(chan struct{})
defer close(cancelCh)
go func() {
select {
case <-doneCh:
case <-cancelCh:
// There was an error in the REST request.
}
cancel()
}()
respBody, err := client.callWithContext(ctx, peerRESTMethodLog, nil, nil, -1)
defer xhttp.DrainBody(respBody)
func (client *peerRESTClient) doConsoleLog(ctx context.Context, kind madmin.LogMask, logCh chan<- []byte) {
st, err := consoleLogRPC.Call(ctx, client.gridConn(), grid.NewMSSWith(map[string]string{
peerRESTLogMask: strconv.Itoa(int(kind)),
}))
if err != nil {
return
}
dec := gob.NewDecoder(respBody)
for {
var lg log.Info
if err = dec.Decode(&lg); err != nil {
break
}
st.Results(func(b *grid.Bytes) error {
select {
case logCh <- lg:
case logCh <- *b:
default:
consoleLogRPC.PutResponse(b)
// Do not block on slow receivers.
}
}
return nil
})
}
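
The `default` branch above is the subtle part: any response that is not forwarded must be handed back via `PutResponse` so the handler can recycle its buffers. A condensed sketch of that contract, with `st` as the stream returned by `consoleLogRPC.Call` and `out` as an assumed destination channel:

```
// Drain a grid stream without blocking on slow receivers (sketch).
st.Results(func(b *grid.Bytes) error {
	select {
	case out <- *b: // forward the pre-encoded payload
	default:
		consoleLogRPC.PutResponse(b) // recycle anything we drop
	}
	return nil
})
```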
// ConsoleLog - sends a request to peer nodes to get console logs
func (client *peerRESTClient) ConsoleLog(logCh chan log.Info, doneCh <-chan struct{}) {
func (client *peerRESTClient) ConsoleLog(ctx context.Context, kind madmin.LogMask, logCh chan<- []byte) {
go func() {
for {
client.doConsoleLog(logCh, doneCh)
client.doConsoleLog(ctx, kind, logCh)
select {
case <-doneCh:
case <-ctx.Done():
return
default:
// There was an error in the request; retry after some time, as the peer is probably down.
@ -838,71 +655,53 @@ func newPeerRestClients(endpoints EndpointServerPools) (remote, all []*peerRESTC
// MonitorBandwidth - send http trace request to peer nodes
func (client *peerRESTClient) MonitorBandwidth(ctx context.Context, buckets []string) (*bandwidth.BucketBandwidthReport, error) {
values := make(url.Values)
values.Set(peerRESTBuckets, strings.Join(buckets, ","))
respBody, err := client.callWithContext(ctx, peerRESTMethodGetBandwidth, values, nil, -1)
if err != nil {
return nil, err
}
defer xhttp.DrainBody(respBody)
dec := gob.NewDecoder(respBody)
var bandwidthReport bandwidth.BucketBandwidthReport
err = dec.Decode(&bandwidthReport)
return &bandwidthReport, err
values := grid.NewURLValuesWith(map[string][]string{
peerRESTBuckets: buckets,
})
return getBandwidthRPC.Call(ctx, client.gridConn(), values)
}
func (client *peerRESTClient) GetPeerMetrics(ctx context.Context) (<-chan Metric, error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodGetPeerMetrics, nil, nil, -1)
resp, err := getPeerMetricsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
if err != nil {
return nil, err
}
dec := gob.NewDecoder(respBody)
ch := make(chan Metric)
go func(ch chan<- Metric) {
defer func() {
xhttp.DrainBody(respBody)
close(ch)
}()
for {
var metric Metric
if err := dec.Decode(&metric); err != nil {
return
go func() {
defer close(ch)
for _, m := range resp.Value() {
if m == nil {
continue
}
select {
case <-ctx.Done():
return
case ch <- metric:
case ch <- *m:
}
}
}(ch)
}()
return ch, nil
}
func (client *peerRESTClient) GetPeerBucketMetrics(ctx context.Context) (<-chan Metric, error) {
respBody, err := client.callWithContext(ctx, peerRESTMethodGetPeerBucketMetrics, nil, nil, -1)
resp, err := getPeerBucketMetricsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
if err != nil {
return nil, err
}
dec := gob.NewDecoder(respBody)
ch := make(chan Metric)
go func(ch chan<- Metric) {
defer func() {
xhttp.DrainBody(respBody)
close(ch)
}()
for {
var metric Metric
if err := dec.Decode(&metric); err != nil {
return
go func() {
defer close(ch)
for _, m := range resp.Value() {
if m == nil {
continue
}
select {
case <-ctx.Done():
return
case ch <- metric:
case ch <- *m:
}
}
}(ch)
}()
return ch, nil
}
@ -966,18 +765,11 @@ func (client *peerRESTClient) DriveSpeedTest(ctx context.Context, opts madmin.Dr
}
func (client *peerRESTClient) GetLastDayTierStats(ctx context.Context) (DailyAllTierStats, error) {
var result map[string]lastDayTierStats
respBody, err := client.callWithContext(context.Background(), peerRESTMethodGetLastDayTierStats, nil, nil, -1)
if err != nil {
return result, err
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&result)
if err != nil {
resp, err := getLastDayTierStatsRPC.Call(ctx, client.gridConn(), grid.NewMSS())
if err != nil || resp == nil {
return DailyAllTierStats{}, err
}
return DailyAllTierStats(result), nil
return *resp, nil
}
// DevNull - Used by netperf to pump data to peer

View File

@ -18,50 +18,29 @@
package cmd
const (
peerRESTVersion = "v37" // Add 'metrics' option for ServerInfo
peerRESTVersion = "v38" // Convert RPC calls
peerRESTVersionPrefix = SlashSeparator + peerRESTVersion
peerRESTPrefix = minioReservedBucketPath + "/peer"
peerRESTPath = peerRESTPrefix + peerRESTVersionPrefix
)
const (
peerRESTMethodHealth = "/health"
peerRESTMethodServerInfo = "/serverinfo"
peerRESTMethodLocalStorageInfo = "/localstorageinfo"
peerRESTMethodCPUInfo = "/cpuinfo"
peerRESTMethodDiskHwInfo = "/diskhwinfo"
peerRESTMethodNetHwInfo = "/nethwinfo"
peerRESTMethodOsInfo = "/osinfo"
peerRESTMethodMemInfo = "/meminfo"
peerRESTMethodProcInfo = "/procinfo"
peerRESTMethodSysErrors = "/syserrors"
peerRESTMethodSysServices = "/sysservices"
peerRESTMethodSysConfig = "/sysconfig"
peerRESTMethodGetBucketStats = "/getbucketstats"
peerRESTMethodGetAllBucketStats = "/getallbucketstats"
peerRESTMethodVerifyBinary = "/verifybinary"
peerRESTMethodCommitBinary = "/commitbinary"
peerRESTMethodSignalService = "/signalservice"
peerRESTMethodBackgroundHealStatus = "/backgroundhealstatus"
peerRESTMethodGetLocks = "/getlocks"
peerRESTMethodStartProfiling = "/startprofiling"
peerRESTMethodDownloadProfilingData = "/downloadprofilingdata"
peerRESTMethodLog = "/log"
peerRESTMethodGetBandwidth = "/bandwidth"
peerRESTMethodGetMetacacheListing = "/getmetacache"
peerRESTMethodUpdateMetacacheListing = "/updatemetacache"
peerRESTMethodGetPeerMetrics = "/peermetrics"
peerRESTMethodGetPeerBucketMetrics = "/peerbucketmetrics"
peerRESTMethodSpeedTest = "/speedtest"
peerRESTMethodDriveSpeedTest = "/drivespeedtest"
peerRESTMethodStopRebalance = "/stoprebalance"
peerRESTMethodGetLastDayTierStats = "/getlastdaytierstats"
peerRESTMethodDevNull = "/devnull"
peerRESTMethodNetperf = "/netperf"
peerRESTMethodMetrics = "/metrics"
peerRESTMethodResourceMetrics = "/resourcemetrics"
peerRESTMethodGetReplicationMRF = "/getreplicationmrf"
peerRESTMethodGetSRMetrics = "/getsrmetrics"
peerRESTMethodHealth = "/health"
peerRESTMethodVerifyBinary = "/verifybinary"
peerRESTMethodCommitBinary = "/commitbinary"
peerRESTMethodSignalService = "/signalservice"
peerRESTMethodBackgroundHealStatus = "/backgroundhealstatus"
peerRESTMethodGetLocks = "/getlocks"
peerRESTMethodStartProfiling = "/startprofiling"
peerRESTMethodDownloadProfilingData = "/downloadprofilingdata"
peerRESTMethodGetBandwidth = "/bandwidth"
peerRESTMethodSpeedTest = "/speedtest"
peerRESTMethodDriveSpeedTest = "/drivespeedtest"
peerRESTMethodReloadSiteReplicationConfig = "/reloadsitereplicationconfig"
peerRESTMethodGetLastDayTierStats = "/getlastdaytierstats"
peerRESTMethodDevNull = "/devnull"
peerRESTMethodNetperf = "/netperf"
peerRESTMethodGetReplicationMRF = "/getreplicationmrf"
)
const (
@ -99,4 +78,5 @@ const (
peerRESTListenPrefix = "prefix"
peerRESTListenSuffix = "suffix"
peerRESTListenEvents = "events"
peerRESTLogMask = "log-mask"
)

View File

@ -37,6 +37,7 @@ import (
"github.com/dustin/go-humanize"
"github.com/klauspost/compress/zstd"
"github.com/minio/madmin-go/v3"
"github.com/minio/minio/internal/bucket/bandwidth"
b "github.com/minio/minio/internal/bucket/bandwidth"
"github.com/minio/minio/internal/event"
"github.com/minio/minio/internal/grid"
@ -45,34 +46,87 @@ import (
"github.com/minio/minio/internal/pubsub"
"github.com/minio/mux"
"github.com/minio/pkg/v2/logger/message/log"
"github.com/tinylib/msgp/msgp"
)
// To abstract a node over the network.
type peerRESTServer struct{}
// GetLocksHandler - returns the list of older locks from the server.
func (s *peerRESTServer) GetLocksHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx := newContext(r, w, "GetLocks")
logger.LogIf(ctx, gob.NewEncoder(w).Encode(globalLockServer.DupLockMap()))
}
var (
deletePolicyHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeletePolicy, grid.NewMSS, grid.NewNoPayload)
loadPolicyHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadPolicy, grid.NewMSS, grid.NewNoPayload)
loadPolicyMappingHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadPolicyMapping, grid.NewMSS, grid.NewNoPayload)
deleteSvcActHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteServiceAccount, grid.NewMSS, grid.NewNoPayload)
loadSvcActHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadServiceAccount, grid.NewMSS, grid.NewNoPayload)
deleteUserHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteUser, grid.NewMSS, grid.NewNoPayload)
loadUserHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadUser, grid.NewMSS, grid.NewNoPayload)
loadGroupHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadGroup, grid.NewMSS, grid.NewNoPayload)
// Types & Wrappers
aoMetricsGroup = grid.NewArrayOf[*Metric](func() *Metric { return &Metric{} })
madminBgHealState = grid.NewJSONPool[madmin.BgHealState]()
madminCPUs = grid.NewJSONPool[madmin.CPUs]()
madminMemInfo = grid.NewJSONPool[madmin.MemInfo]()
madminNetInfo = grid.NewJSONPool[madmin.NetInfo]()
madminOSInfo = grid.NewJSONPool[madmin.OSInfo]()
madminPartitions = grid.NewJSONPool[madmin.Partitions]()
madminProcInfo = grid.NewJSONPool[madmin.ProcInfo]()
madminRealtimeMetrics = grid.NewJSONPool[madmin.RealtimeMetrics]()
madminServerProperties = grid.NewJSONPool[madmin.ServerProperties]()
madminStorageInfo = grid.NewJSONPool[madmin.StorageInfo]()
madminSysConfig = grid.NewJSONPool[madmin.SysConfig]()
madminSysErrors = grid.NewJSONPool[madmin.SysErrors]()
madminSysServices = grid.NewJSONPool[madmin.SysServices]()
// Request -> Response RPC calls
deleteBucketMetadataRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteBucketMetadata, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
deleteBucketRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteBucket, grid.NewMSS, grid.NewNoPayload)
deletePolicyRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeletePolicy, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
deleteSvcActRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteServiceAccount, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
deleteUserRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteUser, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
getAllBucketStatsRPC = grid.NewSingleHandler[*grid.MSS, *BucketStatsMap](grid.HandlerGetAllBucketStats, grid.NewMSS, func() *BucketStatsMap { return &BucketStatsMap{} })
getBackgroundHealStatusRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.BgHealState]](grid.HandlerBackgroundHealStatus, grid.NewMSS, madminBgHealState.NewJSON)
getBandwidthRPC = grid.NewSingleHandler[*grid.URLValues, *bandwidth.BucketBandwidthReport](grid.HandlerGetBandwidth, grid.NewURLValues, func() *bandwidth.BucketBandwidthReport { return &bandwidth.BucketBandwidthReport{} })
getBucketStatsRPC = grid.NewSingleHandler[*grid.MSS, *BucketStats](grid.HandlerGetBucketStats, grid.NewMSS, func() *BucketStats { return &BucketStats{} })
getCPUsHandler = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.CPUs]](grid.HandlerGetCPUs, grid.NewMSS, madminCPUs.NewJSON)
getLastDayTierStatsRPC = grid.NewSingleHandler[*grid.MSS, *DailyAllTierStats](grid.HandlerGetLastDayTierStats, grid.NewMSS, func() *DailyAllTierStats { return &DailyAllTierStats{} })
getLocksRPC = grid.NewSingleHandler[*grid.MSS, *localLockMap](grid.HandlerGetLocks, grid.NewMSS, func() *localLockMap { return &localLockMap{} })
getMemInfoRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.MemInfo]](grid.HandlerGetMemInfo, grid.NewMSS, madminMemInfo.NewJSON)
getMetacacheListingRPC = grid.NewSingleHandler[*listPathOptions, *metacache](grid.HandlerGetMetacacheListing, func() *listPathOptions { return &listPathOptions{} }, func() *metacache { return &metacache{} })
getMetricsRPC = grid.NewSingleHandler[*grid.URLValues, *grid.JSON[madmin.RealtimeMetrics]](grid.HandlerGetMetrics, grid.NewURLValues, madminRealtimeMetrics.NewJSON)
getNetInfoRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.NetInfo]](grid.HandlerGetNetInfo, grid.NewMSS, madminNetInfo.NewJSON)
getOSInfoRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.OSInfo]](grid.HandlerGetOSInfo, grid.NewMSS, madminOSInfo.NewJSON)
getPartitionsRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.Partitions]](grid.HandlerGetPartitions, grid.NewMSS, madminPartitions.NewJSON)
getPeerBucketMetricsRPC = grid.NewSingleHandler[*grid.MSS, *grid.Array[*Metric]](grid.HandlerGetPeerBucketMetrics, grid.NewMSS, aoMetricsGroup.New)
getPeerMetricsRPC = grid.NewSingleHandler[*grid.MSS, *grid.Array[*Metric]](grid.HandlerGetPeerMetrics, grid.NewMSS, aoMetricsGroup.New)
getProcInfoRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.ProcInfo]](grid.HandlerGetProcInfo, grid.NewMSS, madminProcInfo.NewJSON)
getSRMetricsRPC = grid.NewSingleHandler[*grid.MSS, *SRMetricsSummary](grid.HandlerGetSRMetrics, grid.NewMSS, func() *SRMetricsSummary { return &SRMetricsSummary{} })
getSysConfigRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.SysConfig]](grid.HandlerGetSysConfig, grid.NewMSS, madminSysConfig.NewJSON)
getSysErrorsRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.SysErrors]](grid.HandlerGetSysErrors, grid.NewMSS, madminSysErrors.NewJSON)
getSysServicesRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.SysServices]](grid.HandlerGetSysServices, grid.NewMSS, madminSysServices.NewJSON)
headBucketRPC = grid.NewSingleHandler[*grid.MSS, *VolInfo](grid.HandlerHeadBucket, grid.NewMSS, func() *VolInfo { return &VolInfo{} })
healBucketRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerHealBucket, grid.NewMSS, grid.NewNoPayload)
loadBucketMetadataRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadBucketMetadata, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
loadGroupRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadGroup, grid.NewMSS, grid.NewNoPayload)
loadPolicyMappingRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadPolicyMapping, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
loadPolicyRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadPolicy, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
loadRebalanceMetaRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadRebalanceMeta, grid.NewMSS, grid.NewNoPayload)
loadSvcActRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadServiceAccount, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
loadTransitionTierConfigRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadTransitionTierConfig, grid.NewMSS, grid.NewNoPayload)
loadUserRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadUser, grid.NewMSS, grid.NewNoPayload).IgnoreNilConn()
localStorageInfoRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.StorageInfo]](grid.HandlerStorageInfo, grid.NewMSS, madminStorageInfo.NewJSON)
makeBucketRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerMakeBucket, grid.NewMSS, grid.NewNoPayload)
reloadPoolMetaRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerReloadPoolMeta, grid.NewMSS, grid.NewNoPayload)
reloadSiteReplicationConfigRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerReloadSiteReplicationConfig, grid.NewMSS, grid.NewNoPayload)
serverInfoRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.ServerProperties]](grid.HandlerServerInfo, grid.NewMSS, madminServerProperties.NewJSON)
signalServiceRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerSignalService, grid.NewMSS, grid.NewNoPayload)
stopRebalanceRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerStopRebalance, grid.NewMSS, grid.NewNoPayload)
updateMetacacheListingRPC = grid.NewSingleHandler[*metacache, *metacache](grid.HandlerUpdateMetacacheListing, func() *metacache { return &metacache{} }, func() *metacache { return &metacache{} })
// STREAMS
// Set an output capacity of 100 for consoleLogRPC and listenRPC.
// There is another buffer upstream that queues events as well.
consoleLogRPC = grid.NewStream[*grid.MSS, grid.NoPayload, *grid.Bytes](grid.HandlerConsoleLog, grid.NewMSS, nil, grid.NewBytes).WithOutCapacity(100)
listenRPC = grid.NewStream[*grid.URLValues, grid.NoPayload, *grid.Bytes](grid.HandlerListen, grid.NewURLValues, nil, grid.NewBytes).WithOutCapacity(100)
getResourceMetricsRPC = grid.NewStream[*grid.MSS, grid.NoPayload, *Metric](grid.HandlerGetResourceMetrics, grid.NewMSS, nil, func() *Metric { return &Metric{} })
)
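
Two of the new building blocks deserve a closer look: the JSON pool, which lets a type without msgp methods (most `madmin` structs) ride over the grid as pooled JSON, and `IgnoreNilConn`, which turns a call into a silent no-op success when no connection exists (typically when the target is the local node). A hedged sketch of how one wrapper is assembled and served, using only constructors that appear above (names are illustrative; a real handler ID can only be registered once):

```
// Assembling a JSON-wrapped single handler (sketch).
var (
	cpusPool   = grid.NewJSONPool[madmin.CPUs]()
	exampleRPC = grid.NewSingleHandler[*grid.MSS, *grid.JSON[madmin.CPUs]](
		grid.HandlerGetCPUs, grid.NewMSS, cpusPool.NewJSON,
	).IgnoreNilConn()
)

// Server-side shape: accept the typed request, wrap the typed response.
func exampleCPUsHandler(_ *grid.MSS) (*grid.JSON[madmin.CPUs], *grid.RemoteErr) {
	info := madmin.GetCPUs(context.Background(), globalMinioHost)
	return cpusPool.NewJSONWith(&info), nil
}

// Client-side shape: ValueOrZero yields the decoded value, or a zero
// value when the response is nil.
func exampleCPUsCall(ctx context.Context, conn *grid.Connection) (madmin.CPUs, error) {
	resp, err := exampleRPC.Call(ctx, conn, grid.NewMSS())
	return resp.ValueOrZero(), err
}
```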
// GetLocksHandler - returns the list of locks held by the server.
func (s *peerRESTServer) GetLocksHandler(_ *grid.MSS) (*localLockMap, *grid.RemoteErr) {
res := globalLockServer.DupLockMap()
return &res, nil
}
// DeletePolicyHandler - deletes a policy on the server.
func (s *peerRESTServer) DeletePolicyHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
objAPI := newObjectLayerFn()
@ -296,249 +350,123 @@ func (s *peerRESTServer) DownloadProfilingDataHandler(w http.ResponseWriter, r *
logger.LogIf(ctx, gob.NewEncoder(w).Encode(profileData))
}
func (s *peerRESTServer) LocalStorageInfoHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx := newContext(r, w, "LocalStorageInfo")
func (s *peerRESTServer) LocalStorageInfoHandler(mss *grid.MSS) (*grid.JSON[madmin.StorageInfo], *grid.RemoteErr) {
objLayer := newObjectLayerFn()
if objLayer == nil {
s.writeErrorResponse(w, errServerNotInitialized)
return
return nil, grid.NewRemoteErr(errServerNotInitialized)
}
metrics, err := strconv.ParseBool(r.Form.Get(peerRESTMetrics))
metrics, err := strconv.ParseBool(mss.Get(peerRESTMetrics))
if err != nil {
s.writeErrorResponse(w, err)
return
return nil, grid.NewRemoteErr(err)
}
logger.LogIf(ctx, gob.NewEncoder(w).Encode(objLayer.LocalStorageInfo(r.Context(), metrics)))
info := objLayer.LocalStorageInfo(context.Background(), metrics)
return madminStorageInfo.NewJSONWith(&info), nil
}
// ServerInfoHandler - returns Server Info
func (s *peerRESTServer) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx := newContext(r, w, "ServerInfo")
objLayer := newObjectLayerFn()
if objLayer == nil {
s.writeErrorResponse(w, errServerNotInitialized)
return
}
metrics, err := strconv.ParseBool(r.Form.Get(peerRESTMetrics))
func (s *peerRESTServer) ServerInfoHandler(params *grid.MSS) (*grid.JSON[madmin.ServerProperties], *grid.RemoteErr) {
r := http.Request{Host: globalMinioHost}
metrics, err := strconv.ParseBool(params.Get(peerRESTMetrics))
if err != nil {
s.writeErrorResponse(w, err)
return
return nil, grid.NewRemoteErr(err)
}
info := getLocalServerProperty(globalEndpoints, r, metrics)
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
info := getLocalServerProperty(globalEndpoints, &r, metrics)
return madminServerProperties.NewJSONWith(&info), nil
}
// GetCPUsHandler - returns CPU info.
func (s *peerRESTServer) GetCPUsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := madmin.GetCPUs(ctx, r.Host)
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
func (s *peerRESTServer) GetCPUsHandler(_ *grid.MSS) (*grid.JSON[madmin.CPUs], *grid.RemoteErr) {
info := madmin.GetCPUs(context.Background(), globalMinioHost)
return madminCPUs.NewJSONWith(&info), nil
}
// GetNetInfoHandler - returns network information.
func (s *peerRESTServer) GetNetInfoHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := madmin.GetNetInfo(r.Host, globalInternodeInterface)
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
func (s *peerRESTServer) GetNetInfoHandler(_ *grid.MSS) (*grid.JSON[madmin.NetInfo], *grid.RemoteErr) {
info := madmin.GetNetInfo(globalMinioHost, globalInternodeInterface)
return madminNetInfo.NewJSONWith(&info), nil
}
// GetPartitionsHandler - returns disk partition information.
func (s *peerRESTServer) GetPartitionsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := madmin.GetPartitions(ctx, r.Host)
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
func (s *peerRESTServer) GetPartitionsHandler(_ *grid.MSS) (*grid.JSON[madmin.Partitions], *grid.RemoteErr) {
info := madmin.GetPartitions(context.Background(), globalMinioHost)
return madminPartitions.NewJSONWith(&info), nil
}
// GetOSInfoHandler - returns operating system's information.
func (s *peerRESTServer) GetOSInfoHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := madmin.GetOSInfo(ctx, r.Host)
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
func (s *peerRESTServer) GetOSInfoHandler(_ *grid.MSS) (*grid.JSON[madmin.OSInfo], *grid.RemoteErr) {
info := madmin.GetOSInfo(context.Background(), globalMinioHost)
return madminOSInfo.NewJSONWith(&info), nil
}
// GetProcInfoHandler - returns this MinIO process information.
func (s *peerRESTServer) GetProcInfoHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := madmin.GetProcInfo(ctx, r.Host)
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
func (s *peerRESTServer) GetProcInfoHandler(_ *grid.MSS) (*grid.JSON[madmin.ProcInfo], *grid.RemoteErr) {
info := madmin.GetProcInfo(context.Background(), globalMinioHost)
return madminProcInfo.NewJSONWith(&info), nil
}
// GetMemInfoHandler - returns memory information.
func (s *peerRESTServer) GetMemInfoHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := madmin.GetMemInfo(ctx, r.Host)
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
func (s *peerRESTServer) GetMemInfoHandler(_ *grid.MSS) (*grid.JSON[madmin.MemInfo], *grid.RemoteErr) {
info := madmin.GetMemInfo(context.Background(), globalMinioHost)
return madminMemInfo.NewJSONWith(&info), nil
}
// GetMetricsHandler - returns server metrics.
func (s *peerRESTServer) GetMetricsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
func (s *peerRESTServer) GetMetricsHandler(v *grid.URLValues) (*grid.JSON[madmin.RealtimeMetrics], *grid.RemoteErr) {
values := v.Values()
var types madmin.MetricType
if t, _ := strconv.ParseUint(r.Form.Get(peerRESTMetricsTypes), 10, 64); t != 0 {
if t, _ := strconv.ParseUint(values.Get(peerRESTMetricsTypes), 10, 64); t != 0 {
types = madmin.MetricType(t)
} else {
types = madmin.MetricsAll
}
diskMap := make(map[string]struct{})
for _, disk := range r.Form[peerRESTDisk] {
for _, disk := range values[peerRESTDisk] {
diskMap[disk] = struct{}{}
}
hostMap := make(map[string]struct{})
for _, host := range r.Form[peerRESTHost] {
for _, host := range values[peerRESTHost] {
hostMap[host] = struct{}{}
}
jobID := r.Form.Get(peerRESTJobID)
depID := r.Form.Get(peerRESTDepID)
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := collectLocalMetrics(types, collectMetricsOpts{
disks: diskMap,
hosts: hostMap,
jobID: jobID,
depID: depID,
jobID: values.Get(peerRESTJobID),
depID: values.Get(peerRESTDepID),
})
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
return madminRealtimeMetrics.NewJSONWith(&info), nil
}
func (s *peerRESTServer) GetResourceMetrics(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("invalid request"))
return
}
enc := gob.NewEncoder(w)
for m := range ReportMetrics(r.Context(), resourceMetricsGroups) {
if err := enc.Encode(m); err != nil {
s.writeErrorResponse(w, errors.New("Encoding metric failed: "+err.Error()))
return
}
func (s *peerRESTServer) GetResourceMetrics(ctx context.Context, _ *grid.MSS, out chan<- *Metric) *grid.RemoteErr {
for m := range ReportMetrics(ctx, resourceMetricsGroups) {
out <- &m
}
return nil
}
// GetSysConfigHandler - returns system config information.
// (only the config that is of concern to MinIO)
func (s *peerRESTServer) GetSysConfigHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := madmin.GetSysConfig(ctx, r.Host)
logger.LogOnceIf(ctx, gob.NewEncoder(w).Encode(info), "get-sys-config")
func (s *peerRESTServer) GetSysConfigHandler(_ *grid.MSS) (*grid.JSON[madmin.SysConfig], *grid.RemoteErr) {
info := madmin.GetSysConfig(context.Background(), globalMinioHost)
return madminSysConfig.NewJSONWith(&info), nil
}
// GetSysServicesHandler - returns system services information.
// (only the services that are of concern to MinIO)
func (s *peerRESTServer) GetSysServicesHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := madmin.GetSysServices(ctx, r.Host)
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
func (s *peerRESTServer) GetSysServicesHandler(_ *grid.MSS) (*grid.JSON[madmin.SysServices], *grid.RemoteErr) {
info := madmin.GetSysServices(context.Background(), globalMinioHost)
return madminSysServices.NewJSONWith(&info), nil
}
// GetSysErrorsHandler - returns system level errors
func (s *peerRESTServer) GetSysErrorsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx, cancel := context.WithCancel(r.Context())
defer cancel()
info := madmin.GetSysErrors(ctx, r.Host)
logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
func (s *peerRESTServer) GetSysErrorsHandler(_ *grid.MSS) (*grid.JSON[madmin.SysErrors], *grid.RemoteErr) {
info := madmin.GetSysErrors(context.Background(), globalMinioHost)
return madminSysErrors.NewJSONWith(&info), nil
}
var deleteBucketMetadataHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteBucketMetadata, grid.NewMSS, grid.NewNoPayload)
// DeleteBucketMetadataHandler - Delete in memory bucket metadata
func (s *peerRESTServer) DeleteBucketMetadataHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
bucketName := mss.Get(peerRESTBucket)
@ -559,12 +487,7 @@ func (s *peerRESTServer) DeleteBucketMetadataHandler(mss *grid.MSS) (np grid.NoP
}
// GetAllBucketStatsHandler - fetches bucket replication stats for all buckets from this peer.
func (s *peerRESTServer) GetAllBucketStatsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
func (s *peerRESTServer) GetAllBucketStatsHandler(mss *grid.MSS) (*BucketStatsMap, *grid.RemoteErr) {
replicationStats := globalReplicationStats.GetAll()
bucketStatsMap := make(map[string]BucketStats, len(replicationStats))
for k, v := range replicationStats {
@ -573,22 +496,15 @@ func (s *peerRESTServer) GetAllBucketStatsHandler(w http.ResponseWriter, r *http
ProxyStats: globalReplicationStats.getProxyStats(k),
}
}
logger.LogIf(r.Context(), msgp.Encode(w, &BucketStatsMap{Stats: bucketStatsMap, Timestamp: UTCNow()}))
return &BucketStatsMap{Stats: bucketStatsMap, Timestamp: UTCNow()}, nil
}
// GetBucketStatsHandler - fetches current in-memory bucket stats; currently it only
// returns BucketStats, which includes ReplicationStats.
func (s *peerRESTServer) GetBucketStatsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
vars := mux.Vars(r)
bucketName := vars[peerRESTBucket]
func (s *peerRESTServer) GetBucketStatsHandler(vars *grid.MSS) (*BucketStats, *grid.RemoteErr) {
bucketName := vars.Get(peerRESTBucket)
if bucketName == "" {
s.writeErrorResponse(w, errors.New("Bucket name is missing"))
return
return nil, grid.NewRemoteErrString("Bucket name is missing")
}
bs := BucketStats{
@ -596,27 +512,20 @@ func (s *peerRESTServer) GetBucketStatsHandler(w http.ResponseWriter, r *http.Re
QueueStats: ReplicationQueueStats{Nodes: []ReplQNodeStats{globalReplicationStats.getNodeQueueStats(bucketName)}},
ProxyStats: globalReplicationStats.getProxyStats(bucketName),
}
logger.LogIf(r.Context(), msgp.Encode(w, &bs))
return &bs, nil
}
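msgp-encodable responses such as `BucketStats` need no JSON wrapper; the RPC is declared directly over the typed payload. A sketch of what the `getBucketStatsRPC` declaration registered later in this diff plausibly looks like, following the same `NewSingleHandler` shape used for `headBucketHandler` below (the factory closures are assumptions):

```
// Sketch: MSS request carrying the bucket name, msgp BucketStats response.
var getBucketStatsRPC = grid.NewSingleHandler[*grid.MSS, *BucketStats](
	grid.HandlerGetBucketStats, grid.NewMSS,
	func() *BucketStats { return &BucketStats{} },
)
```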
// GetSRMetricsHandler - fetches current in-memory replication stats at site level from this peer
func (s *peerRESTServer) GetSRMetricsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
func (s *peerRESTServer) GetSRMetricsHandler(mss *grid.MSS) (*SRMetricsSummary, *grid.RemoteErr) {
objAPI := newObjectLayerFn()
if objAPI == nil {
s.writeErrorResponse(w, errServerNotInitialized)
return
return nil, grid.NewRemoteErr(errServerNotInitialized)
}
sm := globalReplicationStats.getSRMetricsForNode()
logger.LogIf(r.Context(), msgp.Encode(w, &sm))
return &sm, nil
}
var loadBucketMetadataHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadBucketMetadata, grid.NewMSS, grid.NewNoPayload)
// LoadBucketMetadataHandler - reloads in-memory bucket metadata.
func (s *peerRESTServer) LoadBucketMetadataHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
bucketName := mss.Get(peerRESTBucket)
@ -647,43 +556,17 @@ func (s *peerRESTServer) LoadBucketMetadataHandler(mss *grid.MSS) (np grid.NoPay
return
}
func (s *peerRESTServer) GetMetacacheListingHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx := newContext(r, w, "GetMetacacheListing")
var opts listPathOptions
err := gob.NewDecoder(r.Body).Decode(&opts)
if err != nil && err != io.EOF {
s.writeErrorResponse(w, err)
return
}
resp := localMetacacheMgr.getBucket(ctx, opts.Bucket).findCache(opts)
logger.LogIf(ctx, msgp.Encode(w, &resp))
func (s *peerRESTServer) GetMetacacheListingHandler(opts *listPathOptions) (*metacache, *grid.RemoteErr) {
resp := localMetacacheMgr.getBucket(context.Background(), opts.Bucket).findCache(*opts)
return &resp, nil
}
func (s *peerRESTServer) UpdateMetacacheListingHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
ctx := newContext(r, w, "UpdateMetacacheListing")
var req metacache
err := msgp.Decode(r.Body, &req)
func (s *peerRESTServer) UpdateMetacacheListingHandler(req *metacache) (*metacache, *grid.RemoteErr) {
cache, err := localMetacacheMgr.updateCacheEntry(*req)
if err != nil {
s.writeErrorResponse(w, err)
return
return nil, grid.NewRemoteErr(err)
}
cache, err := localMetacacheMgr.updateCacheEntry(req)
if err != nil {
s.writeErrorResponse(w, err)
return
}
// Return updated metadata.
logger.LogIf(ctx, msgp.Encode(w, &cache))
return &cache, nil
}
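The metacache pair is the fully typed variant: request and response are both msgp round-trippers, so no `grid.MSS` string map is involved. A hedged sketch of the matching declarations; the variable names match the registrations later in this diff, while the closures are assumptions:

```
// Sketch: typed request and response payloads for the metacache RPCs.
var (
	getMetacacheListingRPC = grid.NewSingleHandler[*listPathOptions, *metacache](
		grid.HandlerGetMetacacheListing,
		func() *listPathOptions { return &listPathOptions{} },
		func() *metacache { return &metacache{} })
	updateMetacacheListingRPC = grid.NewSingleHandler[*metacache, *metacache](
		grid.HandlerUpdateMetacacheListing,
		func() *metacache { return &metacache{} },
		func() *metacache { return &metacache{} })
)
```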
// PutBucketNotificationHandler - sets bucket notification configuration.
@ -806,36 +689,27 @@ func waitingDrivesNode() map[string]madmin.DiskMetrics {
}
// SignalServiceHandler - handles restart/stop/reload-dynamic service signals.
func (s *peerRESTServer) SignalServiceHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
vars := mux.Vars(r)
signalString := vars[peerRESTSignal]
func (s *peerRESTServer) SignalServiceHandler(vars *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
signalString := vars.Get(peerRESTSignal)
if signalString == "" {
s.writeErrorResponse(w, errors.New("signal name is missing"))
return
return np, grid.NewRemoteErrString("signal name is missing")
}
si, err := strconv.Atoi(signalString)
if err != nil {
s.writeErrorResponse(w, err)
return
return np, grid.NewRemoteErr(err)
}
signal := serviceSignal(si)
switch signal {
case serviceRestart, serviceStop:
dryRun := r.Form.Get("dry-run") == "true" // This is only supported for `restart/stop`
dryRun := vars.Get("dry-run") == "true" // This is only supported for `restart/stop`
waitingDisks := waitingDrivesNode()
if len(waitingDisks) > 0 {
buf, err := json.Marshal(waitingDisks)
if err != nil {
s.writeErrorResponse(w, err)
return
return np, grid.NewRemoteErr(err)
}
s.writeErrorResponse(w, errors.New(string(buf)))
return np, grid.NewRemoteErrString(string(buf))
}
if !dryRun {
globalServiceSignalCh <- signal
@ -847,36 +721,30 @@ func (s *peerRESTServer) SignalServiceHandler(w http.ResponseWriter, r *http.Req
case serviceReloadDynamic:
objAPI := newObjectLayerFn()
if objAPI == nil {
s.writeErrorResponse(w, errServerNotInitialized)
return
return np, grid.NewRemoteErr(errServerNotInitialized)
}
srvCfg, err := getValidConfig(objAPI)
if err != nil {
s.writeErrorResponse(w, err)
return
return np, grid.NewRemoteErr(err)
}
subSys := r.Form.Get(peerRESTSubSys)
subSys := vars.Get(peerRESTSubSys)
// Apply dynamic values.
ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
defer cancel()
if subSys == "" {
err = applyDynamicConfig(r.Context(), objAPI, srvCfg)
err = applyDynamicConfig(ctx, objAPI, srvCfg)
} else {
err = applyDynamicConfigForSubSys(r.Context(), objAPI, srvCfg, subSys)
err = applyDynamicConfigForSubSys(ctx, objAPI, srvCfg, subSys)
}
if err != nil {
s.writeErrorResponse(w, err)
return np, grid.NewRemoteErr(err)
}
return
default:
s.writeErrorResponse(w, errUnsupportedSignal)
return
return np, grid.NewRemoteErr(errUnsupportedSignal)
}
return np, nil
}
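On the client side the converted signal call reduces to building an `MSS` and making one grid call; `dry-run` now travels as an ordinary map entry instead of a form value. A sketch assuming an established `conn *grid.Connection` (a hypothetical variable, for illustration only):

```
// Sketch: invoking the signal RPC from a peer client.
mss := grid.NewMSSWith(map[string]string{
	peerRESTSignal: strconv.Itoa(int(serviceRestart)),
	"dry-run":      "true", // only honored for restart/stop
})
if _, err := signalServiceRPC.Call(ctx, conn, mss); err != nil {
	return err
}
```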
// Set an output capacity of 100 for listenHandler.
// An additional buffer downstream also queues events.
var listenHandler = grid.NewStream[*grid.URLValues, grid.NoPayload, *grid.Bytes](grid.HandlerListen,
grid.NewURLValues, nil, grid.NewBytes).WithOutCapacity(100)
// ListenHandler sends bucket event notifications back to the peer REST client.
func (s *peerRESTServer) ListenHandler(ctx context.Context, v *grid.URLValues, out chan<- *grid.Bytes) *grid.RemoteErr {
values := v.Values()
@ -988,24 +856,14 @@ func (s *peerRESTServer) TraceHandler(ctx context.Context, payload []byte, _ <-c
return nil
}
func (s *peerRESTServer) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("invalid request"))
return
}
ctx := newContext(r, w, "BackgroundHealStatus")
state, ok := getLocalBackgroundHealStatus(ctx, newObjectLayerFn())
func (s *peerRESTServer) BackgroundHealStatusHandler(_ *grid.MSS) (*grid.JSON[madmin.BgHealState], *grid.RemoteErr) {
state, ok := getLocalBackgroundHealStatus(context.Background(), newObjectLayerFn())
if !ok {
s.writeErrorResponse(w, errServerNotInitialized)
return
return nil, grid.NewRemoteErr(errServerNotInitialized)
}
logger.LogIf(ctx, gob.NewEncoder(w).Encode(state))
return madminBgHealState.NewJSONWith(&state), nil
}
var reloadSiteReplicationConfigHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerReloadSiteReplicationConfig, grid.NewMSS, grid.NewNoPayload)
// ReloadSiteReplicationConfigHandler - reloads site replication configuration from the disks
func (s *peerRESTServer) ReloadSiteReplicationConfigHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
objAPI := newObjectLayerFn()
@ -1017,8 +875,6 @@ func (s *peerRESTServer) ReloadSiteReplicationConfigHandler(mss *grid.MSS) (np g
return
}
var reloadPoolMetaHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerReloadPoolMeta, grid.NewMSS, grid.NewNoPayload)
func (s *peerRESTServer) ReloadPoolMetaHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
objAPI := newObjectLayerFn()
if objAPI == nil {
@ -1037,8 +893,6 @@ func (s *peerRESTServer) ReloadPoolMetaHandler(mss *grid.MSS) (np grid.NoPayload
return
}
var stopRebalanceHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerStopRebalance, grid.NewMSS, grid.NewNoPayload)
func (s *peerRESTServer) StopRebalanceHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
objAPI := newObjectLayerFn()
if objAPI == nil {
@ -1054,8 +908,6 @@ func (s *peerRESTServer) StopRebalanceHandler(mss *grid.MSS) (np grid.NoPayload,
return
}
var loadRebalanceMetaHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadRebalanceMeta, grid.NewMSS, grid.NewNoPayload)
func (s *peerRESTServer) LoadRebalanceMetaHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
objAPI := newObjectLayerFn()
if objAPI == nil {
@ -1083,8 +935,6 @@ func (s *peerRESTServer) LoadRebalanceMetaHandler(mss *grid.MSS) (np grid.NoPayl
return
}
var loadTransitionTierConfigHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerLoadTransitionTierConfig, grid.NewMSS, grid.NewNoPayload)
func (s *peerRESTServer) LoadTransitionTierConfigHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
objAPI := newObjectLayerFn()
if objAPI == nil {
@ -1102,46 +952,34 @@ func (s *peerRESTServer) LoadTransitionTierConfigHandler(mss *grid.MSS) (np grid
}
// ConsoleLogHandler streams console logs of this node back to the peer REST client.
func (s *peerRESTServer) ConsoleLogHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("Invalid request"))
return
}
doneCh := make(chan struct{})
defer xioutil.SafeClose(doneCh)
ch := make(chan log.Info, 100000)
err := globalConsoleSys.Subscribe(ch, doneCh, "", 0, madmin.LogMaskAll, nil)
func (s *peerRESTServer) ConsoleLogHandler(ctx context.Context, params *grid.MSS, out chan<- *grid.Bytes) *grid.RemoteErr {
mask, err := strconv.Atoi(params.Get(peerRESTLogMask))
if err != nil {
s.writeErrorResponse(w, err)
return
mask = int(madmin.LogMaskAll)
}
keepAliveTicker := time.NewTicker(time.Second)
defer keepAliveTicker.Stop()
enc := gob.NewEncoder(w)
ch := make(chan log.Info, 1000)
err = globalConsoleSys.Subscribe(ch, ctx.Done(), "", 0, madmin.LogMask(mask), nil)
if err != nil {
return grid.NewRemoteErr(err)
}
var buf bytes.Buffer
enc := json.NewEncoder(&buf)
for {
select {
case entry, ok := <-ch:
if !ok {
return
return grid.NewRemoteErrString("console log channel closed")
}
if !entry.SendLog("", madmin.LogMask(mask)) {
continue
}
buf.Reset()
if err := enc.Encode(entry); err != nil {
return
return grid.NewRemoteErr(err)
}
if len(ch) == 0 {
w.(http.Flusher).Flush()
}
case <-keepAliveTicker.C:
if len(ch) == 0 {
if err := enc.Encode(&madmin.LogInfo{}); err != nil {
return
}
w.(http.Flusher).Flush()
}
case <-r.Context().Done():
return
out <- grid.NewBytesWithCopyOf(buf.Bytes())
case <-ctx.Done():
return grid.NewRemoteErr(ctx.Err())
}
}
}
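The admin handler shown earlier consumes this stream via `peer.ConsoleLog(ctx, logKind, encodedCh)`; that presumably reduces to a streaming call plus a `Results` loop, mirroring the `storageListDirRPC` client later in this diff. A sketch; the `consoleLogRPC` name matches the registration below, everything else is an assumption:

```
// Sketch: client side of the console log stream.
st, err := consoleLogRPC.Call(ctx, conn, grid.NewMSSWith(map[string]string{
	peerRESTLogMask: strconv.Itoa(int(logKind)),
}))
if err != nil {
	return err
}
return st.Results(func(b *grid.Bytes) error {
	encodedCh <- *b // forward one JSON-encoded log line
	return nil
})
```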
@ -1161,59 +999,30 @@ func (s *peerRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
}
// GetBandwidth gets the bandwidth for the buckets requested.
func (s *peerRESTServer) GetBandwidth(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("invalid request"))
return
}
bucketsString := r.Form.Get("buckets")
doneCh := make(chan struct{})
defer xioutil.SafeClose(doneCh)
selectBuckets := b.SelectBuckets(strings.Split(bucketsString, ",")...)
report := globalBucketMonitor.GetReport(selectBuckets)
enc := gob.NewEncoder(w)
if err := enc.Encode(report); err != nil {
s.writeErrorResponse(w, errors.New("Encoding report failed: "+err.Error()))
return
}
func (s *peerRESTServer) GetBandwidth(params *grid.URLValues) (*bandwidth.BucketBandwidthReport, *grid.RemoteErr) {
// grid.URLValues carries repeated query values, so select every requested bucket.
buckets := params.Values()["buckets"]
selectBuckets := b.SelectBuckets(buckets...)
return globalBucketMonitor.GetReport(selectBuckets), nil
}
// GetPeerMetrics gets the metrics to be federated across peers.
func (s *peerRESTServer) GetPeerMetrics(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("invalid request"))
return
}
enc := gob.NewEncoder(w)
for m := range ReportMetrics(r.Context(), peerMetricsGroups) {
if err := enc.Encode(m); err != nil {
s.writeErrorResponse(w, errors.New("Encoding metric failed: "+err.Error()))
return
}
func (s *peerRESTServer) GetPeerMetrics(_ *grid.MSS) (*grid.Array[*Metric], *grid.RemoteErr) {
ch := ReportMetrics(context.Background(), peerMetricsGroups)
res := make([]*Metric, 0, len(peerMetricsGroups))
for m := range ch {
m := m // copy: &m must not alias the reused loop variable
res = append(res, &m)
}
return aoMetricsGroup.NewWith(res), nil
}
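`aoMetricsGroup` comes from the slice wrapper this PR adds, letting a `[]*Metric` travel as a single grid payload. A sketch of its likely declaration; the `grid.NewArrayOf` constructor name is an assumption inferred from the `NewWith` call above:

```
// Hypothetical declaration of the shared metrics array pool.
var aoMetricsGroup = grid.NewArrayOf[*Metric](func() *Metric { return &Metric{} })
```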
// GetPeerBucketMetrics gets the bucket metrics to be federated across peers.
func (s *peerRESTServer) GetPeerBucketMetrics(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("invalid request"))
return
}
enc := gob.NewEncoder(w)
for m := range ReportMetrics(r.Context(), bucketPeerMetricsGroups) {
if err := enc.Encode(m); err != nil {
s.writeErrorResponse(w, errors.New("Encoding metric failed: "+err.Error()))
return
}
func (s *peerRESTServer) GetPeerBucketMetrics(_ *grid.MSS) (*grid.Array[*Metric], *grid.RemoteErr) {
ch := ReportMetrics(context.Background(), bucketPeerMetricsGroups)
res := make([]*Metric, 0, len(bucketPeerMetricsGroups))
for m := range ch {
m := m // copy: &m must not alias the reused loop variable
res = append(res, &m)
}
return aoMetricsGroup.NewWith(res), nil
}
func (s *peerRESTServer) SpeedTestHandler(w http.ResponseWriter, r *http.Request) {
@ -1269,20 +1078,13 @@ func (s *peerRESTServer) SpeedTestHandler(w http.ResponseWriter, r *http.Request
}
// GetLastDayTierStatsHandler - returns per-tier stats in the last 24hrs for this server
func (s *peerRESTServer) GetLastDayTierStatsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
s.writeErrorResponse(w, errors.New("invalid request"))
return
}
ctx := newContext(r, w, "GetLastDayTierStats")
func (s *peerRESTServer) GetLastDayTierStatsHandler(_ *grid.MSS) (*DailyAllTierStats, *grid.RemoteErr) {
if objAPI := newObjectLayerFn(); objAPI == nil || globalTransitionState == nil {
s.writeErrorResponse(w, errServerNotInitialized)
return
return nil, grid.NewRemoteErr(errServerNotInitialized)
}
result := globalTransitionState.getDailyAllTierStats()
logger.LogIf(ctx, gob.NewEncoder(w).Encode(result))
return &result, nil
}
func (s *peerRESTServer) DriveSpeedTestHandler(w http.ResponseWriter, r *http.Request) {
@ -1393,8 +1195,6 @@ func (s *peerRESTServer) NetSpeedTestHandler(w http.ResponseWriter, r *http.Requ
logger.LogIf(r.Context(), gob.NewEncoder(w).Encode(result))
}
var healBucketHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerHealBucket, grid.NewMSS, grid.NewNoPayload)
func (s *peerRESTServer) HealBucketHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
bucket := mss.Get(peerS3Bucket)
if isMinioMetaBucket(bucket) {
@ -1412,8 +1212,6 @@ func (s *peerRESTServer) HealBucketHandler(mss *grid.MSS) (np grid.NoPayload, ne
return np, nil
}
var headBucketHandler = grid.NewSingleHandler[*grid.MSS, *VolInfo](grid.HandlerHeadBucket, grid.NewMSS, func() *VolInfo { return &VolInfo{} })
// HeadBucketHandler implements the peer BucketInfo call; returns the bucket create date.
func (s *peerRESTServer) HeadBucketHandler(mss *grid.MSS) (info *VolInfo, nerr *grid.RemoteErr) {
bucket := mss.Get(peerS3Bucket)
@ -1436,8 +1234,6 @@ func (s *peerRESTServer) HeadBucketHandler(mss *grid.MSS) (info *VolInfo, nerr *
}, nil
}
var deleteBucketHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerDeleteBucket, grid.NewMSS, grid.NewNoPayload)
// DeleteBucketHandler implements peer delete bucket call.
func (s *peerRESTServer) DeleteBucketHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
bucket := mss.Get(peerS3Bucket)
@ -1456,8 +1252,6 @@ func (s *peerRESTServer) DeleteBucketHandler(mss *grid.MSS) (np grid.NoPayload,
return np, nil
}
var makeBucketHandler = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](grid.HandlerMakeBucket, grid.NewMSS, grid.NewNoPayload)
// MakeBucketHandler implements peer create bucket call.
func (s *peerRESTServer) MakeBucketHandler(mss *grid.MSS) (np grid.NoPayload, nerr *grid.RemoteErr) {
bucket := mss.Get(peerS3Bucket)
@ -1485,65 +1279,63 @@ func registerPeerRESTHandlers(router *mux.Router, gm *grid.Manager) {
server := &peerRESTServer{}
subrouter := router.PathPrefix(peerRESTPrefix).Subrouter()
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodHealth).HandlerFunc(h(server.HealthHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetLocks).HandlerFunc(h(server.GetLocksHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodServerInfo).HandlerFunc(h(server.ServerInfoHandler)).Queries(restQueries(peerRESTMetrics)...)
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodLocalStorageInfo).HandlerFunc(h(server.LocalStorageInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodProcInfo).HandlerFunc(h(server.GetProcInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMemInfo).HandlerFunc(h(server.GetMemInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMetrics).HandlerFunc(h(server.GetMetricsHandler)).Queries(restQueries(peerRESTMetricsTypes)...)
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodResourceMetrics).HandlerFunc(h(server.GetResourceMetrics))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysErrors).HandlerFunc(h(server.GetSysErrorsHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysServices).HandlerFunc(h(server.GetSysServicesHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSysConfig).HandlerFunc(h(server.GetSysConfigHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodOsInfo).HandlerFunc(h(server.GetOSInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDiskHwInfo).HandlerFunc(h(server.GetPartitionsHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodNetHwInfo).HandlerFunc(h(server.GetNetInfoHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodCPUInfo).HandlerFunc(h(server.GetCPUsHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetAllBucketStats).HandlerFunc(h(server.GetAllBucketStatsHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetBucketStats).HandlerFunc(h(server.GetBucketStatsHandler)).Queries(restQueries(peerRESTBucket)...)
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSignalService).HandlerFunc(h(server.SignalServiceHandler)).Queries(restQueries(peerRESTSignal)...)
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodVerifyBinary).HandlerFunc(h(server.VerifyBinaryHandler)).Queries(restQueries(peerRESTURL, peerRESTSha256Sum, peerRESTReleaseInfo)...)
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodCommitBinary).HandlerFunc(h(server.CommitBinaryHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetReplicationMRF).HandlerFunc(httpTraceHdrs(server.GetReplicationMRFHandler)).Queries(restQueries(peerRESTBucket)...)
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetSRMetrics).HandlerFunc(h(server.GetSRMetricsHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodStartProfiling).HandlerFunc(h(server.StartProfilingHandler)).Queries(restQueries(peerRESTProfiler)...)
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDownloadProfilingData).HandlerFunc(h(server.DownloadProfilingDataHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodBackgroundHealStatus).HandlerFunc(server.BackgroundHealStatusHandler)
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodLog).HandlerFunc(server.ConsoleLogHandler)
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetBandwidth).HandlerFunc(h(server.GetBandwidth))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetMetacacheListing).HandlerFunc(h(server.GetMetacacheListingHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodUpdateMetacacheListing).HandlerFunc(h(server.UpdateMetacacheListingHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetPeerMetrics).HandlerFunc(h(server.GetPeerMetrics))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetPeerBucketMetrics).HandlerFunc(h(server.GetPeerBucketMetrics))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodSpeedTest).HandlerFunc(h(server.SpeedTestHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDriveSpeedTest).HandlerFunc(h(server.DriveSpeedTestHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodNetperf).HandlerFunc(h(server.NetSpeedTestHandler))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodDevNull).HandlerFunc(h(server.DevNull))
subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetLastDayTierStats).HandlerFunc(h(server.GetLastDayTierStatsHandler))
logger.FatalIf(makeBucketHandler.Register(gm, server.MakeBucketHandler), "unable to register handler")
logger.FatalIf(deleteBucketHandler.Register(gm, server.DeleteBucketHandler), "unable to register handler")
logger.FatalIf(headBucketHandler.Register(gm, server.HeadBucketHandler), "unable to register handler")
logger.FatalIf(healBucketHandler.Register(gm, server.HealBucketHandler), "unable to register handler")
logger.FatalIf(consoleLogRPC.RegisterNoInput(gm, server.ConsoleLogHandler), "unable to register handler")
logger.FatalIf(deleteBucketMetadataRPC.Register(gm, server.DeleteBucketMetadataHandler), "unable to register handler")
logger.FatalIf(deleteBucketRPC.Register(gm, server.DeleteBucketHandler), "unable to register handler")
logger.FatalIf(deletePolicyRPC.Register(gm, server.DeletePolicyHandler), "unable to register handler")
logger.FatalIf(deleteSvcActRPC.Register(gm, server.DeleteServiceAccountHandler), "unable to register handler")
logger.FatalIf(deleteUserRPC.Register(gm, server.DeleteUserHandler), "unable to register handler")
logger.FatalIf(getAllBucketStatsRPC.Register(gm, server.GetAllBucketStatsHandler), "unable to register handler")
logger.FatalIf(getBackgroundHealStatusRPC.Register(gm, server.BackgroundHealStatusHandler), "unable to register handler")
logger.FatalIf(getBandwidthRPC.Register(gm, server.GetBandwidth), "unable to register handler")
logger.FatalIf(getBucketStatsRPC.Register(gm, server.GetBucketStatsHandler), "unable to register handler")
logger.FatalIf(getCPUsHandler.Register(gm, server.GetCPUsHandler), "unable to register handler")
logger.FatalIf(getLastDayTierStatsRPC.Register(gm, server.GetLastDayTierStatsHandler), "unable to register handler")
logger.FatalIf(getLocksRPC.Register(gm, server.GetLocksHandler), "unable to register handler")
logger.FatalIf(getMemInfoRPC.Register(gm, server.GetMemInfoHandler), "unable to register handler")
logger.FatalIf(getMetacacheListingRPC.Register(gm, server.GetMetacacheListingHandler), "unable to register handler")
logger.FatalIf(getMetricsRPC.Register(gm, server.GetMetricsHandler), "unable to register handler")
logger.FatalIf(getNetInfoRPC.Register(gm, server.GetNetInfoHandler), "unable to register handler")
logger.FatalIf(getOSInfoRPC.Register(gm, server.GetOSInfoHandler), "unable to register handler")
logger.FatalIf(getPartitionsRPC.Register(gm, server.GetPartitionsHandler), "unable to register handler")
logger.FatalIf(getPeerBucketMetricsRPC.Register(gm, server.GetPeerBucketMetrics), "unable to register handler")
logger.FatalIf(getPeerMetricsRPC.Register(gm, server.GetPeerMetrics), "unable to register handler")
logger.FatalIf(getProcInfoRPC.Register(gm, server.GetProcInfoHandler), "unable to register handler")
logger.FatalIf(getResourceMetricsRPC.RegisterNoInput(gm, server.GetResourceMetrics), "unable to register handler")
logger.FatalIf(getSRMetricsRPC.Register(gm, server.GetSRMetricsHandler), "unable to register handler")
logger.FatalIf(getSysConfigRPC.Register(gm, server.GetSysConfigHandler), "unable to register handler")
logger.FatalIf(getSysErrorsRPC.Register(gm, server.GetSysErrorsHandler), "unable to register handler")
logger.FatalIf(getSysServicesRPC.Register(gm, server.GetSysServicesHandler), "unable to register handler")
logger.FatalIf(headBucketRPC.Register(gm, server.HeadBucketHandler), "unable to register handler")
logger.FatalIf(healBucketRPC.Register(gm, server.HealBucketHandler), "unable to register handler")
logger.FatalIf(listenRPC.RegisterNoInput(gm, server.ListenHandler), "unable to register handler")
logger.FatalIf(loadBucketMetadataRPC.Register(gm, server.LoadBucketMetadataHandler), "unable to register handler")
logger.FatalIf(loadGroupRPC.Register(gm, server.LoadGroupHandler), "unable to register handler")
logger.FatalIf(loadPolicyMappingRPC.Register(gm, server.LoadPolicyMappingHandler), "unable to register handler")
logger.FatalIf(loadPolicyRPC.Register(gm, server.LoadPolicyHandler), "unable to register handler")
logger.FatalIf(loadRebalanceMetaRPC.Register(gm, server.LoadRebalanceMetaHandler), "unable to register handler")
logger.FatalIf(loadSvcActRPC.Register(gm, server.LoadServiceAccountHandler), "unable to register handler")
logger.FatalIf(loadTransitionTierConfigRPC.Register(gm, server.LoadTransitionTierConfigHandler), "unable to register handler")
logger.FatalIf(loadUserRPC.Register(gm, server.LoadUserHandler), "unable to register handler")
logger.FatalIf(localStorageInfoRPC.Register(gm, server.LocalStorageInfoHandler), "unable to register handler")
logger.FatalIf(makeBucketRPC.Register(gm, server.MakeBucketHandler), "unable to register handler")
logger.FatalIf(reloadPoolMetaRPC.Register(gm, server.ReloadPoolMetaHandler), "unable to register handler")
logger.FatalIf(reloadSiteReplicationConfigRPC.Register(gm, server.ReloadSiteReplicationConfigHandler), "unable to register handler")
logger.FatalIf(serverInfoRPC.Register(gm, server.ServerInfoHandler), "unable to register handler")
logger.FatalIf(signalServiceRPC.Register(gm, server.SignalServiceHandler), "unable to register handler")
logger.FatalIf(stopRebalanceRPC.Register(gm, server.StopRebalanceHandler), "unable to register handler")
logger.FatalIf(updateMetacacheListingRPC.Register(gm, server.UpdateMetacacheListingHandler), "unable to register handler")
logger.FatalIf(deletePolicyHandler.Register(gm, server.DeletePolicyHandler), "unable to register handler")
logger.FatalIf(loadPolicyHandler.Register(gm, server.LoadPolicyHandler), "unable to register handler")
logger.FatalIf(loadPolicyMappingHandler.Register(gm, server.LoadPolicyMappingHandler), "unable to register handler")
logger.FatalIf(deleteUserHandler.Register(gm, server.DeleteUserHandler), "unable to register handler")
logger.FatalIf(deleteSvcActHandler.Register(gm, server.DeleteServiceAccountHandler), "unable to register handler")
logger.FatalIf(loadUserHandler.Register(gm, server.LoadUserHandler), "unable to register handler")
logger.FatalIf(loadSvcActHandler.Register(gm, server.LoadServiceAccountHandler), "unable to register handler")
logger.FatalIf(loadGroupHandler.Register(gm, server.LoadGroupHandler), "unable to register handler")
logger.FatalIf(loadTransitionTierConfigHandler.Register(gm, server.LoadTransitionTierConfigHandler), "unable to register handler")
logger.FatalIf(reloadPoolMetaHandler.Register(gm, server.ReloadPoolMetaHandler), "unable to register handler")
logger.FatalIf(loadRebalanceMetaHandler.Register(gm, server.LoadRebalanceMetaHandler), "unable to register handler")
logger.FatalIf(stopRebalanceHandler.Register(gm, server.StopRebalanceHandler), "unable to register handler")
logger.FatalIf(reloadSiteReplicationConfigHandler.Register(gm, server.ReloadSiteReplicationConfigHandler), "unable to register handler")
logger.FatalIf(loadBucketMetadataHandler.Register(gm, server.LoadBucketMetadataHandler), "unable to register handler")
logger.FatalIf(deleteBucketMetadataHandler.Register(gm, server.DeleteBucketMetadataHandler), "unable to register handler")
logger.FatalIf(listenHandler.RegisterNoInput(gm, server.ListenHandler), "unable to register handler")
logger.FatalIf(gm.RegisterStreamingHandler(grid.HandlerTrace, grid.StreamHandler{
Handle: server.TraceHandler,
Subroute: "",

View File

@ -376,7 +376,7 @@ func (client *remotePeerS3Client) HealBucket(ctx context.Context, bucket string,
peerS3BucketDeleted: strconv.FormatBool(opts.Remove),
})
_, err := healBucketHandler.Call(ctx, conn, mss)
_, err := healBucketRPC.Call(ctx, conn, mss)
// Initialize heal result info
return madmin.HealResultItem{
@ -398,7 +398,7 @@ func (client *remotePeerS3Client) GetBucketInfo(ctx context.Context, bucket stri
peerS3BucketDeleted: strconv.FormatBool(opts.Deleted),
})
volInfo, err := headBucketHandler.Call(ctx, conn, mss)
volInfo, err := headBucketRPC.Call(ctx, conn, mss)
if err != nil {
return BucketInfo{}, toStorageErr(err)
}
@ -449,7 +449,7 @@ func (client *remotePeerS3Client) MakeBucket(ctx context.Context, bucket string,
peerS3BucketForceCreate: strconv.FormatBool(opts.ForceCreate),
})
_, err := makeBucketHandler.Call(ctx, conn, mss)
_, err := makeBucketRPC.Call(ctx, conn, mss)
return toStorageErr(err)
}
@ -498,7 +498,7 @@ func (client *remotePeerS3Client) DeleteBucket(ctx context.Context, bucket strin
peerS3BucketForceDelete: strconv.FormatBool(opts.Force),
})
_, err := deleteBucketHandler.Call(ctx, conn, mss)
_, err := deleteBucketRPC.Call(ctx, conn, mss)
return toStorageErr(err)
}

View File

@ -21,6 +21,8 @@ import (
"time"
)
//go:generate msgp -file=$GOFILE
// DeleteOptions represents the disk level delete options available for the APIs
type DeleteOptions struct {
BaseOptions
@ -44,8 +46,6 @@ type DiskInfoOptions struct {
NoOp bool `msg:"np"`
}
//go:generate msgp -file=$GOFILE
// DiskInfo is an extended type which returns current
// disk usage per path.
// Its msgp encoding is positional (tuple), so any added or deleted fields are incompatible.
@ -445,3 +445,8 @@ type RenameDataResp struct {
type LocalDiskIDs struct {
IDs []string
}
// ListDirResult - ListDir()'s response.
type ListDirResult struct {
Entries []string `msg:"e"`
}

View File

@ -2871,6 +2871,148 @@ func (z *FilesInfo) Msgsize() (s int) {
return
}
// DecodeMsg implements msgp.Decodable
func (z *ListDirResult) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "e":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Entries")
return
}
if cap(z.Entries) >= int(zb0002) {
z.Entries = (z.Entries)[:zb0002]
} else {
z.Entries = make([]string, zb0002)
}
for za0001 := range z.Entries {
z.Entries[za0001], err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err, "Entries", za0001)
return
}
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *ListDirResult) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 1
// write "e"
err = en.Append(0x81, 0xa1, 0x65)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(len(z.Entries)))
if err != nil {
err = msgp.WrapError(err, "Entries")
return
}
for za0001 := range z.Entries {
err = en.WriteString(z.Entries[za0001])
if err != nil {
err = msgp.WrapError(err, "Entries", za0001)
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *ListDirResult) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 1
// string "e"
o = append(o, 0x81, 0xa1, 0x65)
o = msgp.AppendArrayHeader(o, uint32(len(z.Entries)))
for za0001 := range z.Entries {
o = msgp.AppendString(o, z.Entries[za0001])
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *ListDirResult) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "e":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Entries")
return
}
if cap(z.Entries) >= int(zb0002) {
z.Entries = (z.Entries)[:zb0002]
} else {
z.Entries = make([]string, zb0002)
}
for za0001 := range z.Entries {
z.Entries[za0001], bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Entries", za0001)
return
}
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *ListDirResult) Msgsize() (s int) {
s = 1 + 2 + msgp.ArrayHeaderSize
for za0001 := range z.Entries {
s += msgp.StringPrefixSize + len(z.Entries[za0001])
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *LocalDiskIDs) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte

View File

@ -1252,6 +1252,119 @@ func BenchmarkDecodeFilesInfo(b *testing.B) {
}
}
func TestMarshalUnmarshalListDirResult(t *testing.T) {
v := ListDirResult{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgListDirResult(b *testing.B) {
v := ListDirResult{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgListDirResult(b *testing.B) {
v := ListDirResult{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalListDirResult(b *testing.B) {
v := ListDirResult{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeListDirResult(t *testing.T) {
v := ListDirResult{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeListDirResult Msgsize() is inaccurate")
}
vn := ListDirResult{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeListDirResult(b *testing.B) {
v := ListDirResult{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeListDirResult(b *testing.B) {
v := ListDirResult{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalLocalDiskIDs(t *testing.T) {
v := LocalDiskIDs{}
bts, err := v.MarshalMsg(nil)

View File

@ -237,7 +237,7 @@ func (client *storageRESTClient) NSScanner(ctx context.Context, cache dataUsageC
defer atomic.AddInt32(&client.scanning, -1)
defer xioutil.SafeClose(updates)
st, err := storageNSScannerHandler.Call(ctx, client.gridConn, &nsScannerOptions{
st, err := storageNSScannerRPC.Call(ctx, client.gridConn, &nsScannerOptions{
DiskID: client.diskID,
ScanMode: int(scanMode),
Cache: &cache,
@ -311,7 +311,7 @@ func (client *storageRESTClient) DiskInfo(ctx context.Context, opts DiskInfoOpti
opts.DiskID = client.diskID
infop, err := storageDiskInfoHandler.Call(ctx, client.gridConn, &opts)
infop, err := storageDiskInfoRPC.Call(ctx, client.gridConn, &opts)
if err != nil {
return info, toStorageErr(err)
}
@ -340,7 +340,7 @@ func (client *storageRESTClient) ListVols(ctx context.Context) (vols []VolInfo,
// StatVol - get volume info over the network.
func (client *storageRESTClient) StatVol(ctx context.Context, volume string) (vol VolInfo, err error) {
v, err := storageStatVolHandler.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{
v, err := storageStatVolRPC.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{
storageRESTDiskID: client.diskID,
storageRESTVolume: volume,
}))
@ -349,7 +349,7 @@ func (client *storageRESTClient) StatVol(ctx context.Context, volume string) (vo
}
vol = *v
// Performs shallow copy, so we can reuse.
storageStatVolHandler.PutResponse(v)
storageStatVolRPC.PutResponse(v)
return vol, nil
}
@ -386,7 +386,7 @@ func (client *storageRESTClient) CreateFile(ctx context.Context, origvolume, vol
}
func (client *storageRESTClient) WriteMetadata(ctx context.Context, origvolume, volume, path string, fi FileInfo) error {
_, err := storageWriteMetadataHandler.Call(ctx, client.gridConn, &MetadataHandlerParams{
_, err := storageWriteMetadataRPC.Call(ctx, client.gridConn, &MetadataHandlerParams{
DiskID: client.diskID,
OrigVolume: origvolume,
Volume: volume,
@ -397,7 +397,7 @@ func (client *storageRESTClient) WriteMetadata(ctx context.Context, origvolume,
}
func (client *storageRESTClient) UpdateMetadata(ctx context.Context, volume, path string, fi FileInfo, opts UpdateMetadataOpts) error {
_, err := storageUpdateMetadataHandler.Call(ctx, client.gridConn, &MetadataHandlerParams{
_, err := storageUpdateMetadataRPC.Call(ctx, client.gridConn, &MetadataHandlerParams{
DiskID: client.diskID,
Volume: volume,
FilePath: path,
@ -408,7 +408,7 @@ func (client *storageRESTClient) UpdateMetadata(ctx context.Context, volume, pat
}
func (client *storageRESTClient) DeleteVersion(ctx context.Context, volume, path string, fi FileInfo, forceDelMarker bool, opts DeleteOptions) (err error) {
_, err = storageDeleteVersionHandler.Call(ctx, client.gridConn, &DeleteVersionHandlerParams{
_, err = storageDeleteVersionRPC.Call(ctx, client.gridConn, &DeleteVersionHandlerParams{
DiskID: client.diskID,
Volume: volume,
FilePath: path,
@ -431,7 +431,7 @@ func (client *storageRESTClient) WriteAll(ctx context.Context, volume string, pa
// CheckParts - stat all file parts.
func (client *storageRESTClient) CheckParts(ctx context.Context, volume string, path string, fi FileInfo) error {
_, err := storageCheckPartsHandler.Call(ctx, client.gridConn, &CheckPartsHandlerParams{
_, err := storageCheckPartsRPC.Call(ctx, client.gridConn, &CheckPartsHandlerParams{
DiskID: client.diskID,
Volume: volume,
FilePath: path,
@ -442,7 +442,7 @@ func (client *storageRESTClient) CheckParts(ctx context.Context, volume string,
// RenameData - rename source path to destination path atomically, metadata and data file.
func (client *storageRESTClient) RenameData(ctx context.Context, srcVolume, srcPath string, fi FileInfo, dstVolume, dstPath string, opts RenameOptions) (sign uint64, err error) {
resp, err := storageRenameDataHandler.Call(ctx, client.gridConn, &RenameDataHandlerParams{
resp, err := storageRenameDataRPC.Call(ctx, client.gridConn, &RenameDataHandlerParams{
DiskID: client.diskID,
SrcVolume: srcVolume,
SrcPath: srcPath,
@ -454,7 +454,7 @@ func (client *storageRESTClient) RenameData(ctx context.Context, srcVolume, srcP
if err != nil {
return 0, toStorageErr(err)
}
defer storageRenameDataHandler.PutResponse(resp)
defer storageRenameDataRPC.PutResponse(resp)
return resp.Signature, nil
}
@ -484,7 +484,7 @@ func readMsgpReaderPoolPut(r *msgp.Reader) {
func (client *storageRESTClient) ReadVersion(ctx context.Context, origvolume, volume, path, versionID string, opts ReadOptions) (fi FileInfo, err error) {
// Use websocket when not reading data.
if !opts.ReadData {
resp, err := storageReadVersionHandler.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{
resp, err := storageReadVersionRPC.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{
storageRESTDiskID: client.diskID,
storageRESTOrigVolume: origvolume,
storageRESTVolume: volume,
@ -524,7 +524,7 @@ func (client *storageRESTClient) ReadVersion(ctx context.Context, origvolume, vo
func (client *storageRESTClient) ReadXL(ctx context.Context, volume string, path string, readData bool) (rf RawFileInfo, err error) {
// Use websocket when not reading data.
if !readData {
resp, err := storageReadXLHandler.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{
resp, err := storageReadXLRPC.Call(ctx, client.gridConn, grid.NewMSSWith(map[string]string{
storageRESTDiskID: client.diskID,
storageRESTVolume: volume,
storageRESTFilePath: path,
@ -567,7 +567,7 @@ func (client *storageRESTClient) ReadAll(ctx context.Context, volume string, pat
}
}
gridBytes, err := storageReadAllHandler.Call(ctx, client.gridConn, &ReadAllHandlerParams{
gridBytes, err := storageReadAllRPC.Call(ctx, client.gridConn, &ReadAllHandlerParams{
DiskID: client.diskID,
Volume: volume,
FilePath: path,
@ -618,24 +618,27 @@ func (client *storageRESTClient) ReadFile(ctx context.Context, volume string, pa
// ListDir - lists a directory.
func (client *storageRESTClient) ListDir(ctx context.Context, origvolume, volume, dirPath string, count int) (entries []string, err error) {
values := make(url.Values)
values := grid.NewMSS()
values.Set(storageRESTVolume, volume)
values.Set(storageRESTDirPath, dirPath)
values.Set(storageRESTCount, strconv.Itoa(count))
values.Set(storageRESTOrigVolume, origvolume)
values.Set(storageRESTDiskID, client.diskID)
respBody, err := client.call(ctx, storageRESTMethodListDir, values, nil, -1)
st, err := storageListDirRPC.Call(ctx, client.gridConn, values)
if err != nil {
return nil, err
return nil, toStorageErr(err)
}
defer xhttp.DrainBody(respBody)
err = gob.NewDecoder(respBody).Decode(&entries)
return entries, err
err = st.Results(func(resp *ListDirResult) error {
entries = resp.Entries
return nil
})
return entries, toStorageErr(err)
}
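Design note: even though `ListDir` yields a single message, it is modeled as a stream (declared with `WithOutCapacity(1)` below) rather than a single-shot RPC, presumably because directory listings can be arbitrarily large and the streaming path avoids round-tripping the whole listing through the pooled request/response types.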
// Delete - deletes a file.
func (client *storageRESTClient) Delete(ctx context.Context, volume string, path string, deleteOpts DeleteOptions) error {
_, err := storageDeleteFileHandler.Call(ctx, client.gridConn, &DeleteFileHandlerParams{
_, err := storageDeleteFileRPC.Call(ctx, client.gridConn, &DeleteFileHandlerParams{
DiskID: client.diskID,
Volume: volume,
FilePath: path,
@ -700,7 +703,7 @@ func (client *storageRESTClient) DeleteVersions(ctx context.Context, volume stri
// RenameFile - renames a file.
func (client *storageRESTClient) RenameFile(ctx context.Context, srcVolume, srcPath, dstVolume, dstPath string) (err error) {
_, err = storageRenameFileHandler.Call(ctx, client.gridConn, &RenameFileHandlerParams{
_, err = storageRenameFileRPC.Call(ctx, client.gridConn, &RenameFileHandlerParams{
DiskID: client.diskID,
SrcVolume: srcVolume,
SrcFilePath: srcPath,

View File

@ -57,6 +57,23 @@ type storageRESTServer struct {
poolIndex, setIndex, diskIndex int
}
var (
storageCheckPartsRPC = grid.NewSingleHandler[*CheckPartsHandlerParams, grid.NoPayload](grid.HandlerCheckParts, func() *CheckPartsHandlerParams { return &CheckPartsHandlerParams{} }, grid.NewNoPayload)
storageDeleteFileRPC = grid.NewSingleHandler[*DeleteFileHandlerParams, grid.NoPayload](grid.HandlerDeleteFile, func() *DeleteFileHandlerParams { return &DeleteFileHandlerParams{} }, grid.NewNoPayload).AllowCallRequestPool(true)
storageDeleteVersionRPC = grid.NewSingleHandler[*DeleteVersionHandlerParams, grid.NoPayload](grid.HandlerDeleteVersion, func() *DeleteVersionHandlerParams { return &DeleteVersionHandlerParams{} }, grid.NewNoPayload)
storageDiskInfoRPC = grid.NewSingleHandler[*DiskInfoOptions, *DiskInfo](grid.HandlerDiskInfo, func() *DiskInfoOptions { return &DiskInfoOptions{} }, func() *DiskInfo { return &DiskInfo{} }).WithSharedResponse().AllowCallRequestPool(true)
storageNSScannerRPC = grid.NewStream[*nsScannerOptions, grid.NoPayload, *nsScannerResp](grid.HandlerNSScanner, func() *nsScannerOptions { return &nsScannerOptions{} }, nil, func() *nsScannerResp { return &nsScannerResp{} })
storageReadAllRPC = grid.NewSingleHandler[*ReadAllHandlerParams, *grid.Bytes](grid.HandlerReadAll, func() *ReadAllHandlerParams { return &ReadAllHandlerParams{} }, grid.NewBytes).AllowCallRequestPool(true)
storageReadVersionRPC = grid.NewSingleHandler[*grid.MSS, *FileInfo](grid.HandlerReadVersion, grid.NewMSS, func() *FileInfo { return &FileInfo{} })
storageReadXLRPC = grid.NewSingleHandler[*grid.MSS, *RawFileInfo](grid.HandlerReadXL, grid.NewMSS, func() *RawFileInfo { return &RawFileInfo{} })
storageRenameDataRPC = grid.NewSingleHandler[*RenameDataHandlerParams, *RenameDataResp](grid.HandlerRenameData, func() *RenameDataHandlerParams { return &RenameDataHandlerParams{} }, func() *RenameDataResp { return &RenameDataResp{} })
storageRenameFileRPC = grid.NewSingleHandler[*RenameFileHandlerParams, grid.NoPayload](grid.HandlerRenameFile, func() *RenameFileHandlerParams { return &RenameFileHandlerParams{} }, grid.NewNoPayload).AllowCallRequestPool(true)
storageStatVolRPC = grid.NewSingleHandler[*grid.MSS, *VolInfo](grid.HandlerStatVol, grid.NewMSS, func() *VolInfo { return &VolInfo{} })
storageUpdateMetadataRPC = grid.NewSingleHandler[*MetadataHandlerParams, grid.NoPayload](grid.HandlerUpdateMetadata, func() *MetadataHandlerParams { return &MetadataHandlerParams{} }, grid.NewNoPayload)
storageWriteMetadataRPC = grid.NewSingleHandler[*MetadataHandlerParams, grid.NoPayload](grid.HandlerWriteMetadata, func() *MetadataHandlerParams { return &MetadataHandlerParams{} }, grid.NewNoPayload)
storageListDirRPC = grid.NewStream[*grid.MSS, grid.NoPayload, *ListDirResult](grid.HandlerListDir, grid.NewMSS, nil, func() *ListDirResult { return &ListDirResult{} }).WithOutCapacity(1)
)
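With every storage declaration in one var block, adding a call becomes a declaration plus a registration. A purely illustrative sketch; `HandlerStatDir`, `StatDirResult`, and `StatDirHandler` are hypothetical placeholders, not part of this PR:

```
// Hypothetical new RPC following the established pattern.
var storageStatDirRPC = grid.NewSingleHandler[*grid.MSS, *StatDirResult](
	grid.HandlerStatDir, grid.NewMSS,
	func() *StatDirResult { return &StatDirResult{} },
)
// ...and in registerStorageRESTHandlers:
// logger.FatalIf(storageStatDirRPC.Register(gm, server.StatDirHandler, endpoint.Path), "unable to register handler")
```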
func (s *storageRESTServer) getStorage() StorageAPI {
globalLocalDrivesMu.RLock()
defer globalLocalDrivesMu.RUnlock()
@ -198,11 +215,6 @@ func (s *storageRESTServer) HealthHandler(w http.ResponseWriter, r *http.Request
s.IsValid(w, r)
}
// DiskInfo types.
// DiskInfo.Metrics elements are shared, so we cannot reuse.
var storageDiskInfoHandler = grid.NewSingleHandler[*DiskInfoOptions, *DiskInfo](grid.HandlerDiskInfo, func() *DiskInfoOptions { return &DiskInfoOptions{} },
func() *DiskInfo { return &DiskInfo{} }).WithSharedResponse().AllowCallRequestPool(true)
// DiskInfoHandler - returns disk info.
func (s *storageRESTServer) DiskInfoHandler(opts *DiskInfoOptions) (*DiskInfo, *grid.RemoteErr) {
if !s.checkID(opts.DiskID) {
@ -215,12 +227,6 @@ func (s *storageRESTServer) DiskInfoHandler(opts *DiskInfoOptions) (*DiskInfo, *
return &info, nil
}
// scanner rpc handler.
var storageNSScannerHandler = grid.NewStream[*nsScannerOptions, grid.NoPayload, *nsScannerResp](grid.HandlerNSScanner,
func() *nsScannerOptions { return &nsScannerOptions{} },
nil,
func() *nsScannerResp { return &nsScannerResp{} })
func (s *storageRESTServer) NSScannerHandler(ctx context.Context, params *nsScannerOptions, out chan<- *nsScannerResp) *grid.RemoteErr {
if !s.checkID(params.DiskID) {
return grid.NewRemoteErr(errDiskNotFound)
@ -236,7 +242,7 @@ func (s *storageRESTServer) NSScannerHandler(ctx context.Context, params *nsScan
go func() {
defer wg.Done()
for update := range updates {
resp := storageNSScannerHandler.NewResponse()
resp := storageNSScannerRPC.NewResponse()
resp.Update = &update
out <- resp
}
@ -247,7 +253,7 @@ func (s *storageRESTServer) NSScannerHandler(ctx context.Context, params *nsScan
return grid.NewRemoteErr(err)
}
// Send final response.
resp := storageNSScannerHandler.NewResponse()
resp := storageNSScannerRPC.NewResponse()
resp.Final = &ui
out <- resp
return nil
@ -277,22 +283,6 @@ func (s *storageRESTServer) MakeVolBulkHandler(w http.ResponseWriter, r *http.Re
}
}
// ListVolsHandler - list volumes.
func (s *storageRESTServer) ListVolsHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
}
infos, err := s.getStorage().ListVols(r.Context())
if err != nil {
s.writeErrorResponse(w, err)
return
}
logger.LogIf(r.Context(), msgp.Encode(w, VolsInfo(infos)))
}
// statvol types.
var storageStatVolHandler = grid.NewSingleHandler[*grid.MSS, *VolInfo](grid.HandlerStatVol, grid.NewMSS, func() *VolInfo { return &VolInfo{} })
// StatVolHandler - stat a volume.
func (s *storageRESTServer) StatVolHandler(params *grid.MSS) (*VolInfo, *grid.RemoteErr) {
if !s.checkID(params.Get(storageRESTDiskID)) {
@ -346,10 +336,6 @@ func (s *storageRESTServer) CreateFileHandler(w http.ResponseWriter, r *http.Req
done(s.getStorage().CreateFile(r.Context(), origvolume, volume, filePath, int64(fileSize), body))
}
var storageDeleteVersionHandler = grid.NewSingleHandler[*DeleteVersionHandlerParams, grid.NoPayload](grid.HandlerDeleteVersion, func() *DeleteVersionHandlerParams {
return &DeleteVersionHandlerParams{}
}, grid.NewNoPayload)
// DeleteVersionHandler delete updated metadata.
func (s *storageRESTServer) DeleteVersionHandler(p *DeleteVersionHandlerParams) (np grid.NoPayload, gerr *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@ -364,10 +350,6 @@ func (s *storageRESTServer) DeleteVersionHandler(p *DeleteVersionHandlerParams)
return np, grid.NewRemoteErr(err)
}
var storageReadVersionHandler = grid.NewSingleHandler[*grid.MSS, *FileInfo](grid.HandlerReadVersion, grid.NewMSS, func() *FileInfo {
return &FileInfo{}
})
// ReadVersionHandlerWS read metadata of versionID
func (s *storageRESTServer) ReadVersionHandlerWS(params *grid.MSS) (*FileInfo, *grid.RemoteErr) {
if !s.checkID(params.Get(storageRESTDiskID)) {
@ -422,10 +404,6 @@ func (s *storageRESTServer) ReadVersionHandler(w http.ResponseWriter, r *http.Re
logger.LogIf(r.Context(), msgp.Encode(w, &fi))
}
var storageWriteMetadataHandler = grid.NewSingleHandler[*MetadataHandlerParams, grid.NoPayload](grid.HandlerWriteMetadata, func() *MetadataHandlerParams {
return &MetadataHandlerParams{}
}, grid.NewNoPayload)
// WriteMetadataHandler rpc handler to write new updated metadata.
func (s *storageRESTServer) WriteMetadataHandler(p *MetadataHandlerParams) (np grid.NoPayload, gerr *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@ -440,10 +418,6 @@ func (s *storageRESTServer) WriteMetadataHandler(p *MetadataHandlerParams) (np g
return np, grid.NewRemoteErr(err)
}
var storageUpdateMetadataHandler = grid.NewSingleHandler[*MetadataHandlerParams, grid.NoPayload](grid.HandlerUpdateMetadata, func() *MetadataHandlerParams {
return &MetadataHandlerParams{}
}, grid.NewNoPayload)
// UpdateMetadataHandler update new updated metadata.
func (s *storageRESTServer) UpdateMetadataHandler(p *MetadataHandlerParams) (grid.NoPayload, *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@ -479,10 +453,6 @@ func (s *storageRESTServer) WriteAllHandler(w http.ResponseWriter, r *http.Reque
}
}
var storageCheckPartsHandler = grid.NewSingleHandler[*CheckPartsHandlerParams, grid.NoPayload](grid.HandlerCheckParts, func() *CheckPartsHandlerParams {
return &CheckPartsHandlerParams{}
}, grid.NewNoPayload)
// CheckPartsHandler - check if a file metadata exists.
func (s *storageRESTServer) CheckPartsHandler(p *CheckPartsHandlerParams) (grid.NoPayload, *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@ -493,10 +463,6 @@ func (s *storageRESTServer) CheckPartsHandler(p *CheckPartsHandlerParams) (grid.
return grid.NewNPErr(s.getStorage().CheckParts(context.Background(), volume, filePath, p.FI))
}
var storageReadAllHandler = grid.NewSingleHandler[*ReadAllHandlerParams, *grid.Bytes](grid.HandlerReadAll, func() *ReadAllHandlerParams {
return &ReadAllHandlerParams{}
}, grid.NewBytes).AllowCallRequestPool(true)
// ReadAllHandler - read all the contents of a file.
func (s *storageRESTServer) ReadAllHandler(p *ReadAllHandlerParams) (*grid.Bytes, *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@ -532,10 +498,6 @@ func (s *storageRESTServer) ReadXLHandler(w http.ResponseWriter, r *http.Request
logger.LogIf(r.Context(), msgp.Encode(w, &rf))
}
var storageReadXLHandler = grid.NewSingleHandler[*grid.MSS, *RawFileInfo](grid.HandlerReadXL, grid.NewMSS, func() *RawFileInfo {
return &RawFileInfo{}
})
// ReadXLHandlerWS - read xl.meta for an object at path.
func (s *storageRESTServer) ReadXLHandlerWS(params *grid.MSS) (*RawFileInfo, *grid.RemoteErr) {
if !s.checkID(params.Get(storageRESTDiskID)) {
@ -650,31 +612,26 @@ func (s *storageRESTServer) ReadFileStreamHandler(w http.ResponseWriter, r *http
}
// ListDirHandler - list a directory.
func (s *storageRESTServer) ListDirHandler(w http.ResponseWriter, r *http.Request) {
if !s.IsValid(w, r) {
return
func (s *storageRESTServer) ListDirHandler(ctx context.Context, params *grid.MSS, out chan<- *ListDirResult) *grid.RemoteErr {
if !s.checkID(params.Get(storageRESTDiskID)) {
return grid.NewRemoteErr(errDiskNotFound)
}
volume := r.Form.Get(storageRESTVolume)
dirPath := r.Form.Get(storageRESTDirPath)
origvolume := r.Form.Get(storageRESTOrigVolume)
count, err := strconv.Atoi(r.Form.Get(storageRESTCount))
volume := params.Get(storageRESTVolume)
dirPath := params.Get(storageRESTDirPath)
origvolume := params.Get(storageRESTOrigVolume)
count, err := strconv.Atoi(params.Get(storageRESTCount))
if err != nil {
s.writeErrorResponse(w, err)
return
return grid.NewRemoteErr(err)
}
entries, err := s.getStorage().ListDir(r.Context(), origvolume, volume, dirPath, count)
entries, err := s.getStorage().ListDir(ctx, origvolume, volume, dirPath, count)
if err != nil {
s.writeErrorResponse(w, err)
return
return grid.NewRemoteErr(err)
}
gob.NewEncoder(w).Encode(&entries)
out <- &ListDirResult{Entries: entries}
return nil
}
var storageDeleteFileHandler = grid.NewSingleHandler[*DeleteFileHandlerParams, grid.NoPayload](grid.HandlerDeleteFile, func() *DeleteFileHandlerParams {
return &DeleteFileHandlerParams{}
}, grid.NewNoPayload).AllowCallRequestPool(true)
// DeleteFileHandler - delete a file.
func (s *storageRESTServer) DeleteFileHandler(p *DeleteFileHandlerParams) (grid.NoPayload, *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@ -730,12 +687,6 @@ func (s *storageRESTServer) DeleteVersionsHandler(w http.ResponseWriter, r *http
encoder.Encode(dErrsResp)
}
var storageRenameDataHandler = grid.NewSingleHandler[*RenameDataHandlerParams, *RenameDataResp](grid.HandlerRenameData, func() *RenameDataHandlerParams {
return &RenameDataHandlerParams{}
}, func() *RenameDataResp {
return &RenameDataResp{}
})
// RenameDataHandler - renames a meta object and data dir to destination.
func (s *storageRESTServer) RenameDataHandler(p *RenameDataHandlerParams) (*RenameDataResp, *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@ -749,10 +700,6 @@ func (s *storageRESTServer) RenameDataHandler(p *RenameDataHandlerParams) (*Rena
return resp, grid.NewRemoteErr(err)
}
var storageRenameFileHandler = grid.NewSingleHandler[*RenameFileHandlerParams, grid.NoPayload](grid.HandlerRenameFile, func() *RenameFileHandlerParams {
return &RenameFileHandlerParams{}
}, grid.NewNoPayload).AllowCallRequestPool(true)
// RenameFileHandler - rename a file from source to destination
func (s *storageRESTServer) RenameFileHandler(p *RenameFileHandlerParams) (grid.NoPayload, *grid.RemoteErr) {
if !s.checkID(p.DiskID) {
@ -1356,26 +1303,26 @@ func registerStorageRESTHandlers(router *mux.Router, endpointServerPools Endpoin
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCreateFile).HandlerFunc(h(server.CreateFileHandler))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadFile).HandlerFunc(h(server.ReadFileHandler))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadFileStream).HandlerFunc(h(server.ReadFileStreamHandler))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodListDir).HandlerFunc(h(server.ListDirHandler))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodDeleteVersions).HandlerFunc(h(server.DeleteVersionsHandler))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodVerifyFile).HandlerFunc(h(server.VerifyFileHandler))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodStatInfoFile).HandlerFunc(h(server.StatInfoFile))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodReadMultiple).HandlerFunc(h(server.ReadMultiple))
subrouter.Methods(http.MethodPost).Path(storageRESTVersionPrefix + storageRESTMethodCleanAbandoned).HandlerFunc(h(server.CleanAbandonedDataHandler))
logger.FatalIf(storageReadAllHandler.Register(gm, server.ReadAllHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageRenameFileHandler.Register(gm, server.RenameFileHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageRenameDataHandler.Register(gm, server.RenameDataHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageDeleteFileHandler.Register(gm, server.DeleteFileHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageCheckPartsHandler.Register(gm, server.CheckPartsHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageReadVersionHandler.Register(gm, server.ReadVersionHandlerWS, endpoint.Path), "unable to register handler")
logger.FatalIf(storageWriteMetadataHandler.Register(gm, server.WriteMetadataHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageUpdateMetadataHandler.Register(gm, server.UpdateMetadataHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageDeleteVersionHandler.Register(gm, server.DeleteVersionHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageReadXLHandler.Register(gm, server.ReadXLHandlerWS, endpoint.Path), "unable to register handler")
logger.FatalIf(storageNSScannerHandler.RegisterNoInput(gm, server.NSScannerHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageDiskInfoHandler.Register(gm, server.DiskInfoHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageStatVolHandler.Register(gm, server.StatVolHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageListDirRPC.RegisterNoInput(gm, server.ListDirHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageReadAllRPC.Register(gm, server.ReadAllHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageRenameFileRPC.Register(gm, server.RenameFileHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageRenameDataRPC.Register(gm, server.RenameDataHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageDeleteFileRPC.Register(gm, server.DeleteFileHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageCheckPartsRPC.Register(gm, server.CheckPartsHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageReadVersionRPC.Register(gm, server.ReadVersionHandlerWS, endpoint.Path), "unable to register handler")
logger.FatalIf(storageWriteMetadataRPC.Register(gm, server.WriteMetadataHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageUpdateMetadataRPC.Register(gm, server.UpdateMetadataHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageDeleteVersionRPC.Register(gm, server.DeleteVersionHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageReadXLRPC.Register(gm, server.ReadXLHandlerWS, endpoint.Path), "unable to register handler")
logger.FatalIf(storageNSScannerRPC.RegisterNoInput(gm, server.NSScannerHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageDiskInfoRPC.Register(gm, server.DiskInfoHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(storageStatVolRPC.Register(gm, server.StatVolHandler, endpoint.Path), "unable to register handler")
logger.FatalIf(gm.RegisterStreamingHandler(grid.HandlerWalkDir, grid.StreamHandler{
Subroute: endpoint.Path,
Handle: server.WalkDirHandler,


@ -23,6 +23,8 @@ import (
"github.com/minio/madmin-go/v3"
)
//go:generate msgp -file=$GOFILE -unexported
type lastDayTierStats struct {
Bins [24]tierStats
UpdatedAt time.Time


@ -0,0 +1,417 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *DailyAllTierStats) DecodeMsg(dc *msgp.Reader) (err error) {
var zb0004 uint32
zb0004, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(DailyAllTierStats, zb0004)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
for zb0004 > 0 {
zb0004--
var zb0001 string
var zb0002 lastDayTierStats
zb0001, err = dc.ReadString()
if err != nil {
err = msgp.WrapError(err)
return
}
var field []byte
_ = field
var zb0005 uint32
zb0005, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
for zb0005 > 0 {
zb0005--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
switch msgp.UnsafeString(field) {
case "Bins":
var zb0006 uint32
zb0006, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, zb0001, "Bins")
return
}
if zb0006 != uint32(24) {
err = msgp.ArrayError{Wanted: uint32(24), Got: zb0006}
return
}
for zb0003 := range zb0002.Bins {
err = zb0002.Bins[zb0003].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, zb0001, "Bins", zb0003)
return
}
}
case "UpdatedAt":
zb0002.UpdatedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, zb0001, "UpdatedAt")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
}
}
(*z)[zb0001] = zb0002
}
return
}
// EncodeMsg implements msgp.Encodable
func (z DailyAllTierStats) EncodeMsg(en *msgp.Writer) (err error) {
err = en.WriteMapHeader(uint32(len(z)))
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0007, zb0008 := range z {
err = en.WriteString(zb0007)
if err != nil {
err = msgp.WrapError(err)
return
}
// map header, size 2
// write "Bins"
err = en.Append(0x82, 0xa4, 0x42, 0x69, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(24))
if err != nil {
err = msgp.WrapError(err, zb0007, "Bins")
return
}
for zb0009 := range zb0008.Bins {
err = zb0008.Bins[zb0009].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, zb0007, "Bins", zb0009)
return
}
}
// write "UpdatedAt"
err = en.Append(0xa9, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
if err != nil {
return
}
err = en.WriteTime(zb0008.UpdatedAt)
if err != nil {
err = msgp.WrapError(err, zb0007, "UpdatedAt")
return
}
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z DailyAllTierStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
o = msgp.AppendMapHeader(o, uint32(len(z)))
for zb0007, zb0008 := range z {
o = msgp.AppendString(o, zb0007)
// map header, size 2
// string "Bins"
o = append(o, 0x82, 0xa4, 0x42, 0x69, 0x6e, 0x73)
o = msgp.AppendArrayHeader(o, uint32(24))
for zb0009 := range zb0008.Bins {
o, err = zb0008.Bins[zb0009].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, zb0007, "Bins", zb0009)
return
}
}
// string "UpdatedAt"
o = append(o, 0xa9, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
o = msgp.AppendTime(o, zb0008.UpdatedAt)
}
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *DailyAllTierStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var zb0004 uint32
zb0004, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
if (*z) == nil {
(*z) = make(DailyAllTierStats, zb0004)
} else if len((*z)) > 0 {
for key := range *z {
delete((*z), key)
}
}
for zb0004 > 0 {
var zb0001 string
var zb0002 lastDayTierStats
zb0004--
zb0001, bts, err = msgp.ReadStringBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
var field []byte
_ = field
var zb0005 uint32
zb0005, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
for zb0005 > 0 {
zb0005--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
switch msgp.UnsafeString(field) {
case "Bins":
var zb0006 uint32
zb0006, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001, "Bins")
return
}
if zb0006 != uint32(24) {
err = msgp.ArrayError{Wanted: uint32(24), Got: zb0006}
return
}
for zb0003 := range zb0002.Bins {
bts, err = zb0002.Bins[zb0003].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, zb0001, "Bins", zb0003)
return
}
}
case "UpdatedAt":
zb0002.UpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, zb0001, "UpdatedAt")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err, zb0001)
return
}
}
}
(*z)[zb0001] = zb0002
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z DailyAllTierStats) Msgsize() (s int) {
s = msgp.MapHeaderSize
if z != nil {
for zb0007, zb0008 := range z {
_ = zb0008
s += msgp.StringPrefixSize + len(zb0007) + 1 + 5 + msgp.ArrayHeaderSize
for zb0009 := range zb0008.Bins {
s += zb0008.Bins[zb0009].Msgsize()
}
s += 10 + msgp.TimeSize
}
}
return
}
// DecodeMsg implements msgp.Decodable
func (z *lastDayTierStats) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Bins":
var zb0002 uint32
zb0002, err = dc.ReadArrayHeader()
if err != nil {
err = msgp.WrapError(err, "Bins")
return
}
if zb0002 != uint32(24) {
err = msgp.ArrayError{Wanted: uint32(24), Got: zb0002}
return
}
for za0001 := range z.Bins {
err = z.Bins[za0001].DecodeMsg(dc)
if err != nil {
err = msgp.WrapError(err, "Bins", za0001)
return
}
}
case "UpdatedAt":
z.UpdatedAt, err = dc.ReadTime()
if err != nil {
err = msgp.WrapError(err, "UpdatedAt")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z *lastDayTierStats) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "Bins"
err = en.Append(0x82, 0xa4, 0x42, 0x69, 0x6e, 0x73)
if err != nil {
return
}
err = en.WriteArrayHeader(uint32(24))
if err != nil {
err = msgp.WrapError(err, "Bins")
return
}
for za0001 := range z.Bins {
err = z.Bins[za0001].EncodeMsg(en)
if err != nil {
err = msgp.WrapError(err, "Bins", za0001)
return
}
}
// write "UpdatedAt"
err = en.Append(0xa9, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
if err != nil {
return
}
err = en.WriteTime(z.UpdatedAt)
if err != nil {
err = msgp.WrapError(err, "UpdatedAt")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z *lastDayTierStats) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "Bins"
o = append(o, 0x82, 0xa4, 0x42, 0x69, 0x6e, 0x73)
o = msgp.AppendArrayHeader(o, uint32(24))
for za0001 := range z.Bins {
o, err = z.Bins[za0001].MarshalMsg(o)
if err != nil {
err = msgp.WrapError(err, "Bins", za0001)
return
}
}
// string "UpdatedAt"
o = append(o, 0xa9, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74)
o = msgp.AppendTime(o, z.UpdatedAt)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *lastDayTierStats) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "Bins":
var zb0002 uint32
zb0002, bts, err = msgp.ReadArrayHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err, "Bins")
return
}
if zb0002 != uint32(24) {
err = msgp.ArrayError{Wanted: uint32(24), Got: zb0002}
return
}
for za0001 := range z.Bins {
bts, err = z.Bins[za0001].UnmarshalMsg(bts)
if err != nil {
err = msgp.WrapError(err, "Bins", za0001)
return
}
}
case "UpdatedAt":
z.UpdatedAt, bts, err = msgp.ReadTimeBytes(bts)
if err != nil {
err = msgp.WrapError(err, "UpdatedAt")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z *lastDayTierStats) Msgsize() (s int) {
s = 1 + 5 + msgp.ArrayHeaderSize
for za0001 := range z.Bins {
s += z.Bins[za0001].Msgsize()
}
s += 10 + msgp.TimeSize
return
}


@ -0,0 +1,236 @@
package cmd
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalDailyAllTierStats(t *testing.T) {
v := DailyAllTierStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgDailyAllTierStats(b *testing.B) {
v := DailyAllTierStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgDailyAllTierStats(b *testing.B) {
v := DailyAllTierStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalDailyAllTierStats(b *testing.B) {
v := DailyAllTierStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeDailyAllTierStats(t *testing.T) {
v := DailyAllTierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeDailyAllTierStats Msgsize() is inaccurate")
}
vn := DailyAllTierStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeDailyAllTierStats(b *testing.B) {
v := DailyAllTierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeDailyAllTierStats(b *testing.B) {
v := DailyAllTierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshallastDayTierStats(t *testing.T) {
v := lastDayTierStats{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsglastDayTierStats(b *testing.B) {
v := lastDayTierStats{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsglastDayTierStats(b *testing.B) {
v := lastDayTierStats{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshallastDayTierStats(b *testing.B) {
v := lastDayTierStats{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodelastDayTierStats(t *testing.T) {
v := lastDayTierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodelastDayTierStats Msgsize() is inaccurate")
}
vn := lastDayTierStats{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodelastDayTierStats(b *testing.B) {
v := lastDayTierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodelastDayTierStats(b *testing.B) {
v := lastDayTierStats{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}


@ -17,6 +17,8 @@
package bandwidth
//go:generate msgp -file=$GOFILE -unexported
import (
"context"
"sync"
@ -25,6 +27,8 @@ import (
"golang.org/x/time/rate"
)
//msgp:ignore bucketThrottle Monitor
type bucketThrottle struct {
*rate.Limiter
NodeBandwidthPerSec int64


@ -0,0 +1,218 @@
package bandwidth
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"github.com/tinylib/msgp/msgp"
)
// DecodeMsg implements msgp.Decodable
func (z *BucketBandwidthReport) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z BucketBandwidthReport) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 0
err = en.Append(0x80)
if err != nil {
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z BucketBandwidthReport) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 0
o = append(o, 0x80)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *BucketBandwidthReport) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z BucketBandwidthReport) Msgsize() (s int) {
s = 1
return
}
// DecodeMsg implements msgp.Decodable
func (z *Details) DecodeMsg(dc *msgp.Reader) (err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, err = dc.ReadMapHeader()
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, err = dc.ReadMapKeyPtr()
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "LimitInBytesPerSecond":
z.LimitInBytesPerSecond, err = dc.ReadInt64()
if err != nil {
err = msgp.WrapError(err, "LimitInBytesPerSecond")
return
}
case "CurrentBandwidthInBytesPerSecond":
z.CurrentBandwidthInBytesPerSecond, err = dc.ReadFloat64()
if err != nil {
err = msgp.WrapError(err, "CurrentBandwidthInBytesPerSecond")
return
}
default:
err = dc.Skip()
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
return
}
// EncodeMsg implements msgp.Encodable
func (z Details) EncodeMsg(en *msgp.Writer) (err error) {
// map header, size 2
// write "LimitInBytesPerSecond"
err = en.Append(0x82, 0xb5, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64)
if err != nil {
return
}
err = en.WriteInt64(z.LimitInBytesPerSecond)
if err != nil {
err = msgp.WrapError(err, "LimitInBytesPerSecond")
return
}
// write "CurrentBandwidthInBytesPerSecond"
err = en.Append(0xd9, 0x20, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64)
if err != nil {
return
}
err = en.WriteFloat64(z.CurrentBandwidthInBytesPerSecond)
if err != nil {
err = msgp.WrapError(err, "CurrentBandwidthInBytesPerSecond")
return
}
return
}
// MarshalMsg implements msgp.Marshaler
func (z Details) MarshalMsg(b []byte) (o []byte, err error) {
o = msgp.Require(b, z.Msgsize())
// map header, size 2
// string "LimitInBytesPerSecond"
o = append(o, 0x82, 0xb5, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64)
o = msgp.AppendInt64(o, z.LimitInBytesPerSecond)
// string "CurrentBandwidthInBytesPerSecond"
o = append(o, 0xd9, 0x20, 0x43, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x74, 0x42, 0x61, 0x6e, 0x64, 0x77, 0x69, 0x64, 0x74, 0x68, 0x49, 0x6e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64)
o = msgp.AppendFloat64(o, z.CurrentBandwidthInBytesPerSecond)
return
}
// UnmarshalMsg implements msgp.Unmarshaler
func (z *Details) UnmarshalMsg(bts []byte) (o []byte, err error) {
var field []byte
_ = field
var zb0001 uint32
zb0001, bts, err = msgp.ReadMapHeaderBytes(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
for zb0001 > 0 {
zb0001--
field, bts, err = msgp.ReadMapKeyZC(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
switch msgp.UnsafeString(field) {
case "LimitInBytesPerSecond":
z.LimitInBytesPerSecond, bts, err = msgp.ReadInt64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "LimitInBytesPerSecond")
return
}
case "CurrentBandwidthInBytesPerSecond":
z.CurrentBandwidthInBytesPerSecond, bts, err = msgp.ReadFloat64Bytes(bts)
if err != nil {
err = msgp.WrapError(err, "CurrentBandwidthInBytesPerSecond")
return
}
default:
bts, err = msgp.Skip(bts)
if err != nil {
err = msgp.WrapError(err)
return
}
}
}
o = bts
return
}
// Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message
func (z Details) Msgsize() (s int) {
s = 1 + 22 + msgp.Int64Size + 34 + msgp.Float64Size
return
}


@ -0,0 +1,236 @@
package bandwidth
// Code generated by github.com/tinylib/msgp DO NOT EDIT.
import (
"bytes"
"testing"
"github.com/tinylib/msgp/msgp"
)
func TestMarshalUnmarshalBucketBandwidthReport(t *testing.T) {
v := BucketBandwidthReport{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgBucketBandwidthReport(b *testing.B) {
v := BucketBandwidthReport{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgBucketBandwidthReport(b *testing.B) {
v := BucketBandwidthReport{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalBucketBandwidthReport(b *testing.B) {
v := BucketBandwidthReport{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeBucketBandwidthReport(t *testing.T) {
v := BucketBandwidthReport{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeBucketBandwidthReport Msgsize() is inaccurate")
}
vn := BucketBandwidthReport{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeBucketBandwidthReport(b *testing.B) {
v := BucketBandwidthReport{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeBucketBandwidthReport(b *testing.B) {
v := BucketBandwidthReport{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}
func TestMarshalUnmarshalDetails(t *testing.T) {
v := Details{}
bts, err := v.MarshalMsg(nil)
if err != nil {
t.Fatal(err)
}
left, err := v.UnmarshalMsg(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after UnmarshalMsg(): %q", len(left), left)
}
left, err = msgp.Skip(bts)
if err != nil {
t.Fatal(err)
}
if len(left) > 0 {
t.Errorf("%d bytes left over after Skip(): %q", len(left), left)
}
}
func BenchmarkMarshalMsgDetails(b *testing.B) {
v := Details{}
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.MarshalMsg(nil)
}
}
func BenchmarkAppendMsgDetails(b *testing.B) {
v := Details{}
bts := make([]byte, 0, v.Msgsize())
bts, _ = v.MarshalMsg(bts[0:0])
b.SetBytes(int64(len(bts)))
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
bts, _ = v.MarshalMsg(bts[0:0])
}
}
func BenchmarkUnmarshalDetails(b *testing.B) {
v := Details{}
bts, _ := v.MarshalMsg(nil)
b.ReportAllocs()
b.SetBytes(int64(len(bts)))
b.ResetTimer()
for i := 0; i < b.N; i++ {
_, err := v.UnmarshalMsg(bts)
if err != nil {
b.Fatal(err)
}
}
}
func TestEncodeDecodeDetails(t *testing.T) {
v := Details{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
m := v.Msgsize()
if buf.Len() > m {
t.Log("WARNING: TestEncodeDecodeDetails Msgsize() is inaccurate")
}
vn := Details{}
err := msgp.Decode(&buf, &vn)
if err != nil {
t.Error(err)
}
buf.Reset()
msgp.Encode(&buf, &v)
err = msgp.NewReader(&buf).Skip()
if err != nil {
t.Error(err)
}
}
func BenchmarkEncodeDetails(b *testing.B) {
v := Details{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
en := msgp.NewWriter(msgp.Nowhere)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
v.EncodeMsg(en)
}
en.Flush()
}
func BenchmarkDecodeDetails(b *testing.B) {
v := Details{}
var buf bytes.Buffer
msgp.Encode(&buf, &v)
b.SetBytes(int64(buf.Len()))
rd := msgp.NewEndlessReader(buf.Bytes(), b)
dc := msgp.NewReader(rd)
b.ReportAllocs()
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := v.DecodeMsg(dc)
if err != nil {
b.Fatal(err)
}
}
}


@ -36,6 +36,7 @@ import (
// HandlerID is a handler identifier.
// It is used to determine request routing on the server.
// Handlers can be registered with a static subroute.
// Do NOT remove or change the order of existing handlers: the IDs are iota-based
// and exchanged on the wire, so reordering breaks compatibility between versions.
const (
// handlerInvalid is reserved to check for uninitialized values.
handlerInvalid HandlerID = iota
@ -69,7 +70,6 @@ const (
HandlerStopRebalance
HandlerLoadRebalanceMeta
HandlerLoadTransitionTierConfig
HandlerDeletePolicy
HandlerLoadPolicy
HandlerLoadPolicyMapping
@ -78,11 +78,37 @@ const (
HandlerDeleteUser
HandlerLoadUser
HandlerLoadGroup
HandlerHealBucket
HandlerMakeBucket
HandlerHeadBucket
HandlerDeleteBucket
HandlerGetMetrics
HandlerGetResourceMetrics
HandlerGetMemInfo
HandlerGetProcInfo
HandlerGetOSInfo
HandlerGetPartitions
HandlerGetNetInfo
HandlerGetCPUs
HandlerServerInfo
HandlerGetSysConfig
HandlerGetSysServices
HandlerGetSysErrors
HandlerGetAllBucketStats
HandlerGetBucketStats
HandlerGetSRMetrics
HandlerGetPeerMetrics
HandlerGetMetacacheListing
HandlerUpdateMetacacheListing
HandlerGetPeerBucketMetrics
HandlerStorageInfo
HandlerConsoleLog
HandlerListDir
HandlerGetLocks
HandlerBackgroundHealStatus
HandlerGetLastDayTierStats
HandlerSignalService
HandlerGetBandwidth
// Add more above here ^^^
// If all handlers are used, the type of Handler can be changed.
@ -137,6 +163,28 @@ var handlerPrefixes = [handlerLast]string{
HandlerHeadBucket: peerPrefixS3,
HandlerDeleteBucket: peerPrefixS3,
HandlerHealBucket: healPrefix,
HandlerGetMetrics: peerPrefix,
HandlerGetResourceMetrics: peerPrefix,
HandlerGetMemInfo: peerPrefix,
HandlerGetProcInfo: peerPrefix,
HandlerGetOSInfo: peerPrefix,
HandlerGetPartitions: peerPrefix,
HandlerGetNetInfo: peerPrefix,
HandlerGetCPUs: peerPrefix,
HandlerServerInfo: peerPrefix,
HandlerGetSysConfig: peerPrefix,
HandlerGetSysServices: peerPrefix,
HandlerGetSysErrors: peerPrefix,
HandlerGetAllBucketStats: peerPrefix,
HandlerGetBucketStats: peerPrefix,
HandlerGetSRMetrics: peerPrefix,
HandlerGetPeerMetrics: peerPrefix,
HandlerGetMetacacheListing: peerPrefix,
HandlerUpdateMetacacheListing: peerPrefix,
HandlerGetPeerBucketMetrics: peerPrefix,
HandlerStorageInfo: peerPrefix,
HandlerConsoleLog: peerPrefix,
HandlerListDir: storagePrefix,
}
const (
@ -344,9 +392,10 @@ type RoundTripper interface {
// SingleHandler is a type safe handler for single roundtrip requests.
type SingleHandler[Req, Resp RoundTripper] struct {
id HandlerID
sharedResp bool
callReuseReq bool
id HandlerID
sharedResp bool
callReuseReq bool
ignoreNilConn bool
newReq func() Req
newResp func() Resp
@ -407,6 +456,17 @@ func (h *SingleHandler[Req, Resp]) AllowCallRequestPool(b bool) *SingleHandler[R
return h
}
// IgnoreNilConn will ignore nil connections when calling.
// This will make Call return nil instead of ErrDisconnected when the connection is nil.
// This may only be set ONCE before use.
func (h *SingleHandler[Req, Resp]) IgnoreNilConn() *SingleHandler[Req, Resp] {
if h.ignoreNilConn {
logger.LogOnceIf(context.Background(), fmt.Errorf("%s: IgnoreNilConn called twice", h.id.String()), h.id.String()+"IgnoreNilConn")
}
h.ignoreNilConn = true
return h
}
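
A sketch of how this chains onto a declaration, mirroring the `AllowCallRequestPool` style used elsewhere in this diff (the variable name is illustrative; the actual wiring of `HandlerSignalService` may differ):

```
// Sketch: a peer RPC that tolerates nil connections, so calls aimed at a
// peer without a connection (e.g. the local node) become no-ops instead of
// failing with ErrDisconnected.
var signalServiceRPC = grid.NewSingleHandler[*grid.MSS, grid.NoPayload](
	grid.HandlerSignalService, grid.NewMSS, grid.NewNoPayload,
).IgnoreNilConn()
```

With this set, the nil-connection check at the top of `Call` (below) returns a zero response and a nil error instead of `ErrDisconnected`.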
// WithSharedResponse indicates it is unsafe to reuse the response
// when it has been returned on a handler.
// This will disable automatic response recycling/pooling.
@ -476,6 +536,12 @@ type Requester interface {
// The response should be returned with PutResponse when no error occurred.
// If no deadline is set, a 1-minute deadline is added.
func (h *SingleHandler[Req, Resp]) Call(ctx context.Context, c Requester, req Req) (resp Resp, err error) {
if c == nil {
if h.ignoreNilConn {
return resp, nil
}
return resp, ErrDisconnected
}
payload, err := req.MarshalMsg(GetByteBuffer()[:0])
if err != nil {
return resp, err
@ -777,6 +843,9 @@ type Streamer interface {
// Call the remote with the given payload and return a typed stream of requests and responses.
func (h *StreamTypeHandler[Payload, Req, Resp]) Call(ctx context.Context, c Streamer, payload Payload) (st *TypedStream[Req, Resp], err error) {
if c == nil {
return nil, ErrDisconnected
}
var payloadB []byte
if h.WithPayload {
var err error


@ -51,14 +51,41 @@ func _() {
_ = x[HandlerMakeBucket-40]
_ = x[HandlerHeadBucket-41]
_ = x[HandlerDeleteBucket-42]
_ = x[handlerTest-43]
_ = x[handlerTest2-44]
_ = x[handlerLast-45]
_ = x[HandlerGetMetrics-43]
_ = x[HandlerGetResourceMetrics-44]
_ = x[HandlerGetMemInfo-45]
_ = x[HandlerGetProcInfo-46]
_ = x[HandlerGetOSInfo-47]
_ = x[HandlerGetPartitions-48]
_ = x[HandlerGetNetInfo-49]
_ = x[HandlerGetCPUs-50]
_ = x[HandlerServerInfo-51]
_ = x[HandlerGetSysConfig-52]
_ = x[HandlerGetSysServices-53]
_ = x[HandlerGetSysErrors-54]
_ = x[HandlerGetAllBucketStats-55]
_ = x[HandlerGetBucketStats-56]
_ = x[HandlerGetSRMetrics-57]
_ = x[HandlerGetPeerMetrics-58]
_ = x[HandlerGetMetacacheListing-59]
_ = x[HandlerUpdateMetacacheListing-60]
_ = x[HandlerGetPeerBucketMetrics-61]
_ = x[HandlerStorageInfo-62]
_ = x[HandlerConsoleLog-63]
_ = x[HandlerListDir-64]
_ = x[HandlerGetLocks-65]
_ = x[HandlerBackgroundHealStatus-66]
_ = x[HandlerGetLastDayTierStats-67]
_ = x[HandlerSignalService-68]
_ = x[HandlerGetBandwidth-69]
_ = x[handlerTest-70]
_ = x[handlerTest2-71]
_ = x[handlerLast-72]
}
const _HandlerID_name = "handlerInvalidLockLockLockRLockLockUnlockLockRUnlockLockRefreshLockForceUnlockWalkDirStatVolDiskInfoNSScannerReadXLReadVersionDeleteFileDeleteVersionUpdateMetadataWriteMetadataCheckPartsRenameDataRenameFileReadAllServerVerifyTraceListenDeleteBucketMetadataLoadBucketMetadataReloadSiteReplicationConfigReloadPoolMetaStopRebalanceLoadRebalanceMetaLoadTransitionTierConfigDeletePolicyLoadPolicyLoadPolicyMappingDeleteServiceAccountLoadServiceAccountDeleteUserLoadUserLoadGroupHealBucketMakeBucketHeadBucketDeleteBuckethandlerTesthandlerTest2handlerLast"
const _HandlerID_name = "handlerInvalidLockLockLockRLockLockUnlockLockRUnlockLockRefreshLockForceUnlockWalkDirStatVolDiskInfoNSScannerReadXLReadVersionDeleteFileDeleteVersionUpdateMetadataWriteMetadataCheckPartsRenameDataRenameFileReadAllServerVerifyTraceListenDeleteBucketMetadataLoadBucketMetadataReloadSiteReplicationConfigReloadPoolMetaStopRebalanceLoadRebalanceMetaLoadTransitionTierConfigDeletePolicyLoadPolicyLoadPolicyMappingDeleteServiceAccountLoadServiceAccountDeleteUserLoadUserLoadGroupHealBucketMakeBucketHeadBucketDeleteBucketGetMetricsGetResourceMetricsGetMemInfoGetProcInfoGetOSInfoGetPartitionsGetNetInfoGetCPUsServerInfoGetSysConfigGetSysServicesGetSysErrorsGetAllBucketStatsGetBucketStatsGetSRMetricsGetPeerMetricsGetMetacacheListingUpdateMetacacheListingGetPeerBucketMetricsStorageInfoConsoleLogListDirGetLocksBackgroundHealStatusGetLastDayTierStatsSignalServiceGetBandwidthhandlerTesthandlerTest2handlerLast"
var _HandlerID_index = [...]uint16{0, 14, 22, 31, 41, 52, 63, 78, 85, 92, 100, 109, 115, 126, 136, 149, 163, 176, 186, 196, 206, 213, 225, 230, 236, 256, 274, 301, 315, 328, 345, 369, 381, 391, 408, 428, 446, 456, 464, 473, 483, 493, 503, 515, 526, 538, 549}
var _HandlerID_index = [...]uint16{0, 14, 22, 31, 41, 52, 63, 78, 85, 92, 100, 109, 115, 126, 136, 149, 163, 176, 186, 196, 206, 213, 225, 230, 236, 256, 274, 301, 315, 328, 345, 369, 381, 391, 408, 428, 446, 456, 464, 473, 483, 493, 503, 515, 525, 543, 553, 564, 573, 586, 596, 603, 613, 625, 639, 651, 668, 682, 694, 708, 727, 749, 769, 780, 790, 797, 805, 825, 844, 857, 869, 880, 892, 903}
func (i HandlerID) String() string {
if i >= HandlerID(len(_HandlerID_index)-1) {


@ -250,7 +250,7 @@ func (m *Manager) RegisterSingleHandler(id HandlerID, h SingleHandlerFn, subrout
if len(subroute) == 0 {
if m.handlers.hasAny(id) && !id.isTestHandler() {
return ErrHandlerAlreadyExists
return fmt.Errorf("handler %v: %w", id.String(), ErrHandlerAlreadyExists)
}
m.handlers.single[id] = h
@ -258,7 +258,7 @@ func (m *Manager) RegisterSingleHandler(id HandlerID, h SingleHandlerFn, subrout
}
subID := makeSubHandlerID(id, s)
if m.handlers.hasSubhandler(subID) && !id.isTestHandler() {
return ErrHandlerAlreadyExists
return fmt.Errorf("handler %v, subroute:%v: %w", id.String(), s, ErrHandlerAlreadyExists)
}
m.handlers.subSingle[subID] = h
// Copy so clients can also pick it up for other subpaths.


@ -18,7 +18,10 @@
package grid
import (
"bytes"
"encoding/json"
"errors"
"math"
"net/url"
"sort"
"strings"
@ -394,6 +397,137 @@ func (u URLValues) Msgsize() (s int) {
return
}
// JSONPool is a pool for JSON objects that unmarshal into T.
type JSONPool[T any] struct {
pool sync.Pool
emptySz int
}
// NewJSONPool returns a new JSONPool.
func NewJSONPool[T any]() *JSONPool[T] {
var t T
sz := 128
if b, err := json.Marshal(t); err == nil {
sz = len(b)
}
return &JSONPool[T]{
pool: sync.Pool{
New: func() interface{} {
var t T
return &t
},
},
emptySz: sz,
}
}
func (p *JSONPool[T]) new() *T {
var zero T
t := p.pool.Get().(*T)
*t = zero
return t
}
// JSON is a wrapper around a T object that can be serialized.
// There is an internal value that is accessed with Value() and returned to the pool with Recycle().
type JSON[T any] struct {
p *JSONPool[T]
val *T
}
// NewJSON returns a new JSON wrapper from the pool.
// No initial value is set.
func (p *JSONPool[T]) NewJSON() *JSON[T] {
var j JSON[T]
j.p = p
return &j
}
// NewJSONWith returns a new JSON with the provided value.
func (p *JSONPool[T]) NewJSONWith(val *T) *JSON[T] {
var j JSON[T]
j.p = p
j.val = val
return &j
}
// Value returns the underlying value.
// If not set yet, a new value is created.
func (j *JSON[T]) Value() *T {
if j.val == nil {
j.val = j.p.new()
}
return j.val
}
// ValueOrZero returns the underlying value.
// If the underlying value is nil, a zero value is returned.
func (j *JSON[T]) ValueOrZero() T {
if j == nil || j.val == nil {
var t T
return t
}
return *j.val
}
// Set the underlying value.
func (j *JSON[T]) Set(v *T) {
j.val = v
}
// Recycle the underlying value.
func (j *JSON[T]) Recycle() {
if j.val != nil {
j.p.pool.Put(j.val)
j.val = nil
}
}
// MarshalMsg implements msgp.Marshaler
func (j *JSON[T]) MarshalMsg(b []byte) (o []byte, err error) {
if j.val == nil {
return msgp.AppendNil(b), nil
}
buf := bytes.NewBuffer(GetByteBuffer()[:0])
defer func() {
PutByteBuffer(buf.Bytes())
}()
enc := json.NewEncoder(buf)
err = enc.Encode(j.val)
if err != nil {
return b, err
}
return msgp.AppendBytes(b, buf.Bytes()), nil
}
// UnmarshalMsg reads the msgp-wrapped JSON bytes and unmarshals them into the value.
// Nil values are supported.
func (j *JSON[T]) UnmarshalMsg(bytes []byte) ([]byte, error) {
if bytes, err := msgp.ReadNilBytes(bytes); err == nil {
if j.val != nil {
j.p.pool.Put(j.val)
}
j.val = nil
return bytes, nil
}
val, bytes, err := msgp.ReadBytesZC(bytes)
if err != nil {
return bytes, err
}
if j.val == nil {
j.val = j.p.new()
} else {
var t T
*j.val = t
}
return bytes, json.Unmarshal(val, j.val)
}
// Msgsize returns the JSON size of an empty value; for non-empty values this is only a rough estimate.
func (j *JSON[T]) Msgsize() int {
return j.p.emptySz
}
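
Putting the pool and wrapper together; a minimal sketch, where `diskMetrics`, `metricsPool`, and `roundTrip` are illustrative names, not part of this diff:

```
// Sketch: shipping a JSON-tagged struct over the msgp-based grid transport.
type diskMetrics struct {
	Reads  int64 `json:"reads"`
	Writes int64 `json:"writes"`
}

var metricsPool = grid.NewJSONPool[diskMetrics]()

func roundTrip(m *diskMetrics) (*diskMetrics, error) {
	b, err := metricsPool.NewJSONWith(m).MarshalMsg(nil)
	if err != nil {
		return nil, err
	}
	out := metricsPool.NewJSON()
	if _, err := out.UnmarshalMsg(b); err != nil {
		return nil, err
	}
	return out.Value(), nil // call Recycle() when done to return the value to the pool
}
```

Recycling returns the decoded value to the pool, which is what keeps the wrapper cheap for high-frequency peer calls.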
// NoPayload is a type that can be used for handlers that do not use a payload.
type NoPayload struct{}
@ -419,3 +553,156 @@ func NewNoPayload() NoPayload {
// Recycle is a no-op.
func (NoPayload) Recycle() {}
// ArrayOf wraps an array of MessagePack-compatible objects.
type ArrayOf[T RoundTripper] struct {
aPool sync.Pool // Arrays
ePool sync.Pool // Elements
}
// NewArrayOf returns a new ArrayOf.
// You must provide a function that returns a new instance of T.
func NewArrayOf[T RoundTripper](newFn func() T) *ArrayOf[T] {
return &ArrayOf[T]{
ePool: sync.Pool{New: func() any {
return newFn()
}},
}
}
// New returns a new empty Array.
func (p *ArrayOf[T]) New() *Array[T] {
return &Array[T]{
p: p,
}
}
// NewWith returns a new Array with the provided value (not copied).
func (p *ArrayOf[T]) NewWith(val []T) *Array[T] {
return &Array[T]{
p: p,
val: val,
}
}
func (p *ArrayOf[T]) newA(sz uint32) []T {
t, ok := p.aPool.Get().(*[]T)
if !ok || t == nil {
return make([]T, 0, sz)
}
t2 := *t
return t2[:0]
}
func (p *ArrayOf[T]) putA(v []T) {
for _, t := range v {
p.ePool.Put(t)
}
if v != nil {
v = v[:0]
p.aPool.Put(&v)
}
}
func (p *ArrayOf[T]) newE() T {
return p.ePool.Get().(T)
}
// Array provides a wrapper for an underlying array of serializable objects.
type Array[T RoundTripper] struct {
p *ArrayOf[T]
val []T
}
// Msgsize returns an upper-bound estimate of the serialized size of the array in bytes.
func (j *Array[T]) Msgsize() int {
if j.val == nil {
return msgp.NilSize
}
sz := msgp.ArrayHeaderSize
for _, v := range j.val {
sz += v.Msgsize()
}
return sz
}
// Value returns the underlying value.
// Regular append mechanics should be observed.
// If no value has been set yet, a new array is created.
func (j *Array[T]) Value() []T {
if j.val == nil {
j.val = j.p.newA(10)
}
return j.val
}
// Append a value to the underlying array.
// The receiver is returned to allow call chaining.
func (j *Array[T]) Append(v ...T) *Array[T] {
if j.val == nil {
j.val = j.p.newA(uint32(len(v)))
}
j.val = append(j.val, v...)
return j
}
// Set the underlying value.
func (j *Array[T]) Set(val []T) {
j.val = val
}
// Recycle the underlying value.
func (j *Array[T]) Recycle() {
if j.val != nil {
j.p.putA(j.val)
j.val = nil
}
}
// MarshalMsg implements msgp.Marshaler
func (j *Array[T]) MarshalMsg(b []byte) (o []byte, err error) {
if j.val == nil {
return msgp.AppendNil(b), nil
}
if uint64(len(j.val)) > math.MaxUint32 {
return b, errors.New("array: length of array exceeds math.MaxUint32")
}
b = msgp.AppendArrayHeader(b, uint32(len(j.val)))
for _, v := range j.val {
b, err = v.MarshalMsg(b)
if err != nil {
return b, err
}
}
return b, err
}
// UnmarshalMsg decodes the msgp array, unmarshaling each element into a pooled T.
// Nil values are supported.
func (j *Array[T]) UnmarshalMsg(bytes []byte) ([]byte, error) {
if bytes, err := msgp.ReadNilBytes(bytes); err == nil {
if j.val != nil {
j.p.putA(j.val)
}
j.val = nil
return bytes, nil
}
l, bytes, err := msgp.ReadArrayHeaderBytes(bytes)
if err != nil {
return bytes, err
}
if j.val == nil {
j.val = j.p.newA(l)
} else {
j.val = j.val[:0]
}
for i := uint32(0); i < l; i++ {
v := j.p.newE()
bytes, err = v.UnmarshalMsg(bytes)
if err != nil {
return bytes, err
}
j.val = append(j.val, v)
}
return bytes, nil
}
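
And the slice wrapper in use; a sketch assuming an element type that already satisfies the `RoundTripper` constraint (here `*ListDirResult`, purely for illustration — the helper names are not part of this diff):

```
// Sketch: pooled serialization of a slice of msgp round-trippers.
var listDirResults = grid.NewArrayOf[*ListDirResult](func() *ListDirResult {
	return &ListDirResult{}
})

func encodeResults(in []*ListDirResult) ([]byte, error) {
	return listDirResults.NewWith(in).MarshalMsg(nil)
}

func decodeResults(b []byte) ([]*ListDirResult, error) {
	arr := listDirResults.New()
	if _, err := arr.UnmarshalMsg(b); err != nil {
		return nil, err
	}
	// arr.Recycle() would return both the slice and its elements to the pools.
	return arr.Value(), nil
}
```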