remove unnecessary metrics in 'mc admin info' output (#19020)

Reduce the amount of data transfer on large deployments
Harshavardhana 2024-02-08 19:28:46 -08:00 committed by GitHub
parent 404d8b3084
commit 62761a23e6
8 changed files with 45 additions and 18 deletions

@@ -2201,7 +2201,10 @@ func getPoolsInfo(ctx context.Context, allDisks []madmin.Disk) (map[int]map[int]
 		return nil, errServerNotInitialized
 	}
-	z, _ := objectAPI.(*erasureServerPools)
+	z, ok := objectAPI.(*erasureServerPools)
+	if !ok {
+		return nil, errServerNotInitialized
+	}
 	poolsInfo := make(map[int]map[int]madmin.ErasureSetInfo)
 	for _, d := range allDisks {
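The hunk above replaces a blind type assertion with the comma-ok form so a nil or unexpected object layer is rejected up front instead of being dereferenced later. For readers less familiar with the idiom, a self-contained sketch (types here are invented, not MinIO's):

package main

import "fmt"

type pool struct{ name string }

func describe(v interface{}) (string, error) {
	// Comma-ok assertion: ok is false (and p is nil) when v is not a *pool,
	// so the caller can bail out instead of hitting a nil dereference later.
	p, ok := v.(*pool)
	if !ok {
		return "", fmt.Errorf("not a *pool: %T", v)
	}
	return p.name, nil
}

func main() {
	fmt.Println(describe(&pool{name: "pool-0"})) // pool-0 <nil>
	fmt.Println(describe(42))                    // "" not a *pool: int
}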
@@ -2232,7 +2235,7 @@ func getPoolsInfo(ctx context.Context, allDisks []madmin.Disk) (map[int]map[int]
 	return poolsInfo, nil
 }
-func getServerInfo(ctx context.Context, poolsInfoEnabled bool, r *http.Request) madmin.InfoMessage {
+func getServerInfo(ctx context.Context, pools, metrics bool, r *http.Request) madmin.InfoMessage {
 	ldap := madmin.LDAP{}
 	if globalIAMSys.LDAPConfig.Enabled() {
 		ldapConn, err := globalIAMSys.LDAPConfig.LDAP.Connect()
@@ -2253,8 +2256,8 @@ func getServerInfo(ctx context.Context, poolsInfoEnabled bool, r *http.Request)
 	// Get the notification target info
 	notifyTarget := fetchLambdaInfo()
-	local := getLocalServerProperty(globalEndpoints, r)
-	servers := globalNotificationSys.ServerInfo()
+	local := getLocalServerProperty(globalEndpoints, r, metrics)
+	servers := globalNotificationSys.ServerInfo(metrics)
 	servers = append(servers, local)
 	assignPoolNumbers(servers)
@@ -2308,7 +2311,7 @@ func getServerInfo(ctx context.Context, poolsInfoEnabled bool, r *http.Request)
 			DrivesPerSet: backendInfo.DrivesPerSet,
 		}
-		if poolsInfoEnabled {
+		if pools {
 			poolsInfo, _ = getPoolsInfo(ctx, allDisks)
 		}
 	}
@@ -2690,7 +2693,7 @@ func fetchHealthInfo(healthCtx context.Context, objectAPI ObjectLayer, query *ur
 	getAndWriteSysConfig()
 	if query.Get("minioinfo") == "true" {
-		infoMessage := getServerInfo(healthCtx, false, nil)
+		infoMessage := getServerInfo(healthCtx, false, true, nil)
 		servers := make([]madmin.ServerInfo, 0, len(infoMessage.Servers))
 		for _, server := range infoMessage.Servers {
			anonEndpoint := anonAddr(server.Endpoint)
@@ -2878,8 +2881,10 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
 		return
 	}
+	metrics := r.Form.Get(peerRESTMetrics) == "true"
 	// Marshal API response
-	jsonBytes, err := json.Marshal(getServerInfo(ctx, true, r))
+	jsonBytes, err := json.Marshal(getServerInfo(ctx, true, metrics, r))
 	if err != nil {
 		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
 		return
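Taken together, these admin-handlers.go hunks make per-drive metrics opt-in: the handler reads a `metrics` query value from the request form and threads it down, so `mc admin info` can skip the heavy payload on large deployments. A minimal standalone sketch of that pattern using only the standard library (endpoint and field names are illustrative, not MinIO's):

package main

import (
	"encoding/json"
	"log"
	"net/http"
)

// infoResponse mirrors the idea of madmin.InfoMessage: cheap summary fields
// are always present, heavy per-drive metrics only when asked for.
type infoResponse struct {
	Servers int            `json:"servers"`
	Metrics map[string]int `json:"metrics,omitempty"`
}

func infoHandler(w http.ResponseWriter, r *http.Request) {
	// Same convention as the hunk above: anything other than "true" means false.
	withMetrics := r.FormValue("metrics") == "true"

	resp := infoResponse{Servers: 4}
	if withMetrics {
		// Stand-in for the expensive part we want to skip by default.
		resp.Metrics = map[string]int{"totalWrites": 1024, "totalDeletes": 17}
	}
	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(resp)
}

func main() {
	http.HandleFunc("/info", infoHandler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}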

@@ -34,7 +34,7 @@ import (
 // getLocalServerProperty - returns madmin.ServerProperties for only the
 // local endpoints from given list of endpoints
-func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request) madmin.ServerProperties {
+func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Request, metrics bool) madmin.ServerProperties {
 	addr := globalLocalNodeName
 	if r != nil {
 		addr = r.Host
@@ -141,7 +141,7 @@ func getLocalServerProperty(endpointServerPools EndpointServerPools, r *http.Req
 	objLayer := newObjectLayerFn()
 	if objLayer != nil {
-		storageInfo := objLayer.LocalStorageInfo(GlobalContext, true)
+		storageInfo := objLayer.LocalStorageInfo(GlobalContext, metrics)
 		props.State = string(madmin.ItemOnline)
 		props.Disks = storageInfo.Disks
 	} else {

@@ -381,7 +381,7 @@ func storageMetricsPrometheus(ch chan<- prometheus.Metric) {
 	server := getLocalServerProperty(globalEndpoints, &http.Request{
 		Host: globalLocalNodeName,
-	})
+	}, true)
 	onlineDisks, offlineDisks := getOnlineOfflineDisksStats(server.Disks)
 	totalDisks := offlineDisks.Merge(onlineDisks)

@@ -1080,7 +1080,7 @@ func (sys *NotificationSys) StorageInfo(objLayer ObjectLayer, metrics bool) Stor
 }
 // ServerInfo - calls ServerInfo RPC call on all peers.
-func (sys *NotificationSys) ServerInfo() []madmin.ServerProperties {
+func (sys *NotificationSys) ServerInfo(metrics bool) []madmin.ServerProperties {
 	reply := make([]madmin.ServerProperties, len(sys.peerClients))
 	var wg sync.WaitGroup
 	for i, client := range sys.peerClients {
@@ -1090,7 +1090,7 @@ func (sys *NotificationSys) ServerInfo() []madmin.ServerProperties {
 		wg.Add(1)
 		go func(client *peerRESTClient, idx int) {
 			defer wg.Done()
-			info, err := client.ServerInfo()
+			info, err := client.ServerInfo(metrics)
 			if err != nil {
 				info.Endpoint = client.host.String()
 				info.State = string(madmin.ItemOffline)
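For context, NotificationSys.ServerInfo fans out one RPC per peer and writes each result into its own slice slot; the new `metrics` flag simply rides along on every call. A self-contained sketch of that fan-out pattern (illustrative types, not MinIO's):

package main

import (
	"fmt"
	"sync"
)

type peer struct{ host string }

// info is a stand-in for madmin.ServerProperties.
type info struct {
	Endpoint string
	State    string
}

// fetch simulates one peer RPC; the metrics flag mirrors the new parameter.
func (p peer) fetch(metrics bool) (info, error) {
	return info{Endpoint: p.host, State: "online"}, nil
}

func serverInfo(peers []peer, metrics bool) []info {
	// One slot per peer: goroutines write disjoint indexes, so no locking is needed.
	reply := make([]info, len(peers))
	var wg sync.WaitGroup
	for i, p := range peers {
		wg.Add(1)
		go func(p peer, idx int) {
			defer wg.Done()
			in, err := p.fetch(metrics)
			if err != nil {
				in.Endpoint = p.host
				in.State = "offline"
			}
			reply[idx] = in
		}(p, i)
	}
	wg.Wait()
	return reply
}

func main() {
	fmt.Println(serverInfo([]peer{{"node1:9000"}, {"node2:9000"}}, false))
}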

@@ -178,8 +178,11 @@ func (client *peerRESTClient) LocalStorageInfo(metrics bool) (info StorageInfo,
 }
 // ServerInfo - fetch server information for a remote node.
-func (client *peerRESTClient) ServerInfo() (info madmin.ServerProperties, err error) {
-	respBody, err := client.call(peerRESTMethodServerInfo, nil, nil, -1)
+func (client *peerRESTClient) ServerInfo(metrics bool) (info madmin.ServerProperties, err error) {
+	values := make(url.Values)
+	values.Set(peerRESTMetrics, strconv.FormatBool(metrics))
+	respBody, err := client.call(peerRESTMethodServerInfo, values, nil, -1)
 	if err != nil {
 		return
 	}
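On the wire, the flag is just a query parameter: the client encodes it with strconv.FormatBool, and the peer server (further below) parses it back with strconv.ParseBool. A tiny standard-library sketch of that round trip:

package main

import (
	"fmt"
	"net/url"
	"strconv"
)

func main() {
	// Client side: encode the flag exactly as the hunk does, via strconv.FormatBool.
	values := make(url.Values)
	values.Set("metrics", strconv.FormatBool(false))
	query := values.Encode() // "metrics=false"

	// Server side: parse it back; ParseBool rejects anything but true/false forms.
	parsed, _ := url.ParseQuery(query)
	metrics, err := strconv.ParseBool(parsed.Get("metrics"))
	fmt.Println(metrics, err) // false <nil>
}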

@@ -18,7 +18,7 @@
 package cmd
 const (
-	peerRESTVersion = "v36" // Rewrite VerifyBinaryHandler()
+	peerRESTVersion = "v37" // Add 'metrics' option for ServerInfo
 	peerRESTVersionPrefix = SlashSeparator + peerRESTVersion
 	peerRESTPrefix = minioReservedBucketPath + "/peer"
 	peerRESTPath = peerRESTPrefix + peerRESTVersionPrefix

@@ -328,7 +328,19 @@ func (s *peerRESTServer) ServerInfoHandler(w http.ResponseWriter, r *http.Reques
 	}
 	ctx := newContext(r, w, "ServerInfo")
-	info := getLocalServerProperty(globalEndpoints, r)
+	objLayer := newObjectLayerFn()
+	if objLayer == nil {
+		s.writeErrorResponse(w, errServerNotInitialized)
+		return
+	}
+	metrics, err := strconv.ParseBool(r.Form.Get(peerRESTMetrics))
+	if err != nil {
+		s.writeErrorResponse(w, err)
+		return
+	}
+	info := getLocalServerProperty(globalEndpoints, r, metrics)
 	logger.LogIf(ctx, gob.NewEncoder(w).Encode(info))
 }
@@ -1519,7 +1531,7 @@ func registerPeerRESTHandlers(router *mux.Router, gm *grid.Manager) {
 	subrouter := router.PathPrefix(peerRESTPrefix).Subrouter()
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodHealth).HandlerFunc(h(server.HealthHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodGetLocks).HandlerFunc(h(server.GetLocksHandler))
-	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodServerInfo).HandlerFunc(h(server.ServerInfoHandler))
+	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodServerInfo).HandlerFunc(h(server.ServerInfoHandler)).Queries(restQueries(peerRESTMetrics)...)
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodLocalStorageInfo).HandlerFunc(h(server.LocalStorageInfoHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodProcInfo).HandlerFunc(h(server.GetProcInfoHandler))
 	subrouter.Methods(http.MethodPost).Path(peerRESTVersionPrefix + peerRESTMethodMemInfo).HandlerFunc(h(server.GetMemInfoHandler))
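The added .Queries(restQueries(peerRESTMetrics)...) matcher makes the route match only requests that carry the `metrics` query key, which is why the peer REST version is bumped to v37 above: older peers would never send it. A minimal gorilla/mux sketch of how such a query matcher behaves (handler and paths invented for illustration):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	r := mux.NewRouter()
	// The route only matches when a "metrics" query key is present;
	// requests without it fall through (404 here, since nothing else matches).
	r.Methods(http.MethodPost).Path("/info").
		HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
			fmt.Fprintln(w, "matched")
		}).
		Queries("metrics", "{metrics}")

	for _, target := range []string{"/info?metrics=true", "/info"} {
		rec := httptest.NewRecorder()
		r.ServeHTTP(rec, httptest.NewRequest(http.MethodPost, target, nil))
		fmt.Println(target, "->", rec.Code) // 200, then 404
	}
}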

@@ -299,12 +299,19 @@ func (p *xlStorageDiskIDCheck) DiskInfo(ctx context.Context, opts DiskInfoOption
 	defer si(&err)
 	if opts.NoOp {
+		if opts.Metrics {
+			info.Metrics = p.getMetrics()
+		}
 		info.Metrics.TotalWrites = p.totalWrites.Load()
 		info.Metrics.TotalDeletes = p.totalDeletes.Load()
 		info.Metrics.TotalWaiting = uint32(p.health.waiting.Load())
 		info.Metrics.TotalErrorsTimeout = p.totalErrsTimeout.Load()
 		info.Metrics.TotalErrorsAvailability = p.totalErrsAvailability.Load()
-		return
+		if p.health.isFaulty() {
+			// if disk is already faulty return faulty for 'mc admin info' output and prometheus alerts.
+			return info, errFaultyDisk
+		}
+		return info, nil
 	}
 	defer func() {
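In this NoOp fast path, the cheap atomic counters are always returned, the full metrics struct is built only when opts.Metrics is set, and a faulty drive now surfaces as errFaultyDisk instead of a silent bare return. A standalone sketch of that shape (field and helper names invented, not MinIO's):

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errFaultyDisk = errors.New("drive is faulty")

type diskMetrics struct {
	LastMinuteLatencies map[string]float64 // stand-in for the expensive part
	TotalWrites         uint64
	TotalDeletes        uint64
}

type diskInfo struct{ Metrics diskMetrics }

type driveChecker struct {
	totalWrites  atomic.Uint64
	totalDeletes atomic.Uint64
	faulty       atomic.Bool
}

// expensiveMetrics stands in for getMetrics(): only computed when asked for.
func (d *driveChecker) expensiveMetrics() diskMetrics {
	return diskMetrics{LastMinuteLatencies: map[string]float64{"PutObject": 0.8}}
}

func (d *driveChecker) DiskInfo(withMetrics bool) (diskInfo, error) {
	var info diskInfo
	if withMetrics {
		info.Metrics = d.expensiveMetrics()
	}
	// The cheap atomic counters are always included.
	info.Metrics.TotalWrites = d.totalWrites.Load()
	info.Metrics.TotalDeletes = d.totalDeletes.Load()
	if d.faulty.Load() {
		// Surface the faulty state instead of returning success with stale data.
		return info, errFaultyDisk
	}
	return info, nil
}

func main() {
	var d driveChecker
	d.totalWrites.Store(10)
	info, err := d.DiskInfo(false)
	fmt.Println(info.Metrics.TotalWrites, info.Metrics.LastMinuteLatencies == nil, err) // 10 true <nil>
}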