optimize speedtest for smaller setups (#15414)

This has been observed in multiple environments: when the
setup is small, `speedtest` naturally fails within the default
'10s', because the default concurrency of '32' is too large
for such clusters.

Choose a smaller starting value instead, i.e. equal to the
number of drives in such clusters, and let 'autotune' increase
the concurrency from there.
Harshavardhana 2022-07-27 14:41:59 -07:00 committed by GitHub
parent 5e763b71dc
commit cbd70d26b5
4 changed files with 29 additions and 1226 deletions
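
A minimal standalone sketch of the clamp described in the commit message; startingConcurrency, requested, and driveCount are illustrative names standing in for the handler-local concurrent value and globalEndpoints.NEndpoints(), not the real MinIO globals. Only the behaviour is taken from the diff below.

// Sketch: pick the starting concurrency for speedtest on a given setup.
package main

import (
    "fmt"
    "runtime"
)

func startingConcurrency(requested, driveCount int) int {
    concurrent := requested
    if concurrent <= 0 {
        concurrent = runtime.GOMAXPROCS(0)
    }
    // On small setups start with one worker per drive and let autotune
    // raise the concurrency later, instead of starting at the default 32.
    if driveCount < concurrent {
        concurrent = driveCount
    }
    return concurrent
}

func main() {
    fmt.Println(startingConcurrency(32, 4))  // 4-drive cluster: start at 4
    fmt.Println(startingConcurrency(32, 64)) // larger cluster: keep 32
}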


@@ -1210,12 +1210,22 @@ func (a adminAPIHandlers) ObjectSpeedTestHandler(w http.ResponseWriter, r *http.
         concurrent = runtime.GOMAXPROCS(0)
     }
+    // if we have less drives than concurrency then choose
+    // only the concurrency to be number of drives to start
+    // with - since default '32' might be big and may not
+    // complete in total time of 10s.
+    if globalEndpoints.NEndpoints() < concurrent {
+        concurrent = globalEndpoints.NEndpoints()
+    }
     duration, err := time.ParseDuration(durationStr)
     if err != nil {
         duration = time.Second * 10
     }
-    sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, objectAPI, concurrent, size, autotune)
+    storageInfo, _ := objectAPI.StorageInfo(ctx)
+    sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, storageInfo, concurrent, size, autotune)
     if !sufficientCapacity {
         writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, AdminError{
             Code: "XMinioSpeedtestInsufficientCapacity",
@@ -1306,8 +1316,7 @@ func deleteObjectPerfBucket(objectAPI ObjectLayer) {
     })
 }
-func validateObjPerfOptions(ctx context.Context, objectAPI ObjectLayer, concurrent int, size int, autotune bool) (sufficientCapacity bool, canAutotune bool, capacityErrMsg string) {
-    storageInfo, _ := objectAPI.StorageInfo(ctx)
+func validateObjPerfOptions(ctx context.Context, storageInfo madmin.StorageInfo, concurrent int, size int, autotune bool) (bool, bool, string) {
     capacityNeeded := uint64(concurrent * size)
     capacity := GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo)
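
For context, the check inside validateObjPerfOptions boils down to comparing the requested workload (concurrent * size) against the usable free capacity of the cluster. A simplified, standalone sketch under assumptions: freeCapacity stands in for GetTotalUsableCapacityFree(storageInfo.Disks, storageInfo), and the 2x headroom rule gating autotune is illustrative, not the exact upstream condition.

// Sketch: can the cluster hold the objects the speedtest wants to write?
package main

import "fmt"

func checkObjPerfCapacity(freeCapacity uint64, concurrent, size int, autotune bool) (sufficient, canAutotune bool, errMsg string) {
    capacityNeeded := uint64(concurrent * size)
    if capacityNeeded > freeCapacity {
        return false, false, fmt.Sprintf("not enough usable free space: need %d bytes, have %d bytes", capacityNeeded, freeCapacity)
    }
    // Only allow autotune to ramp up concurrency when there is headroom
    // beyond the initial requirement (illustrative threshold).
    canAutotune = autotune && capacityNeeded*2 <= freeCapacity
    return true, canAutotune, ""
}

func main() {
    ok, tune, msg := checkObjPerfCapacity(1<<30, 4, 64<<20, true) // 1 GiB free, 4 x 64 MiB needed
    fmt.Println(ok, tune, msg)
}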
@@ -2220,11 +2229,20 @@ func (a adminAPIHandlers) HealthInfoHandler(w http.ResponseWriter, r *http.Reque
         concurrent = runtime.GOMAXPROCS(0)
     }
+    // if we have less drives than concurrency then choose
+    // only the concurrency to be number of drives to start
+    // with - since default '32' might be big and may not
+    // complete in total time of 10s.
+    if globalEndpoints.NEndpoints() < concurrent {
+        concurrent = globalEndpoints.NEndpoints()
+    }
+    storageInfo, _ := objectAPI.StorageInfo(ctx)
     size := 64 * humanize.MiByte
     autotune := true
-    sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, objectAPI, concurrent, size, autotune)
+    sufficientCapacity, canAutotune, capacityErrMsg := validateObjPerfOptions(ctx, storageInfo, concurrent, size, autotune)
     if !sufficientCapacity {
         healthInfo.Perf.Error = capacityErrMsg
         partialWrite(healthInfo)


@@ -533,9 +533,11 @@ func (z *erasureServerPools) BackendInfo() (b madmin.BackendInfo) {
     rrSCParity := globalStorageClass.GetParityForSC(storageclass.RRS)
     // Data blocks can vary per pool, but parity is same.
-    for _, setDriveCount := range z.SetDriveCounts() {
+    for i, setDriveCount := range z.SetDriveCounts() {
         b.StandardSCData = append(b.StandardSCData, setDriveCount-scParity)
         b.RRSCData = append(b.RRSCData, setDriveCount-rrSCParity)
+        b.DrivesPerSet = append(b.DrivesPerSet, setDriveCount)
+        b.TotalSets = append(b.TotalSets, z.serverPools[i].setCount)
     }
     b.StandardSCParity = scParity
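
The two new BackendInfo slices expose per-pool topology: drives per erasure set and the number of sets in each pool. A standalone sketch of how a consumer could combine them; the pool struct and the numbers below are illustrative, not the actual erasureServerPools type.

// Sketch: derive total drives per pool from the per-pool set counts.
package main

import "fmt"

type pool struct {
    setCount      int // erasure sets in this pool (-> BackendInfo.TotalSets)
    setDriveCount int // drives per erasure set    (-> BackendInfo.DrivesPerSet)
}

func main() {
    pools := []pool{
        {setCount: 4, setDriveCount: 16},
        {setCount: 2, setDriveCount: 8},
    }

    var drivesPerSet, totalSets []int
    for _, p := range pools {
        drivesPerSet = append(drivesPerSet, p.setDriveCount)
        totalSets = append(totalSets, p.setCount)
    }

    for i := range pools {
        fmt.Printf("pool %d: %d sets x %d drives/set = %d drives\n",
            i, totalSets[i], drivesPerSet[i], totalSets[i]*drivesPerSet[i])
    }
}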

go.mod

@@ -48,7 +48,7 @@ require (
     github.com/minio/dperf v0.4.2
     github.com/minio/highwayhash v1.0.2
     github.com/minio/kes v0.20.0
-    github.com/minio/madmin-go v1.4.6
+    github.com/minio/madmin-go v1.4.9
     github.com/minio/minio-go/v7 v7.0.32
     github.com/minio/pkg v1.1.26
     github.com/minio/selfupdate v0.5.0

go.sum

File diff suppressed because it is too large.