diff --git a/cmd/mrf.go b/cmd/mrf.go index 60c302d2b..7eccda884 100644 --- a/cmd/mrf.go +++ b/cmd/mrf.go @@ -47,7 +47,7 @@ type mrfState struct { ctx context.Context pools *erasureServerPools - mu sync.Mutex + mu sync.RWMutex opCh chan partialOperation } @@ -72,6 +72,9 @@ func (m *mrfState) addPartialOp(op partialOperation) { return } + m.mu.RLock() + defer m.mu.RUnlock() + select { case m.opCh <- op: default: } diff --git a/cmd/xl-storage-disk-id-check.go b/cmd/xl-storage-disk-id-check.go index f85578291..bb1ee7e91 100644 --- a/cmd/xl-storage-disk-id-check.go +++ b/cmd/xl-storage-disk-id-check.go @@ -209,9 +209,24 @@ func newXLStorageDiskIDCheck(storage *xlStorage, healthCheck bool) *xlStorageDis }) if driveMaxConcurrent <= 0 { - driveMaxConcurrent = 512 + // nr_requests is for both READ and WRITE separately + // so we use 2x tokens on our end. + // + // https://www.kernel.org/doc/Documentation/block/queue-sysfs.txt + // + // nr_requests (RW) + // ---------------- + // This controls how many requests may be allocated in the block layer for + // read or write requests. Note that the total allocated number may be twice + // this amount, since it applies only to reads or writes (not the accumulated + // sum). + driveMaxConcurrent = int(storage.nrRequests) * 2 + if driveMaxConcurrent <= 0 { + driveMaxConcurrent = 1023 * 2 // Default value on Linux for most NVMe + } if storage.rotational { - driveMaxConcurrent = int(storage.nrRequests) / 2 + // use 80% of the available nr_requests on HDDs + driveMaxConcurrent = int(float64(storage.nrRequests)*0.8) * 2 if driveMaxConcurrent < 32 { driveMaxConcurrent = 32 }