mirror of https://github.com/minio/minio
update quorum requirement to list all objects (#14201)
Some upgraded objects might not get listed due to different quorum ratios across objects; make sure to list all objects that satisfy the maximum possible quorum.
parent c3d9c45f58
commit aaea94a48d
4 changed files with 9 additions and 15 deletions
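Why the switch to len(disks)/2: in MinIO's erasure coding the read quorum of an object equals its data-shard count (drives minus parity), and parity never exceeds half the drives, so drives/2 is the loosest quorum any correctly written object can need. The program below is an illustrative sketch of that arithmetic, not MinIO source; the parity values are assumptions:

```go
package main

import "fmt"

// readQuorum is illustrative, not MinIO's API: the read quorum of an
// erasure-coded object is its data-shard count, driveCount - parity.
func readQuorum(driveCount, parity int) int {
	return driveCount - parity
}

func main() {
	drives := 16

	// Objects written under different default parity settings (values
	// here are assumptions) carry different quorum requirements.
	for _, parity := range []int{2, 4, 8} {
		fmt.Printf("parity=%d -> read quorum=%d\n", parity, readQuorum(drives, parity))
	}

	// A lister that demands the current default quorum (12 when parity
	// is 4) misses entries that only reach a weaker quorum; demanding
	// drives/2 = 8 accepts every valid parity setting.
	fmt.Println("relaxed listing quorum:", drives/2)
}
```

That floor is exactly the value the hunks below substitute for the configured defaults.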
@@ -289,7 +289,7 @@ func scanDataFolder(ctx context.Context, poolIdx, setIdx int, basePath string, c
 	if poolIdx < len(objAPI.serverPools) && setIdx < len(objAPI.serverPools[poolIdx].sets) {
 		// Pass the disks belonging to the set.
 		s.disks = objAPI.serverPools[poolIdx].sets[setIdx].getDisks()
-		s.disksQuorum = objAPI.serverPools[poolIdx].sets[setIdx].defaultRQuorum()
+		s.disksQuorum = len(s.disks) / 2
 	} else {
 		logger.LogIf(ctx, fmt.Errorf("Matching pool %s, set %s not found", humanize.Ordinal(poolIdx+1), humanize.Ordinal(setIdx+1)))
 	}
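In the hunk above, the scanner stops asking for the set's configured default read quorum and instead trusts an entry once half the set's drives agree. A minimal sketch of that acceptance rule (the helper name is hypothetical, not MinIO's API):

```go
package main

import "fmt"

// meetsScannerQuorum is illustrative: after this commit the scanner
// trusts an entry once at least half of the set's drives report it.
func meetsScannerQuorum(agreeingDrives, setDrives int) bool {
	return agreeingDrives >= setDrives/2
}

func main() {
	// Integer division floors the threshold: a 15-drive set needs 7
	// agreeing drives, not the configured default read quorum.
	fmt.Println(meetsScannerQuorum(7, 15))  // true
	fmt.Println(meetsScannerQuorum(11, 24)) // false, needs 12
}
```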
@@ -1037,7 +1037,7 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
 		// class for objects which have reduced quorum
 		// storage class only needs to be honored for
 		// Read() requests alone which we already do.
-		writeQuorums[i] = er.defaultWQuorum()
+		writeQuorums[i] = len(storageDisks)/2 + 1
 	}
 
 	versionsMap := make(map[string]FileInfoVersions, len(objects))
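For deletes, the commit derives the write quorum directly from the disk count. Note the asymmetry with the read side: reads relax to a floor of half the disks, while writes require a strict majority so that two conflicting writes can never both reach quorum. A sketch of the arithmetic (illustrative, not MinIO code):

```go
package main

import "fmt"

// writeQuorum mirrors the expression the delete path now uses: a strict
// majority of the storage disks.
func writeQuorum(disks int) int { return disks/2 + 1 }

func main() {
	for _, n := range []int{4, 8, 15, 16} {
		fmt.Printf("%2d disks -> write quorum %d\n", n, writeQuorum(n))
	}
	// With 16 disks the quorum is 9: two disjoint groups of 9 drives
	// cannot exist, so conflicting writes cannot both succeed.
}
```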
@@ -1153,9 +1153,7 @@ func (er erasureObjects) DeleteObjects(ctx context.Context, bucket string, objec
 			errs[objIndex] = toObjectErr(err, bucket, objects[objIndex].ObjectName)
 		}
 
-		if errs[objIndex] == nil {
-			defer NSUpdated(bucket, objects[objIndex].ObjectName)
-		}
+		defer NSUpdated(bucket, objects[objIndex].ObjectName)
 	}
 
 	// Check failed deletes across multiple objects
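Two things change in the hunk above: the success check around the notification is dropped, so NSUpdated is queued for every object, and because the defer sits inside a loop, none of those calls run until DeleteObjects returns. The standalone program below illustrates the plain Go defer semantics involved (nothing in it is MinIO code):

```go
package main

import "fmt"

func main() {
	for _, name := range []string{"a", "b", "c"} {
		// Arguments are evaluated when defer is queued, but the calls
		// themselves only run when the function returns, in LIFO order.
		defer fmt.Println("notify:", name)
	}
	fmt.Println("loop done; deferred calls run now")
}
// Output:
// loop done; deferred calls run now
// notify: c
// notify: b
// notify: a
```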
@@ -682,8 +682,8 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
 
 	// How to resolve partial results.
 	resolver := metadataResolutionParams{
-		dirQuorum: set.defaultRQuorum(),
-		objQuorum: set.defaultRQuorum(),
+		dirQuorum: len(disks) / 2, // make sure to capture all quorum ratios
+		objQuorum: len(disks) / 2, // make sure to capture all quorum ratios
 		bucket:    bName,
 	}
 
@@ -692,7 +692,7 @@ func (z *erasureServerPools) decommissionPool(ctx context.Context, idx int, pool
 		bucket:         bName,
 		recursive:      true,
 		forwardTo:      forwardTo,
-		minDisks:       len(disks),
+		minDisks:       len(disks) / 2, // to capture all quorum ratios
 		reportNotFound: false,
 		agreed:         decommissionEntry,
 		partial: func(entries metaCacheEntries, nAgreed int, errs []error) {
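Both decommission hunks relax the same threshold: the metadata resolver and the raw lister now accept entries seen on only half the drives, so objects written under any historical parity setting are picked up and migrated. A hypothetical sketch of resolving partial listing results against such a quorum (the types, names, and voting scheme are assumptions, not MinIO's API):

```go
package main

import "fmt"

// resolveEntry is a hypothetical sketch: accept whichever version of an
// entry reaches the relaxed quorum of half the drives, preferring the
// version the most drives agree on.
func resolveEntry(votes map[string]int, totalDrives int) (best string, ok bool) {
	quorum := totalDrives / 2 // relaxed so every historical parity setting qualifies
	bestVotes := 0
	for version, n := range votes {
		if n >= quorum && n > bestVotes {
			best, bestVotes, ok = version, n, true
		}
	}
	return best, ok
}

func main() {
	// 16-drive set: 9 drives report version v2, 5 report v1, 2 are offline.
	fmt.Println(resolveEntry(map[string]int{"v1": 5, "v2": 9}, 16)) // v2 true
	// Under the old set.defaultRQuorum() (e.g. 12 with parity 4) the
	// same entry would have been dropped.
}
```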
@@ -542,13 +542,9 @@ func (er *erasureObjects) listPath(ctx context.Context, o listPathOptions, resul
 	var fallbackDisks []StorageAPI
 
 	// Special case: ask all disks if the drive count is 4
-	if askDisks == -1 || er.setDriveCount == 4 {
-		askDisks = len(disks) // with 'strict' quorum list on all online disks.
-		listingQuorum = er.defaultRQuorum()
-	}
-	if askDisks == 0 {
-		askDisks = globalAPIConfig.getListQuorum()
-		listingQuorum = askDisks
+	if askDisks <= 0 || er.setDriveCount == 4 {
+		askDisks = len(disks) // with 'strict' quorum list on all online disks.
+		listingQuorum = len(disks) / 2 // keep this such that we can list all objects with different quorum ratio.
 	}
 	if askDisks > 0 && len(disks) > askDisks {
 		rand.Shuffle(len(disks), func(i, j int) {
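The rewritten selection logic in listPath can be condensed into one decision function. The sketch below assumes listingQuorum was initialized earlier in the function, outside this hunk; all names are illustrative, not MinIO's API:

```go
package main

import "fmt"

// listParams condenses the post-commit logic: onlineDisks stands for
// len(disks), driveCount for er.setDriveCount, and initialQuorum for
// whatever listingQuorum held before this hunk runs.
func listParams(askDisks, onlineDisks, driveCount, initialQuorum int) (ask, quorum int) {
	ask, quorum = askDisks, initialQuorum
	// Special case: ask all disks when askDisks is unset or 'strict',
	// or when the erasure set has only 4 drives.
	if ask <= 0 || driveCount == 4 {
		ask = onlineDisks
		// Half the online disks as quorum keeps objects written under
		// any parity setting listable.
		quorum = onlineDisks / 2
	}
	return ask, quorum
}

func main() {
	fmt.Println(listParams(0, 16, 16, 3))  // ask all 16, quorum 8
	fmt.Println(listParams(-1, 12, 16, 3)) // 'strict': ask all, quorum 6
	fmt.Println(listParams(5, 16, 16, 3))  // explicit askDisks passes through
}
```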