fix: dangling objects honor parityBlocks instead of dataBlocks (#19019)

Bonus: do not recreate buckets when NoRecreate is requested.
Author: Harshavardhana (2024-02-08 15:22:16 -08:00), committed by GitHub
Parent: 6005ad3d48
Commit: 404d8b3084
4 changed files with 11 additions and 20 deletions
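
The first hunk below tightens the dangling-object heuristic in isObjectDangling. An object erasure-coded into DataBlocks data and ParityBlocks parity shards needs at least DataBlocks surviving copies of its xl.meta (or data-dirs) to be readable, so it can tolerate losing at most ParityBlocks of them. The old check derived its threshold from DataBlocks: on an EC 6+2 set (an illustrative layout, not one named in the commit), an object was flagged dangling only once 6 of 8 copies were missing, even though it had already become unreadable at 3 missing, when only 5 < 6 copies remained. The corrected check flags it as soon as more than ParityBlocks copies are missing.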


@@ -961,24 +961,18 @@ func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (valid
 		return validMeta, notFoundMetaErrs > dataBlocks
 	}
 
-	quorum := validMeta.Erasure.DataBlocks
-	if validMeta.Erasure.DataBlocks == validMeta.Erasure.ParityBlocks {
-		quorum++
-	}
-
 	// TODO: It is possible to replay the object via just single
 	// xl.meta file, considering quorum number of data-dirs are still
 	// present on other drives.
 	//
 	// However this requires a bit of a rewrite, leave this up for
 	// future work.
-	if notFoundMetaErrs > 0 && notFoundMetaErrs >= quorum {
+	if notFoundMetaErrs > 0 && notFoundMetaErrs > validMeta.Erasure.ParityBlocks {
 		// All xl.meta is beyond data blocks missing, this is dangling
 		return validMeta, true
 	}
 
-	if !validMeta.IsRemote() && notFoundPartsErrs > 0 && notFoundPartsErrs >= quorum {
+	if !validMeta.IsRemote() && notFoundPartsErrs > 0 && notFoundPartsErrs > validMeta.Erasure.ParityBlocks {
 		// All data-dir is beyond data blocks missing, this is dangling
 		return validMeta, true
 	}
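
A minimal standalone sketch of the threshold the new check enforces, assuming nothing beyond the condition shown in the hunk above; the isDangling helper and the EC 6+2 numbers are illustrative, not MinIO's API:

package main

import "fmt"

// isDangling mirrors the new `notFoundMetaErrs > ParityBlocks` test: an object
// with D data and P parity shards is readable only while at least D of its
// D+P copies of xl.meta survive, i.e. while at most P copies are missing.
// Note: dataBlocks is deliberately unused, just as the fixed check no longer
// derives its quorum from DataBlocks.
func isDangling(dataBlocks, parityBlocks, notFoundMetaErrs int) bool {
	return notFoundMetaErrs > 0 && notFoundMetaErrs > parityBlocks
}

func main() {
	// EC 6+2: losing 2 xl.meta copies is tolerable, losing 3 is not.
	fmt.Println(isDangling(6, 2, 2)) // false: 6 of 8 copies remain, read quorum holds
	fmt.Println(isDangling(6, 2, 3)) // true: only 5 < 6 copies remain
}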


@@ -301,10 +301,10 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
 	// to read the metadata entry.
 	var uploads []MultipartInfo
-	populatedUploadIds := set.NewStringSet()
+	populatedUploadIDs := set.NewStringSet()
 	for _, uploadID := range uploadIDs {
-		if populatedUploadIds.Contains(uploadID) {
+		if populatedUploadIDs.Contains(uploadID) {
 			continue
 		}
 		// If present, use time stored in ID.
@@ -321,7 +321,7 @@ func (er erasureObjects) ListMultipartUploads(ctx context.Context, bucket, objec
 			UploadID:  base64.RawURLEncoding.EncodeToString([]byte(fmt.Sprintf("%s.%s", globalDeploymentID(), uploadID))),
 			Initiated: startTime,
 		})
-		populatedUploadIds.Add(uploadID)
+		populatedUploadIDs.Add(uploadID)
 	}
 	sort.Slice(uploads, func(i int, j int) bool {


@@ -1840,12 +1840,6 @@ func (z *erasureServerPools) DeleteBucket(ctx context.Context, bucket string, op
 		}
 	}
 
-	if err != nil && !isErrBucketNotFound(err) {
-		if !opts.NoRecreate {
-			z.s3Peer.MakeBucket(ctx, bucket, MakeBucketOptions{})
-		}
-	}
-
 	if err == nil {
 		// Purge the entire bucket metadata entirely.
 		z.deleteAll(context.Background(), minioMetaBucket, pathJoin(bucketMetaPrefix, bucket))


@@ -474,9 +474,12 @@ func (sys *S3PeerSys) DeleteBucket(ctx context.Context, bucket string, opts Dele
 			perPoolErrs = append(perPoolErrs, errs[i])
 		}
 	}
-	if poolErr := reduceWriteQuorumErrs(ctx, perPoolErrs, bucketOpIgnoredErrs, len(perPoolErrs)/2+1); poolErr != nil && poolErr != errVolumeNotFound {
-		// re-create successful deletes, since we are return an error.
-		sys.MakeBucket(ctx, bucket, MakeBucketOptions{})
+	poolErr := reduceWriteQuorumErrs(ctx, perPoolErrs, bucketOpIgnoredErrs, len(perPoolErrs)/2+1)
+	if poolErr != nil && !errors.Is(poolErr, errVolumeNotFound) {
+		if !opts.NoRecreate {
+			// re-create successful deletes, since we are return an error.
+			sys.MakeBucket(ctx, bucket, MakeBucketOptions{})
+		}
 		return toObjectErr(poolErr, bucket)
 	}
 }
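
Taken together, the last two hunks move the compensating MakeBucket out of erasureServerPools.DeleteBucket and into S3PeerSys.DeleteBucket, where it is now skipped when the caller sets NoRecreate. A minimal sketch of that delete-with-optional-rollback flow, assuming hypothetical deleteOnPeers and recreateOnPeers helpers in place of the real peer fan-out and quorum reduction:

package main

import (
	"context"
	"errors"
	"fmt"
)

var errVolumeNotFound = errors.New("volume not found")

// DeleteBucketOptions mirrors the opts.NoRecreate flag used in the diff.
type DeleteBucketOptions struct {
	NoRecreate bool // when set, a failed delete is not rolled back
}

// deleteBucket sketches the control flow: on a real (non-"not found") failure,
// re-create the bucket on peers that did delete it, unless NoRecreate is set.
func deleteBucket(ctx context.Context, bucket string, opts DeleteBucketOptions) error {
	err := deleteOnPeers(ctx, bucket) // hypothetical fan-out, reduced to one error
	if err != nil && !errors.Is(err, errVolumeNotFound) {
		if !opts.NoRecreate {
			recreateOnPeers(ctx, bucket) // undo partial deletes before returning the error
		}
		return err
	}
	return nil
}

// Hypothetical stand-ins for the peer RPCs.
func deleteOnPeers(ctx context.Context, bucket string) error { return errors.New("write quorum lost") }
func recreateOnPeers(ctx context.Context, bucket string)     { fmt.Println("re-creating", bucket) }

func main() {
	// With NoRecreate set, the failure is reported without any rollback.
	err := deleteBucket(context.Background(), "testbucket", DeleteBucketOptions{NoRecreate: true})
	fmt.Println(err) // write quorum lost ("re-creating" is never printed)
}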