fix: compressed+encrypted block overhead (#17289)

Klaus Post 2023-05-26 10:57:07 -07:00 committed by GitHub
parent 6425fec366
commit c839b64f6a
3 changed files with 3 additions and 25 deletions
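
Background for the buffer change below: when an inline part is both compressed and encrypted, the stored bytes exceed data.ActualSize(), because the DARE format used by MinIO's minio/sio package splits the stream into packages of at most 64 KiB, each carrying its own header and authentication tag. A minimal, self-contained sketch of that growth using sio.EncryptedSize (the sample sizes are arbitrary):

package main

import (
	"fmt"

	"github.com/minio/sio"
)

func main() {
	// Ciphertext is strictly larger than plaintext: DARE splits the stream
	// into packages, each with its own header and authentication tag, so
	// the overhead grows with the number of packages.
	for _, size := range []uint64{1 << 10, 64 << 10, 1 << 20} {
		enc, err := sio.EncryptedSize(size)
		if err != nil {
			panic(err)
		}
		fmt.Printf("plain %8d -> encrypted %8d (+%d)\n", size, enc, enc-size)
	}
}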

cmd/erasure-multipart.go

@@ -499,21 +499,6 @@ func (er erasureObjects) NewMultipartUpload(ctx context.Context, bucket, object
 	return er.newMultipartUpload(ctx, bucket, object, opts)
 }
 
-// CopyObjectPart - reads incoming stream and internally erasure codes
-// them. This call is similar to put object part operation but the source
-// data is read from an existing object.
-//
-// Implements S3 compatible Upload Part Copy API.
-func (er erasureObjects) CopyObjectPart(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject, uploadID string, partID int, startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (pi PartInfo, e error) {
-	partInfo, err := er.PutObjectPart(ctx, dstBucket, dstObject, uploadID, partID, NewPutObjReader(srcInfo.Reader), dstOpts)
-	if err != nil {
-		return pi, toObjectErr(err, dstBucket, dstObject)
-	}
-
-	// Success.
-	return partInfo, nil
-}
-
 // renamePart - renames multipart part to its relevant location under uploadID.
 func renamePart(ctx context.Context, disks []StorageAPI, srcBucket, srcEntry, dstBucket, dstEntry string, writeQuorum int) ([]StorageAPI, error) {
 	g := errgroup.WithNErrs(len(disks))
@@ -667,7 +652,8 @@ func (er erasureObjects) PutObjectPart(ctx context.Context, bucket, object, uplo
 		buffer = make([]byte, 1) // Allocate atleast a byte to reach EOF
 	case size == -1:
 		if size := data.ActualSize(); size > 0 && size < fi.Erasure.BlockSize {
-			buffer = make([]byte, data.ActualSize()+256, data.ActualSize()*2+512)
+			// Account for padding and forced compression overhead and encryption.
+			buffer = make([]byte, data.ActualSize()+256+32+32, data.ActualSize()*2+512)
 		} else {
 			buffer = er.bp.Get()
 			defer er.bp.Put(buffer)
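
The allocation change above is the core of the fix. A toy, runnable sketch of the same sizing pattern; the helper name inlinePartBuffer is hypothetical, while the constants are exactly those from the diff (+256 slack for padding, +32 for forced-compression framing, +32 for encryption):

package main

import "fmt"

// inlinePartBuffer mirrors the patched allocation for small (< block size)
// parts: the length covers the payload plus fixed slack for padding (+256),
// forced-compression framing (+32) and encryption overhead (+32); the larger
// capacity lets the writer grow the slice without a fresh allocation.
func inlinePartBuffer(actualSize int64) []byte {
	return make([]byte, actualSize+256+32+32, actualSize*2+512)
}

func main() {
	buf := inlinePartBuffer(1024)
	fmt.Println(len(buf), cap(buf)) // 1344 2560
}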

cmd/erasure-server-pool.go

@@ -1459,7 +1459,7 @@ func (z *erasureServerPools) CopyObjectPart(ctx context.Context, srcBucket, srcO
 	}
 	return z.PutObjectPart(ctx, destBucket, destObject, uploadID, partID,
-		NewPutObjReader(srcInfo.Reader), dstOpts)
+		srcInfo.PutObjReader, dstOpts)
 }
 
 // PutObjectPart - writes part of an object to hashedSet based on the object name.
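
The one-line change above matters because a PutObjReader carries more than the raw stream: it holds the size and hash bookkeeping attached when the source was prepared, and wrapping srcInfo.Reader in a fresh NewPutObjReader discarded that state. A self-contained illustration with hypothetical stand-in types (preparedReader is not the real PutObjReader):

package main

import (
	"fmt"
	"io"
	"strings"
)

// preparedReader is a hypothetical stand-in for PutObjReader: the raw stream
// plus bookkeeping (here just the pre-compression size) that the caller
// recorded when preparing the source object.
type preparedReader struct {
	r          io.Reader
	actualSize int64
}

// wrap mimics rebuilding the wrapper from the bare stream: it sees only the
// reader, so any previously recorded bookkeeping is lost.
func wrap(r io.Reader) *preparedReader {
	return &preparedReader{r: r, actualSize: -1}
}

func main() {
	stream := strings.NewReader("part data")
	src := &preparedReader{r: stream, actualSize: 9} // caller-prepared reader

	fmt.Println(wrap(stream).actualSize) // -1: old code path, state dropped
	fmt.Println(src.actualSize)          // 9: new code path, state kept
}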

cmd/erasure-sets.go

@@ -899,14 +899,6 @@ func (s *erasureSets) NewMultipartUpload(ctx context.Context, bucket, object str
 	return set.NewMultipartUpload(ctx, bucket, object, opts)
 }
 
-// Copies a part of an object from source hashedSet to destination hashedSet.
-func (s *erasureSets) CopyObjectPart(ctx context.Context, srcBucket, srcObject, destBucket, destObject string, uploadID string, partID int,
-	startOffset int64, length int64, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions,
-) (partInfo PartInfo, err error) {
-	destSet := s.getHashedSet(destObject)
-	return destSet.PutObjectPart(ctx, destBucket, destObject, uploadID, partID, NewPutObjReader(srcInfo.Reader), dstOpts)
-}
-
 // PutObjectPart - writes part of an object to hashedSet based on the object name.
 func (s *erasureSets) PutObjectPart(ctx context.Context, bucket, object, uploadID string, partID int, data *PutObjReader, opts ObjectOptions) (info PartInfo, err error) {
 	set := s.getHashedSet(object)
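
Taken together, the three files leave CopyObjectPart implemented only at the server-pools layer, which now forwards the caller-prepared reader; the per-set and per-object variants that re-wrapped the stream are gone. A rough sketch of the remaining delegation shape, with simplified stand-in types:

package main

import "fmt"

// Simplified stand-ins for the real types; only the delegation shape is real.
type partInfo struct{ Number int }

type hashedSet struct{}

// PutObjectPart is the single write path that remains at the set layer.
func (hashedSet) PutObjectPart(partID int) partInfo {
	return partInfo{Number: partID}
}

type serverPools struct{ set hashedSet }

// CopyObjectPart survives only here and delegates straight to PutObjectPart,
// passing the prepared reader through (elided in this sketch).
func (z serverPools) CopyObjectPart(partID int) partInfo {
	return z.set.PutObjectPart(partID)
}

func main() {
	fmt.Println(serverPools{}.CopyObjectPart(3).Number) // 3
}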