results must be a single channel to avoid overwriting healing.bin (#19702)

Harshavardhana 2024-05-09 10:15:03 -07:00 committed by GitHub
parent f5e3eedf34
commit 3549e583a6
7 changed files with 259 additions and 231 deletions
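
The diff below tells the story: `healErasureSet` used to create a `results` channel and a tracker-updating goroutine inside its bucket loop, closing the channel per iteration, so a straggling consumer could keep writing stale progress over `healing.bin`. The fix hoists both to function scope: one channel, one consumer, one deferred close. A minimal runnable sketch of that pattern, using simplified stand-in types rather than the MinIO ones:

```go
package main

import (
	"fmt"
	"sync"
)

// Simplified stand-ins for the MinIO types.
type healEntryResult struct {
	name  string
	bytes uint64
}

type healingTracker struct {
	object string
	items  uint64
	bytes  uint64
}

func healSet(buckets []string) healingTracker {
	var tracker healingTracker

	// One channel for the whole heal pass, drained by exactly one
	// goroutine, so there is a single writer behind the state that
	// gets persisted to healing.bin.
	results := make(chan healEntryResult, 1000)
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		defer wg.Done()
		for res := range results {
			tracker.object = res.name
			tracker.items++
			tracker.bytes += res.bytes
		}
	}()

	for _, bucket := range buckets {
		// Every worker, for every bucket, sends to the same channel.
		results <- healEntryResult{name: bucket + "/object", bytes: 1}
	}

	close(results) // exactly one close, after all senders are done
	wg.Wait()      // the drainer has applied every update
	return tracker
}

func main() {
	fmt.Printf("%+v\n", healSet([]string{"bucket-1", "bucket-2"}))
}
```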


@@ -121,10 +121,6 @@ func listAllBuckets(ctx context.Context, storageDisks []StorageAPI, healBuckets
// we ignore disk not found errors
return nil
}
if storageDisks[index].Healing() != nil {
// we ignore disks under healing
return nil
}
volsInfo, err := storageDisks[index].ListVols(ctx)
if err != nil {
return err
@@ -216,7 +212,7 @@ func (fi FileInfo) DataMov() bool {
return ok
}
func auditHealObject(ctx context.Context, bucket, object, versionID string, result madmin.HealResultItem, err error) {
func (er *erasureObjects) auditHealObject(ctx context.Context, bucket, object, versionID string, result madmin.HealResultItem, err error) {
if len(logger.AuditTargets()) == 0 {
return
}
@@ -231,8 +227,14 @@ func auditHealObject(ctx context.Context, bucket, object, versionID string, resu
opts.Error = err.Error()
}
if result.After.Drives != nil {
opts.Tags = map[string]interface{}{"drives-result": result.After.Drives}
opts.Tags = map[string]interface{}{
"healResult": result,
"objectLocation": auditObjectOp{
Name: decodeDirObject(object),
Pool: er.poolIndex + 1,
Set: er.setIndex + 1,
Drives: er.getEndpointStrings(),
},
}
auditLogInternal(ctx, opts)
@@ -247,7 +249,7 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
storageEndpoints := er.getEndpoints()
defer func() {
auditHealObject(ctx, bucket, object, versionID, result, err)
er.auditHealObject(ctx, bucket, object, versionID, result, err)
}()
if globalTrace.NumSubscribers(madmin.TraceHealing) > 0 {
@@ -289,21 +291,18 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
readQuorum, _, err := objectQuorumFromMeta(ctx, partsMetadata, errs, er.defaultParityCount)
if err != nil {
m, err := er.deleteIfDangling(ctx, bucket, object, partsMetadata, errs, nil, ObjectOptions{
m, derr := er.deleteIfDangling(ctx, bucket, object, partsMetadata, errs, nil, ObjectOptions{
VersionID: versionID,
})
errs = make([]error, len(errs))
for i := range errs {
errs[i] = err
}
if err == nil {
// Dangling object successfully purged, size is '0'
m.Size = 0
}
// Generate file/version not found with default heal result
err = errFileNotFound
if versionID != "" {
err = errFileVersionNotFound
if derr == nil {
derr = errFileNotFound
if versionID != "" {
derr = errFileVersionNotFound
}
// We did find a new dangling object
return er.defaultHealResult(m, storageDisks, storageEndpoints,
errs, bucket, object, versionID), derr
}
return er.defaultHealResult(m, storageDisks, storageEndpoints,
errs, bucket, object, versionID), err
@@ -360,11 +359,10 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
switch {
case v != nil:
driveState = madmin.DriveStateOk
case errs[i] == errDiskNotFound, dataErrs[i] == errDiskNotFound:
case errors.Is(errs[i], errDiskNotFound), errors.Is(dataErrs[i], errDiskNotFound):
driveState = madmin.DriveStateOffline
case errs[i] == errFileNotFound, errs[i] == errFileVersionNotFound, errs[i] == errVolumeNotFound:
fallthrough
case dataErrs[i] == errFileNotFound, dataErrs[i] == errFileVersionNotFound, dataErrs[i] == errVolumeNotFound:
case IsErr(errs[i], errFileNotFound, errFileVersionNotFound, errVolumeNotFound),
IsErr(dataErrs[i], errFileNotFound, errFileVersionNotFound, errVolumeNotFound):
driveState = madmin.DriveStateMissing
default:
// all remaining cases imply corrupt data/metadata
@@ -417,18 +415,18 @@ func (er *erasureObjects) healObject(ctx context.Context, bucket string, object
VersionID: versionID,
})
errs = make([]error, len(errs))
if err == nil {
err = errFileNotFound
if versionID != "" {
err = errFileVersionNotFound
}
// We did find a new dangling object
return er.defaultHealResult(m, storageDisks, storageEndpoints,
errs, bucket, object, versionID), err
}
for i := range errs {
errs[i] = err
}
if err == nil {
// Dangling object successfully purged, size is '0'
m.Size = 0
}
// Generate file/version not found with default heal result
err = errFileNotFound
if versionID != "" {
err = errFileVersionNotFound
}
return er.defaultHealResult(m, storageDisks, storageEndpoints,
errs, bucket, object, versionID), err
}
@@ -641,6 +639,7 @@ func (er *erasureObjects) checkAbandonedParts(ctx context.Context, bucket string
if !opts.Remove || opts.DryRun {
return nil
}
if globalTrace.NumSubscribers(madmin.TraceHealing) > 0 {
startTime := time.Now()
defer func() {
@@ -983,12 +982,12 @@ func isObjectDangling(metaArr []FileInfo, errs []error, dataErrs []error) (valid
// However this requires a bit of a rewrite, leave this up for
// future work.
if notFoundMetaErrs > 0 && notFoundMetaErrs > validMeta.Erasure.ParityBlocks {
// All xl.meta is beyond data blocks missing, this is dangling
// All xl.meta is beyond parity blocks missing, this is dangling
return validMeta, true
}
if !validMeta.IsRemote() && notFoundPartsErrs > 0 && notFoundPartsErrs > validMeta.Erasure.ParityBlocks {
// All data-dir is beyond data blocks missing, this is dangling
// All data-dir is beyond parity blocks missing, this is dangling
return validMeta, true
}
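
To make the dangling threshold concrete, here is a worked example under assumed erasure settings (the numbers are illustrative, not from the commit): with 12 drives in an 8+4 config, `ParityBlocks` is 4, so once more than 4 drives report xl.meta missing, at most 7 intact copies remain and the read quorum of 8 is unreachable:

```go
package main

import "fmt"

func main() {
	// Hypothetical values for illustration: 12 drives, EC 8+4.
	dataBlocks, parityBlocks := 8, 4
	notFoundMetaErrs := 5 // drives answering "not found" for xl.meta

	// Mirrors the check above: with 12 drives and 5 misses, at most
	// 7 xl.meta copies can exist, below the read quorum of 8
	// (dataBlocks), so the version is provably dangling.
	dangling := notFoundMetaErrs > 0 && notFoundMetaErrs > parityBlocks
	fmt.Println(dataBlocks, dangling) // 8 true
}
```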
@@ -1069,8 +1068,7 @@ func healTrace(funcName healingMetric, startTime time.Time, bucket, object strin
}
if err != nil {
tr.Error = err.Error()
} else {
tr.HealResult = result
}
tr.HealResult = result
globalTrace.Publish(tr)
}


@@ -1497,7 +1497,13 @@ func TestHealObjectErasure(t *testing.T) {
er.getDisks = func() []StorageAPI {
// Nil more than half the disks, to remove write quorum.
for i := 0; i <= len(erasureDisks)/2; i++ {
erasureDisks[i] = nil
err := erasureDisks[i].Delete(context.Background(), bucket, object, DeleteOptions{
Recursive: true,
Immediate: false,
})
if err != nil {
t.Fatalf("Failed to delete a file - %v", err)
}
}
return erasureDisks
}


@@ -103,8 +103,12 @@ func (er erasureObjects) CopyObject(ctx context.Context, srcBucket, srcObject, d
if err != nil {
if errors.Is(err, errErasureReadQuorum) && !strings.HasPrefix(srcBucket, minioMetaBucket) {
_, derr := er.deleteIfDangling(context.Background(), srcBucket, srcObject, metaArr, errs, nil, srcOpts)
if derr != nil {
err = derr
if derr == nil {
if srcOpts.VersionID != "" {
err = errFileVersionNotFound
} else {
err = errFileNotFound
}
}
}
return ObjectInfo{}, toObjectErr(err, srcBucket, srcObject)
@@ -485,85 +489,82 @@ func joinErrs(errs []error) []string {
}
func (er erasureObjects) deleteIfDangling(ctx context.Context, bucket, object string, metaArr []FileInfo, errs []error, dataErrs []error, opts ObjectOptions) (FileInfo, error) {
var err error
m, ok := isObjectDangling(metaArr, errs, dataErrs)
if ok {
tags := make(map[string]interface{}, 4)
tags["set"] = er.setIndex
tags["pool"] = er.poolIndex
tags["merrs"] = joinErrs(errs)
tags["derrs"] = joinErrs(dataErrs)
if m.IsValid() {
tags["size"] = m.Size
tags["mtime"] = m.ModTime.Format(http.TimeFormat)
tags["data"] = m.Erasure.DataBlocks
tags["parity"] = m.Erasure.ParityBlocks
} else {
tags["invalid-meta"] = true
tags["data"] = er.setDriveCount - er.defaultParityCount
tags["parity"] = er.defaultParityCount
}
// count the number of offline disks
offline := 0
for i := 0; i < max(len(errs), len(dataErrs)); i++ {
if i < len(errs) && errors.Is(errs[i], errDiskNotFound) || i < len(dataErrs) && errors.Is(dataErrs[i], errDiskNotFound) {
offline++
}
}
if offline > 0 {
tags["offline"] = offline
}
_, file, line, cok := runtime.Caller(1)
if cok {
tags["caller"] = fmt.Sprintf("%s:%d", file, line)
}
defer auditDanglingObjectDeletion(ctx, bucket, object, m.VersionID, tags)
err = errFileNotFound
if opts.VersionID != "" {
err = errFileVersionNotFound
}
fi := FileInfo{
VersionID: m.VersionID,
}
if opts.VersionID != "" {
fi.VersionID = opts.VersionID
}
fi.SetTierFreeVersionID(mustGetUUID())
disks := er.getDisks()
g := errgroup.WithNErrs(len(disks))
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
return disks[index].DeleteVersion(ctx, bucket, object, fi, false, DeleteOptions{})
}, index)
}
rmDisks := make(map[string]string, len(disks))
for index, err := range g.Wait() {
var errStr, diskName string
if err != nil {
errStr = err.Error()
} else {
errStr = "<nil>"
}
if disks[index] != nil {
diskName = disks[index].String()
} else {
diskName = fmt.Sprintf("disk-%d", index)
}
rmDisks[diskName] = errStr
}
tags["cleanupResult"] = rmDisks
if !ok {
// We only come here if we cannot figure out if the object
// can be deleted safely, in such a scenario return ReadQuorum error.
return FileInfo{}, errErasureReadQuorum
}
return m, err
tags := make(map[string]interface{}, 4)
tags["set"] = er.setIndex
tags["pool"] = er.poolIndex
tags["merrs"] = joinErrs(errs)
tags["derrs"] = joinErrs(dataErrs)
if m.IsValid() {
tags["size"] = m.Size
tags["mtime"] = m.ModTime.Format(http.TimeFormat)
tags["data"] = m.Erasure.DataBlocks
tags["parity"] = m.Erasure.ParityBlocks
} else {
tags["invalid-meta"] = true
tags["data"] = er.setDriveCount - er.defaultParityCount
tags["parity"] = er.defaultParityCount
}
// count the number of offline disks
offline := 0
for i := 0; i < max(len(errs), len(dataErrs)); i++ {
if i < len(errs) && errors.Is(errs[i], errDiskNotFound) || i < len(dataErrs) && errors.Is(dataErrs[i], errDiskNotFound) {
offline++
}
}
if offline > 0 {
tags["offline"] = offline
}
_, file, line, cok := runtime.Caller(1)
if cok {
tags["caller"] = fmt.Sprintf("%s:%d", file, line)
}
defer auditDanglingObjectDeletion(ctx, bucket, object, m.VersionID, tags)
fi := FileInfo{
VersionID: m.VersionID,
}
if opts.VersionID != "" {
fi.VersionID = opts.VersionID
}
fi.SetTierFreeVersionID(mustGetUUID())
disks := er.getDisks()
g := errgroup.WithNErrs(len(disks))
for index := range disks {
index := index
g.Go(func() error {
if disks[index] == nil {
return errDiskNotFound
}
return disks[index].DeleteVersion(ctx, bucket, object, fi, false, DeleteOptions{})
}, index)
}
rmDisks := make(map[string]string, len(disks))
for index, err := range g.Wait() {
var errStr, diskName string
if err != nil {
errStr = err.Error()
} else {
errStr = "<nil>"
}
if disks[index] != nil {
diskName = disks[index].String()
} else {
diskName = fmt.Sprintf("disk-%d", index)
}
rmDisks[diskName] = errStr
}
tags["cleanupResult"] = rmDisks
return m, nil
}
func fileInfoFromRaw(ri RawFileInfo, bucket, object string, readData, inclFreeVers, allParts bool) (FileInfo, error) {
@@ -925,8 +926,12 @@ func (er erasureObjects) getObjectFileInfo(ctx context.Context, bucket, object s
// not we simply ignore it, since we can't tell for sure if it's a dangling object.
if totalResp == er.setDriveCount && shouldCheckForDangling(err, errs, bucket) {
_, derr := er.deleteIfDangling(context.Background(), bucket, object, metaArr, errs, nil, opts)
if derr != nil {
err = derr
if derr == nil {
if opts.VersionID != "" {
err = errFileVersionNotFound
} else {
err = errFileNotFound
}
}
}
return fi, nil, nil, toObjectErr(err, bucket, object)
@@ -2141,8 +2146,12 @@ func (er erasureObjects) PutObjectMetadata(ctx context.Context, bucket, object s
if err != nil {
if errors.Is(err, errErasureReadQuorum) && !strings.HasPrefix(bucket, minioMetaBucket) {
_, derr := er.deleteIfDangling(context.Background(), bucket, object, metaArr, errs, nil, opts)
if derr != nil {
err = derr
if derr == nil {
if opts.VersionID != "" {
err = errFileVersionNotFound
} else {
err = errFileNotFound
}
}
}
return ObjectInfo{}, toObjectErr(err, bucket, object)
@@ -2214,8 +2223,12 @@ func (er erasureObjects) PutObjectTags(ctx context.Context, bucket, object strin
if err != nil {
if errors.Is(err, errErasureReadQuorum) && !strings.HasPrefix(bucket, minioMetaBucket) {
_, derr := er.deleteIfDangling(context.Background(), bucket, object, metaArr, errs, nil, opts)
if derr != nil {
err = derr
if derr == nil {
if opts.VersionID != "" {
err = errFileVersionNotFound
} else {
err = errFileNotFound
}
}
}
return ObjectInfo{}, toObjectErr(err, bucket, object)
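
The same caller-side contract now repeats in `CopyObject`, `getObjectFileInfo`, `PutObjectMetadata`, and `PutObjectTags`: a nil error from `deleteIfDangling` means the dangling version was purged, so the caller reports 'not found' rather than surfacing the read-quorum error, while a non-nil error leaves the original error untouched. A hypothetical helper (not in the commit) that captures the mapping:

```go
package main

import (
	"errors"
	"fmt"
)

// Stand-ins for the cmd package sentinel errors.
var (
	errErasureReadQuorum   = errors.New("Read failed. Insufficient number of drives online")
	errFileNotFound        = errors.New("file not found")
	errFileVersionNotFound = errors.New("file version not found")
)

// danglingToErr is a hypothetical helper (not in the commit) capturing
// the caller-side contract above: nil from deleteIfDangling means the
// dangling version was purged, so report "not found"; a non-nil derr
// keeps the original read-quorum error.
func danglingToErr(err, derr error, versionID string) error {
	if derr != nil {
		return err
	}
	if versionID != "" {
		return errFileVersionNotFound
	}
	return errFileNotFound
}

func main() {
	fmt.Println(danglingToErr(errErasureReadQuorum, nil, ""))               // file not found
	fmt.Println(danglingToErr(errErasureReadQuorum, nil, "v1"))             // file version not found
	fmt.Println(danglingToErr(errErasureReadQuorum, errors.New("nak"), "")) // quorum error kept
}
```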


@@ -554,10 +554,10 @@ func (s *erasureSets) cleanupStaleUploads(ctx context.Context) {
}
type auditObjectOp struct {
Name string `json:"name"`
Pool int `json:"poolId"`
Set int `json:"setId"`
Disks []string `json:"disks"`
Name string `json:"name"`
Pool int `json:"poolId"`
Set int `json:"setId"`
Drives []string `json:"drives"`
}
// Add erasure set information to the current context
@@ -567,10 +567,10 @@ func auditObjectErasureSet(ctx context.Context, object string, set *erasureObjec
}
op := auditObjectOp{
Name: decodeDirObject(object),
Pool: set.poolIndex + 1,
Set: set.setIndex + 1,
Disks: set.getEndpointStrings(),
Name: decodeDirObject(object),
Pool: set.poolIndex + 1,
Set: set.setIndex + 1,
Drives: set.getEndpointStrings(),
}
logger.GetReqInfo(ctx).AppendTags("objectLocation", op)


@@ -34,7 +34,6 @@ import (
"github.com/minio/minio/internal/bucket/versioning"
"github.com/minio/minio/internal/color"
"github.com/minio/minio/internal/config/storageclass"
xioutil "github.com/minio/minio/internal/ioutil"
"github.com/minio/minio/internal/logger"
"github.com/minio/pkg/v2/console"
"github.com/minio/pkg/v2/wildcard"
@@ -141,6 +140,14 @@ func getLocalBackgroundHealStatus(ctx context.Context, o ObjectLayer) (madmin.Bg
return status, true
}
type healEntryResult struct {
bytes uint64
success bool
skipped bool
entryDone bool
name string
}
// healErasureSet lists and heals all objects in a specific erasure set
func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string, tracker *healingTracker) error {
scanMode := madmin.HealNormalScan
@@ -187,21 +194,68 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
jt, _ := workers.New(int(numHealers))
healEntryDone := func(name string) healEntryResult {
return healEntryResult{
entryDone: true,
name: name,
}
}
healEntrySuccess := func(sz uint64) healEntryResult {
return healEntryResult{
bytes: sz,
success: true,
}
}
healEntryFailure := func(sz uint64) healEntryResult {
return healEntryResult{
bytes: sz,
}
}
healEntrySkipped := func(sz uint64) healEntryResult {
return healEntryResult{
bytes: sz,
skipped: true,
}
}
// Collect updates to tracker from concurrent healEntry calls
results := make(chan healEntryResult, 1000)
defer close(results)
go func() {
for res := range results {
if res.entryDone {
tracker.setObject(res.name)
if time.Since(tracker.getLastUpdate()) > time.Minute {
healingLogIf(ctx, tracker.update(ctx))
}
continue
}
tracker.updateProgress(res.success, res.skipped, res.bytes)
}
}()
var retErr error
// Heal all buckets with all objects
for _, bucket := range healBuckets {
if tracker.isHealed(bucket) {
continue
}
var forwardTo string
// If we resume to the same bucket, forward to last known item.
if b := tracker.getBucket(); b != "" {
if b == bucket {
forwardTo = tracker.getObject()
} else {
// Reset to where last bucket ended if resuming.
tracker.resume()
}
b := tracker.getBucket()
if b == bucket {
forwardTo = tracker.getObject()
}
if b != "" {
// Reset to where last bucket ended if resuming.
tracker.resume()
}
tracker.setObject("")
tracker.setBucket(bucket)
@@ -280,37 +334,6 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
fallbackDisks := disks[expectedDisks:]
disks = disks[:expectedDisks]
type healEntryResult struct {
bytes uint64
success bool
skipped bool
entryDone bool
name string
}
healEntryDone := func(name string) healEntryResult {
return healEntryResult{
entryDone: true,
name: name,
}
}
healEntrySuccess := func(sz uint64) healEntryResult {
return healEntryResult{
bytes: sz,
success: true,
}
}
healEntryFailure := func(sz uint64) healEntryResult {
return healEntryResult{
bytes: sz,
}
}
healEntrySkipped := func(sz uint64) healEntryResult {
return healEntryResult{
bytes: sz,
skipped: true,
}
}
filterLifecycle := func(bucket, object string, fi FileInfo) bool {
if lc == nil {
return false
@@ -331,22 +354,6 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
}
}
// Collect updates to tracker from concurrent healEntry calls
results := make(chan healEntryResult, 1000)
go func() {
for res := range results {
if res.entryDone {
tracker.setObject(res.name)
if time.Since(tracker.getLastUpdate()) > time.Minute {
healingLogIf(ctx, tracker.update(ctx))
}
continue
}
tracker.updateProgress(res.success, res.skipped, res.bytes)
}
}()
send := func(result healEntryResult) bool {
select {
case <-ctx.Done():
@@ -393,7 +400,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
var result healEntryResult
fivs, err := entry.fileInfoVersions(bucket)
if err != nil {
_, err := er.HealObject(ctx, bucket, encodedEntryName, "",
res, err := er.HealObject(ctx, bucket, encodedEntryName, "",
madmin.HealOpts{
ScanMode: scanMode,
Remove: healDeleteDangling,
@@ -407,7 +414,7 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
result = healEntryFailure(0)
healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, entry.name, err))
} else {
result = healEntrySuccess(0)
result = healEntrySuccess(uint64(res.ObjectSize))
}
send(result)
@@ -430,11 +437,12 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
continue
}
if _, err := er.HealObject(ctx, bucket, encodedEntryName,
res, err := er.HealObject(ctx, bucket, encodedEntryName,
version.VersionID, madmin.HealOpts{
ScanMode: scanMode,
Remove: healDeleteDangling,
}); err != nil {
})
if err != nil {
if isErrObjectNotFound(err) || isErrVersionNotFound(err) {
// queueing happens across namespace, ignore
// objects that are not found.
@@ -449,22 +457,20 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
healingLogIf(ctx, fmt.Errorf("unable to heal object %s/%s: %w", bucket, version.Name, err))
}
} else {
result = healEntrySuccess(uint64(version.Size))
result = healEntrySuccess(uint64(res.ObjectSize))
}
if !send(result) {
return
}
}
// All versions resulted in 'ObjectNotFound/VersionNotFound'
if versionNotFound == len(fivs.Versions) {
return
}
select {
case <-ctx.Done():
return
case results <- healEntryDone(entry.name):
}
send(healEntryDone(entry.name))
// Wait and proceed if there are active requests
waitForLowHTTPReq()
@@ -502,7 +508,6 @@ func (er *erasureObjects) healErasureSet(ctx context.Context, buckets []string,
finished: nil,
})
jt.Wait() // synchronize all the concurrent heal jobs
xioutil.SafeClose(results)
if err != nil {
// Set this such that when we return this function
// we let the caller retry this disk again for the


@@ -417,6 +417,10 @@ func (s *xlStorage) Healing() *healingTracker {
if err != nil {
return nil
}
if len(b) == 0 {
// 'healing.bin' might be truncated
return nil
}
h := newHealingTracker()
_, err = h.UnmarshalMsg(b)
bugLogIf(GlobalContext, err)
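
The zero-length guard connects back to the commit title: if a stale goroutine rewrites `healing.bin` while another reader loads it, the reader can land between the truncate and the write and observe an empty file. A sketch of one plausible way that happens, with a hypothetical temp path standing in for the real tracker file:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Hypothetical location; the real healing.bin lives under the
	// drive's .minio.sys directory.
	path := filepath.Join(os.TempDir(), "healing.bin")

	// A rewrite via O_TRUNC is two observable steps: truncate, then
	// write. A reader scheduled between them sees a zero-length file.
	f, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0o644)
	if err != nil {
		panic(err)
	}

	b, err := os.ReadFile(path) // a reader landing mid-rewrite
	if err == nil && len(b) == 0 {
		// This is the case the new guard handles: treat it as "no
		// healing in progress" instead of feeding the msgp decoder.
		fmt.Println("observed truncated healing.bin; ignoring")
	}

	f.WriteString("tracker bytes") // the rewrite completes afterwards
	f.Close()
	os.Remove(path)
}
```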


@@ -74,7 +74,7 @@ Setting this environment variable automatically enables audit logging to the HTT
NOTE:
- `timeToFirstByte` and `timeToResponse` will be expressed in Nanoseconds.
- Additionally in the case of the erasure coded setup `tags.objectErasureMap` provides per object details about
- Additionally in the case of the erasure coded setup `tags.objectLocation` provides per object details about
- Pool number the object operation was performed on.
- Set number the object operation was performed on.
- The list of drives participating in this operation belong to the set.
@@ -82,8 +82,9 @@ NOTE:
```json
{
"version": "1",
"deploymentid": "51bcc7b9-a447-4251-a940-d9d0aab9af69",
"time": "2021-10-08T00:46:36.801714978Z",
"deploymentid": "90e81272-45d9-4fe8-9c45-c9a7322bf4b5",
"time": "2024-05-09T07:38:10.449688982Z",
"event": "",
"trigger": "incoming",
"api": {
"name": "PutObject",
@@ -91,51 +92,52 @@ NOTE:
"object": "hosts",
"status": "OK",
"statusCode": 200,
"rx": 380,
"tx": 476,
"timeToResponse": "257694819ns"
"rx": 401,
"tx": 0,
"timeToResponse": "13309747ns",
"timeToResponseInNS": "13309747"
},
"remotehost": "127.0.0.1",
"requestID": "16ABE7A785E7AC2C",
"userAgent": "MinIO (linux; amd64) minio-go/v7.0.15 mc/DEVELOPMENT.2021-10-06T23-39-34Z",
"requestID": "17CDC1F4D7E69123",
"userAgent": "MinIO (linux; amd64) minio-go/v7.0.70 mc/RELEASE.2024-04-30T17-44-48Z",
"requestPath": "/testbucket/hosts",
"requestHost": "localhost:9000",
"requestHeader": {
"Authorization": "AWS4-HMAC-SHA256 Credential=minio/20211008/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length,Signature=4c60a59e5eb3b0a68693c7fee9dbb5a8a509e0717668669194d37bf182fde031",
"Content-Length": "380",
"Accept-Encoding": "zstd,gzip",
"Authorization": "AWS4-HMAC-SHA256 Credential=minioadmin/20240509/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length,Signature=d4d6862e6cc61011a61fa801da71048ece4f32a0562cad6bb88bdda50d7fcb95",
"Content-Length": "401",
"Content-Type": "application/octet-stream",
"User-Agent": "MinIO (linux; amd64) minio-go/v7.0.15 mc/DEVELOPMENT.2021-10-06T23-39-34Z",
"User-Agent": "MinIO (linux; amd64) minio-go/v7.0.70 mc/RELEASE.2024-04-30T17-44-48Z",
"X-Amz-Content-Sha256": "STREAMING-AWS4-HMAC-SHA256-PAYLOAD",
"X-Amz-Date": "20211008T004636Z",
"X-Amz-Decoded-Content-Length": "207",
"X-Amz-Server-Side-Encryption": "aws:kms"
"X-Amz-Date": "20240509T073810Z",
"X-Amz-Decoded-Content-Length": "228"
},
"responseHeader": {
"Accept-Ranges": "bytes",
"Content-Length": "0",
"ETag": "4939450d1beec11e10a91ee7700bb593",
"ETag": "9fe7a344ef4227d3e53751e9d88ce41e",
"Server": "MinIO",
"Strict-Transport-Security": "max-age=31536000; includeSubDomains",
"Vary": "Origin,Accept-Encoding",
"X-Amz-Request-Id": "16ABE7A785E7AC2C",
"X-Amz-Server-Side-Encryption": "aws:kms",
"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": "arn:aws:kms:my-minio-key",
"X-Amz-Id-2": "dd9025bab4ad464b049177c95eb6ebf374d3b3fd1af9251148b658df7ac2e3e8",
"X-Amz-Request-Id": "17CDC1F4D7E69123",
"X-Content-Type-Options": "nosniff",
"X-Xss-Protection": "1; mode=block",
"x-amz-version-id": "ac4639f6-c544-4f3f-af1e-b4c0736f67f9"
"X-Xss-Protection": "1; mode=block"
},
"tags": {
"objectErasureMap": {
"hosts": {
"poolId": 1,
"setId": 1,
"drives": [
"/mnt/data1",
"/mnt/data2",
"/mnt/data3",
"/mnt/data4"
]
}
"objectLocation": {
"name": "hosts",
"poolId": 1,
"setId": 1,
"drives": [
"/mnt/data1",
"/mnt/data2",
"/mnt/data3",
"/mnt/data4"
]
}
}
},
"accessKey": "minioadmin"
}
```
@@ -176,7 +178,7 @@ On another terminal assuming you have `kafkacat` installed
```
kafkacat -b localhost:29092 -t auditlog -C
{"version":"1","deploymentid":"8a1d8091-b874-45df-b9ea-e044eede6ace","time":"2021-07-13T02:00:47.020547414Z","trigger":"incoming","api":{"name":"ListBuckets","status":"OK","statusCode":200,"timeToFirstByte":"261795ns","timeToResponse":"312490ns"},"remotehost":"127.0.0.1","requestID":"16913736591C237F","userAgent":"MinIO (linux; amd64) minio-go/v7.0.11 mc/DEVELOPMENT.2021-07-09T02-22-26Z","requestHeader":{"Authorization":"AWS4-HMAC-SHA256 Credential=minio/20210713/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=7fe65c5467e05ca21de64094688da43f96f34fec82e8955612827079f4600527","User-Agent":"MinIO (linux; amd64) minio-go/v7.0.11 mc/DEVELOPMENT.2021-07-09T02-22-26Z","X-Amz-Content-Sha256":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","X-Amz-Date":"20210713T020047Z"},"responseHeader":{"Accept-Ranges":"bytes","Content-Length":"547","Content-Security-Policy":"block-all-mixed-content","Content-Type":"application/xml","Server":"MinIO","Vary":"Origin,Accept-Encoding","X-Amz-Request-Id":"16913736591C237F","X-Xss-Protection":"1; mode=block"}}
{"version":"1","deploymentid":"90e81272-45d9-4fe8-9c45-c9a7322bf4b5","time":"2024-05-09T07:38:10.449688982Z","event":"","trigger":"incoming","api":{"name":"PutObject","bucket":"testbucket","object":"hosts","status":"OK","statusCode":200,"rx":401,"tx":0,"timeToResponse":"13309747ns","timeToResponseInNS":"13309747"},"remotehost":"127.0.0.1","requestID":"17CDC1F4D7E69123","userAgent":"MinIO (linux; amd64) minio-go/v7.0.70 mc/RELEASE.2024-04-30T17-44-48Z","requestPath":"/testbucket/hosts","requestHost":"localhost:9000","requestHeader":{"Accept-Encoding":"zstd,gzip","Authorization":"AWS4-HMAC-SHA256 Credential=minioadmin/20240509/us-east-1/s3/aws4_request,SignedHeaders=host;x-amz-content-sha256;x-amz-date;x-amz-decoded-content-length,Signature=d4d6862e6cc61011a61fa801da71048ece4f32a0562cad6bb88bdda50d7fcb95","Content-Length":"401","Content-Type":"application/octet-stream","User-Agent":"MinIO (linux; amd64) minio-go/v7.0.70 mc/RELEASE.2024-04-30T17-44-48Z","X-Amz-Content-Sha256":"STREAMING-AWS4-HMAC-SHA256-PAYLOAD","X-Amz-Date":"20240509T073810Z","X-Amz-Decoded-Content-Length":"228"},"responseHeader":{"Accept-Ranges":"bytes","Content-Length":"0","ETag":"9fe7a344ef4227d3e53751e9d88ce41e","Server":"MinIO","Strict-Transport-Security":"max-age=31536000; includeSubDomains","Vary":"Origin,Accept-Encoding","X-Amz-Id-2":"dd9025bab4ad464b049177c95eb6ebf374d3b3fd1af9251148b658df7ac2e3e8","X-Amz-Request-Id":"17CDC1F4D7E69123","X-Content-Type-Options":"nosniff","X-Xss-Protection":"1; mode=block"},"tags":{"objectLocation":{"name":"hosts","poolId":1,"setId":1,"drives":["/mnt/data1","/mnt/data2","/mnt/data3","/mnt/data4"]}},"accessKey":"minioadmin"}
```
MinIO also honors the environment variable for Kafka target audit logging shown below; this setting overrides the endpoint settings in the MinIO server config.
@@ -215,7 +217,7 @@ Setting this environment variable automatically enables audit logging to the Kaf
NOTE:
- `timeToFirstByte` and `timeToResponse` will be expressed in Nanoseconds.
- Additionally in the case of the erasure coded setup `tags.objectErasureMap` provides per object details about
- Additionally in the case of the erasure coded setup `tags.objectLocation` provides per object details about
- Pool number the object operation was performed on.
- Set number the object operation was performed on.
- The list of drives participating in this operation belong to the set.
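
Consumers keyed on the old `tags.objectErasureMap` shape must move to `tags.objectLocation`. A minimal sketch of decoding the renamed field, assuming only the JSON shape documented above:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Shape of tags.objectLocation as documented above.
type objectLocation struct {
	Name   string   `json:"name"`
	Pool   int      `json:"poolId"`
	Set    int      `json:"setId"`
	Drives []string `json:"drives"`
}

func main() {
	line := []byte(`{"tags":{"objectLocation":{"name":"hosts","poolId":1,"setId":1,"drives":["/mnt/data1","/mnt/data2"]}}}`)

	var entry struct {
		Tags struct {
			ObjectLocation objectLocation `json:"objectLocation"`
		} `json:"tags"`
	}
	if err := json.Unmarshal(line, &entry); err != nil {
		panic(err)
	}
	loc := entry.Tags.ObjectLocation
	fmt.Printf("pool %d, set %d, drives %v\n", loc.Pool, loc.Set, loc.Drives)
}
```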