diff --git a/cmd/admin-bucket-handlers.go b/cmd/admin-bucket-handlers.go index ac175541a..49ab89443 100644 --- a/cmd/admin-bucket-handlers.go +++ b/cmd/admin-bucket-handlers.go @@ -120,7 +120,7 @@ func (a adminAPIHandlers) GetBucketQuotaConfigHandler(w http.ResponseWriter, r * return } - config, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket) + config, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket) if err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) return diff --git a/cmd/admin-handlers-users.go b/cmd/admin-handlers-users.go index 05ff446a8..c21203ab3 100644 --- a/cmd/admin-handlers-users.go +++ b/cmd/admin-handlers-users.go @@ -1182,8 +1182,8 @@ func (a adminAPIHandlers) AccountInfoHandler(w http.ResponseWriter, r *http.Requ lcfg, _ := globalBucketObjectLockSys.Get(bucket.Name) quota, _ := globalBucketQuotaSys.Get(ctx, bucket.Name) - rcfg, _ := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket.Name) - tcfg, _ := globalBucketMetadataSys.GetTaggingConfig(bucket.Name) + rcfg, _, _ := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket.Name) + tcfg, _, _ := globalBucketMetadataSys.GetTaggingConfig(bucket.Name) acctInfo.Buckets = append(acctInfo.Buckets, madmin.BucketAccessInfo{ Name: bucket.Name, diff --git a/cmd/bucket-encryption-handlers.go b/cmd/bucket-encryption-handlers.go index 6d084b5b1..a8cf59b79 100644 --- a/cmd/bucket-encryption-handlers.go +++ b/cmd/bucket-encryption-handlers.go @@ -158,7 +158,7 @@ func (api objectAPIHandlers) GetBucketEncryptionHandler(w http.ResponseWriter, r return } - config, err := globalBucketMetadataSys.GetSSEConfig(bucket) + config, _, err := globalBucketMetadataSys.GetSSEConfig(bucket) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return diff --git a/cmd/bucket-encryption.go b/cmd/bucket-encryption.go index d1136d16d..f31ebdde9 100644 --- a/cmd/bucket-encryption.go +++ b/cmd/bucket-encryption.go @@ -43,7 +43,8 @@ func (sys *BucketSSEConfigSys) Get(bucket string) (*sse.BucketSSEConfig, error) return nil, BucketSSEConfigNotFound{Bucket: bucket} } - return globalBucketMetadataSys.GetSSEConfig(bucket) + sseCfg, _, err := globalBucketMetadataSys.GetSSEConfig(bucket) + return sseCfg, err } // validateBucketSSEConfig parses bucket encryption configuration and validates if it is supported by MinIO. diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 41df522c3..8d9acf20d 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -1374,7 +1374,7 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri } // Deny object locking configuration settings on existing buckets without object lock enabled. 
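Across these call sites the pattern is the same: each bucket-metadata getter changed in this patch now returns an extra time.Time recording when that particular config was last written, and handlers that only need the config discard it with a blank identifier. A minimal sketch of the new call shape, assuming the cmd package globals and imports used above (the helper itself is hypothetical, not part of the patch):

	// bucketTags returns a bucket's tagging config plus the time it was last set.
	func bucketTags(bucket string) (*tags.Tags, time.Time, error) {
		cfg, updatedAt, err := globalBucketMetadataSys.GetTaggingConfig(bucket)
		if err != nil {
			// BucketTaggingNotFound and other errors pass through unchanged.
			return nil, time.Time{}, err
		}
		return cfg, updatedAt, nil
	}
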
- if _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil { + if _, _, err = globalBucketMetadataSys.GetObjectLockConfig(bucket); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } @@ -1427,7 +1427,7 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri return } - config, err := globalBucketMetadataSys.GetObjectLockConfig(bucket) + config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -1529,7 +1529,7 @@ func (api objectAPIHandlers) GetBucketTaggingHandler(w http.ResponseWriter, r *h return } - config, err := globalBucketMetadataSys.GetTaggingConfig(bucket) + config, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -1677,7 +1677,7 @@ func (api objectAPIHandlers) GetBucketReplicationConfigHandler(w http.ResponseWr return } - config, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket) + config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -1820,7 +1820,7 @@ func (api objectAPIHandlers) ResetBucketReplicationStartHandler(w http.ResponseW return } - config, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket) + config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return @@ -1908,7 +1908,7 @@ func (api objectAPIHandlers) ResetBucketReplicationStatusHandler(w http.Response return } - if _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket); err != nil { + if _, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL) return } diff --git a/cmd/bucket-metadata-sys.go b/cmd/bucket-metadata-sys.go index 82dbea459..c5e72a896 100644 --- a/cmd/bucket-metadata-sys.go +++ b/cmd/bucket-metadata-sys.go @@ -23,6 +23,7 @@ import ( "errors" "fmt" "sync" + "time" "github.com/minio/madmin-go" "github.com/minio/minio-go/v7/pkg/tags" @@ -111,21 +112,26 @@ func (sys *BucketMetadataSys) Update(ctx context.Context, bucket string, configF switch configFile { case bucketPolicyConfig: meta.PolicyConfigJSON = configData + meta.PolicyConfigUpdatedAt = UTCNow() case bucketNotificationConfig: meta.NotificationConfigXML = configData case bucketLifecycleConfig: meta.LifecycleConfigXML = configData case bucketSSEConfig: meta.EncryptionConfigXML = configData + meta.EncryptionConfigUpdatedAt = UTCNow() case bucketTaggingConfig: meta.TaggingConfigXML = configData + meta.TaggingConfigUpdatedAt = UTCNow() case bucketQuotaConfigFile: meta.QuotaConfigJSON = configData + meta.QuotaConfigUpdatedAt = UTCNow() case objectLockConfig: if !globalIsErasure && !globalIsDistErasure { return NotImplemented{} } meta.ObjectLockConfigXML = configData + meta.ObjectLockConfigUpdatedAt = UTCNow() case bucketVersioningConfig: if !globalIsErasure && !globalIsDistErasure { return NotImplemented{} @@ -136,6 +142,7 @@ func (sys *BucketMetadataSys) Update(ctx context.Context, bucket string, configF return NotImplemented{} } meta.ReplicationConfigXML = configData + meta.ReplicationConfigUpdatedAt = UTCNow() case bucketTargetsFile: meta.BucketTargetsConfigJSON, meta.BucketTargetsConfigMetaJSON, err = encryptBucketMetadata(meta.Name, configData, kms.Context{ bucket: meta.Name, @@ 
-196,34 +203,34 @@ func (sys *BucketMetadataSys) GetVersioningConfig(bucket string) (*versioning.Ve // GetTaggingConfig returns configured tagging config // The returned object may not be modified. -func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, error) { +func (sys *BucketMetadataSys) GetTaggingConfig(bucket string) (*tags.Tags, time.Time, error) { meta, err := sys.GetConfig(GlobalContext, bucket) if err != nil { if errors.Is(err, errConfigNotFound) { - return nil, BucketTaggingNotFound{Bucket: bucket} + return nil, time.Time{}, BucketTaggingNotFound{Bucket: bucket} } - return nil, err + return nil, time.Time{}, err } if meta.taggingConfig == nil { - return nil, BucketTaggingNotFound{Bucket: bucket} + return nil, time.Time{}, BucketTaggingNotFound{Bucket: bucket} } - return meta.taggingConfig, nil + return meta.taggingConfig, meta.TaggingConfigUpdatedAt, nil } // GetObjectLockConfig returns configured object lock config // The returned object may not be modified. -func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Config, error) { +func (sys *BucketMetadataSys) GetObjectLockConfig(bucket string) (*objectlock.Config, time.Time, error) { meta, err := sys.GetConfig(GlobalContext, bucket) if err != nil { if errors.Is(err, errConfigNotFound) { - return nil, BucketObjectLockConfigNotFound{Bucket: bucket} + return nil, time.Time{}, BucketObjectLockConfigNotFound{Bucket: bucket} } - return nil, err + return nil, time.Time{}, err } if meta.objectLockConfig == nil { - return nil, BucketObjectLockConfigNotFound{Bucket: bucket} + return nil, time.Time{}, BucketObjectLockConfigNotFound{Bucket: bucket} } - return meta.objectLockConfig, nil + return meta.objectLockConfig, meta.ObjectLockConfigUpdatedAt, nil } // GetLifecycleConfig returns configured lifecycle config @@ -283,18 +290,27 @@ func (sys *BucketMetadataSys) GetNotificationConfig(bucket string) (*event.Confi // GetSSEConfig returns configured SSE config // The returned object may not be modified. -func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEConfig, error) { +func (sys *BucketMetadataSys) GetSSEConfig(bucket string) (*bucketsse.BucketSSEConfig, time.Time, error) { meta, err := sys.GetConfig(GlobalContext, bucket) if err != nil { if errors.Is(err, errConfigNotFound) { - return nil, BucketSSEConfigNotFound{Bucket: bucket} + return nil, time.Time{}, BucketSSEConfigNotFound{Bucket: bucket} } - return nil, err + return nil, time.Time{}, err } if meta.sseConfig == nil { - return nil, BucketSSEConfigNotFound{Bucket: bucket} + return nil, time.Time{}, BucketSSEConfigNotFound{Bucket: bucket} } - return meta.sseConfig, nil + return meta.sseConfig, meta.EncryptionConfigUpdatedAt, nil +} + +// CreatedAt returns the time of creation of bucket +func (sys *BucketMetadataSys) CreatedAt(bucket string) (time.Time, error) { + meta, err := sys.GetConfig(GlobalContext, bucket) + if err != nil { + return time.Time{}, err + } + return meta.Created.UTC(), nil } // GetPolicyConfig returns configured bucket policy @@ -323,29 +339,29 @@ func (sys *BucketMetadataSys) GetPolicyConfig(bucket string) (*policy.Policy, er // GetQuotaConfig returns configured bucket quota // The returned object may not be modified. 
-func (sys *BucketMetadataSys) GetQuotaConfig(ctx context.Context, bucket string) (*madmin.BucketQuota, error) { +func (sys *BucketMetadataSys) GetQuotaConfig(ctx context.Context, bucket string) (*madmin.BucketQuota, time.Time, error) { meta, err := sys.GetConfig(ctx, bucket) if err != nil { - return nil, err + return nil, time.Time{}, err } - return meta.quotaConfig, nil + return meta.quotaConfig, meta.QuotaConfigUpdatedAt, nil } // GetReplicationConfig returns configured bucket replication config // The returned object may not be modified. -func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket string) (*replication.Config, error) { +func (sys *BucketMetadataSys) GetReplicationConfig(ctx context.Context, bucket string) (*replication.Config, time.Time, error) { meta, err := sys.GetConfig(ctx, bucket) if err != nil { if errors.Is(err, errConfigNotFound) { - return nil, BucketReplicationConfigNotFound{Bucket: bucket} + return nil, time.Time{}, BucketReplicationConfigNotFound{Bucket: bucket} } - return nil, err + return nil, time.Time{}, err } if meta.replicationConfig == nil { - return nil, BucketReplicationConfigNotFound{Bucket: bucket} + return nil, time.Time{}, BucketReplicationConfigNotFound{Bucket: bucket} } - return meta.replicationConfig, nil + return meta.replicationConfig, meta.ReplicationConfigUpdatedAt, nil } // GetBucketTargetsConfig returns configured bucket targets for this bucket diff --git a/cmd/bucket-metadata.go b/cmd/bucket-metadata.go index f90c8183d..8837495ff 100644 --- a/cmd/bucket-metadata.go +++ b/cmd/bucket-metadata.go @@ -81,6 +81,12 @@ type BucketMetadata struct { ReplicationConfigXML []byte BucketTargetsConfigJSON []byte BucketTargetsConfigMetaJSON []byte + PolicyConfigUpdatedAt time.Time + ObjectLockConfigUpdatedAt time.Time + EncryptionConfigUpdatedAt time.Time + TaggingConfigUpdatedAt time.Time + QuotaConfigUpdatedAt time.Time + ReplicationConfigUpdatedAt time.Time // Unexported fields. Must be updated atomically. policyConfig *policy.Policy @@ -98,9 +104,10 @@ type BucketMetadata struct { // newBucketMetadata creates BucketMetadata with the supplied name and Created to Now. func newBucketMetadata(name string) BucketMetadata { + now := UTCNow() return BucketMetadata{ Name: name, - Created: UTCNow(), + Created: now, notificationConfig: &event.Config{ XMLNS: "http://s3.amazonaws.com/doc/2006-03-01/", }, @@ -157,8 +164,13 @@ func loadBucketMetadata(ctx context.Context, objectAPI ObjectLayer, bucket strin if err := b.convertLegacyConfigs(ctx, objectAPI); err != nil { return b, err } + // migrate unencrypted remote targets - return b, b.migrateTargetConfig(ctx, objectAPI) + if err = b.migrateTargetConfig(ctx, objectAPI); err != nil { + return b, err + } + b.defaultTimestamps() + return b, nil } // parseAllConfigs will parse all configs and populate the private fields. @@ -347,6 +359,32 @@ func (b *BucketMetadata) convertLegacyConfigs(ctx context.Context, objectAPI Obj return nil } +// default timestamps to metadata Created timestamp if unset. 
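The six new *ConfigUpdatedAt fields are persisted alongside the rest of BucketMetadata, so metadata written by older releases decodes them as zero values; loadBucketMetadata therefore normalizes them to the bucket's Created time via defaultTimestamps below. A consumer that cannot rely on that normalization could apply the same fallback, as in this rough standard-library-only sketch (the function name is illustrative):

	// configAge reports how long ago a bucket config changed, treating a zero
	// UpdatedAt (metadata written before this change) as unchanged since creation.
	func configAge(updatedAt, created time.Time) time.Duration {
		if updatedAt.IsZero() {
			updatedAt = created
		}
		return time.Since(updatedAt)
	}
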
+func (b *BucketMetadata) defaultTimestamps() { + if b.PolicyConfigUpdatedAt.IsZero() { + b.PolicyConfigUpdatedAt = b.Created + } + if b.EncryptionConfigUpdatedAt.IsZero() { + b.EncryptionConfigUpdatedAt = b.Created + } + + if b.TaggingConfigUpdatedAt.IsZero() { + b.TaggingConfigUpdatedAt = b.Created + } + + if b.ObjectLockConfigUpdatedAt.IsZero() { + b.ObjectLockConfigUpdatedAt = b.Created + } + + if b.QuotaConfigUpdatedAt.IsZero() { + b.QuotaConfigUpdatedAt = b.Created + } + + if b.ReplicationConfigUpdatedAt.IsZero() { + b.ReplicationConfigUpdatedAt = b.Created + } +} + // Save config to supplied ObjectLayer api. func (b *BucketMetadata) Save(ctx context.Context, api ObjectLayer) error { if err := b.parseAllConfigs(ctx, api); err != nil { diff --git a/cmd/bucket-metadata_gen.go b/cmd/bucket-metadata_gen.go index fd869aa18..2907e9d38 100644 --- a/cmd/bucket-metadata_gen.go +++ b/cmd/bucket-metadata_gen.go @@ -108,6 +108,42 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) { err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON") return } + case "PolicyConfigUpdatedAt": + z.PolicyConfigUpdatedAt, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "PolicyConfigUpdatedAt") + return + } + case "ObjectLockConfigUpdatedAt": + z.ObjectLockConfigUpdatedAt, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "ObjectLockConfigUpdatedAt") + return + } + case "EncryptionConfigUpdatedAt": + z.EncryptionConfigUpdatedAt, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "EncryptionConfigUpdatedAt") + return + } + case "TaggingConfigUpdatedAt": + z.TaggingConfigUpdatedAt, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "TaggingConfigUpdatedAt") + return + } + case "QuotaConfigUpdatedAt": + z.QuotaConfigUpdatedAt, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "QuotaConfigUpdatedAt") + return + } + case "ReplicationConfigUpdatedAt": + z.ReplicationConfigUpdatedAt, err = dc.ReadTime() + if err != nil { + err = msgp.WrapError(err, "ReplicationConfigUpdatedAt") + return + } default: err = dc.Skip() if err != nil { @@ -121,9 +157,9 @@ func (z *BucketMetadata) DecodeMsg(dc *msgp.Reader) (err error) { // EncodeMsg implements msgp.Encodable func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) { - // map header, size 14 + // map header, size 20 // write "Name" - err = en.Append(0x8e, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + err = en.Append(0xde, 0x0, 0x14, 0xa4, 0x4e, 0x61, 0x6d, 0x65) if err != nil { return } @@ -262,15 +298,75 @@ func (z *BucketMetadata) EncodeMsg(en *msgp.Writer) (err error) { err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON") return } + // write "PolicyConfigUpdatedAt" + err = en.Append(0xb5, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + if err != nil { + return + } + err = en.WriteTime(z.PolicyConfigUpdatedAt) + if err != nil { + err = msgp.WrapError(err, "PolicyConfigUpdatedAt") + return + } + // write "ObjectLockConfigUpdatedAt" + err = en.Append(0xb9, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + if err != nil { + return + } + err = en.WriteTime(z.ObjectLockConfigUpdatedAt) + if err != nil { + err = msgp.WrapError(err, "ObjectLockConfigUpdatedAt") + return + } + // write "EncryptionConfigUpdatedAt" + err = en.Append(0xb9, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 
0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + if err != nil { + return + } + err = en.WriteTime(z.EncryptionConfigUpdatedAt) + if err != nil { + err = msgp.WrapError(err, "EncryptionConfigUpdatedAt") + return + } + // write "TaggingConfigUpdatedAt" + err = en.Append(0xb6, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + if err != nil { + return + } + err = en.WriteTime(z.TaggingConfigUpdatedAt) + if err != nil { + err = msgp.WrapError(err, "TaggingConfigUpdatedAt") + return + } + // write "QuotaConfigUpdatedAt" + err = en.Append(0xb4, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + if err != nil { + return + } + err = en.WriteTime(z.QuotaConfigUpdatedAt) + if err != nil { + err = msgp.WrapError(err, "QuotaConfigUpdatedAt") + return + } + // write "ReplicationConfigUpdatedAt" + err = en.Append(0xba, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + if err != nil { + return + } + err = en.WriteTime(z.ReplicationConfigUpdatedAt) + if err != nil { + err = msgp.WrapError(err, "ReplicationConfigUpdatedAt") + return + } return } // MarshalMsg implements msgp.Marshaler func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) { o = msgp.Require(b, z.Msgsize()) - // map header, size 14 + // map header, size 20 // string "Name" - o = append(o, 0x8e, 0xa4, 0x4e, 0x61, 0x6d, 0x65) + o = append(o, 0xde, 0x0, 0x14, 0xa4, 0x4e, 0x61, 0x6d, 0x65) o = msgp.AppendString(o, z.Name) // string "Created" o = append(o, 0xa7, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64) @@ -311,6 +407,24 @@ func (z *BucketMetadata) MarshalMsg(b []byte) (o []byte, err error) { // string "BucketTargetsConfigMetaJSON" o = append(o, 0xbb, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x4d, 0x65, 0x74, 0x61, 0x4a, 0x53, 0x4f, 0x4e) o = msgp.AppendBytes(o, z.BucketTargetsConfigMetaJSON) + // string "PolicyConfigUpdatedAt" + o = append(o, 0xb5, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + o = msgp.AppendTime(o, z.PolicyConfigUpdatedAt) + // string "ObjectLockConfigUpdatedAt" + o = append(o, 0xb9, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x4c, 0x6f, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + o = msgp.AppendTime(o, z.ObjectLockConfigUpdatedAt) + // string "EncryptionConfigUpdatedAt" + o = append(o, 0xb9, 0x45, 0x6e, 0x63, 0x72, 0x79, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + o = msgp.AppendTime(o, z.EncryptionConfigUpdatedAt) + // string "TaggingConfigUpdatedAt" + o = append(o, 0xb6, 0x54, 0x61, 0x67, 0x67, 0x69, 0x6e, 0x67, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + o = msgp.AppendTime(o, z.TaggingConfigUpdatedAt) + // string "QuotaConfigUpdatedAt" + o = append(o, 0xb4, 0x51, 0x75, 0x6f, 0x74, 0x61, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + o = msgp.AppendTime(o, z.QuotaConfigUpdatedAt) + // string "ReplicationConfigUpdatedAt" + o = append(o, 0xba, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x55, 0x70, 
0x64, 0x61, 0x74, 0x65, 0x64, 0x41, 0x74) + o = msgp.AppendTime(o, z.ReplicationConfigUpdatedAt) return } @@ -416,6 +530,42 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) { err = msgp.WrapError(err, "BucketTargetsConfigMetaJSON") return } + case "PolicyConfigUpdatedAt": + z.PolicyConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "PolicyConfigUpdatedAt") + return + } + case "ObjectLockConfigUpdatedAt": + z.ObjectLockConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ObjectLockConfigUpdatedAt") + return + } + case "EncryptionConfigUpdatedAt": + z.EncryptionConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "EncryptionConfigUpdatedAt") + return + } + case "TaggingConfigUpdatedAt": + z.TaggingConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "TaggingConfigUpdatedAt") + return + } + case "QuotaConfigUpdatedAt": + z.QuotaConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "QuotaConfigUpdatedAt") + return + } + case "ReplicationConfigUpdatedAt": + z.ReplicationConfigUpdatedAt, bts, err = msgp.ReadTimeBytes(bts) + if err != nil { + err = msgp.WrapError(err, "ReplicationConfigUpdatedAt") + return + } default: bts, err = msgp.Skip(bts) if err != nil { @@ -430,6 +580,6 @@ func (z *BucketMetadata) UnmarshalMsg(bts []byte) (o []byte, err error) { // Msgsize returns an upper bound estimate of the number of bytes occupied by the serialized message func (z *BucketMetadata) Msgsize() (s int) { - s = 1 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON) + s = 3 + 5 + msgp.StringPrefixSize + len(z.Name) + 8 + msgp.TimeSize + 12 + msgp.BoolSize + 17 + msgp.BytesPrefixSize + len(z.PolicyConfigJSON) + 22 + msgp.BytesPrefixSize + len(z.NotificationConfigXML) + 19 + msgp.BytesPrefixSize + len(z.LifecycleConfigXML) + 20 + msgp.BytesPrefixSize + len(z.ObjectLockConfigXML) + 20 + msgp.BytesPrefixSize + len(z.VersioningConfigXML) + 20 + msgp.BytesPrefixSize + len(z.EncryptionConfigXML) + 17 + msgp.BytesPrefixSize + len(z.TaggingConfigXML) + 16 + msgp.BytesPrefixSize + len(z.QuotaConfigJSON) + 21 + msgp.BytesPrefixSize + len(z.ReplicationConfigXML) + 24 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigJSON) + 28 + msgp.BytesPrefixSize + len(z.BucketTargetsConfigMetaJSON) + 22 + msgp.TimeSize + 26 + msgp.TimeSize + 26 + msgp.TimeSize + 23 + msgp.TimeSize + 21 + msgp.TimeSize + 27 + msgp.TimeSize return } diff --git a/cmd/bucket-object-lock.go b/cmd/bucket-object-lock.go index ac7b360ea..28aa8fb20 100644 --- a/cmd/bucket-object-lock.go +++ b/cmd/bucket-object-lock.go @@ -44,7 +44,7 @@ func (sys *BucketObjectLockSys) Get(bucketName string) (r objectlock.Retention, return r, nil } - config, err := 
globalBucketMetadataSys.GetObjectLockConfig(bucketName) + config, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucketName) if err != nil { if _, ok := err.(BucketObjectLockConfigNotFound); ok { return r, nil diff --git a/cmd/bucket-quota.go b/cmd/bucket-quota.go index 5c03f5f5d..3771532e3 100644 --- a/cmd/bucket-quota.go +++ b/cmd/bucket-quota.go @@ -42,8 +42,8 @@ func (sys *BucketQuotaSys) Get(ctx context.Context, bucketName string) (*madmin. } return &madmin.BucketQuota{}, nil } - - return globalBucketMetadataSys.GetQuotaConfig(ctx, bucketName) + qCfg, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucketName) + return qCfg, err } // NewBucketQuotaSys returns initialized BucketQuotaSys diff --git a/cmd/bucket-replication.go b/cmd/bucket-replication.go index 29e934386..57cd23850 100644 --- a/cmd/bucket-replication.go +++ b/cmd/bucket-replication.go @@ -81,7 +81,8 @@ func getReplicationConfig(ctx context.Context, bucketName string) (rc *replicati return rc, BucketReplicationConfigNotFound{Bucket: bucketName} } - return globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName) + rCfg, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucketName) + return rCfg, err } // validateReplicationDestination returns error if replication destination bucket missing or not configured diff --git a/cmd/iam-store.go b/cmd/iam-store.go index 233e7ee6e..f88a469b5 100644 --- a/cmd/iam-store.go +++ b/cmd/iam-store.go @@ -127,27 +127,30 @@ func getMappedPolicyPath(name string, userType IAMUserType, isGroup bool) string type UserIdentity struct { Version int `json:"version"` Credentials auth.Credentials `json:"credentials"` + UpdatedAt time.Time `json:"updatedAt,omitempty"` } func newUserIdentity(cred auth.Credentials) UserIdentity { - return UserIdentity{Version: 1, Credentials: cred} + return UserIdentity{Version: 1, Credentials: cred, UpdatedAt: UTCNow()} } // GroupInfo contains info about a group type GroupInfo struct { - Version int `json:"version"` - Status string `json:"status"` - Members []string `json:"members"` + Version int `json:"version"` + Status string `json:"status"` + Members []string `json:"members"` + UpdatedAt time.Time `json:"updatedAt,omitempty"` } func newGroupInfo(members []string) GroupInfo { - return GroupInfo{Version: 1, Status: statusEnabled, Members: members} + return GroupInfo{Version: 1, Status: statusEnabled, Members: members, UpdatedAt: UTCNow()} } // MappedPolicy represents a policy name mapped to a user or group type MappedPolicy struct { - Version int `json:"version"` - Policies string `json:"policy"` + Version int `json:"version"` + Policies string `json:"policy"` + UpdatedAt time.Time `json:"updatedAt,omitempty"` } // converts a mapped policy into a slice of distinct policies @@ -168,7 +171,7 @@ func (mp MappedPolicy) policySet() set.StringSet { } func newMappedPolicy(policy string) MappedPolicy { - return MappedPolicy{Version: 1, Policies: policy} + return MappedPolicy{Version: 1, Policies: policy, UpdatedAt: UTCNow()} } // PolicyDoc represents an IAM policy with some metadata. @@ -314,26 +317,26 @@ func (c *iamCache) removeGroupFromMembershipsMap(group string) { // information in IAM (i.e sys.iam*Map) - this info is stored only in the STS // generated credentials. Thus we skip looking up group memberships, user map, // and group map and check the appropriate policy maps directly. 
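UserIdentity, GroupInfo and MappedPolicy above now carry an UpdatedAt field tagged json:"updatedAt,omitempty", stamped by their constructors, while objects persisted before this change unmarshal with a zero value. A small sketch of the resulting encoding, assuming the types above and encoding/json:

	mp := newMappedPolicy("readwrite") // constructor stamps UpdatedAt with UTCNow()
	buf, _ := json.Marshal(mp)
	// buf: {"version":1,"policy":"readwrite","updatedAt":"<UTC timestamp>"}

The cache lookup below, policyDBGet, now returns the mapping's UpdatedAt alongside the policy list, and the madmin.UserInfo and madmin.GroupDesc results surface it.
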
-func (c *iamCache) policyDBGet(mode UsersSysType, name string, isGroup bool) ([]string, error) { +func (c *iamCache) policyDBGet(mode UsersSysType, name string, isGroup bool) ([]string, time.Time, error) { if isGroup { if mode == MinIOUsersSysType { g, ok := c.iamGroupsMap[name] if !ok { - return nil, errNoSuchGroup + return nil, time.Time{}, errNoSuchGroup } // Group is disabled, so we return no policy - this // ensures the request is denied. if g.Status == statusDisabled { - return nil, nil + return nil, time.Time{}, nil } } - return c.iamGroupPolicyMap[name].toSlice(), nil + return c.iamGroupPolicyMap[name].toSlice(), c.iamGroupPolicyMap[name].UpdatedAt, nil } if name == globalActiveCred.AccessKey { - return []string{"consoleAdmin"}, nil + return []string{"consoleAdmin"}, time.Time{}, nil } // When looking for a user's policies, we also check if the user @@ -342,7 +345,7 @@ func (c *iamCache) policyDBGet(mode UsersSysType, name string, isGroup bool) ([] u, ok := c.iamUsersMap[name] if ok { if !u.IsValid() { - return nil, nil + return nil, time.Time{}, nil } parentName = u.ParentUser } @@ -353,7 +356,7 @@ func (c *iamCache) policyDBGet(mode UsersSysType, name string, isGroup bool) ([] if parentName == globalActiveCred.AccessKey && u.IsServiceAccount() { // even if this is set, the claims present in the service // accounts apply the final permissions if any. - return []string{"consoleAdmin"}, nil + return []string{"consoleAdmin"}, mp.UpdatedAt, nil } if parentName != "" { mp = c.iamUserPolicyMap[parentName] @@ -373,7 +376,7 @@ func (c *iamCache) policyDBGet(mode UsersSysType, name string, isGroup bool) ([] policies = append(policies, c.iamGroupPolicyMap[group].toSlice()...) } - return policies, nil + return policies, mp.UpdatedAt, nil } // IAMStorageAPI defines an interface for the IAM persistence layer @@ -572,14 +575,14 @@ func (store *IAMStoreSys) PolicyDBGet(name string, isGroup bool, groups ...strin cache := store.rlock() defer store.runlock() - policies, err := cache.policyDBGet(store.getUsersSysType(), name, isGroup) + policies, _, err := cache.policyDBGet(store.getUsersSysType(), name, isGroup) if err != nil { return nil, err } if !isGroup { for _, group := range groups { - ps, err := cache.policyDBGet(store.getUsersSysType(), group, true) + ps, _, err := cache.policyDBGet(store.getUsersSysType(), group, true) if err != nil { return nil, err } @@ -755,7 +758,7 @@ func (store *IAMStoreSys) GetGroupDescription(group string) (gd madmin.GroupDesc cache := store.rlock() defer store.runlock() - ps, err := cache.policyDBGet(store.getUsersSysType(), group, true) + ps, updatedAt, err := cache.policyDBGet(store.getUsersSysType(), group, true) if err != nil { return gd, err } @@ -764,8 +767,9 @@ func (store *IAMStoreSys) GetGroupDescription(group string) (gd madmin.GroupDesc if store.getUsersSysType() != MinIOUsersSysType { return madmin.GroupDesc{ - Name: group, - Policy: policy, + Name: group, + Policy: policy, + UpdatedAt: updatedAt, }, nil } @@ -775,10 +779,11 @@ func (store *IAMStoreSys) GetGroupDescription(group string) (gd madmin.GroupDesc } return madmin.GroupDesc{ - Name: group, - Status: gi.Status, - Members: gi.Members, - Policy: policy, + Name: group, + Status: gi.Status, + Members: gi.Members, + Policy: policy, + UpdatedAt: gi.UpdatedAt, }, nil } @@ -1082,6 +1087,33 @@ func (store *IAMStoreSys) ListPolicies(ctx context.Context, bucketName string) ( return ret, nil } +// ListPolicyDocs - fetches all policy docs from storage and updates cache as well. 
+// If bucketName is non-empty, returns policy docs matching the bucket. +func (store *IAMStoreSys) ListPolicyDocs(ctx context.Context, bucketName string) (map[string]PolicyDoc, error) { + cache := store.lock() + defer store.unlock() + + m := map[string]PolicyDoc{} + err := store.loadPolicyDocs(ctx, m) + if err != nil { + return nil, err + } + + // Sets default canned policies + setDefaultCannedPolicies(m) + + cache.iamPolicyDocsMap = m + + ret := map[string]PolicyDoc{} + for k, v := range m { + if bucketName == "" || v.Policy.MatchResource(bucketName) { + ret[k] = v + } + } + + return ret, nil +} + // helper function - does not take locks. func filterPolicies(cache *iamCache, policyName string, bucketName string) (string, iampolicy.Policy) { var policies []string @@ -1177,7 +1209,8 @@ func (store *IAMStoreSys) GetUsers() map[string]madmin.UserInfo { } return madmin.AccountDisabled }(), - MemberOf: cache.iamUserGroupMemberships[k].ToSlice(), + MemberOf: cache.iamUserGroupMemberships[k].ToSlice(), + UpdatedAt: cache.iamUserPolicyMap[k].UpdatedAt, } } @@ -1222,6 +1255,7 @@ func (store *IAMStoreSys) GetUserInfo(name string) (u madmin.UserInfo, err error return madmin.UserInfo{ PolicyName: mappedPolicy.Policies, MemberOf: groups, + UpdatedAt: mappedPolicy.UpdatedAt, }, nil } @@ -1242,7 +1276,8 @@ func (store *IAMStoreSys) GetUserInfo(name string) (u madmin.UserInfo, err error } return madmin.AccountDisabled }(), - MemberOf: cache.iamUserGroupMemberships[name].ToSlice(), + MemberOf: cache.iamUserGroupMemberships[name].ToSlice(), + UpdatedAt: cache.iamUserPolicyMap[name].UpdatedAt, }, nil } diff --git a/cmd/iam.go b/cmd/iam.go index cedd90206..f0bea5b46 100644 --- a/cmd/iam.go +++ b/cmd/iam.go @@ -546,6 +546,20 @@ func (sys *IAMSys) ListPolicies(ctx context.Context, bucketName string) (map[str } } +// ListPolicyDocs - lists all canned policy docs. +func (sys *IAMSys) ListPolicyDocs(ctx context.Context, bucketName string) (map[string]PolicyDoc, error) { + if !sys.Initialized() { + return nil, errServerNotInitialized + } + + select { + case <-sys.configLoaded: + return sys.store.ListPolicyDocs(ctx, bucketName) + case <-ctx.Done(): + return nil, ctx.Err() + } +} + // SetPolicy - sets a new named policy. func (sys *IAMSys) SetPolicy(ctx context.Context, policyName string, p iampolicy.Policy) error { if !sys.Initialized() { diff --git a/cmd/site-replication.go b/cmd/site-replication.go index 62695cf93..c5addb40f 100644 --- a/cmd/site-replication.go +++ b/cmd/site-replication.go @@ -191,6 +191,8 @@ func (c *SiteReplicationSys) Init(ctx context.Context, objAPI ObjectLayer) error logger.Info("Cluster replication initialized") } + go c.startHealRoutine(ctx, objAPI) + return err } @@ -825,7 +827,7 @@ func (c *SiteReplicationSys) PeerBucketConfigureReplHandler(ctx context.Context, // Though we do not check if the rule already exists, this is // not a problem as we are always using the same replication // rule ID - if the rule already exists, it is just replaced. - replicationConfigS, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket) + replicationConfigS, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket) if err != nil { _, ok := err.(BucketReplicationConfigNotFound) if !ok { @@ -1379,7 +1381,7 @@ func (c *SiteReplicationSys) syncToAllPeers(ctx context.Context) error { // MinIO does not store bucket location - so we just check if // object locking is enabled. 
- lockConfig, err := globalBucketMetadataSys.GetObjectLockConfig(bucket) + lockConfig, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket) if err != nil { if _, ok := err.(BucketObjectLockConfigNotFound); !ok { return errSRBackendIssue(err) @@ -1422,7 +1424,7 @@ func (c *SiteReplicationSys) syncToAllPeers(ctx context.Context) error { } // Replicate bucket tags if present. - tags, err := globalBucketMetadataSys.GetTaggingConfig(bucket) + tags, _, err := globalBucketMetadataSys.GetTaggingConfig(bucket) found = true if _, ok := err.(BucketTaggingNotFound); ok { found = false @@ -1446,7 +1448,7 @@ func (c *SiteReplicationSys) syncToAllPeers(ctx context.Context) error { } // Replicate object-lock config if present. - objLockCfg, err := globalBucketMetadataSys.GetObjectLockConfig(bucket) + objLockCfg, _, err := globalBucketMetadataSys.GetObjectLockConfig(bucket) found = true if _, ok := err.(BucketObjectLockConfigNotFound); ok { found = false @@ -1470,7 +1472,7 @@ func (c *SiteReplicationSys) syncToAllPeers(ctx context.Context) error { } // Replicate existing bucket bucket encryption settings - sseConfig, err := globalBucketMetadataSys.GetSSEConfig(bucket) + sseConfig, _, err := globalBucketMetadataSys.GetSSEConfig(bucket) found = true if _, ok := err.(BucketSSEConfigNotFound); ok { found = false @@ -1493,7 +1495,7 @@ func (c *SiteReplicationSys) syncToAllPeers(ctx context.Context) error { } } - quotaConfig, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket) + quotaConfig, _, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket) found = true if _, ok := err.(BucketQuotaConfigNotFound); ok { found = false @@ -2002,7 +2004,7 @@ func (c *SiteReplicationSys) RemoveRemoteTargetsForEndpoint(ctx context.Context, } buckets, err := objectAPI.ListBuckets(ctx) for _, b := range buckets { - config, err := globalBucketMetadataSys.GetReplicationConfig(ctx, b.Name) + config, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, b.Name) if err != nil { return err } @@ -2090,16 +2092,11 @@ type srBucketMetaInfo struct { } type srPolicy struct { - policy json.RawMessage + madmin.SRIAMPolicy DeploymentID string } -type srUserPolicyMapping struct { - madmin.SRPolicyMapping - DeploymentID string -} - -type srGroupPolicyMapping struct { +type srPolicyMapping struct { madmin.SRPolicyMapping DeploymentID string } @@ -2116,6 +2113,76 @@ type srGroupDesc struct { // SiteReplicationStatus returns the site replication status across clusters participating in site replication. 
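SiteReplicationStatus is now a thin wrapper: the unexported siteReplicationStatus (added further below) gathers full per-site detail, and the wrapper copies an entry into the public madmin result only when something is out of sync, or when the caller asked for that entity class explicitly. For buckets the filter amounts to the predicate below; this is a hedged restatement of the wrapper's condition, not a helper that exists in the patch:

	// needsBucketEntry reports whether a bucket's per-site summary should be
	// included in the public status: any config drift, or an explicit
	// bucket-entity query.
	func needsBucketEntry(st srBucketStatsSummary, opts madmin.SRStatusOptions) bool {
		return st.TagMismatch ||
			st.OLockConfigMismatch ||
			st.SSEConfigMismatch ||
			st.PolicyMismatch ||
			st.ReplicationCfgMismatch ||
			st.QuotaCfgMismatch ||
			opts.Entity == madmin.SRBucketEntity
	}
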
func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI ObjectLayer, opts madmin.SRStatusOptions) (info madmin.SRStatusInfo, err error) { + sinfo, err := c.siteReplicationStatus(ctx, objAPI, opts) + if err != nil { + return info, err + } + info = madmin.SRStatusInfo{ + Enabled: sinfo.Enabled, + MaxBuckets: sinfo.MaxBuckets, + MaxUsers: sinfo.MaxUsers, + MaxGroups: sinfo.MaxGroups, + MaxPolicies: sinfo.MaxPolicies, + Sites: sinfo.Sites, + StatsSummary: sinfo.StatsSummary, + } + info.BucketStats = make(map[string]map[string]madmin.SRBucketStatsSummary, len(sinfo.Sites)) + info.PolicyStats = make(map[string]map[string]madmin.SRPolicyStatsSummary) + info.UserStats = make(map[string]map[string]madmin.SRUserStatsSummary) + info.GroupStats = make(map[string]map[string]madmin.SRGroupStatsSummary) + numSites := len(info.Sites) + for b, stat := range sinfo.BucketStats { + for dID, st := range stat { + if st.TagMismatch || + st.OLockConfigMismatch || + st.SSEConfigMismatch || + st.PolicyMismatch || + st.ReplicationCfgMismatch || + st.QuotaCfgMismatch || + opts.Entity == madmin.SRBucketEntity { + if _, ok := info.BucketStats[b]; !ok { + info.BucketStats[b] = make(map[string]madmin.SRBucketStatsSummary, numSites) + } + info.BucketStats[b][dID] = st.SRBucketStatsSummary + } + } + } + for u, stat := range sinfo.UserStats { + for dID, st := range stat { + if st.PolicyMismatch || st.UserInfoMismatch || opts.Entity == madmin.SRUserEntity { + if _, ok := info.UserStats[u]; !ok { + info.UserStats[u] = make(map[string]madmin.SRUserStatsSummary, numSites) + } + info.UserStats[u][dID] = st.SRUserStatsSummary + } + } + } + for g, stat := range sinfo.GroupStats { + for dID, st := range stat { + if st.PolicyMismatch || st.GroupDescMismatch || opts.Entity == madmin.SRGroupEntity { + if _, ok := info.GroupStats[g]; !ok { + info.GroupStats[g] = make(map[string]madmin.SRGroupStatsSummary, numSites) + } + info.GroupStats[g][dID] = st.SRGroupStatsSummary + } + } + } + for p, stat := range sinfo.PolicyStats { + for dID, st := range stat { + if st.PolicyMismatch || opts.Entity == madmin.SRPolicyEntity { + if _, ok := info.PolicyStats[p]; !ok { + info.PolicyStats[p] = make(map[string]madmin.SRPolicyStatsSummary, numSites) + } + info.PolicyStats[p][dID] = st.SRPolicyStatsSummary + } + } + } + + return +} + +// siteReplicationStatus returns the site replication status across clusters participating in site replication. 
+func (c *SiteReplicationSys) siteReplicationStatus(ctx context.Context, objAPI ObjectLayer, opts madmin.SRStatusOptions) (info srStatusInfo, err error) { c.RLock() defer c.RUnlock() if !c.enabled { @@ -2123,19 +2190,19 @@ func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI O } sris := make([]madmin.SRInfo, len(c.state.Peers)) - idxMap := make(map[string]int, len(c.state.Peers)) + depIdx := make(map[string]int, len(c.state.Peers)) var depIDs []string i := 0 for d := range c.state.Peers { depIDs = append(depIDs, d) - idxMap[d] = i + depIdx[d] = i i++ } metaInfoConcErr := c.concDo( func() error { srInfo, err := c.SiteReplicationMetaInfo(ctx, objAPI, opts) - k := idxMap[globalDeploymentID] + k := depIdx[globalDeploymentID] sris[k] = srInfo return err }, @@ -2145,7 +2212,7 @@ func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI O return err } srInfo, err := admClient.SRMetaInfo(ctx, opts) - k := idxMap[deploymentID] + k := depIdx[deploymentID] sris[k] = srInfo return err }, @@ -2162,9 +2229,7 @@ func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI O } var maxBuckets int - depIdxes := make(map[string]int) - for i, sri := range sris { - depIdxes[sri.DeploymentID] = i + for _, sri := range sris { if len(sri.Buckets) > maxBuckets { maxBuckets = len(sri.Buckets) } @@ -2172,142 +2237,167 @@ func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI O // mapping b/w entity and entity config across sites bucketStats := make(map[string][]srBucketMetaInfo) policyStats := make(map[string][]srPolicy) - userPolicyStats := make(map[string][]srUserPolicyMapping) - groupPolicyStats := make(map[string][]srGroupPolicyMapping) + userPolicyStats := make(map[string][]srPolicyMapping) + groupPolicyStats := make(map[string][]srPolicyMapping) userInfoStats := make(map[string][]srUserInfo) groupDescStats := make(map[string][]srGroupDesc) numSites := len(sris) + allBuckets := set.NewStringSet() // across sites + allUsers := set.NewStringSet() + allUserWPolicies := set.NewStringSet() + allGroups := set.NewStringSet() + allGroupWPolicies := set.NewStringSet() + + allPolicies := set.NewStringSet() for _, sri := range sris { - for b, si := range sri.Buckets { - if _, ok := bucketStats[si.Bucket]; !ok { - bucketStats[b] = make([]srBucketMetaInfo, 0, numSites) - } - bucketStats[b] = append(bucketStats[b], srBucketMetaInfo{SRBucketInfo: si, DeploymentID: sri.DeploymentID}) + for b := range sri.Buckets { + allBuckets.Add(b) } - for pname, policy := range sri.Policies { + for u := range sri.UserInfoMap { + allUsers.Add(u) + } + for g := range sri.GroupDescMap { + allGroups.Add(g) + } + for p := range sri.Policies { + allPolicies.Add(p) + } + for u := range sri.UserPolicies { + allUserWPolicies.Add(u) + } + for g := range sri.GroupPolicies { + allGroupWPolicies.Add(g) + } + } + + for i, sri := range sris { + for b := range allBuckets { + if _, ok := bucketStats[b]; !ok { + bucketStats[b] = make([]srBucketMetaInfo, numSites) + } + si, ok := sri.Buckets[b] + if !ok { + si = madmin.SRBucketInfo{Bucket: b} + } + bucketStats[b][i] = srBucketMetaInfo{SRBucketInfo: si, DeploymentID: sri.DeploymentID} + } + + for pname := range allPolicies { if _, ok := policyStats[pname]; !ok { - policyStats[pname] = make([]srPolicy, 0, numSites) + policyStats[pname] = make([]srPolicy, numSites) } - policyStats[pname] = append(policyStats[pname], srPolicy{policy: policy, DeploymentID: sri.DeploymentID}) + + // if pname is not present in the map, the zero 
value + // will be returned. + pi := sri.Policies[pname] + policyStats[pname][i] = srPolicy{SRIAMPolicy: pi, DeploymentID: sri.DeploymentID} } - for user, policy := range sri.UserPolicies { + for user := range allUserWPolicies { if _, ok := userPolicyStats[user]; !ok { - userPolicyStats[user] = make([]srUserPolicyMapping, 0, numSites) + userPolicyStats[user] = make([]srPolicyMapping, numSites) } - userPolicyStats[user] = append(userPolicyStats[user], srUserPolicyMapping{SRPolicyMapping: policy, DeploymentID: sri.DeploymentID}) + up := sri.UserPolicies[user] + userPolicyStats[user][i] = srPolicyMapping{SRPolicyMapping: up, DeploymentID: sri.DeploymentID} } - for group, policy := range sri.GroupPolicies { + for group := range allGroupWPolicies { if _, ok := groupPolicyStats[group]; !ok { - groupPolicyStats[group] = make([]srGroupPolicyMapping, 0, numSites) + groupPolicyStats[group] = make([]srPolicyMapping, numSites) } - groupPolicyStats[group] = append(groupPolicyStats[group], srGroupPolicyMapping{SRPolicyMapping: policy, DeploymentID: sri.DeploymentID}) + up := sri.GroupPolicies[group] + groupPolicyStats[group][i] = srPolicyMapping{SRPolicyMapping: up, DeploymentID: sri.DeploymentID} } - for u, ui := range sri.UserInfoMap { + for u := range allUsers { if _, ok := userInfoStats[u]; !ok { - userInfoStats[u] = make([]srUserInfo, 0, numSites) + userInfoStats[u] = make([]srUserInfo, numSites) } - userInfoStats[u] = append(userInfoStats[u], srUserInfo{UserInfo: ui, DeploymentID: sri.DeploymentID}) + ui := sri.UserInfoMap[u] + userInfoStats[u][i] = srUserInfo{UserInfo: ui, DeploymentID: sri.DeploymentID} } - for g, gd := range sri.GroupDescMap { + for g := range allGroups { if _, ok := groupDescStats[g]; !ok { - groupDescStats[g] = make([]srGroupDesc, 0, numSites) + groupDescStats[g] = make([]srGroupDesc, numSites) } - groupDescStats[g] = append(groupDescStats[g], srGroupDesc{GroupDesc: gd, DeploymentID: sri.DeploymentID}) + gd := sri.GroupDescMap[g] + groupDescStats[g][i] = srGroupDesc{GroupDesc: gd, DeploymentID: sri.DeploymentID} } } info.StatsSummary = make(map[string]madmin.SRSiteSummary, len(c.state.Peers)) - info.BucketStats = make(map[string]map[string]madmin.SRBucketStatsSummary) - info.PolicyStats = make(map[string]map[string]madmin.SRPolicyStatsSummary) - info.UserStats = make(map[string]map[string]madmin.SRUserStatsSummary) - info.GroupStats = make(map[string]map[string]madmin.SRGroupStatsSummary) + info.BucketStats = make(map[string]map[string]srBucketStatsSummary) + info.PolicyStats = make(map[string]map[string]srPolicyStatsSummary) + info.UserStats = make(map[string]map[string]srUserStatsSummary) + info.GroupStats = make(map[string]map[string]srGroupStatsSummary) // collect user policy mapping replication status across sites if opts.Users || opts.Entity == madmin.SRUserEntity { for u, pslc := range userPolicyStats { - policySet := set.NewStringSet() + if len(info.UserStats[u]) == 0 { + info.UserStats[u] = make(map[string]srUserStatsSummary) + } + var policyMappings []madmin.SRPolicyMapping uPolicyCount := 0 for _, ps := range pslc { - policyBytes, err := json.Marshal(ps.SRPolicyMapping) - if err != nil { - continue - } + policyMappings = append(policyMappings, ps.SRPolicyMapping) uPolicyCount++ sum := info.StatsSummary[ps.DeploymentID] sum.TotalUserPolicyMappingCount++ info.StatsSummary[ps.DeploymentID] = sum - - if policyStr := string(policyBytes); !policySet.Contains(policyStr) { - policySet.Add(policyStr) - } } - userPolicyMismatch := !isReplicated(uPolicyCount, numSites, 
policySet) - switch { - case userPolicyMismatch, opts.Entity == madmin.SRUserEntity: - for _, ps := range pslc { - dID := depIdxes[ps.DeploymentID] - _, hasUser := sris[dID].UserPolicies[u] - if len(info.UserStats[u]) == 0 { - info.UserStats[u] = make(map[string]madmin.SRUserStatsSummary) - } - info.UserStats[u][ps.DeploymentID] = madmin.SRUserStatsSummary{ + userPolicyMismatch := !isPolicyMappingReplicated(uPolicyCount, numSites, policyMappings) + for _, ps := range pslc { + dID := depIdx[ps.DeploymentID] + _, hasUser := sris[dID].UserPolicies[u] + info.UserStats[u][ps.DeploymentID] = srUserStatsSummary{ + SRUserStatsSummary: madmin.SRUserStatsSummary{ PolicyMismatch: userPolicyMismatch, HasUser: hasUser, HasPolicyMapping: ps.Policy != "", - } + }, + userPolicy: ps, } - default: - // no mismatch - for _, s := range pslc { - sum := info.StatsSummary[s.DeploymentID] - if !s.IsGroup { + if !userPolicyMismatch || opts.Entity != madmin.SRUserEntity { + sum := info.StatsSummary[ps.DeploymentID] + if !ps.IsGroup { sum.ReplicatedUserPolicyMappings++ } - info.StatsSummary[s.DeploymentID] = sum + info.StatsSummary[ps.DeploymentID] = sum } } } + // collect user info replication status across sites for u, pslc := range userInfoStats { - uiSet := set.NewStringSet() + var uiSlc []madmin.UserInfo userCount := 0 for _, ps := range pslc { - uiBytes, err := json.Marshal(ps.UserInfo) - if err != nil { - continue - } + uiSlc = append(uiSlc, ps.UserInfo) userCount++ sum := info.StatsSummary[ps.DeploymentID] sum.TotalUsersCount++ info.StatsSummary[ps.DeploymentID] = sum - if uiStr := string(uiBytes); !uiSet.Contains(uiStr) { - uiSet.Add(uiStr) - } } - userInfoMismatch := !isReplicated(userCount, numSites, uiSet) - switch { - case userInfoMismatch, opts.Entity == madmin.SRUserEntity: - for _, ps := range pslc { - dID := depIdxes[ps.DeploymentID] - _, hasUser := sris[dID].UserInfoMap[u] - if len(info.UserStats[u]) == 0 { - info.UserStats[u] = make(map[string]madmin.SRUserStatsSummary) - } - umis, ok := info.UserStats[u][ps.DeploymentID] - if !ok { - umis = madmin.SRUserStatsSummary{ - HasUser: hasUser, - } - } - umis.UserInfoMismatch = userInfoMismatch - info.UserStats[u][ps.DeploymentID] = umis + userInfoMismatch := !isUserInfoReplicated(userCount, numSites, uiSlc) + for _, ps := range pslc { + dID := depIdx[ps.DeploymentID] + _, hasUser := sris[dID].UserInfoMap[u] + if len(info.UserStats[u]) == 0 { + info.UserStats[u] = make(map[string]srUserStatsSummary) } - default: - // no mismatch - for _, s := range pslc { - sum := info.StatsSummary[s.DeploymentID] + umis, ok := info.UserStats[u][ps.DeploymentID] + if !ok { + umis = srUserStatsSummary{ + SRUserStatsSummary: madmin.SRUserStatsSummary{ + HasUser: hasUser, + }, + } + } + umis.UserInfoMismatch = userInfoMismatch + umis.userInfo = ps + info.UserStats[u][ps.DeploymentID] = umis + if !userInfoMismatch || opts.Entity != madmin.SRUserEntity { + sum := info.StatsSummary[ps.DeploymentID] sum.ReplicatedUsers++ - info.StatsSummary[s.DeploymentID] = sum + info.StatsSummary[ps.DeploymentID] = sum } } } @@ -2315,90 +2405,73 @@ func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI O if opts.Groups || opts.Entity == madmin.SRGroupEntity { // collect group policy mapping replication status across sites for g, pslc := range groupPolicyStats { - policySet := set.NewStringSet() + var policyMappings []madmin.SRPolicyMapping gPolicyCount := 0 for _, ps := range pslc { - policyBytes, err := json.Marshal(ps.SRPolicyMapping) - if err != nil { - continue - } 
+ policyMappings = append(policyMappings, ps.SRPolicyMapping) gPolicyCount++ - if policyStr := string(policyBytes); !policySet.Contains(policyStr) { - policySet.Add(policyStr) - } sum := info.StatsSummary[ps.DeploymentID] sum.TotalGroupPolicyMappingCount++ info.StatsSummary[ps.DeploymentID] = sum } - groupPolicyMismatch := !isReplicated(gPolicyCount, numSites, policySet) - - switch { - case groupPolicyMismatch, opts.Entity == madmin.SRGroupEntity: - for _, ps := range pslc { - dID := depIdxes[ps.DeploymentID] - _, hasGroup := sris[dID].GroupPolicies[g] - if len(info.GroupStats[g]) == 0 { - info.GroupStats[g] = make(map[string]madmin.SRGroupStatsSummary) - } - info.GroupStats[g][ps.DeploymentID] = madmin.SRGroupStatsSummary{ + groupPolicyMismatch := !isPolicyMappingReplicated(gPolicyCount, numSites, policyMappings) + if len(info.GroupStats[g]) == 0 { + info.GroupStats[g] = make(map[string]srGroupStatsSummary) + } + for _, ps := range pslc { + dID := depIdx[ps.DeploymentID] + _, hasGroup := sris[dID].GroupPolicies[g] + info.GroupStats[g][ps.DeploymentID] = srGroupStatsSummary{ + SRGroupStatsSummary: madmin.SRGroupStatsSummary{ PolicyMismatch: groupPolicyMismatch, HasGroup: hasGroup, HasPolicyMapping: ps.Policy != "", - } + DeploymentID: ps.DeploymentID, + }, + groupPolicy: ps, } - default: - // no mismatch - for _, s := range pslc { - sum := info.StatsSummary[s.DeploymentID] + if !groupPolicyMismatch && opts.Entity != madmin.SRGroupEntity { + sum := info.StatsSummary[ps.DeploymentID] sum.ReplicatedGroupPolicyMappings++ - info.StatsSummary[s.DeploymentID] = sum + info.StatsSummary[ps.DeploymentID] = sum } + } } // collect group desc replication status across sites for g, pslc := range groupDescStats { - gdSet := set.NewStringSet() + var gds []madmin.GroupDesc groupCount := 0 for _, ps := range pslc { - gdBytes, err := json.Marshal(ps.GroupDesc) - if err != nil { - continue - } groupCount++ sum := info.StatsSummary[ps.DeploymentID] sum.TotalGroupsCount++ info.StatsSummary[ps.DeploymentID] = sum - - if gdStr := string(gdBytes); !gdSet.Contains(gdStr) { - gdSet.Add(gdStr) - } + gds = append(gds, ps.GroupDesc) } - gdMismatch := !isReplicated(groupCount, numSites, gdSet) - switch { - case gdMismatch, opts.Entity == madmin.SRGroupEntity: - for _, ps := range pslc { - dID := depIdxes[ps.DeploymentID] - _, hasGroup := sris[dID].GroupDescMap[g] - if len(info.GroupStats[g]) == 0 { - info.GroupStats[g] = make(map[string]madmin.SRGroupStatsSummary) - } - gmis, ok := info.GroupStats[g][ps.DeploymentID] - if !ok { - gmis = madmin.SRGroupStatsSummary{ - HasGroup: hasGroup, - } - } else { - gmis.GroupDescMismatch = gdMismatch - } - info.GroupStats[g][ps.DeploymentID] = gmis + gdMismatch := !isGroupDescReplicated(groupCount, numSites, gds) + for _, ps := range pslc { + dID := depIdx[ps.DeploymentID] + _, hasGroup := sris[dID].GroupDescMap[g] + if len(info.GroupStats[g]) == 0 { + info.GroupStats[g] = make(map[string]srGroupStatsSummary) } - default: - // no mismatch - for _, s := range pslc { - sum := info.StatsSummary[s.DeploymentID] + gmis, ok := info.GroupStats[g][ps.DeploymentID] + if !ok { + gmis = srGroupStatsSummary{ + SRGroupStatsSummary: madmin.SRGroupStatsSummary{ + HasGroup: hasGroup, + }, + } + } + gmis.GroupDescMismatch = gdMismatch + gmis.groupDesc = ps + info.GroupStats[g][ps.DeploymentID] = gmis + if !gdMismatch && opts.Entity != madmin.SRGroupEntity { + sum := info.StatsSummary[ps.DeploymentID] sum.ReplicatedGroups++ - info.StatsSummary[s.DeploymentID] = sum + 
info.StatsSummary[ps.DeploymentID] = sum } } } @@ -2409,7 +2482,7 @@ func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI O var policies []*iampolicy.Policy uPolicyCount := 0 for _, ps := range pslc { - plcy, err := iampolicy.ParseConfig(bytes.NewReader(ps.policy)) + plcy, err := iampolicy.ParseConfig(bytes.NewReader([]byte(ps.SRIAMPolicy.Policy))) if err != nil { continue } @@ -2419,30 +2492,29 @@ func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI O sum.TotalIAMPoliciesCount++ info.StatsSummary[ps.DeploymentID] = sum } + if len(info.PolicyStats[p]) == 0 { + info.PolicyStats[p] = make(map[string]srPolicyStatsSummary) + } policyMismatch := !isIAMPolicyReplicated(uPolicyCount, numSites, policies) - switch { - case policyMismatch, opts.Entity == madmin.SRPolicyEntity: - for _, ps := range pslc { - dID := depIdxes[ps.DeploymentID] - _, hasPolicy := sris[dID].Policies[p] - if len(info.PolicyStats[p]) == 0 { - info.PolicyStats[p] = make(map[string]madmin.SRPolicyStatsSummary) - } - info.PolicyStats[p][ps.DeploymentID] = madmin.SRPolicyStatsSummary{ + for _, ps := range pslc { + dID := depIdx[ps.DeploymentID] + _, hasPolicy := sris[dID].Policies[p] + info.PolicyStats[p][ps.DeploymentID] = srPolicyStatsSummary{ + SRPolicyStatsSummary: madmin.SRPolicyStatsSummary{ PolicyMismatch: policyMismatch, HasPolicy: hasPolicy, - } + }, + policy: ps, } - default: - // no mismatch - for _, s := range pslc { - sum := info.StatsSummary[s.DeploymentID] + switch { + case policyMismatch, opts.Entity == madmin.SRPolicyEntity: + default: + sum := info.StatsSummary[ps.DeploymentID] if !policyMismatch { sum.ReplicatedIAMPolicies++ } - info.StatsSummary[s.DeploymentID] = sum + info.StatsSummary[ps.DeploymentID] = sum } - } } } @@ -2451,12 +2523,12 @@ func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI O for b, slc := range bucketStats { tagSet := set.NewStringSet() olockConfigSet := set.NewStringSet() - var policies []*bktpolicy.Policy - var replCfgs []*sreplication.Config - var quotaCfgs []*madmin.BucketQuota + policies := make([]*bktpolicy.Policy, numSites) + replCfgs := make([]*sreplication.Config, numSites) + quotaCfgs := make([]*madmin.BucketQuota, numSites) sseCfgSet := set.NewStringSet() var tagCount, olockCfgCount, sseCfgCount int - for _, s := range slc { + for i, s := range slc { if s.ReplicationConfig != nil { cfgBytes, err := base64.StdEncoding.DecodeString(*s.ReplicationConfig) if err != nil { @@ -2466,7 +2538,7 @@ func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI O if err != nil { continue } - replCfgs = append(replCfgs, cfg) + replCfgs[i] = cfg } if s.QuotaConfig != nil { cfgBytes, err := base64.StdEncoding.DecodeString(*s.QuotaConfig) @@ -2477,7 +2549,7 @@ func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI O if err != nil { continue } - quotaCfgs = append(quotaCfgs, cfg) + quotaCfgs[i] = cfg } if s.Tags != nil { tagBytes, err := base64.StdEncoding.DecodeString(*s.Tags) @@ -2494,17 +2566,26 @@ func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI O if err != nil { continue } - policies = append(policies, plcy) + policies[i] = plcy } if s.ObjectLockConfig != nil { olockCfgCount++ - if !olockConfigSet.Contains(*s.ObjectLockConfig) { - olockConfigSet.Add(*s.ObjectLockConfig) + configData, err := base64.StdEncoding.DecodeString(*s.ObjectLockConfig) + if err != nil { + continue + } + + if !olockConfigSet.Contains(string(configData)) { + 
olockConfigSet.Add(string(configData)) } } if s.SSEConfig != nil { - if !sseCfgSet.Contains(*s.SSEConfig) { - sseCfgSet.Add(*s.SSEConfig) + configData, err := base64.StdEncoding.DecodeString(*s.SSEConfig) + if err != nil { + continue + } + if !sseCfgSet.Contains(string(configData)) { + sseCfgSet.Add(string(configData)) } sseCfgCount++ } @@ -2537,52 +2618,59 @@ func (c *SiteReplicationSys) SiteReplicationStatus(ctx context.Context, objAPI O policyMismatch := !isBktPolicyReplicated(numSites, policies) replCfgMismatch := !isBktReplCfgReplicated(numSites, replCfgs) quotaCfgMismatch := !isBktQuotaCfgReplicated(numSites, quotaCfgs) - switch { - case tagMismatch, olockCfgMismatch, sseCfgMismatch, policyMismatch, replCfgMismatch, quotaCfgMismatch, opts.Entity == madmin.SRBucketEntity: - info.BucketStats[b] = make(map[string]madmin.SRBucketStatsSummary, numSites) - for i, s := range slc { - dID := depIdxes[s.DeploymentID] - _, hasBucket := sris[dID].Buckets[s.Bucket] - - info.BucketStats[b][s.DeploymentID] = madmin.SRBucketStatsSummary{ - DeploymentID: s.DeploymentID, - HasBucket: hasBucket, - TagMismatch: tagMismatch, - OLockConfigMismatch: olockCfgMismatch, - SSEConfigMismatch: sseCfgMismatch, - PolicyMismatch: policyMismatch, - ReplicationCfgMismatch: replCfgMismatch, - QuotaCfgMismatch: quotaCfgMismatch, - HasReplicationCfg: s.ReplicationConfig != nil, - HasTagsSet: s.Tags != nil, - HasOLockConfigSet: s.ObjectLockConfig != nil, - HasPolicySet: s.Policy != nil, - HasQuotaCfgSet: *quotaCfgs[i] != madmin.BucketQuota{}, - HasSSECfgSet: s.SSEConfig != nil, - } + info.BucketStats[b] = make(map[string]srBucketStatsSummary, numSites) + for i, s := range slc { + dIdx := depIdx[s.DeploymentID] + var hasBucket bool + if bi, ok := sris[dIdx].Buckets[s.Bucket]; ok { + hasBucket = !bi.CreatedAt.Equal(timeSentinel) } - fallthrough - default: - // no mismatch - for _, s := range slc { - sum := info.StatsSummary[s.DeploymentID] - if !olockCfgMismatch && olockCfgCount == numSites { - sum.ReplicatedLockConfig++ - } - if !sseCfgMismatch && sseCfgCount == numSites { - sum.ReplicatedSSEConfig++ - } - if !policyMismatch && len(policies) == numSites { - sum.ReplicatedBucketPolicies++ - } - if !tagMismatch && tagCount == numSites { - sum.ReplicatedTags++ - } - info.StatsSummary[s.DeploymentID] = sum + quotaCfgSet := hasBucket && *quotaCfgs[i] != madmin.BucketQuota{} + ss := madmin.SRBucketStatsSummary{ + DeploymentID: s.DeploymentID, + HasBucket: hasBucket, + TagMismatch: tagMismatch, + OLockConfigMismatch: olockCfgMismatch, + SSEConfigMismatch: sseCfgMismatch, + PolicyMismatch: policyMismatch, + ReplicationCfgMismatch: replCfgMismatch, + QuotaCfgMismatch: quotaCfgMismatch, + HasReplicationCfg: s.ReplicationConfig != nil, + HasTagsSet: s.Tags != nil, + HasOLockConfigSet: s.ObjectLockConfig != nil, + HasPolicySet: s.Policy != nil, + HasQuotaCfgSet: quotaCfgSet, + HasSSECfgSet: s.SSEConfig != nil, } + var m srBucketMetaInfo + if len(bucketStats[s.Bucket]) > dIdx { + m = bucketStats[s.Bucket][dIdx] + } + info.BucketStats[b][s.DeploymentID] = srBucketStatsSummary{ + SRBucketStatsSummary: ss, + meta: m, + } + } + // no mismatch + for _, s := range slc { + sum := info.StatsSummary[s.DeploymentID] + if !olockCfgMismatch && olockCfgCount == numSites { + sum.ReplicatedLockConfig++ + } + if !sseCfgMismatch && sseCfgCount == numSites { + sum.ReplicatedSSEConfig++ + } + if !policyMismatch && len(policies) == numSites { + sum.ReplicatedBucketPolicies++ + } + if !tagMismatch && tagCount == numSites { + sum.ReplicatedTags++ + } + 
info.StatsSummary[s.DeploymentID] = sum } } } + // maximum buckets users etc seen across sites info.MaxBuckets = len(bucketStats) info.MaxUsers = len(userInfoStats) @@ -2624,8 +2712,74 @@ func isIAMPolicyReplicated(cntReplicated, total int, policies []*iampolicy.Polic return true } +// isPolicyMappingReplicated returns true if count of replicated IAM policy mappings matches total +// number of sites and IAM policy mappings are identical. +func isPolicyMappingReplicated(cntReplicated, total int, policies []madmin.SRPolicyMapping) bool { + if cntReplicated > 0 && cntReplicated != total { + return false + } + // check if policies match between sites + var prev madmin.SRPolicyMapping + for i, p := range policies { + if i == 0 { + prev = p + continue + } + if prev.IsGroup != p.IsGroup || + prev.Policy != p.Policy || + prev.UserOrGroup != p.UserOrGroup { + return false + } + } + return true +} + +func isUserInfoReplicated(cntReplicated, total int, uis []madmin.UserInfo) bool { + if cntReplicated > 0 && cntReplicated != total { + return false + } + // check if policies match between sites + var prev madmin.UserInfo + for i, ui := range uis { + if i == 0 { + prev = ui + continue + } + if !isUserInfoEqual(prev, ui) { + return false + } + } + return true +} + +func isGroupDescReplicated(cntReplicated, total int, gds []madmin.GroupDesc) bool { + if cntReplicated > 0 && cntReplicated != total { + return false + } + // check if policies match between sites + var prev madmin.GroupDesc + for i, gd := range gds { + if i == 0 { + prev = gd + continue + } + if !isGroupDescEqual(prev, gd) { + return false + } + } + return true +} + func isBktQuotaCfgReplicated(total int, quotaCfgs []*madmin.BucketQuota) bool { - if len(quotaCfgs) > 0 && len(quotaCfgs) != total { + numquotaCfgs := 0 + for _, q := range quotaCfgs { + if q == nil { + continue + } + numquotaCfgs++ + } + + if numquotaCfgs > 0 && numquotaCfgs != total { return false } var prev *madmin.BucketQuota @@ -2647,12 +2801,22 @@ func isBktQuotaCfgReplicated(total int, quotaCfgs []*madmin.BucketQuota) bool { // isBktPolicyReplicated returns true if count of replicated bucket policies matches total // number of sites and bucket policies are identical. func isBktPolicyReplicated(total int, policies []*bktpolicy.Policy) bool { - if len(policies) > 0 && len(policies) != total { + numPolicies := 0 + for _, p := range policies { + if p == nil { + continue + } + numPolicies++ + } + if numPolicies > 0 && numPolicies != total { return false } // check if policies match between sites var prev *bktpolicy.Policy for i, p := range policies { + if p == nil { + continue + } if i == 0 { prev = p continue @@ -2667,13 +2831,23 @@ func isBktPolicyReplicated(total int, policies []*bktpolicy.Policy) bool { // isBktReplCfgReplicated returns true if all the sites have same number // of replication rules with all replication features enabled. 
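+ // Nil entries (sites without a replication config) are skipped while comparing;
+ // e.g. with 3 sites and a config on only 2 of them, the bucket is reported as not in sync.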
func isBktReplCfgReplicated(total int, cfgs []*sreplication.Config) bool { - cntReplicated := len(cfgs) - if cntReplicated > 0 && cntReplicated != len(cfgs) { + cntReplicated := 0 + for _, c := range cfgs { + if c == nil { + continue + } + cntReplicated++ + } + + if cntReplicated > 0 && cntReplicated != total { return false } // check if policies match between sites var prev *sreplication.Config for i, c := range cfgs { + if c == nil { + continue + } if i == 0 { prev = c continue @@ -2730,7 +2904,15 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI info.Buckets = make(map[string]madmin.SRBucketInfo, len(buckets)) for _, bucketInfo := range buckets { bucket := bucketInfo.Name - bms := madmin.SRBucketInfo{Bucket: bucket} + createdAt, err := globalBucketMetadataSys.CreatedAt(bucket) + if err != nil { + return info, errSRBackendIssue(err) + } + bms := madmin.SRBucketInfo{ + Bucket: bucket, + CreatedAt: createdAt, + Location: globalSite.Region, + } // Get bucket policy if present. policy, err := globalPolicySys.Get(bucket) found := true @@ -2748,7 +2930,7 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI } // Get bucket tags if present. - tags, err := globalBucketMetadataSys.GetTaggingConfig(bucket) + tags, updatedAt, err := globalBucketMetadataSys.GetTaggingConfig(bucket) found = true if _, ok := err.(BucketTaggingNotFound); ok { found = false @@ -2762,10 +2944,11 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI } tagCfgStr := base64.StdEncoding.EncodeToString(tagBytes) bms.Tags = &tagCfgStr + bms.TagConfigUpdatedAt = updatedAt } // Get object-lock config if present. - objLockCfg, err := globalBucketMetadataSys.GetObjectLockConfig(bucket) + objLockCfg, updatedAt, err := globalBucketMetadataSys.GetObjectLockConfig(bucket) found = true if _, ok := err.(BucketObjectLockConfigNotFound); ok { found = false @@ -2779,10 +2962,11 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI } objLockStr := base64.StdEncoding.EncodeToString(objLockCfgData) bms.ObjectLockConfig = &objLockStr + bms.ObjectLockConfigUpdatedAt = updatedAt } // Get quota config if present - quotaConfig, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket) + quotaConfig, updatedAt, err := globalBucketMetadataSys.GetQuotaConfig(ctx, bucket) found = true if _, ok := err.(BucketQuotaConfigNotFound); ok { found = false @@ -2796,10 +2980,11 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI } quotaConfigStr := base64.StdEncoding.EncodeToString(quotaConfigJSON) bms.QuotaConfig = "aConfigStr + bms.QuotaConfigUpdatedAt = updatedAt } // Get existing bucket bucket encryption settings - sseConfig, err := globalBucketMetadataSys.GetSSEConfig(bucket) + sseConfig, updatedAt, err := globalBucketMetadataSys.GetSSEConfig(bucket) found = true if _, ok := err.(BucketSSEConfigNotFound); ok { found = false @@ -2813,10 +2998,11 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI } sseConfigStr := base64.StdEncoding.EncodeToString(sseConfigData) bms.SSEConfig = &sseConfigStr + bms.SSEConfigUpdatedAt = updatedAt } // Get replication config if present - rcfg, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket) + rcfg, updatedAt, err := globalBucketMetadataSys.GetReplicationConfig(ctx, bucket) found = true if _, ok := err.(BucketReplicationConfigNotFound); ok { found = false @@ -2830,31 +3016,32 @@ func (c *SiteReplicationSys) 
SiteReplicationMetaInfo(ctx context.Context, objAPI } rcfgXMLStr := base64.StdEncoding.EncodeToString(rcfgXML) bms.ReplicationConfig = &rcfgXMLStr + bms.ReplicationConfigUpdatedAt = updatedAt } info.Buckets[bucket] = bms } } if opts.Policies || opts.Entity == madmin.SRPolicyEntity { - var allPolicies map[string]iampolicy.Policy + var allPolicies map[string]PolicyDoc if opts.Entity == madmin.SRPolicyEntity { - if p, err := globalIAMSys.store.GetPolicy(opts.EntityValue); err == nil { - allPolicies = map[string]iampolicy.Policy{opts.EntityValue: p} + if p, err := globalIAMSys.store.GetPolicyDoc(opts.EntityValue); err == nil { + allPolicies = map[string]PolicyDoc{opts.EntityValue: p} } } else { // Replicate IAM policies on local to all peers. - allPolicies, err = globalIAMSys.ListPolicies(ctx, "") + allPolicies, err = globalIAMSys.ListPolicyDocs(ctx, "") if err != nil { return info, errSRBackendIssue(err) } } - info.Policies = make(map[string]json.RawMessage, len(allPolicies)) - for pname, policy := range allPolicies { - policyJSON, err := json.Marshal(policy) + info.Policies = make(map[string]madmin.SRIAMPolicy, len(allPolicies)) + for pname, policyDoc := range allPolicies { + policyJSON, err := json.Marshal(policyDoc.Policy) if err != nil { return info, wrapSRErr(err) } - info.Policies[pname] = json.RawMessage(policyJSON) + info.Policies[pname] = madmin.SRIAMPolicy{Policy: json.RawMessage(policyJSON), UpdatedAt: policyDoc.UpdateDate} } } @@ -2875,13 +3062,13 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI } globalIAMSys.store.runlock() } - info.UserPolicies = make(map[string]madmin.SRPolicyMapping, len(userPolicyMap)) for user, mp := range userPolicyMap { info.UserPolicies[user] = madmin.SRPolicyMapping{ IsGroup: false, UserOrGroup: user, Policy: mp.Policies, + UpdatedAt: mp.UpdatedAt, } } info.UserInfoMap = make(map[string]madmin.UserInfo) @@ -2924,6 +3111,7 @@ func (c *SiteReplicationSys) SiteReplicationMetaInfo(ctx context.Context, objAPI IsGroup: true, UserOrGroup: group, Policy: mp.Policies, + UpdatedAt: mp.UpdatedAt, } } info.GroupDescMap = make(map[string]madmin.GroupDesc) @@ -3058,3 +3246,971 @@ func (c *SiteReplicationSys) PeerEditReq(ctx context.Context, arg madmin.PeerInf } return nil } + +const siteHealTimeInterval = 10 * time.Second + +func (c *SiteReplicationSys) startHealRoutine(ctx context.Context, objAPI ObjectLayer) { + healTimer := time.NewTimer(siteHealTimeInterval) + defer healTimer.Stop() + + for { + select { + case <-healTimer.C: + healTimer.Reset(siteHealTimeInterval) + + c.RLock() + enabled := c.enabled + c.RUnlock() + if !enabled { + continue + } + + c.healIAMSystem(ctx, objAPI) // heal IAM system first + c.healBuckets(ctx, objAPI) // heal buckets subsequently + case <-ctx.Done(): + return + } + } +} + +type srBucketStatsSummary struct { + madmin.SRBucketStatsSummary + meta srBucketMetaInfo +} + +type srPolicyStatsSummary struct { + madmin.SRPolicyStatsSummary + policy srPolicy +} + +type srUserStatsSummary struct { + madmin.SRUserStatsSummary + userInfo srUserInfo + userPolicy srPolicyMapping +} + +type srGroupStatsSummary struct { + madmin.SRGroupStatsSummary + groupDesc srGroupDesc + groupPolicy srPolicyMapping +} + +type srStatusInfo struct { + // SRStatusInfo returns detailed status on site replication status + Enabled bool + MaxBuckets int // maximum buckets seen across sites + MaxUsers int // maximum users seen across sites + MaxGroups int // maximum groups seen across sites + MaxPolicies int // maximum policies across 
sites + Sites map[string]madmin.PeerInfo // deployment->sitename + StatsSummary map[string]madmin.SRSiteSummary // map of deployment id -> site stat + // BucketStats map of bucket to slice of deployment IDs with stats. This is populated only if there are + // mismatches or if a specific bucket's stats are requested + BucketStats map[string]map[string]srBucketStatsSummary + // PolicyStats map of policy to slice of deployment IDs with stats. This is populated only if there are + // mismatches or if a specific bucket's stats are requested + PolicyStats map[string]map[string]srPolicyStatsSummary + // UserStats map of user to slice of deployment IDs with stats. This is populated only if there are + // mismatches or if a specific bucket's stats are requested + UserStats map[string]map[string]srUserStatsSummary + // GroupStats map of group to slice of deployment IDs with stats. This is populated only if there are + // mismatches or if a specific bucket's stats are requested + GroupStats map[string]map[string]srGroupStatsSummary +} + +func (c *SiteReplicationSys) healBuckets(ctx context.Context, objAPI ObjectLayer) error { + info, err := c.siteReplicationStatus(ctx, objAPI, madmin.SRStatusOptions{ + Buckets: true, + }) + if err != nil { + return err + } + + for bucket := range info.BucketStats { + c.healCreateMissingBucket(ctx, objAPI, bucket, info) + c.healOLockConfigMetadata(ctx, objAPI, bucket, info) + c.healSSEMetadata(ctx, objAPI, bucket, info) + c.healBucketReplicationConfig(ctx, objAPI, bucket, info) + c.healBucketPolicies(ctx, objAPI, bucket, info) + c.healTagMetadata(ctx, objAPI, bucket, info) + c.healBucketQuotaConfig(ctx, objAPI, bucket, info) + // Notification and ILM are site specific settings. + } + return nil +} + +func (c *SiteReplicationSys) healTagMetadata(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error { + bs := info.BucketStats[bucket] + + c.RLock() + defer c.RUnlock() + if !c.enabled { + return nil + } + var ( + latestID, latestPeerName string + lastUpdate time.Time + latestTaggingConfig *string + ) + + for dID, ss := range bs { + if lastUpdate.IsZero() { + lastUpdate = ss.meta.TagConfigUpdatedAt + latestID = dID + latestTaggingConfig = ss.meta.Tags + } + // avoid considering just created buckets as latest. 
Perhaps this site + // just joined cluster replication and yet to be sync'd + if ss.meta.CreatedAt.Equal(ss.meta.TagConfigUpdatedAt) { + continue + } + if ss.meta.TagConfigUpdatedAt.After(lastUpdate) { + lastUpdate = ss.meta.TagConfigUpdatedAt + latestID = dID + latestTaggingConfig = ss.meta.Tags + } + } + latestPeerName = info.Sites[latestID].Name + var latestTaggingConfigBytes []byte + var err error + if latestTaggingConfig != nil { + latestTaggingConfigBytes, err = base64.StdEncoding.DecodeString(*latestTaggingConfig) + if err != nil { + return err + } + } + for dID, bStatus := range bs { + if !bStatus.TagMismatch { + continue + } + if isBucketMetadataEqual(latestTaggingConfig, bStatus.meta.Tags) { + continue + } + if dID == globalDeploymentID { + if err := globalBucketMetadataSys.Update(ctx, bucket, bucketTaggingConfig, latestTaggingConfigBytes); err != nil { + logger.LogIf(ctx, fmt.Errorf("Error healing tagging metadata from peer site %s : %w", latestPeerName, err)) + } + continue + } + + admClient, err := c.getAdminClient(ctx, dID) + if err != nil { + return wrapSRErr(err) + } + peerName := info.Sites[dID].Name + err = admClient.SRPeerReplicateBucketMeta(ctx, madmin.SRBucketMeta{ + Type: madmin.SRBucketMetaTypeTags, + Bucket: bucket, + Tags: latestTaggingConfig, + }) + if err != nil { + logger.LogIf(ctx, c.annotatePeerErr(peerName, "SRPeerReplicateBucketMeta", fmt.Errorf("Error healing tagging metadata for peer %s from peer %s : %w", peerName, latestPeerName, err))) + } + } + return nil +} + +func (c *SiteReplicationSys) healBucketPolicies(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error { + bs := info.BucketStats[bucket] + + c.RLock() + defer c.RUnlock() + if !c.enabled { + return nil + } + var ( + latestID, latestPeerName string + lastUpdate time.Time + latestIAMPolicy json.RawMessage + ) + + for dID, ss := range bs { + if lastUpdate.IsZero() { + lastUpdate = ss.meta.PolicyUpdatedAt + latestID = dID + latestIAMPolicy = ss.meta.Policy + } + // avoid considering just created buckets as latest. 
Perhaps this site + // just joined cluster replication and yet to be sync'd + if ss.meta.CreatedAt.Equal(ss.meta.PolicyUpdatedAt) { + continue + } + if ss.meta.PolicyUpdatedAt.After(lastUpdate) { + lastUpdate = ss.meta.PolicyUpdatedAt + latestID = dID + latestIAMPolicy = ss.meta.Policy + } + } + latestPeerName = info.Sites[latestID].Name + for dID, bStatus := range bs { + if !bStatus.PolicyMismatch { + continue + } + if strings.EqualFold(string(latestIAMPolicy), string(bStatus.meta.Policy)) { + continue + } + if dID == globalDeploymentID { + if err := globalBucketMetadataSys.Update(ctx, bucket, bucketPolicyConfig, latestIAMPolicy); err != nil { + logger.LogIf(ctx, fmt.Errorf("Error healing bucket policy metadata from peer site %s : %w", latestPeerName, err)) + } + continue + } + + admClient, err := c.getAdminClient(ctx, dID) + if err != nil { + return wrapSRErr(err) + } + peerName := info.Sites[dID].Name + if err = admClient.SRPeerReplicateBucketMeta(ctx, madmin.SRBucketMeta{ + Type: madmin.SRBucketMetaTypePolicy, + Bucket: bucket, + Policy: latestIAMPolicy, + }); err != nil { + logger.LogIf(ctx, c.annotatePeerErr(peerName, "SRPeerReplicateBucketMeta", fmt.Errorf("Error healing bucket policy metadata for peer %s from peer %s : %w", + peerName, latestPeerName, err))) + } + } + return nil +} + +func (c *SiteReplicationSys) healBucketQuotaConfig(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error { + bs := info.BucketStats[bucket] + + c.RLock() + defer c.RUnlock() + if !c.enabled { + return nil + } + var ( + latestID, latestPeerName string + lastUpdate time.Time + latestQuotaConfig *string + latestCfgJSON []byte + latestQuotaConfigBytes []byte + ) + + for dID, ss := range bs { + if lastUpdate.IsZero() { + lastUpdate = ss.meta.QuotaConfigUpdatedAt + latestID = dID + latestQuotaConfig = ss.meta.QuotaConfig + } + // avoid considering just created buckets as latest. 
Perhaps this site + // just joined cluster replication and yet to be sync'd + if ss.meta.CreatedAt.Equal(ss.meta.QuotaConfigUpdatedAt) { + continue + } + if ss.meta.QuotaConfigUpdatedAt.After(lastUpdate) { + lastUpdate = ss.meta.QuotaConfigUpdatedAt + latestID = dID + latestQuotaConfig = ss.meta.QuotaConfig + } + } + + var err error + if latestQuotaConfig != nil { + latestQuotaConfigBytes, err = base64.StdEncoding.DecodeString(*latestQuotaConfig) + if err != nil { + return err + } + } + + latestPeerName = info.Sites[latestID].Name + for dID, bStatus := range bs { + if !bStatus.QuotaCfgMismatch { + continue + } + if isBucketMetadataEqual(latestQuotaConfig, bStatus.meta.QuotaConfig) { + continue + } + if dID == globalDeploymentID { + if err := globalBucketMetadataSys.Update(ctx, bucket, bucketQuotaConfigFile, latestQuotaConfigBytes); err != nil { + logger.LogIf(ctx, fmt.Errorf("Error healing quota metadata from peer site %s : %s", latestPeerName, err.Error())) + } + continue + } + + admClient, err := c.getAdminClient(ctx, dID) + if err != nil { + return wrapSRErr(err) + } + peerName := info.Sites[dID].Name + + if err = admClient.SRPeerReplicateBucketMeta(ctx, madmin.SRBucketMeta{ + Type: madmin.SRBucketMetaTypeQuotaConfig, + Bucket: bucket, + Quota: latestCfgJSON, + }); err != nil { + logger.LogIf(ctx, c.annotatePeerErr(peerName, "SRPeerReplicateBucketMeta", fmt.Errorf("Error healing quota config metadata for peer %s from peer %s : %s", + peerName, latestPeerName, err.Error()))) + } + } + return nil +} + +func (c *SiteReplicationSys) healSSEMetadata(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error { + c.RLock() + defer c.RUnlock() + if !c.enabled { + return nil + } + var ( + latestID, latestPeerName string + lastUpdate time.Time + latestSSEConfig *string + ) + + bs := info.BucketStats[bucket] + for dID, ss := range bs { + if lastUpdate.IsZero() { + lastUpdate = ss.meta.SSEConfigUpdatedAt + latestID = dID + latestSSEConfig = ss.meta.SSEConfig + } + // avoid considering just created buckets as latest. 
Perhaps this site + // just joined cluster replication and yet to be sync'd + if ss.meta.CreatedAt.Equal(ss.meta.SSEConfigUpdatedAt) { + continue + } + if ss.meta.SSEConfigUpdatedAt.After(lastUpdate) { + lastUpdate = ss.meta.SSEConfigUpdatedAt + latestID = dID + latestSSEConfig = ss.meta.SSEConfig + } + } + + latestPeerName = info.Sites[latestID].Name + var latestSSEConfigBytes []byte + var err error + if latestSSEConfig != nil { + latestSSEConfigBytes, err = base64.StdEncoding.DecodeString(*latestSSEConfig) + if err != nil { + return err + } + } + + for dID, bStatus := range bs { + if !bStatus.SSEConfigMismatch { + continue + } + if isBucketMetadataEqual(latestSSEConfig, bStatus.meta.SSEConfig) { + continue + } + if dID == globalDeploymentID { + if err := globalBucketMetadataSys.Update(ctx, bucket, bucketSSEConfig, latestSSEConfigBytes); err != nil { + logger.LogIf(ctx, fmt.Errorf("Error healing sse metadata from peer site %s : %w", latestPeerName, err)) + } + continue + } + + admClient, err := c.getAdminClient(ctx, dID) + if err != nil { + return wrapSRErr(err) + } + peerName := info.Sites[dID].Name + err = admClient.SRPeerReplicateBucketMeta(ctx, madmin.SRBucketMeta{ + Type: madmin.SRBucketMetaTypeSSEConfig, + Bucket: bucket, + SSEConfig: latestSSEConfig, + }) + if err != nil { + logger.LogIf(ctx, c.annotatePeerErr(peerName, "SRPeerReplicateBucketMeta", fmt.Errorf("Error healing SSE config metadata for peer %s from peer %s : %s", + peerName, latestPeerName, err.Error()))) + } + } + return nil +} + +func (c *SiteReplicationSys) healOLockConfigMetadata(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error { + bs := info.BucketStats[bucket] + + c.RLock() + defer c.RUnlock() + if !c.enabled { + return nil + } + var ( + latestID, latestPeerName string + lastUpdate time.Time + latestObjLockConfig *string + ) + + for dID, ss := range bs { + if lastUpdate.IsZero() { + lastUpdate = ss.meta.ObjectLockConfigUpdatedAt + latestID = dID + latestObjLockConfig = ss.meta.ObjectLockConfig + } + // avoid considering just created buckets as latest. 
Perhaps this site + // just joined cluster replication and yet to be sync'd + if ss.meta.CreatedAt.Equal(ss.meta.ObjectLockConfigUpdatedAt) { + continue + } + if ss.meta.ObjectLockConfig != nil && ss.meta.ObjectLockConfigUpdatedAt.After(lastUpdate) { + lastUpdate = ss.meta.ObjectLockConfigUpdatedAt + latestID = dID + latestObjLockConfig = ss.meta.ObjectLockConfig + } + } + latestPeerName = info.Sites[latestID].Name + var latestObjLockConfigBytes []byte + var err error + if latestObjLockConfig != nil { + latestObjLockConfigBytes, err = base64.StdEncoding.DecodeString(*latestObjLockConfig) + if err != nil { + return err + } + } + + for dID, bStatus := range bs { + if !bStatus.OLockConfigMismatch { + continue + } + if isBucketMetadataEqual(latestObjLockConfig, bStatus.meta.ObjectLockConfig) { + continue + } + if dID == globalDeploymentID { + if err := globalBucketMetadataSys.Update(ctx, bucket, objectLockConfig, latestObjLockConfigBytes); err != nil { + logger.LogIf(ctx, fmt.Errorf("Error healing objectlock config metadata from peer site %s : %w", latestPeerName, err)) + } + continue + } + + admClient, err := c.getAdminClient(ctx, dID) + if err != nil { + return wrapSRErr(err) + } + peerName := info.Sites[dID].Name + err = admClient.SRPeerReplicateBucketMeta(ctx, madmin.SRBucketMeta{ + Type: madmin.SRBucketMetaTypeObjectLockConfig, + Bucket: bucket, + Tags: latestObjLockConfig, + }) + if err != nil { + logger.LogIf(ctx, c.annotatePeerErr(peerName, "SRPeerReplicateBucketMeta", fmt.Errorf("Error healing object lock config metadata for peer %s from peer %s : %w", + peerName, latestPeerName, err))) + } + } + return nil +} + +func (c *SiteReplicationSys) healCreateMissingBucket(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error { + bs := info.BucketStats[bucket] + + c.RLock() + defer c.RUnlock() + if !c.enabled { + return nil + } + + bucketCnt := 0 + var ( + latestID string + lastUpdate time.Time + ) + + var dIDs []string + for dID, ss := range bs { + if ss.HasBucket { + bucketCnt++ + } else { + dIDs = append(dIDs, dID) + } + if lastUpdate.IsZero() { + lastUpdate = ss.meta.CreatedAt + latestID = dID + } + if ss.meta.CreatedAt.After(lastUpdate) { + lastUpdate = ss.meta.CreatedAt + latestID = dID + } + } + latestPeerName := info.Sites[latestID].Name + bStatus := info.BucketStats[bucket][latestID].meta + var opts BucketOptions + optsMap := make(map[string]string) + if bStatus.Location != "" { + optsMap["location"] = bStatus.Location + opts.Location = bStatus.Location + } + + optsMap["versioningEnabled"] = "true" + opts.VersioningEnabled = true + if bStatus.ObjectLockConfig != nil { + config, err := base64.StdEncoding.DecodeString(*bStatus.ObjectLockConfig) + if err != nil { + return err + } + if bytes.Equal([]byte(string(config)), enabledBucketObjectLockConfig) { + optsMap["lockEnabled"] = "true" + opts.LockEnabled = true + } + } + for _, dID := range dIDs { + peerName := info.Sites[dID].Name + if dID == globalDeploymentID { + err := c.PeerBucketMakeWithVersioningHandler(ctx, bucket, opts) + if err != nil { + logger.LogIf(ctx, c.annotateErr("MakeWithVersioning", fmt.Errorf("error healing bucket for site replication %w from %s -> %s", + err, latestPeerName, peerName))) + return err + } + } else { + admClient, err := c.getAdminClient(ctx, dID) + if err != nil { + logger.LogIf(ctx, c.annotateErr("ConfigureRepl", fmt.Errorf("admin client not found: %w", err))) + return err + } + err = admClient.SRPeerBucketOps(ctx, bucket, madmin.MakeWithVersioningBktOp, optsMap) + 
logger.LogIf(ctx, c.annotatePeerErr(peerName, "MakeWithVersioning", err)) + if err != nil { + return err + } + err = admClient.SRPeerBucketOps(ctx, bucket, madmin.ConfigureReplBktOp, nil) + logger.LogIf(ctx, c.annotatePeerErr(peerName, "ConfigureRepl", err)) + if err != nil { + return err + } + } + } + if len(dIDs) > 0 { + // configure replication from current cluster to other clusters + err := c.PeerBucketConfigureReplHandler(ctx, bucket) + if err != nil { + logger.LogIf(ctx, c.annotateErr("ConfigureRepl", err)) + return err + } + } + + return nil +} + +func (c *SiteReplicationSys) healBucketReplicationConfig(ctx context.Context, objAPI ObjectLayer, bucket string, info srStatusInfo) error { + bs := info.BucketStats[bucket] + + c.RLock() + defer c.RUnlock() + if !c.enabled { + return nil + } + + var replMismatch bool + for _, ss := range bs { + if ss.ReplicationCfgMismatch { + replMismatch = true + break + } + } + if replMismatch { + err := c.PeerBucketConfigureReplHandler(ctx, bucket) + if err != nil { + logger.LogIf(ctx, c.annotateErr("ConfigureRepl", err)) + } + } + return nil +} + +func isBucketMetadataEqual(one, two *string) bool { + switch { + case one == nil && two == nil: + return true + case one == nil || two == nil: + return false + default: + return strings.EqualFold(*one, *two) + } +} + +func (c *SiteReplicationSys) healIAMSystem(ctx context.Context, objAPI ObjectLayer) error { + info, err := c.siteReplicationStatus(ctx, objAPI, madmin.SRStatusOptions{ + Users: true, + Policies: true, + Groups: true, + }) + if err != nil { + return err + } + for policy := range info.PolicyStats { + c.healPolicies(ctx, objAPI, policy, info) + } + + for user := range info.UserStats { + c.healUsers(ctx, objAPI, user, info) + } + for group := range info.GroupStats { + c.healGroups(ctx, objAPI, group, info) + } + for user := range info.UserStats { + c.healUserPolicies(ctx, objAPI, user, info, false) + } + for group := range info.GroupStats { + c.healGroupPolicies(ctx, objAPI, group, info, false) + } + for user := range info.UserStats { + c.healUserPolicies(ctx, objAPI, user, info, true) + } + for group := range info.GroupStats { + c.healGroupPolicies(ctx, objAPI, group, info, true) + } + + return nil +} + +// heal iam policies present on this site to peers, provided current cluster has the most recent update. +func (c *SiteReplicationSys) healPolicies(ctx context.Context, objAPI ObjectLayer, policy string, info srStatusInfo) error { + // create IAM policy on peer cluster if missing + ps := info.PolicyStats[policy] + + c.RLock() + defer c.RUnlock() + if !c.enabled { + return nil + } + + var ( + latestID, latestPeerName string + lastUpdate time.Time + latestPolicyStat srPolicyStatsSummary + ) + for dID, ss := range ps { + if lastUpdate.IsZero() { + lastUpdate = ss.policy.UpdatedAt + latestID = dID + latestPolicyStat = ss + } + if !ss.policy.UpdatedAt.IsZero() && ss.policy.UpdatedAt.After(lastUpdate) { + lastUpdate = ss.policy.UpdatedAt + latestID = dID + latestPolicyStat = ss + } + } + if latestID != globalDeploymentID { + // heal only from the site with latest info. + return nil + } + latestPeerName = info.Sites[latestID].Name + // heal policy of peers if peer does not have it. 
+ for dID, pStatus := range ps { + if dID == globalDeploymentID { + continue + } + if !pStatus.PolicyMismatch && pStatus.HasPolicy { + continue + } + peerName := info.Sites[dID].Name + err := c.IAMChangeHook(ctx, madmin.SRIAMItem{ + Type: madmin.SRIAMItemPolicy, + Name: policy, + Policy: latestPolicyStat.policy.Policy, + }) + if err != nil { + logger.LogIf(ctx, fmt.Errorf("Error healing IAM policy %s from peer site %s -> site %s : %w", policy, latestPeerName, peerName, err)) + } + } + return nil +} + +// heal user policy mappings present on this site to peers, provided current cluster has the most recent update. +func (c *SiteReplicationSys) healUserPolicies(ctx context.Context, objAPI ObjectLayer, user string, info srStatusInfo, svcAcct bool) error { + // create user policy mapping on peer cluster if missing + us := info.UserStats[user] + + c.RLock() + defer c.RUnlock() + if !c.enabled { + return nil + } + var ( + latestID, latestPeerName string + lastUpdate time.Time + latestUserStat srUserStatsSummary + ) + for dID, ss := range us { + if lastUpdate.IsZero() { + lastUpdate = ss.userPolicy.UpdatedAt + latestID = dID + latestUserStat = ss + } + if !ss.userPolicy.UpdatedAt.IsZero() && ss.userPolicy.UpdatedAt.After(lastUpdate) { + lastUpdate = ss.userPolicy.UpdatedAt + latestID = dID + latestUserStat = ss + } + } + if latestID != globalDeploymentID { + // heal only from the site with latest info. + return nil + } + latestPeerName = info.Sites[latestID].Name + // heal policy of peers if peer does not have it. + for dID, pStatus := range us { + if dID == globalDeploymentID { + continue + } + if !pStatus.PolicyMismatch && pStatus.HasPolicyMapping { + continue + } + if isPolicyMappingEqual(pStatus.userPolicy, latestUserStat.userPolicy) { + continue + } + peerName := info.Sites[dID].Name + err := c.IAMChangeHook(ctx, madmin.SRIAMItem{ + Type: madmin.SRIAMItemPolicyMapping, + PolicyMapping: &madmin.SRPolicyMapping{ + UserOrGroup: user, + IsGroup: false, + Policy: latestUserStat.userPolicy.Policy, + }, + }) + if err != nil { + logger.LogIf(ctx, fmt.Errorf("Error healing IAM user policy mapping for %s from peer site %s -> site %s : %w", user, latestPeerName, peerName, err)) + } + } + return nil +} + +// heal group policy mappings present on this site to peers, provided current cluster has the most recent update. +func (c *SiteReplicationSys) healGroupPolicies(ctx context.Context, objAPI ObjectLayer, group string, info srStatusInfo, svcAcct bool) error { + // create group policy mapping on peer cluster if missing + gs := info.GroupStats[group] + + c.RLock() + defer c.RUnlock() + if !c.enabled { + return nil + } + + var ( + latestID, latestPeerName string + lastUpdate time.Time + latestGroupStat srGroupStatsSummary + ) + for dID, ss := range gs { + if lastUpdate.IsZero() { + lastUpdate = ss.groupPolicy.UpdatedAt + latestID = dID + latestGroupStat = ss + } + if !ss.groupPolicy.UpdatedAt.IsZero() && ss.groupPolicy.UpdatedAt.After(lastUpdate) { + lastUpdate = ss.groupPolicy.UpdatedAt + latestID = dID + latestGroupStat = ss + } + } + if latestID != globalDeploymentID { + // heal only from the site with latest info. + return nil + } + latestPeerName = info.Sites[latestID].Name + // heal policy of peers if peer does not have it. 
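+ // push the latest group policy mapping to any peer that is missing it or differs from it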
+ for dID, pStatus := range gs { + if dID == globalDeploymentID { + continue + } + if !pStatus.PolicyMismatch && pStatus.HasPolicyMapping { + continue + } + if isPolicyMappingEqual(pStatus.groupPolicy, latestGroupStat.groupPolicy) { + continue + } + peerName := info.Sites[dID].Name + + err := c.IAMChangeHook(ctx, madmin.SRIAMItem{ + Type: madmin.SRIAMItemPolicyMapping, + PolicyMapping: &madmin.SRPolicyMapping{ + UserOrGroup: group, + IsGroup: true, + Policy: latestGroupStat.groupPolicy.Policy, + }, + }) + if err != nil { + logger.LogIf(ctx, fmt.Errorf("Error healing IAM group policy mapping for %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err)) + } + } + return nil +} + +// heal user accounts of local users that are present on this site, provided current cluster has the most recent update. +func (c *SiteReplicationSys) healUsers(ctx context.Context, objAPI ObjectLayer, user string, info srStatusInfo) error { + // create user if missing; fix user policy mapping if missing + us := info.UserStats[user] + + c.RLock() + defer c.RUnlock() + if !c.enabled { + return nil + } + var ( + latestID, latestPeerName string + lastUpdate time.Time + latestUserStat srUserStatsSummary + ) + for dID, ss := range us { + if lastUpdate.IsZero() { + lastUpdate = ss.userInfo.UserInfo.UpdatedAt + latestID = dID + latestUserStat = ss + } + if !ss.userInfo.UserInfo.UpdatedAt.IsZero() && ss.userInfo.UserInfo.UpdatedAt.After(lastUpdate) { + lastUpdate = ss.userInfo.UserInfo.UpdatedAt + latestID = dID + latestUserStat = ss + } + } + if latestID != globalDeploymentID { + // heal only from the site with latest info. + return nil + } + latestPeerName = info.Sites[latestID].Name + for dID, uStatus := range us { + if dID == globalDeploymentID { + continue + } + if !uStatus.UserInfoMismatch { + continue + } + + if isUserInfoEqual(latestUserStat.userInfo.UserInfo, uStatus.userInfo.UserInfo) { + continue + } + creds, ok := globalIAMSys.GetUser(ctx, user) + if !ok { + continue + } + // heal only the user accounts that are local users + if creds.IsServiceAccount() || creds.IsTemp() { + continue + } + peerName := info.Sites[dID].Name + if err := c.IAMChangeHook(ctx, madmin.SRIAMItem{ + Type: madmin.SRIAMItemIAMUser, + IAMUser: &madmin.SRIAMUser{ + AccessKey: user, + IsDeleteReq: false, + UserReq: &madmin.AddOrUpdateUserReq{ + SecretKey: creds.SecretKey, + Status: latestUserStat.userInfo.Status, + }, + }, + }); err != nil { + logger.LogIf(ctx, fmt.Errorf("Error healing user %s from peer site %s -> %s : %w", user, latestPeerName, peerName, err)) + } + } + return nil +} + +func (c *SiteReplicationSys) healGroups(ctx context.Context, objAPI ObjectLayer, group string, info srStatusInfo) error { + c.RLock() + defer c.RUnlock() + if !c.enabled { + return nil + } + + var ( + latestID, latestPeerName string + lastUpdate time.Time + latestGroupStat srGroupStatsSummary + ) + // create group if missing; fix group policy mapping if missing + gs, ok := info.GroupStats[group] + if !ok { + return nil + } + for dID, ss := range gs { + if lastUpdate.IsZero() { + lastUpdate = ss.groupDesc.UpdatedAt + latestID = dID + latestGroupStat = ss + } + if !ss.groupDesc.UpdatedAt.IsZero() && ss.groupDesc.UpdatedAt.After(lastUpdate) { + lastUpdate = ss.groupDesc.UpdatedAt + latestID = dID + latestGroupStat = ss + } + } + if latestID != globalDeploymentID { + // heal only from the site with latest info. 
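+ // a peer holding the newer group info will drive this heal instead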
+ return nil + } + latestPeerName = info.Sites[latestID].Name + for dID, gStatus := range gs { + if dID == globalDeploymentID { + continue + } + if !gStatus.GroupDescMismatch { + continue + } + + if isGroupDescEqual(latestGroupStat.groupDesc.GroupDesc, gStatus.groupDesc.GroupDesc) { + continue + } + peerName := info.Sites[dID].Name + + if err := c.IAMChangeHook(ctx, madmin.SRIAMItem{ + Type: madmin.SRIAMItemGroupInfo, + GroupInfo: &madmin.SRGroupInfo{ + UpdateReq: madmin.GroupAddRemove{ + Group: group, + Status: madmin.GroupStatus(latestGroupStat.groupDesc.Status), + Members: latestGroupStat.groupDesc.Members, + IsRemove: false, + }, + }, + }); err != nil { + logger.LogIf(ctx, fmt.Errorf("Error healing group %s from peer site %s -> site %s : %w", group, latestPeerName, peerName, err)) + } + } + return nil +} + +func isGroupDescEqual(g1, g2 madmin.GroupDesc) bool { + if g1.Name != g2.Name || + g1.Status != g2.Status || + g1.Policy != g2.Policy { + return false + } + if len(g1.Members) != len(g2.Members) { + return false + } + for _, v1 := range g1.Members { + var found bool + for _, v2 := range g2.Members { + if v1 == v2 { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +func isUserInfoEqual(u1, u2 madmin.UserInfo) bool { + if u1.PolicyName != u2.PolicyName || + u1.Status != u2.Status || + u1.SecretKey != u2.SecretKey { + return false + } + for len(u1.MemberOf) != len(u2.MemberOf) { + return false + } + for _, v1 := range u1.MemberOf { + var found bool + for _, v2 := range u2.MemberOf { + if v1 == v2 { + found = true + break + } + } + if !found { + return false + } + } + return true +} + +func isPolicyMappingEqual(p1, p2 srPolicyMapping) bool { + return p1.Policy == p2.Policy && p1.IsGroup == p2.IsGroup && p1.UserOrGroup == p2.UserOrGroup +} diff --git a/cmd/xl-storage.go b/cmd/xl-storage.go index 9b2b7f5f9..4118ceab9 100644 --- a/cmd/xl-storage.go +++ b/cmd/xl-storage.go @@ -428,7 +428,7 @@ func (s *xlStorage) NSScanner(ctx context.Context, cache dataUsageCache, updates } // Check if the current bucket has replication configuration - if rcfg, err := globalBucketMetadataSys.GetReplicationConfig(ctx, cache.Info.Name); err == nil { + if rcfg, _, err := globalBucketMetadataSys.GetReplicationConfig(ctx, cache.Info.Name); err == nil { if rcfg.HasActiveRules("", true) { tgts, err := globalBucketTargetSys.ListBucketTargets(ctx, cache.Info.Name) if err == nil { diff --git a/go.mod b/go.mod index d41d5d4f6..c389b6cd2 100644 --- a/go.mod +++ b/go.mod @@ -51,7 +51,7 @@ require ( github.com/minio/dperf v0.3.6 github.com/minio/highwayhash v1.0.2 github.com/minio/kes v0.19.0 - github.com/minio/madmin-go v1.3.11 + github.com/minio/madmin-go v1.3.12 github.com/minio/minio-go/v7 v7.0.24 github.com/minio/pkg v1.1.20 github.com/minio/selfupdate v0.4.0 diff --git a/go.sum b/go.sum index 56a65ebcf..30f393993 100644 --- a/go.sum +++ b/go.sum @@ -116,7 +116,6 @@ github.com/apache/thrift v0.15.0/go.mod h1:PHK3hniurgQaNMZYaCLEqXKsYK8upmhPbmdP2 github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de/go.mod h1:DCaWoUhZrYW9p1lxo/cm8EmUOOzAPSEZNGF2DK1dJgw= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod 
h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= @@ -201,7 +200,6 @@ github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kw github.com/coredns/coredns v1.9.0 h1:M1EF1uups4CYcQGb1z8A97mfoq4BYCw3+xCYcJkOSDc= github.com/coredns/coredns v1.9.0/go.mod h1:czzy6Ofs15Mzn1PXpWoplBCZxoWdGoQUInL9uPSiYME= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible h1:sdJrfw8akMnCuUlaZU3tE/uYXFgfqom8DBE9so9EBsM= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= @@ -281,7 +279,6 @@ github.com/frankban/quicktest v1.14.0/go.mod h1:NeW+ay9A/U67EYXNFA1nPE8e/tnQv/09 github.com/fraugster/parquet-go v0.10.0 h1:whX91AO3dkkOnbH9MqD53DZ3rISw+Tnnj5yiqXjSv9Q= github.com/fraugster/parquet-go v0.10.0/go.mod h1:asQOKX0K/j+F3Xyj87kw7gKrU3yXo9M2hb8STSQKIIw= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko= github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg= @@ -710,8 +707,8 @@ github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLT github.com/minio/kes v0.19.0 h1:rKzkDXT4ay7FBW34KgXK+y85bie4x4Oiq29ONRuMzh0= github.com/minio/kes v0.19.0/go.mod h1:e9YGKbwFCV7LbqNPMfZBazfNUsFGJ5LG4plSeWL8mmg= github.com/minio/madmin-go v1.3.5/go.mod h1:vGKGboQgGIWx4DuDUaXixjlIEZOCIp6ivJkQoiVaACc= -github.com/minio/madmin-go v1.3.11 h1:Cj02kzG2SD1pnZW2n1joe00yqb6NFE40Jt2gp+5mWFQ= -github.com/minio/madmin-go v1.3.11/go.mod h1:ez87VmMtsxP7DRxjKJKD4RDNW+nhO2QF9KSzwxBDQ98= +github.com/minio/madmin-go v1.3.12 h1:7SmK/KtT7+d3hn3VcYBqI/c4yETfXV9gRT1j+g/U1jE= +github.com/minio/madmin-go v1.3.12/go.mod h1:ez87VmMtsxP7DRxjKJKD4RDNW+nhO2QF9KSzwxBDQ98= github.com/minio/mc v0.0.0-20220415073849-792c6c1475bd h1:kAqiTMvTBGQq57Mr+lHIh0tZdMP80dbDDFMspANC6ic= github.com/minio/mc v0.0.0-20220415073849-792c6c1475bd/go.mod h1:Z8lzJkjqgORAzT+ux12BZrov2HdPresY/gD0w+SCsIk= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= @@ -746,7 +743,6 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= @@ 
-938,27 +934,16 @@ github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= github.com/streadway/amqp v1.0.0 h1:kuuDrUJFZL1QYL9hUNuCxNObNzB0bV/ZG5jV3RWAQgo=