mirror of https://github.com/minio/minio
synced 2024-07-01 06:54:25 +00:00

Compare commits: 20 commits, 773a065d48 ... 5357b40820

Commit SHAs (newest first):

5357b40820
47bbc272df
aebac90013
7ca4ba77c4
13512170b5
154fcaeb56
722118386d
709612cb37
b35d083872
5e7b243bde
f8f9fc77ac
499531f0b5
3c2141513f
602f6a9ad0
22c5a5b91b
41f508765d
7dccd1f589
55ff598b23
a22ce4550c
da71597e00
Makefile (4 changes)

@@ -86,9 +86,9 @@ test-race: verifiers build ## builds minio, runs linters, tests (race)
 test-iam: install-race ## verify IAM (external IDP, etcd backends)
 	@echo "Running tests for IAM (external IDP, etcd backends)"
-	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -tags kqueue,dev -v -run TestIAM* ./cmd
+	@MINIO_API_REQUESTS_MAX=10000 CGO_ENABLED=0 go test -timeout 15m -tags kqueue,dev -v -run TestIAM* ./cmd
 	@echo "Running tests for IAM (external IDP, etcd backends) with -race"
-	@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -race -tags kqueue,dev -v -run TestIAM* ./cmd
+	@MINIO_API_REQUESTS_MAX=10000 GORACE=history_size=7 CGO_ENABLED=1 go test -timeout 15m -race -tags kqueue,dev -v -run TestIAM* ./cmd

 test-iam-ldap-upgrade-import: install-race ## verify IAM (external LDAP IDP)
 	@echo "Running upgrade tests for IAM (LDAP backend)"
@@ -479,3 +479,179 @@ func (a adminAPIHandlers) ListAccessKeysLDAP(w http.ResponseWriter, r *http.Request) {
 	writeSuccessResponseJSON(w, encryptedData)
 }
+
+// ListAccessKeysLDAPBulk - GET /minio/admin/v3/idp/ldap/list-access-keys-bulk
+func (a adminAPIHandlers) ListAccessKeysLDAPBulk(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
+	// Get current object layer instance.
+	objectAPI := newObjectLayerFn()
+	if objectAPI == nil || globalNotificationSys == nil {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
+		return
+	}
+
+	cred, owner, s3Err := validateAdminSignature(ctx, r, "")
+	if s3Err != ErrNone {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(s3Err), r.URL)
+		return
+	}
+
+	dnList := r.Form["userDNs"]
+	isAll := r.Form.Get("all") == "true"
+	onlySelf := !isAll && len(dnList) == 0
+
+	if isAll && len(dnList) > 0 {
+		// This should be checked on client side, so return generic error
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
+		return
+	}
+
+	// Empty DN list and not self, list access keys for all users
+	if isAll {
+		if !globalIAMSys.IsAllowed(policy.Args{
+			AccountName:     cred.AccessKey,
+			Groups:          cred.Groups,
+			Action:          policy.ListUsersAdminAction,
+			ConditionValues: getConditionValues(r, "", cred),
+			IsOwner:         owner,
+			Claims:          cred.Claims,
+		}) {
+			writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
+			return
+		}
+	} else if len(dnList) == 1 {
+		var dn string
+		foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(dnList[0])
+		if err == nil {
+			dn = foundResult.NormDN
+		}
+		if dn == cred.ParentUser || dnList[0] == cred.ParentUser {
+			onlySelf = true
+		}
+	}
+
+	if !globalIAMSys.IsAllowed(policy.Args{
+		AccountName:     cred.AccessKey,
+		Groups:          cred.Groups,
+		Action:          policy.ListServiceAccountsAdminAction,
+		ConditionValues: getConditionValues(r, "", cred),
+		IsOwner:         owner,
+		Claims:          cred.Claims,
+		DenyOnly:        onlySelf,
+	}) {
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAccessDenied), r.URL)
+		return
+	}
+
+	if onlySelf && len(dnList) == 0 {
+		selfDN := cred.AccessKey
+		if cred.ParentUser != "" {
+			selfDN = cred.ParentUser
+		}
+		dnList = append(dnList, selfDN)
+	}
+
+	accessKeyMap := make(map[string]madmin.ListAccessKeysLDAPResp)
+	if isAll {
+		ldapUsers, err := globalIAMSys.ListLDAPUsers(ctx)
+		if err != nil {
+			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+			return
+		}
+		for user := range ldapUsers {
+			accessKeyMap[user] = madmin.ListAccessKeysLDAPResp{}
+		}
+	} else {
+		for _, userDN := range dnList {
+			// Validate the userDN
+			foundResult, err := globalIAMSys.LDAPConfig.GetValidatedDNForUsername(userDN)
+			if err != nil {
+				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+				return
+			}
+			if foundResult == nil {
+				continue
+			}
+			accessKeyMap[foundResult.NormDN] = madmin.ListAccessKeysLDAPResp{}
+		}
+	}
+
+	listType := r.Form.Get("listType")
+	var listSTSKeys, listServiceAccounts bool
+	switch listType {
+	case madmin.AccessKeyListUsersOnly:
+		listSTSKeys = false
+		listServiceAccounts = false
+	case madmin.AccessKeyListSTSOnly:
+		listSTSKeys = true
+		listServiceAccounts = false
+	case madmin.AccessKeyListSvcaccOnly:
+		listSTSKeys = false
+		listServiceAccounts = true
+	case madmin.AccessKeyListAll:
+		listSTSKeys = true
+		listServiceAccounts = true
+	default:
+		err := errors.New("invalid list type")
+		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErrWithErr(ErrInvalidRequest, err), r.URL)
+		return
+	}
+
+	for dn, accessKeys := range accessKeyMap {
+		if listSTSKeys {
+			stsKeys, err := globalIAMSys.ListSTSAccounts(ctx, dn)
+			if err != nil {
+				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+				return
+			}
+			for _, sts := range stsKeys {
+				expiryTime := sts.Expiration
+				accessKeys.STSKeys = append(accessKeys.STSKeys, madmin.ServiceAccountInfo{
+					AccessKey:  sts.AccessKey,
+					Expiration: &expiryTime,
+				})
+			}
+			// if only STS keys, skip if user has no STS keys
+			if !listServiceAccounts && len(stsKeys) == 0 {
+				delete(accessKeyMap, dn)
+				continue
+			}
+		}
+
+		if listServiceAccounts {
+			serviceAccounts, err := globalIAMSys.ListServiceAccounts(ctx, dn)
+			if err != nil {
+				writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+				return
+			}
+			for _, svc := range serviceAccounts {
+				expiryTime := svc.Expiration
+				accessKeys.ServiceAccounts = append(accessKeys.ServiceAccounts, madmin.ServiceAccountInfo{
+					AccessKey:  svc.AccessKey,
+					Expiration: &expiryTime,
+				})
+			}
+			// if only service accounts, skip if user has no service accounts
+			if !listSTSKeys && len(serviceAccounts) == 0 {
+				delete(accessKeyMap, dn)
+				continue
+			}
+		}
+		accessKeyMap[dn] = accessKeys
+	}
+
+	data, err := json.Marshal(accessKeyMap)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	encryptedData, err := madmin.EncryptData(cred.SecretKey, data)
+	if err != nil {
+		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
+		return
+	}
+
+	writeSuccessResponseJSON(w, encryptedData)
+}
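The new handler accepts repeated `userDNs` query parameters, an `all=true` flag (mutually exclusive with `userDNs`, as the handler enforces), and a `listType` selector, and returns a response encrypted with the requester's secret key. A minimal sketch of how a client might address the endpoint is below; the host and the `listType` string value are assumptions for illustration, and a real client would also sign the request and decrypt the body with madmin.DecryptData.

```go
// Sketch only: build the request URL for the new bulk endpoint.
package main

import (
	"fmt"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Add("userDNs", "uid=alice,ou=people,dc=min,dc=io") // repeatable parameter
	q.Add("userDNs", "uid=bob,ou=people,dc=min,dc=io")
	q.Set("listType", "all") // assumed value: STS keys and service accounts

	u := url.URL{
		Scheme:   "https",
		Host:     "minio.example.com:9000", // hypothetical endpoint
		Path:     "/minio/admin/v3/idp/ldap/list-access-keys-bulk",
		RawQuery: q.Encode(),
	}
	fmt.Println(u.String())
}
```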
@@ -374,6 +374,7 @@ func (a adminAPIHandlers) RebalanceStop(w http.ResponseWriter, r *http.Request)
 	globalNotificationSys.StopRebalance(r.Context())
 	writeSuccessResponseHeadersOnly(w)
 	adminLogIf(ctx, pools.saveRebalanceStats(GlobalContext, 0, rebalSaveStoppedAt))
+	globalNotificationSys.LoadRebalanceMeta(ctx, false)
 }

 func proxyDecommissionRequest(ctx context.Context, defaultEndPoint Endpoint, w http.ResponseWriter, r *http.Request) (proxy bool) {
@@ -120,9 +120,12 @@ func (s *TestSuiteIAM) TestDeleteUserRace(c *check) {
 			c.Fatalf("Unable to set user: %v", err)
 		}

-		err = s.adm.SetPolicy(ctx, policy, accessKey, false)
-		if err != nil {
-			c.Fatalf("Unable to set policy: %v", err)
+		userReq := madmin.PolicyAssociationReq{
+			Policies: []string{policy},
+			User:     accessKey,
+		}
+		if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil {
+			c.Fatalf("Unable to attach policy: %v", err)
 		}

 		accessKeys[i] = accessKey
@@ -239,9 +239,12 @@ func (s *TestSuiteIAM) TestUserCreate(c *check) {
 	c.Assert(v.Status, madmin.AccountEnabled)

 	// 3. Associate policy and check that user can access
-	err = s.adm.SetPolicy(ctx, "readwrite", accessKey, false)
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{"readwrite"},
+		User:     accessKey,
+	})
 	if err != nil {
-		c.Fatalf("unable to set policy: %v", err)
+		c.Fatalf("unable to attach policy: %v", err)
 	}

 	client := s.getUserClient(c, accessKey, secretKey, "")
@@ -348,9 +351,12 @@ func (s *TestSuiteIAM) TestUserPolicyEscalationBug(c *check) {
 	if err != nil {
 		c.Fatalf("policy add error: %v", err)
 	}
-	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     accessKey,
+	})
 	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+		c.Fatalf("unable to attach policy: %v", err)
 	}
 	// 2.3 check user has access to bucket
 	c.mustListObjects(ctx, uClient, bucket)
@@ -470,9 +476,12 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
 	c.mustNotListObjects(ctx, uClient, "testbucket")

 	// 3.2 associate policy to user
-	err = s.adm.SetPolicy(ctx, policy1, accessKey, false)
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy1},
+		User:     accessKey,
+	})
 	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+		c.Fatalf("unable to attach policy: %v", err)
 	}

 	admClnt := s.getAdminClient(c, accessKey, secretKey, "")
@@ -490,10 +499,22 @@ func (s *TestSuiteIAM) TestAddServiceAccountPerms(c *check) {
 		c.Fatalf("policy was missing!")
 	}

-	// 3.2 associate policy to user
-	err = s.adm.SetPolicy(ctx, policy2, accessKey, false)
+	// Detach policy1 to set up for policy2
+	_, err = s.adm.DetachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy1},
+		User:     accessKey,
+	})
 	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+		c.Fatalf("unable to detach policy: %v", err)
 	}

+	// 3.2 associate policy to user
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy2},
+		User:     accessKey,
+	})
+	if err != nil {
+		c.Fatalf("unable to attach policy: %v", err)
+	}
+
 	// 3.3 check user can create service account implicitly.
@@ -571,9 +592,12 @@ func (s *TestSuiteIAM) TestPolicyCreate(c *check) {
 	c.mustNotListObjects(ctx, uClient, bucket)

 	// 3.2 associate policy to user
-	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     accessKey,
+	})
 	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+		c.Fatalf("unable to attach policy: %v", err)
 	}
 	// 3.3 check user has access to bucket
 	c.mustListObjects(ctx, uClient, bucket)
@@ -726,9 +750,12 @@ func (s *TestSuiteIAM) TestGroupAddRemove(c *check) {
 	c.mustNotListObjects(ctx, uClient, bucket)

 	// 3. Associate policy to group and check user got access.
-	err = s.adm.SetPolicy(ctx, policy, group, true)
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		Group:    group,
+	})
 	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+		c.Fatalf("unable to attach policy: %v", err)
 	}
 	// 3.1 check user has access to bucket
 	c.mustListObjects(ctx, uClient, bucket)
@@ -871,9 +898,12 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByUser(c *check) {
 		c.Fatalf("Unable to set user: %v", err)
 	}

-	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     accessKey,
+	})
 	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+		c.Fatalf("unable to attach policy: %v", err)
 	}

 	// Create an madmin client with user creds
@@ -952,9 +982,12 @@ func (s *TestSuiteIAM) TestServiceAccountDurationSecondsCondition(c *check) {
 		c.Fatalf("Unable to set user: %v", err)
 	}

-	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     accessKey,
+	})
 	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+		c.Fatalf("unable to attach policy: %v", err)
 	}

 	// Create an madmin client with user creds
@@ -1031,9 +1064,12 @@ func (s *TestSuiteIAM) TestServiceAccountOpsByAdmin(c *check) {
 		c.Fatalf("Unable to set user: %v", err)
 	}

-	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     accessKey,
+	})
 	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+		c.Fatalf("unable to attach policy: %v", err)
 	}

 	// 1. Create a service account for the user
@@ -301,8 +301,9 @@ func registerAdminRouter(router *mux.Router, enableConfigOps bool) {
 		// LDAP specific service accounts ops
 		adminRouter.Methods(http.MethodPut).Path(adminVersion + "/idp/ldap/add-service-account").HandlerFunc(adminMiddleware(adminAPI.AddServiceAccountLDAP))
 		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys").
-			HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)).
-			Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}")
+			HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAP)).Queries("userDN", "{userDN:.*}", "listType", "{listType:.*}")
+		adminRouter.Methods(http.MethodGet).Path(adminVersion+"/idp/ldap/list-access-keys-bulk").
+			HandlerFunc(adminMiddleware(adminAPI.ListAccessKeysLDAPBulk)).Queries("listType", "{listType:.*}")

 		// LDAP IAM operations
 		adminRouter.Methods(http.MethodGet).Path(adminVersion + "/idp/ldap/policy-entities").HandlerFunc(adminMiddleware(adminAPI.ListLDAPPolicyMappingEntities))
@@ -946,10 +946,20 @@ func writeSuccessResponseHeadersOnly(w http.ResponseWriter) {

 // writeErrorResponse writes error headers
 func writeErrorResponse(ctx context.Context, w http.ResponseWriter, err APIError, reqURL *url.URL) {
-	if err.HTTPStatusCode == http.StatusServiceUnavailable {
-		// Set retry-after header to indicate user-agents to retry request after 120secs.
+	switch err.HTTPStatusCode {
+	case http.StatusServiceUnavailable:
+		// Set retry-after header to indicate user-agents to retry request after 60 seconds.
 		// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
-		w.Header().Set(xhttp.RetryAfter, "120")
+		w.Header().Set(xhttp.RetryAfter, "60")
+	case http.StatusTooManyRequests:
+		_, deadline := globalAPIConfig.getRequestsPool()
+		if deadline <= 0 {
+			// Set retry-after header to indicate user-agents to retry request after 10 seconds.
+			// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Retry-After
+			w.Header().Set(xhttp.RetryAfter, "10")
+		} else {
+			w.Header().Set(xhttp.RetryAfter, strconv.Itoa(int(deadline.Seconds())))
+		}
 	}

 	switch err.Code {
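For callers, the practical effect is that 503 responses now advertise a 60-second backoff and throttled 429 responses advertise either 10 seconds or the configured request deadline. A minimal client-side sketch of honoring the header (standard library only; the probed URL is a hypothetical example):

```go
// Sketch: retry a GET when the server answers 429/503, sleeping for the
// duration advertised in Retry-After (integer-seconds form only).
package main

import (
	"fmt"
	"net/http"
	"strconv"
	"time"
)

func getWithRetry(url string, attempts int) (*http.Response, error) {
	for i := 0; i < attempts; i++ {
		resp, err := http.Get(url)
		if err != nil {
			return nil, err
		}
		if resp.StatusCode != http.StatusTooManyRequests &&
			resp.StatusCode != http.StatusServiceUnavailable {
			return resp, nil
		}
		resp.Body.Close()
		secs, err := strconv.Atoi(resp.Header.Get("Retry-After"))
		if err != nil || secs <= 0 {
			secs = 1 // fall back to a short delay if the header is absent
		}
		time.Sleep(time.Duration(secs) * time.Second)
	}
	return nil, fmt.Errorf("gave up after %d attempts", attempts)
}

func main() {
	resp, err := getWithRetry("http://localhost:9000/minio/health/live", 3) // hypothetical URL
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```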
@@ -88,6 +88,8 @@ type healingTracker struct {
 	ItemsSkipped uint64
 	BytesSkipped uint64

+	RetryAttempts uint64
+
 	// Add future tracking capabilities
 	// Be sure that they are included in toHealingDisk
 }
@@ -382,6 +384,8 @@ func getLocalDisksToHeal() (disksToHeal Endpoints) {

 var newDiskHealingTimeout = newDynamicTimeout(30*time.Second, 10*time.Second)

+var errRetryHealing = errors.New("some items failed to heal, we will retry healing this drive again")
+
 func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint) error {
 	poolIdx, setIdx := endpoint.PoolIdx, endpoint.SetIdx
 	disk := getStorageViaEndpoint(endpoint)
@@ -451,8 +455,27 @@ func healFreshDisk(ctx context.Context, z *erasureServerPools, endpoint Endpoint) error {
 		return err
 	}

-	healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d, failed: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
+	// if objects have failed healing, we attempt a retry to heal the drive upto 3 times before giving up.
+	if tracker.ItemsFailed > 0 && tracker.RetryAttempts < 4 {
+		tracker.RetryAttempts++
+		bugLogIf(ctx, tracker.update(ctx))
+
+		healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retrying %s time (healed: %d, skipped: %d, failed: %d).", disk,
+			humanize.Ordinal(int(tracker.RetryAttempts)), tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
+		return errRetryHealing
+	}
+
+	if tracker.ItemsFailed > 0 {
+		healingLogEvent(ctx, "Healing of drive '%s' is incomplete, retried %d times (healed: %d, skipped: %d, failed: %d).", disk,
+			tracker.RetryAttempts-1, tracker.ItemsHealed, tracker.ItemsSkipped, tracker.ItemsFailed)
+	} else {
+		if tracker.RetryAttempts > 0 {
+			healingLogEvent(ctx, "Healing of drive '%s' is complete, retried %d times (healed: %d, skipped: %d).", disk,
+				tracker.RetryAttempts-1, tracker.ItemsHealed, tracker.ItemsSkipped)
+		} else {
+			healingLogEvent(ctx, "Healing of drive '%s' is finished (healed: %d, skipped: %d).", disk, tracker.ItemsHealed, tracker.ItemsSkipped)
+		}
+	}
 	if serverDebugLog {
 		tracker.printTo(os.Stdout)
 		fmt.Printf("\n")
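The retry logic caps itself with RetryAttempts < 4, so a drive gets at most three extra passes after the first one, and humanize.Ordinal (from go-humanize, already a dependency here) renders the attempt number in the log line. A toy sketch of the same bounded-retry shape, with a hypothetical stand-in for the heal step:

```go
// Sketch: bounded retries with ordinal log messages, mirroring the shape of
// the healFreshDisk change. healDrive is a hypothetical stand-in.
package main

import (
	"fmt"

	"github.com/dustin/go-humanize"
)

// healDrive pretends fewer items fail on each pass.
func healDrive(attempt uint64) int {
	return 3 - int(attempt)
}

func main() {
	var attempts uint64
	for {
		failed := healDrive(attempts)
		if failed > 0 && attempts < 4 {
			attempts++
			fmt.Printf("healing incomplete, retrying %s time (failed: %d)\n",
				humanize.Ordinal(int(attempts)), failed)
			continue // MinIO returns errRetryHealing here so the caller re-queues the drive
		}
		fmt.Printf("healing finished after %d attempts\n", attempts)
		break
	}
}
```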
@@ -524,7 +547,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureServerPools) {
 			if err := healFreshDisk(ctx, z, disk); err != nil {
 				globalBackgroundHealState.setDiskHealingStatus(disk, false)
 				timedout := OperationTimedOut{}
-				if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) {
+				if !errors.Is(err, context.Canceled) && !errors.As(err, &timedout) && !errors.Is(err, errRetryHealing) {
 					printEndpointError(disk, err, false)
 				}
 				return
@@ -200,6 +200,12 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {
 				err = msgp.WrapError(err, "BytesSkipped")
 				return
 			}
+		case "RetryAttempts":
+			z.RetryAttempts, err = dc.ReadUint64()
+			if err != nil {
+				err = msgp.WrapError(err, "RetryAttempts")
+				return
+			}
 		default:
 			err = dc.Skip()
 			if err != nil {
@@ -213,9 +219,9 @@ func (z *healingTracker) DecodeMsg(dc *msgp.Reader) (err error) {

 // EncodeMsg implements msgp.Encodable
 func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
-	// map header, size 25
+	// map header, size 26
 	// write "ID"
-	err = en.Append(0xde, 0x0, 0x19, 0xa2, 0x49, 0x44)
+	err = en.Append(0xde, 0x0, 0x1a, 0xa2, 0x49, 0x44)
 	if err != nil {
 		return
 	}
@@ -478,15 +484,25 @@ func (z *healingTracker) EncodeMsg(en *msgp.Writer) (err error) {
 		err = msgp.WrapError(err, "BytesSkipped")
 		return
 	}
+	// write "RetryAttempts"
+	err = en.Append(0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
+	if err != nil {
+		return
+	}
+	err = en.WriteUint64(z.RetryAttempts)
+	if err != nil {
+		err = msgp.WrapError(err, "RetryAttempts")
+		return
+	}
 	return
 }

 // MarshalMsg implements msgp.Marshaler
 func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
 	o = msgp.Require(b, z.Msgsize())
-	// map header, size 25
+	// map header, size 26
 	// string "ID"
-	o = append(o, 0xde, 0x0, 0x19, 0xa2, 0x49, 0x44)
+	o = append(o, 0xde, 0x0, 0x1a, 0xa2, 0x49, 0x44)
 	o = msgp.AppendString(o, z.ID)
 	// string "PoolIndex"
 	o = append(o, 0xa9, 0x50, 0x6f, 0x6f, 0x6c, 0x49, 0x6e, 0x64, 0x65, 0x78)
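The byte-level change in this generated msgp code is mechanical: 0xde is the msgpack map16 marker followed by a big-endian uint16 field count, so 0xde 0x00 0x19 (25 fields) becomes 0xde 0x00 0x1a (26 fields) once RetryAttempts joins the map, and the 0xad before the "RetryAttempts" key bytes is a fixstr marker (0xa0 | length). A quick check:

```go
// Sketch: decode the msgpack header bytes from the generated code to confirm
// the field-count bump from 25 to 26 and the fixstr key marker.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	oldHdr := []byte{0xde, 0x00, 0x19} // map16 marker + big-endian count
	newHdr := []byte{0xde, 0x00, 0x1a}
	fmt.Println(binary.BigEndian.Uint16(oldHdr[1:])) // 25
	fmt.Println(binary.BigEndian.Uint16(newHdr[1:])) // 26
	// fixstr marker for the key: 0xa0 | len("RetryAttempts") = 0xa0 | 13
	fmt.Printf("0x%x\n", 0xa0|len("RetryAttempts")) // 0xad
}
```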
@@ -566,6 +582,9 @@ func (z *healingTracker) MarshalMsg(b []byte) (o []byte, err error) {
 	// string "BytesSkipped"
 	o = append(o, 0xac, 0x42, 0x79, 0x74, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64)
 	o = msgp.AppendUint64(o, z.BytesSkipped)
+	// string "RetryAttempts"
+	o = append(o, 0xad, 0x52, 0x65, 0x74, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73)
+	o = msgp.AppendUint64(o, z.RetryAttempts)
 	return
 }

@@ -763,6 +782,12 @@ func (z *healingTracker) UnmarshalMsg(bts []byte) (o []byte, err error) {
 				err = msgp.WrapError(err, "BytesSkipped")
 				return
 			}
+		case "RetryAttempts":
+			z.RetryAttempts, bts, err = msgp.ReadUint64Bytes(bts)
+			if err != nil {
+				err = msgp.WrapError(err, "RetryAttempts")
+				return
+			}
 		default:
 			bts, err = msgp.Skip(bts)
 			if err != nil {
@@ -785,6 +810,6 @@ func (z *healingTracker) Msgsize() (s int) {
 	for za0002 := range z.HealedBuckets {
 		s += msgp.StringPrefixSize + len(z.HealedBuckets[za0002])
 	}
-	s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size
+	s += 7 + msgp.StringPrefixSize + len(z.HealID) + 13 + msgp.Uint64Size + 13 + msgp.Uint64Size + 14 + msgp.Uint64Size
 	return
 }
@@ -106,14 +106,17 @@ func (s1 *ServerSystemConfig) Diff(s2 *ServerSystemConfig) error {
 }

 var skipEnvs = map[string]struct{}{
-	"MINIO_OPTS":          {},
-	"MINIO_CERT_PASSWD":   {},
-	"MINIO_SERVER_DEBUG":  {},
-	"MINIO_DSYNC_TRACE":   {},
-	"MINIO_ROOT_USER":     {},
-	"MINIO_ROOT_PASSWORD": {},
-	"MINIO_ACCESS_KEY":    {},
-	"MINIO_SECRET_KEY":    {},
+	"MINIO_OPTS":                   {},
+	"MINIO_CERT_PASSWD":            {},
+	"MINIO_SERVER_DEBUG":           {},
+	"MINIO_DSYNC_TRACE":            {},
+	"MINIO_ROOT_USER":              {},
+	"MINIO_ROOT_PASSWORD":          {},
+	"MINIO_ACCESS_KEY":             {},
+	"MINIO_SECRET_KEY":             {},
+	"MINIO_OPERATOR_VERSION":       {},
+	"MINIO_VSPHERE_PLUGIN_VERSION": {},
+	"MINIO_CI_CD":                  {},
 }

 func getServerSystemCfg() *ServerSystemConfig {
@@ -834,7 +834,7 @@ func serverHandleEnvVars() {
 		}
 	}

-	globalDisableFreezeOnBoot = env.Get("_MINIO_DISABLE_API_FREEZE_ON_BOOT", "") == "true" || serverDebugLog
+	globalEnableSyncBoot = env.Get("MINIO_SYNC_BOOT", config.EnableOff) == config.EnableOn
 }

 func loadRootCredentials() {
@@ -227,7 +227,9 @@ func runDataScanner(ctx context.Context, objAPI ObjectLayer) {
 			binary.LittleEndian.PutUint64(tmp, cycleInfo.next)
 			tmp, _ = cycleInfo.MarshalMsg(tmp)
 			err = saveConfig(ctx, objAPI, dataUsageBloomNamePath, tmp)
-			scannerLogIf(ctx, err, dataUsageBloomNamePath)
+			if err != nil {
+				scannerLogIf(ctx, fmt.Errorf("%w, Object %s", err, dataUsageBloomNamePath))
+			}
 		}
 	}
 }

@@ -797,7 +799,9 @@ func (f *folderScanner) scanFolder(ctx context.Context, folder cachedFolder, into *dataUsageEntry) error {
 				}, madmin.HealItemObject)
 				stopFn(int(ver.Size))
 				if !isErrObjectNotFound(err) && !isErrVersionNotFound(err) {
-					scannerLogIf(ctx, err, fiv.Name)
+					if err != nil {
+						scannerLogIf(ctx, fmt.Errorf("%w, Object %s/%s/%s", err, bucket, fiv.Name, ver.VersionID))
+					}
 				}
 				if err == nil {
 					successVersions++
@@ -1343,7 +1347,7 @@ func applyExpiryOnNonTransitionedObjects(ctx context.Context, objLayer ObjectLayer,
 	case lifecycle.DeleteAllVersionsAction:
 		eventName = event.ObjectRemovedDeleteAllVersions
 	case lifecycle.DelMarkerDeleteAllVersionsAction:
-		eventName = event.ILMDelMarkerExpirationDelete
+		eventName = event.ILMDelObjExpirationDelete
 	}
 	// Notify object deleted event.
 	sendEvent(eventArgs{
@@ -146,7 +146,7 @@ func TestApplyNewerNoncurrentVersionsLimit(t *testing.T) {
 }

 func TestEvalActionFromLifecycle(t *testing.T) {
-	// Tests cover only ExpiredObjectDeleteAllVersions and DelMarkerExpiration actions
+	// Tests cover only ExpiredObjectDeleteAllVersions and DeletedObjectExpiration actions
 	obj := ObjectInfo{
 		Name:    "foo",
 		ModTime: time.Now().Add(-31 * 24 * time.Hour),

@@ -177,12 +177,12 @@ func TestEvalActionFromLifecycle(t *testing.T) {
 	</LifecycleConfiguration>`
 	delMarkerILM := `<LifecycleConfiguration>
 			<Rule>
-				<ID>DelMarkerExpiration</ID>
+				<ID>DeletedObjectExpiration</ID>
 				<Filter></Filter>
 				<Status>Enabled</Status>
-				<DelMarkerExpiration>
+				<DeletedObjectExpiration>
 					<Days>60</Days>
-				</DelMarkerExpiration>
+				</DeletedObjectExpiration>
 			</Rule>
 	</LifecycleConfiguration>`
 	deleteAllLc, err := lifecycle.ParseLifecycleConfig(strings.NewReader(deleteAllILM))
@@ -134,11 +134,16 @@ func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
 		SSES3SinglePartObjects := make(map[int]bool)
 		for i, object := range batch {
 			if kind, ok := crypto.IsEncrypted(object.UserDefined); ok && kind == crypto.S3 && !crypto.IsMultiPart(object.UserDefined) {
-				SSES3SinglePartObjects[i] = true
-
-				metadata = append(metadata, object.UserDefined)
-				buckets = append(buckets, object.Bucket)
-				names = append(names, object.Name)
+				ETag, err := etag.Parse(object.ETag)
+				if err != nil {
+					continue
+				}
+				if ETag.IsEncrypted() {
+					SSES3SinglePartObjects[i] = true
+					metadata = append(metadata, object.UserDefined)
+					buckets = append(buckets, object.Bucket)
+					names = append(names, object.Name)
+				}
 			}
 		}

@@ -190,7 +195,7 @@ func DecryptETags(ctx context.Context, k *kms.KMS, objects []ObjectInfo) error {
 			if err != nil {
 				return err
 			}
-			if SSES3SinglePartObjects[i] && ETag.IsEncrypted() {
+			if SSES3SinglePartObjects[i] {
 				ETag, err = etag.Decrypt(keys[0][:], ETag)
 				if err != nil {
 					return err
@@ -119,11 +119,8 @@ func (z *erasureServerPools) loadRebalanceMeta(ctx context.Context) error {
 	}

 	z.rebalMu.Lock()
-	if len(r.PoolStats) == len(z.serverPools) {
-		z.rebalMeta = r
-	} else {
-		z.updateRebalanceStats(ctx)
-	}
+	z.rebalMeta = r
+	z.updateRebalanceStats(ctx)
 	z.rebalMu.Unlock()

 	return nil

@@ -147,24 +144,16 @@ func (z *erasureServerPools) updateRebalanceStats(ctx context.Context) error {
 		}
 	}
 	if ok {
-		lock := z.serverPools[0].NewNSLock(minioMetaBucket, rebalMetaName)
-		lkCtx, err := lock.GetLock(ctx, globalOperationTimeout)
-		if err != nil {
-			rebalanceLogIf(ctx, fmt.Errorf("failed to acquire write lock on %s/%s: %w", minioMetaBucket, rebalMetaName, err))
-			return err
-		}
-		defer lock.Unlock(lkCtx)
-
-		ctx = lkCtx.Context()
-
-		noLockOpts := ObjectOptions{NoLock: true}
-		return z.rebalMeta.saveWithOpts(ctx, z.serverPools[0], noLockOpts)
+		return z.rebalMeta.save(ctx, z.serverPools[0])
 	}

 	return nil
 }

 func (z *erasureServerPools) findIndex(index int) int {
+	if z.rebalMeta == nil {
+		return 0
+	}
 	for i := 0; i < len(z.rebalMeta.PoolStats); i++ {
 		if i == index {
 			return index

@@ -277,6 +266,10 @@ func (z *erasureServerPools) bucketRebalanceDone(bucket string, poolIdx int) {
 	z.rebalMu.Lock()
 	defer z.rebalMu.Unlock()

+	if z.rebalMeta == nil {
+		return
+	}
+
 	ps := z.rebalMeta.PoolStats[poolIdx]
 	if ps == nil {
 		return

@@ -331,6 +324,10 @@ func (r *rebalanceMeta) loadWithOpts(ctx context.Context, store objectIO, opts ObjectOptions) error {
 }

 func (r *rebalanceMeta) saveWithOpts(ctx context.Context, store objectIO, opts ObjectOptions) error {
+	if r == nil {
+		return nil
+	}
+
 	data := make([]byte, 4, r.Msgsize()+4)

 	// Initialize the header.

@@ -353,8 +350,15 @@ func (z *erasureServerPools) IsRebalanceStarted() bool {
 	z.rebalMu.RLock()
 	defer z.rebalMu.RUnlock()

-	if r := z.rebalMeta; r != nil {
-		if r.StoppedAt.IsZero() {
+	r := z.rebalMeta
+	if r == nil {
+		return false
+	}
+	if !r.StoppedAt.IsZero() {
+		return false
+	}
+	for _, ps := range r.PoolStats {
+		if ps.Participating && ps.Info.Status != rebalCompleted {
 			return true
 		}
 	}

@@ -369,7 +373,7 @@ func (z *erasureServerPools) IsPoolRebalancing(poolIndex int) bool {
 		if !r.StoppedAt.IsZero() {
 			return false
 		}
-		ps := z.rebalMeta.PoolStats[poolIndex]
+		ps := r.PoolStats[poolIndex]
 		return ps.Participating && ps.Info.Status == rebalStarted
 	}
 	return false

@@ -794,7 +798,9 @@ func (z *erasureServerPools) saveRebalanceStats(ctx context.Context, poolIdx int, opts rebalSaveOpts) error {
 	case rebalSaveStoppedAt:
 		r.StoppedAt = time.Now()
 	case rebalSaveStats:
-		r.PoolStats[poolIdx] = z.rebalMeta.PoolStats[poolIdx]
+		if z.rebalMeta != nil {
+			r.PoolStats[poolIdx] = z.rebalMeta.PoolStats[poolIdx]
+		}
 	}
 	z.rebalMeta = r

@@ -449,8 +449,8 @@ var (
 	// dynamic sleeper for multipart expiration routine
 	deleteMultipartCleanupSleeper = newDynamicSleeper(5, 25*time.Millisecond, false)

-	// Is _MINIO_DISABLE_API_FREEZE_ON_BOOT set?
-	globalDisableFreezeOnBoot bool
+	// Is MINIO_SYNC_BOOT set?
+	globalEnableSyncBoot bool

 	// Contains NIC interface name used for internode communication
 	globalInternodeInterface string
@@ -29,14 +29,35 @@ import (

 const unavailable = "offline"

-// ClusterCheckHandler returns if the server is ready for requests.
-func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) {
-	ctx := newContext(r, w, "ClusterCheckHandler")
-
+func checkHealth(w http.ResponseWriter) ObjectLayer {
 	objLayer := newObjectLayerFn()
 	if objLayer == nil {
 		w.Header().Set(xhttp.MinIOServerStatus, unavailable)
 		writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
+		return nil
+	}
+
+	if !globalBucketMetadataSys.Initialized() {
+		w.Header().Set(xhttp.MinIOServerStatus, "bucket-metadata-offline")
+		writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
+		return nil
+	}
+
+	if !globalIAMSys.Initialized() {
+		w.Header().Set(xhttp.MinIOServerStatus, "iam-offline")
+		writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
+		return nil
+	}
+
+	return objLayer
+}
+
+// ClusterCheckHandler returns if the server is ready for requests.
+func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) {
+	ctx := newContext(r, w, "ClusterCheckHandler")
+
+	objLayer := checkHealth(w)
+	if objLayer == nil {
 		return
 	}

@@ -72,10 +93,8 @@ func ClusterCheckHandler(w http.ResponseWriter, r *http.Request) {
 func ClusterReadCheckHandler(w http.ResponseWriter, r *http.Request) {
 	ctx := newContext(r, w, "ClusterReadCheckHandler")

-	objLayer := newObjectLayerFn()
+	objLayer := checkHealth(w)
 	if objLayer == nil {
-		w.Header().Set(xhttp.MinIOServerStatus, unavailable)
-		writeResponse(w, http.StatusServiceUnavailable, nil, mimeNone)
 		return
 	}

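With checkHealth factored out, both cluster probes report the same granular statuses in the x-minio-server-status header ("offline", "bucket-metadata-offline", "iam-offline") alongside a 503. A minimal probe sketch; the host and the /minio/health/cluster path are assumptions based on MinIO's standard health-check endpoints:

```go
// Sketch: probe the cluster health endpoint and surface the granular
// status header set by checkHealth.
package main

import (
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:9000/minio/health/cluster")
	if err != nil {
		fmt.Println("probe failed:", err)
		return
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusServiceUnavailable {
		// e.g. "offline", "bucket-metadata-offline", "iam-offline"
		fmt.Println("not ready:", resp.Header.Get("x-minio-server-status"))
		return
	}
	fmt.Println("ready:", resp.Status)
}
```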
@@ -439,23 +439,44 @@ func (iamOS *IAMObjectStore) listAllIAMConfigItems(ctx context.Context) (res map[string][]string, err error) {
 	return res, nil
 }

+const (
+	maxIAMLoadOpTime = 5 * time.Second
+)
+
 // Assumes cache is locked by caller.
-func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache) error {
+func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache, firstTime bool) error {
+	bootstrapTraceMsgFirstTime := func(s string) {
+		if firstTime {
+			bootstrapTraceMsg(s)
+		}
+	}
+
 	if iamOS.objAPI == nil {
 		return errServerNotInitialized
 	}

-	bootstrapTraceMsg("loading all IAM items")
+	bootstrapTraceMsgFirstTime("loading all IAM items")
+
+	setDefaultCannedPolicies(cache.iamPolicyDocsMap)

+	listStartTime := UTCNow()
 	listedConfigItems, err := iamOS.listAllIAMConfigItems(ctx)
 	if err != nil {
 		return fmt.Errorf("unable to list IAM data: %w", err)
 	}
+	if took := time.Since(listStartTime); took > maxIAMLoadOpTime {
+		var s strings.Builder
+		for k, v := range listedConfigItems {
+			s.WriteString(fmt.Sprintf(" %s: %d items\n", k, len(v)))
+		}
+		logger.Info("listAllIAMConfigItems took %.2fs with contents:\n%s", took.Seconds(), s.String())
+	}

 	// Loads things in the same order as `LoadIAMCache()`

-	bootstrapTraceMsg("loading policy documents")
+	bootstrapTraceMsgFirstTime("loading policy documents")
+
+	policyLoadStartTime := UTCNow()
 	policiesList := listedConfigItems[policiesListKey]
 	for _, item := range policiesList {
 		policyName := path.Dir(item)

@@ -463,10 +484,13 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache, firstTime bool) error {
 			return fmt.Errorf("unable to load the policy doc `%s`: %w", policyName, err)
 		}
 	}
-	setDefaultCannedPolicies(cache.iamPolicyDocsMap)
+	if took := time.Since(policyLoadStartTime); took > maxIAMLoadOpTime {
+		logger.Info("Policy docs load took %.2fs (for %d items)", took.Seconds(), len(policiesList))
+	}

 	if iamOS.usersSysType == MinIOUsersSysType {
-		bootstrapTraceMsg("loading regular IAM users")
+		bootstrapTraceMsgFirstTime("loading regular IAM users")
+		regUsersLoadStartTime := UTCNow()
 		regUsersList := listedConfigItems[usersListKey]
 		for _, item := range regUsersList {
 			userName := path.Dir(item)

@@ -474,8 +498,14 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache, firstTime bool) error {
 				return fmt.Errorf("unable to load the user `%s`: %w", userName, err)
 			}
 		}
+		if took := time.Since(regUsersLoadStartTime); took > maxIAMLoadOpTime {
+			actualLoaded := len(cache.iamUsersMap)
+			logger.Info("Reg. users load took %.2fs (for %d items with %d expired items)", took.Seconds(),
+				len(regUsersList), len(regUsersList)-actualLoaded)
+		}

-		bootstrapTraceMsg("loading regular IAM groups")
+		bootstrapTraceMsgFirstTime("loading regular IAM groups")
+		groupsLoadStartTime := UTCNow()
 		groupsList := listedConfigItems[groupsListKey]
 		for _, item := range groupsList {
 			group := path.Dir(item)

@@ -483,9 +513,13 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache, firstTime bool) error {
 				return fmt.Errorf("unable to load the group `%s`: %w", group, err)
 			}
 		}
+		if took := time.Since(groupsLoadStartTime); took > maxIAMLoadOpTime {
+			logger.Info("Groups load took %.2fs (for %d items)", took.Seconds(), len(groupsList))
+		}
 	}

-	bootstrapTraceMsg("loading user policy mapping")
+	bootstrapTraceMsgFirstTime("loading user policy mapping")
+	userPolicyMappingLoadStartTime := UTCNow()
 	userPolicyMappingsList := listedConfigItems[policyDBUsersListKey]
 	for _, item := range userPolicyMappingsList {
 		userName := strings.TrimSuffix(item, ".json")

@@ -493,8 +527,12 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache, firstTime bool) error {
 			return fmt.Errorf("unable to load the policy mapping for the user `%s`: %w", userName, err)
 		}
 	}
+	if took := time.Since(userPolicyMappingLoadStartTime); took > maxIAMLoadOpTime {
+		logger.Info("User policy mappings load took %.2fs (for %d items)", took.Seconds(), len(userPolicyMappingsList))
+	}

-	bootstrapTraceMsg("loading group policy mapping")
+	bootstrapTraceMsgFirstTime("loading group policy mapping")
+	groupPolicyMappingLoadStartTime := UTCNow()
 	groupPolicyMappingsList := listedConfigItems[policyDBGroupsListKey]
 	for _, item := range groupPolicyMappingsList {
 		groupName := strings.TrimSuffix(item, ".json")

@@ -502,8 +540,12 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache, firstTime bool) error {
 			return fmt.Errorf("unable to load the policy mapping for the group `%s`: %w", groupName, err)
 		}
 	}
+	if took := time.Since(groupPolicyMappingLoadStartTime); took > maxIAMLoadOpTime {
+		logger.Info("Group policy mappings load took %.2fs (for %d items)", took.Seconds(), len(groupPolicyMappingsList))
+	}

-	bootstrapTraceMsg("loading service accounts")
+	bootstrapTraceMsgFirstTime("loading service accounts")
+	svcAccLoadStartTime := UTCNow()
 	svcAccList := listedConfigItems[svcAccListKey]
 	svcUsersMap := make(map[string]UserIdentity, len(svcAccList))
 	for _, item := range svcAccList {

@@ -512,9 +554,18 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache, firstTime bool) error {
 			return fmt.Errorf("unable to load the service account `%s`: %w", userName, err)
 		}
 	}
+	if took := time.Since(svcAccLoadStartTime); took > maxIAMLoadOpTime {
+		logger.Info("Service accounts load took %.2fs (for %d items with %d expired items)", took.Seconds(),
+			len(svcAccList), len(svcAccList)-len(svcUsersMap))
+	}

 	bootstrapTraceMsg("loading STS account policy mapping")
+	stsPolicyMappingLoadStartTime := UTCNow()
+	var stsPolicyMappingsCount int
 	for _, svcAcc := range svcUsersMap {
 		svcParent := svcAcc.Credentials.ParentUser
 		if _, ok := cache.iamUsersMap[svcParent]; !ok {
+			stsPolicyMappingsCount++
 			// If a service account's parent user is not in iamUsersMap, the
 			// parent is an STS account. Such accounts may have a policy mapped
 			// on the parent user, so we load them. This is not needed for the

@@ -533,6 +584,10 @@ func (iamOS *IAMObjectStore) loadAllFromObjStore(ctx context.Context, cache *iamCache, firstTime bool) error {
 			}
 		}
 	}
+	if took := time.Since(stsPolicyMappingLoadStartTime); took > maxIAMLoadOpTime {
+		logger.Info("STS policy mappings load took %.2fs (for %d items)", took.Seconds(), stsPolicyMappingsCount)
+	}

 	// Copy svcUsersMap to cache.iamUsersMap
 	for k, v := range svcUsersMap {
 		cache.iamUsersMap[k] = v
cmd/iam-store.go (183 changes)
@@ -431,8 +431,41 @@ func (c *iamCache) policyDBGet(store *IAMStoreSys, name string, isGroup bool) ([]string, time.Time, error) {
 		}
 	}

-	// returned policy could be empty
-	policies := mp.toSlice()
+	// returned policy could be empty, we use set to de-duplicate.
+	policies := set.CreateStringSet(mp.toSlice()...)
+
+	for _, group := range u.Credentials.Groups {
+		if store.getUsersSysType() == MinIOUsersSysType {
+			g, ok := c.iamGroupsMap[group]
+			if !ok {
+				if err := store.loadGroup(context.Background(), group, c.iamGroupsMap); err != nil {
+					return nil, time.Time{}, err
+				}
+				g, ok = c.iamGroupsMap[group]
+				if !ok {
+					return nil, time.Time{}, errNoSuchGroup
+				}
+			}
+
+			// Group is disabled, so we return no policy - this
+			// ensures the request is denied.
+			if g.Status == statusDisabled {
+				return nil, time.Time{}, nil
+			}
+		}
+
+		policy, ok := c.iamGroupPolicyMap.Load(group)
+		if !ok {
+			if err := store.loadMappedPolicyWithRetry(context.TODO(), group, regUser, true, c.iamGroupPolicyMap, 3); err != nil && !errors.Is(err, errNoSuchPolicy) {
+				return nil, time.Time{}, err
+			}
+			policy, _ = c.iamGroupPolicyMap.Load(group)
+		}
+
+		for _, p := range policy.toSlice() {
+			policies.Add(p)
+		}
+	}

 	for _, group := range c.iamUserGroupMemberships[name].ToSlice() {
 		if store.getUsersSysType() == MinIOUsersSysType {
@@ -462,10 +495,12 @@ func (c *iamCache) policyDBGet(store *IAMStoreSys, name string, isGroup bool) ([]string, time.Time, error) {
 			policy, _ = c.iamGroupPolicyMap.Load(group)
 		}

-		policies = append(policies, policy.toSlice()...)
+		for _, p := range policy.toSlice() {
+			policies.Add(p)
+		}
 	}

-	return policies, mp.UpdatedAt, nil
+	return policies.ToSlice(), mp.UpdatedAt, nil
 }

 func (c *iamCache) updateUserWithClaims(key string, u UserIdentity) error {
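Switching from a plain slice to set.CreateStringSet (from minio-go's pkg/set, already used across this codebase) means a policy attached both directly and through a group is reported once. A tiny illustration of the same de-duplication:

```go
// Sketch: de-duplicating merged policy names with minio-go's string set,
// as policyDBGet now does.
package main

import (
	"fmt"

	"github.com/minio/minio-go/v7/pkg/set"
)

func main() {
	policies := set.CreateStringSet("readwrite", "diagnostics")
	for _, p := range []string{"readwrite", "consoleAdmin"} { // group-mapped policies
		policies.Add(p)
	}
	fmt.Println(policies.ToSlice()) // "readwrite" appears only once
}
```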
@@ -537,25 +572,25 @@ func setDefaultCannedPolicies(policies map[string]PolicyDoc) {
 // LoadIAMCache reads all IAM items and populates a new iamCache object and
 // replaces the in-memory cache object.
 func (store *IAMStoreSys) LoadIAMCache(ctx context.Context, firstTime bool) error {
-	bootstrapTraceMsg := func(s string) {
+	bootstrapTraceMsgFirstTime := func(s string) {
 		if firstTime {
 			bootstrapTraceMsg(s)
 		}
 	}
-	bootstrapTraceMsg("loading IAM data")
+	bootstrapTraceMsgFirstTime("loading IAM data")

 	newCache := newIamCache()

 	loadedAt := time.Now()

 	if iamOS, ok := store.IAMStorageAPI.(*IAMObjectStore); ok {
-		err := iamOS.loadAllFromObjStore(ctx, newCache)
+		err := iamOS.loadAllFromObjStore(ctx, newCache, firstTime)
 		if err != nil {
 			return err
 		}
 	} else {
-
-		bootstrapTraceMsg("loading policy documents")
+		// Only non-object IAM store (i.e. only etcd backend).
+		bootstrapTraceMsgFirstTime("loading policy documents")
 		if err := store.loadPolicyDocs(ctx, newCache.iamPolicyDocsMap); err != nil {
 			return err
 		}

@@ -564,29 +599,29 @@ func (store *IAMStoreSys) LoadIAMCache(ctx context.Context, firstTime bool) error {
 		setDefaultCannedPolicies(newCache.iamPolicyDocsMap)

 		if store.getUsersSysType() == MinIOUsersSysType {
-			bootstrapTraceMsg("loading regular users")
+			bootstrapTraceMsgFirstTime("loading regular users")
 			if err := store.loadUsers(ctx, regUser, newCache.iamUsersMap); err != nil {
 				return err
 			}
-			bootstrapTraceMsg("loading regular groups")
+			bootstrapTraceMsgFirstTime("loading regular groups")
 			if err := store.loadGroups(ctx, newCache.iamGroupsMap); err != nil {
 				return err
 			}
 		}

-		bootstrapTraceMsg("loading user policy mapping")
+		bootstrapTraceMsgFirstTime("loading user policy mapping")
 		// load polices mapped to users
 		if err := store.loadMappedPolicies(ctx, regUser, false, newCache.iamUserPolicyMap); err != nil {
 			return err
 		}

-		bootstrapTraceMsg("loading group policy mapping")
+		bootstrapTraceMsgFirstTime("loading group policy mapping")
 		// load policies mapped to groups
 		if err := store.loadMappedPolicies(ctx, regUser, true, newCache.iamGroupPolicyMap); err != nil {
 			return err
 		}

-		bootstrapTraceMsg("loading service accounts")
+		bootstrapTraceMsgFirstTime("loading service accounts")
 		// load service accounts
 		if err := store.loadUsers(ctx, svcUser, newCache.iamUsersMap); err != nil {
 			return err

@@ -937,12 +972,7 @@ func (store *IAMStoreSys) GetGroupDescription(group string) (gd madmin.GroupDesc, err error) {
 	}, nil
 }

-// ListGroups - lists groups. Since this is not going to be a frequent
-// operation, we fetch this info from storage, and refresh the cache as well.
-func (store *IAMStoreSys) ListGroups(ctx context.Context) (res []string, err error) {
-	cache := store.lock()
-	defer store.unlock()
-
+func (store *IAMStoreSys) updateGroups(ctx context.Context, cache *iamCache) (res []string, err error) {
 	if store.getUsersSysType() == MinIOUsersSysType {
 		m := map[string]GroupInfo{}
 		err = store.loadGroups(ctx, m)

@@ -970,7 +1000,16 @@ func (store *IAMStoreSys) updateGroups(ctx context.Context, cache *iamCache) (res []string, err error) {
 		})
 	}

-	return
+	return res, nil
+}
+
+// ListGroups - lists groups. Since this is not going to be a frequent
+// operation, we fetch this info from storage, and refresh the cache as well.
+func (store *IAMStoreSys) ListGroups(ctx context.Context) (res []string, err error) {
+	cache := store.lock()
+	defer store.unlock()
+
+	return store.updateGroups(ctx, cache)
 }

 // listGroups - lists groups - fetch groups from cache
@@ -1445,16 +1484,51 @@ func filterPolicies(cache *iamCache, policyName string, bucketName string) (string, policy.Policy) {
 	return strings.Join(policies, ","), policy.MergePolicies(toMerge...)
 }

-// FilterPolicies - accepts a comma separated list of policy names as a string
-// and bucket and returns only policies that currently exist in MinIO. If
-// bucketName is non-empty, additionally filters policies matching the bucket.
-// The first returned value is the list of currently existing policies, and the
-// second is their combined policy definition.
-func (store *IAMStoreSys) FilterPolicies(policyName string, bucketName string) (string, policy.Policy) {
-	cache := store.rlock()
-	defer store.runlock()
+// MergePolicies - accepts a comma separated list of policy names as a string
+// and returns only policies that currently exist in MinIO. It includes hot loading
+// of policies if not in the memory
+func (store *IAMStoreSys) MergePolicies(policyName string) (string, policy.Policy) {
+	var policies []string
+	var missingPolicies []string
+	var toMerge []policy.Policy

-	return filterPolicies(cache, policyName, bucketName)
+	cache := store.rlock()
+	for _, policy := range newMappedPolicy(policyName).toSlice() {
+		if policy == "" {
+			continue
+		}
+		p, found := cache.iamPolicyDocsMap[policy]
+		if !found {
+			missingPolicies = append(missingPolicies, policy)
+			continue
+		}
+		policies = append(policies, policy)
+		toMerge = append(toMerge, p.Policy)
+	}
+	store.runlock()
+
+	if len(missingPolicies) > 0 {
+		m := make(map[string]PolicyDoc)
+		for _, policy := range missingPolicies {
+			ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+			_ = store.loadPolicyDoc(ctx, policy, m)
+			cancel()
+		}
+
+		cache := store.lock()
+		for policy, p := range m {
+			cache.iamPolicyDocsMap[policy] = p
+		}
+		store.unlock()
+
+		for policy, p := range m {
+			policies = append(policies, policy)
+			toMerge = append(toMerge, p.Policy)
+		}
+	}
+
+	return strings.Join(policies, ","), policy.MergePolicies(toMerge...)
 }

 // GetBucketUsers - returns users (not STS or service accounts) that have access
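MergePolicies drops the bucket filter that FilterPolicies carried and adds hot loading: a read-locked pass collects the known policy docs, then any misses are fetched from the backend and folded into the cache under the write lock. The same read-then-backfill shape in miniature, with a generic cache and a hypothetical loader:

```go
// Sketch: read-locked lookup with a write-locked backfill for cache misses,
// the pattern MergePolicies follows. fetch is a hypothetical backend loader.
package main

import (
	"fmt"
	"sync"
)

type cache struct {
	mu   sync.RWMutex
	docs map[string]string
}

func (c *cache) get(names []string, fetch func(string) string) []string {
	var found, missing []string
	c.mu.RLock()
	for _, n := range names {
		if d, ok := c.docs[n]; ok {
			found = append(found, d)
		} else {
			missing = append(missing, n)
		}
	}
	c.mu.RUnlock()

	if len(missing) > 0 {
		m := make(map[string]string)
		for _, n := range missing {
			m[n] = fetch(n) // backend load happens outside the cache lock
		}
		c.mu.Lock()
		for n, d := range m {
			c.docs[n] = d
			found = append(found, d)
		}
		c.mu.Unlock()
	}
	return found
}

func main() {
	c := &cache{docs: map[string]string{"readwrite": "{...}"}}
	out := c.get([]string{"readwrite", "custom"}, func(n string) string { return "doc:" + n })
	fmt.Println(out)
}
```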
@@ -1907,6 +1981,11 @@ func (store *IAMStoreSys) GetAllParentUsers() map[string]ParentUserInfo {
 	cache := store.rlock()
 	defer store.runlock()

+	return store.getParentUsers(cache)
+}
+
+// assumes store is locked by caller.
+func (store *IAMStoreSys) getParentUsers(cache *iamCache) map[string]ParentUserInfo {
 	res := map[string]ParentUserInfo{}
 	for _, ui := range cache.iamUsersMap {
 		cred := ui.Credentials

@@ -1977,6 +2056,38 @@ func (store *IAMStoreSys) GetAllParentUsers() map[string]ParentUserInfo {
 	return res
 }

+// GetAllSTSUserMappings - Loads all STS user policy mappings from storage and
+// returns them. Also gets any STS users that do not have policy mappings but have
+// Service Accounts or STS keys (This is useful if the user is part of a group)
+func (store *IAMStoreSys) GetAllSTSUserMappings(userPredicate func(string) bool) (map[string]string, error) {
+	cache := store.rlock()
+	defer store.runlock()
+
+	stsMap := make(map[string]string)
+	m := xsync.NewMapOf[string, MappedPolicy]()
+	if err := store.loadMappedPolicies(context.Background(), stsUser, false, m); err != nil {
+		return nil, err
+	}
+
+	m.Range(func(user string, mappedPolicy MappedPolicy) bool {
+		if userPredicate != nil && !userPredicate(user) {
+			return true
+		}
+		stsMap[user] = mappedPolicy.Policies
+		return true
+	})
+
+	for user := range store.getParentUsers(cache) {
+		if _, ok := stsMap[user]; !ok {
+			if userPredicate != nil && !userPredicate(user) {
+				continue
+			}
+			stsMap[user] = ""
+		}
+	}
+	return stsMap, nil
+}
+
 // Assumes store is locked by caller. If users is empty, returns all user mappings.
 func (store *IAMStoreSys) listUserPolicyMappings(cache *iamCache, users []string,
 	userPredicate func(string) bool,

@@ -2638,6 +2749,18 @@ func (store *IAMStoreSys) LoadUser(ctx context.Context, accessKey string) error {
 			}
 		}

+		load := len(cache.iamGroupsMap) == 0
+		if store.getUsersSysType() == LDAPUsersSysType && cache.iamGroupPolicyMap.Size() == 0 {
+			load = true
+		}
+		if load {
+			if _, err = store.updateGroups(ctx, cache); err != nil {
+				return "done", err
+			}
+		}
+
+		cache.buildUserGroupMemberships()
+
 		return "done", err
 	})
cmd/iam.go (18 changes)
@@ -436,7 +436,7 @@ func (sys *IAMSys) validateAndAddRolePolicyMappings(ctx context.Context, m map[arn.ARN]string) {
 	// running server by creating the policies after start up.
 	for arn, rolePolicies := range m {
 		specifiedPoliciesSet := newMappedPolicy(rolePolicies).policySet()
-		validPolicies, _ := sys.store.FilterPolicies(rolePolicies, "")
+		validPolicies, _ := sys.store.MergePolicies(rolePolicies)
 		knownPoliciesSet := newMappedPolicy(validPolicies).policySet()
 		unknownPoliciesSet := specifiedPoliciesSet.Difference(knownPoliciesSet)
 		if len(unknownPoliciesSet) > 0 {

@@ -672,7 +672,7 @@ func (sys *IAMSys) CurrentPolicies(policyName string) string {
 		return ""
 	}

-	policies, _ := sys.store.FilterPolicies(policyName, "")
+	policies, _ := sys.store.MergePolicies(policyName)
 	return policies
 }

@@ -786,11 +786,15 @@ func (sys *IAMSys) ListLDAPUsers(ctx context.Context) (map[string]madmin.UserInfo, error) {

 	select {
 	case <-sys.configLoaded:
-		ldapUsers := make(map[string]madmin.UserInfo)
-		for user, policy := range sys.store.GetUsersWithMappedPolicies() {
+		stsMap, err := sys.store.GetAllSTSUserMappings(sys.LDAPConfig.IsLDAPUserDN)
+		if err != nil {
+			return nil, err
+		}
+		ldapUsers := make(map[string]madmin.UserInfo, len(stsMap))
+		for user, policy := range stsMap {
 			ldapUsers[user] = madmin.UserInfo{
 				PolicyName: policy,
-				Status:     madmin.AccountEnabled,
+				Status:     statusEnabled,
 			}
 		}
 		return ldapUsers, nil

@@ -2118,7 +2122,7 @@ func (sys *IAMSys) IsAllowedServiceAccount(args policy.Args, parentUser string) bool {
 	var combinedPolicy policy.Policy
 	// Policies were found, evaluate all of them.
 	if !isOwnerDerived {
-		availablePoliciesStr, c := sys.store.FilterPolicies(strings.Join(svcPolicies, ","), "")
+		availablePoliciesStr, c := sys.store.MergePolicies(strings.Join(svcPolicies, ","))
 		if availablePoliciesStr == "" {
 			return false
 		}

@@ -2346,7 +2350,7 @@ func isAllowedBySessionPolicy(args policy.Args) (hasSessionPolicy bool, isAllowed bool) {

 // GetCombinedPolicy returns a combined policy combining all policies
 func (sys *IAMSys) GetCombinedPolicy(policies ...string) policy.Policy {
-	_, policy := sys.store.FilterPolicies(strings.Join(policies, ","), "")
+	_, policy := sys.store.MergePolicies(strings.Join(policies, ","))
 	return policy
 }
@@ -897,7 +897,7 @@ func serverMain(ctx *cli.Context) {
 		})
 	}

-	if !globalDisableFreezeOnBoot {
+	if globalEnableSyncBoot {
 		// Freeze the services until the bucket notification subsystem gets initialized.
 		bootstrapTrace("freezeServices", freezeServices)
 	}

@@ -1000,10 +1000,11 @@ func serverMain(ctx *cli.Context) {
 	}()

 	go func() {
-		if !globalDisableFreezeOnBoot {
+		if globalEnableSyncBoot {
 			defer bootstrapTrace("unfreezeServices", unfreezeServices)
 			t := time.AfterFunc(5*time.Minute, func() {
-				warnings = append(warnings, color.YellowBold("- Initializing the config subsystem is taking longer than 5 minutes. Please set '_MINIO_DISABLE_API_FREEZE_ON_BOOT=true' to not freeze the APIs"))
+				warnings = append(warnings,
+					color.YellowBold("- Initializing the config subsystem is taking longer than 5 minutes. Please remove 'MINIO_SYNC_BOOT=on' to not freeze the APIs"))
 			})
 			defer t.Stop()
 		}
@@ -194,9 +194,12 @@ func (s *TestSuiteIAM) SFTPInvalidServiceAccountPassword(c *check) {
 		c.Fatalf("Unable to set user: %v", err)
 	}

-	err = s.adm.SetPolicy(ctx, "readwrite", accessKey, false)
-	if err != nil {
-		c.Fatalf("unable to set policy: %v", err)
+	userReq := madmin.PolicyAssociationReq{
+		Policies: []string{"readwrite"},
+		User:     accessKey,
+	}
+	if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil {
+		c.Fatalf("Unable to attach policy: %v", err)
 	}

 	newSSHCon := newSSHConnMock(accessKey + "=svc")

@@ -222,9 +225,12 @@ func (s *TestSuiteIAM) SFTPServiceAccountLogin(c *check) {
 		c.Fatalf("Unable to set user: %v", err)
 	}

-	err = s.adm.SetPolicy(ctx, "readwrite", accessKey, false)
-	if err != nil {
-		c.Fatalf("unable to set policy: %v", err)
+	userReq := madmin.PolicyAssociationReq{
+		Policies: []string{"readwrite"},
+		User:     accessKey,
+	}
+	if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil {
+		c.Fatalf("Unable to attach policy: %v", err)
 	}

 	newSSHCon := newSSHConnMock(accessKey + "=svc")

@@ -270,9 +276,12 @@ func (s *TestSuiteIAM) SFTPValidLDAPLoginWithPassword(c *check) {
 	}

 	userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
-	err = s.adm.SetPolicy(ctx, policy, userDN, false)
-	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+	userReq := madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     userDN,
+	}
+	if _, err := s.adm.AttachPolicy(ctx, userReq); err != nil {
+		c.Fatalf("Unable to attach policy: %v", err)
 	}

 	newSSHCon := newSSHConnMock("dillon=ldap")
@ -116,9 +116,12 @@ func (s *TestSuiteIAM) TestSTSServiceAccountsWithUsername(c *check) {
 		c.Fatalf("policy add error: %v", err)
 	}

-	err = s.adm.SetPolicy(ctx, policy, "dillon", false)
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     "dillon",
+	})
 	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+		c.Fatalf("Unable to attach policy: %v", err)
 	}

 	assumeRole := cr.STSAssumeRole{
@ -231,9 +234,12 @@ func (s *TestSuiteIAM) TestSTSWithDenyDeleteVersion(c *check) {
 		c.Fatalf("Unable to set user: %v", err)
 	}

-	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     accessKey,
+	})
 	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+		c.Fatalf("Unable to attach policy: %v", err)
 	}

 	// confirm that the user is able to access the bucket
@ -332,9 +338,12 @@ func (s *TestSuiteIAM) TestSTSWithTags(c *check) {
 		c.Fatalf("Unable to set user: %v", err)
 	}

-	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     accessKey,
+	})
 	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+		c.Fatalf("Unable to attach policy: %v", err)
 	}

 	// confirm that the user is able to access the bucket
@ -420,9 +429,12 @@ func (s *TestSuiteIAM) TestSTS(c *check) {
 		c.Fatalf("Unable to set user: %v", err)
 	}

-	err = s.adm.SetPolicy(ctx, policy, accessKey, false)
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     accessKey,
+	})
 	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+		c.Fatalf("Unable to attach policy: %v", err)
 	}

 	// confirm that the user is able to access the bucket
@ -515,9 +527,12 @@ func (s *TestSuiteIAM) TestSTSWithGroupPolicy(c *check) {
 		c.Fatalf("unable to add user to group: %v", err)
 	}

-	err = s.adm.SetPolicy(ctx, policy, "test-group", true)
+	_, err = s.adm.AttachPolicy(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		Group:    "test-group",
+	})
 	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+		c.Fatalf("Unable to attach policy: %v", err)
 	}

 	// confirm that the user is able to access the bucket - permission comes
@ -984,6 +999,7 @@ func (s *TestSuiteIAM) TestIAMExport(c *check, caseNum int, content iamTestConte
 	}

 	for userDN, policies := range content.ldapUserPolicyMappings {
+		// No need to detach, we are starting from a clean slate after exporting.
 		_, err := s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
 			Policies: policies,
 			User:     userDN,
@ -1194,14 +1210,21 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) {

 	// Attempting to set a non-existent policy should fail.
 	userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
-	err = s.adm.SetPolicy(ctx, policy+"x", userDN, false)
+	_, err = s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
+		Policies: []string{policy + "x"},
+		User:     userDN,
+	})
 	if err == nil {
-		c.Fatalf("should not be able to set non-existent policy")
+		c.Fatalf("should not be able to attach non-existent policy")
 	}

-	err = s.adm.SetPolicy(ctx, policy, userDN, false)
-	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+	userReq := madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     userDN,
+	}
+
+	if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to attach user policy: %v", err)
 	}

 	value, err := ldapID.Retrieve()
@ -1240,10 +1263,8 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) {
 		c.Fatalf("unexpected non-access-denied err: %v", err)
 	}

-	// Remove the policy assignment on the user DN:
-	err = s.adm.SetPolicy(ctx, "", userDN, false)
-	if err != nil {
-		c.Fatalf("Unable to remove policy setting: %v", err)
+	if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to detach user policy: %v", err)
 	}

 	_, err = ldapID.Retrieve()
@ -1253,9 +1274,13 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) {

 	// Set policy via group and validate policy assignment.
 	groupDN := "cn=projectb,ou=groups,ou=swengg,dc=min,dc=io"
-	err = s.adm.SetPolicy(ctx, policy, groupDN, true)
-	if err != nil {
-		c.Fatalf("Unable to set group policy: %v", err)
+	groupReq := madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		Group:    groupDN,
+	}
+
+	if _, err = s.adm.AttachPolicyLDAP(ctx, groupReq); err != nil {
+		c.Fatalf("Unable to attach group policy: %v", err)
 	}

 	value, err = ldapID.Retrieve()
@ -1278,6 +1303,10 @@ func (s *TestSuiteIAM) TestLDAPSTS(c *check) {
 	// Validate that the client cannot remove any objects
 	err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{})
 	c.Assert(err.Error(), "Access Denied.")
+
+	if _, err = s.adm.DetachPolicyLDAP(ctx, groupReq); err != nil {
+		c.Fatalf("Unable to detach group policy: %v", err)
+	}
 }

 func (s *TestSuiteIAM) TestLDAPUnicodeVariationsLegacyAPI(c *check) {
@ -1490,12 +1519,13 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariations(c *check) {
 	// \uFE52 is the unicode dot SMALL FULL STOP used below:
 	userDNWithUnicodeDot := "uid=svc﹒algorithm,OU=swengg,DC=min,DC=io"

-	_, err = s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
+	userReq := madmin.PolicyAssociationReq{
 		Policies: []string{policy},
 		User:     userDNWithUnicodeDot,
-	})
-	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+	}
+
+	if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to attach user policy: %v", err)
 	}

 	value, err := ldapID.Retrieve()
@ -1534,12 +1564,9 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariations(c *check) {
 	}

 	// Remove the policy assignment on the user DN:
-	_, err = s.adm.DetachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
-		Policies: []string{policy},
-		User:     userDNWithUnicodeDot,
-	})
-	if err != nil {
-		c.Fatalf("Unable to remove policy setting: %v", err)
+	if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to detach user policy: %v", err)
 	}

 	_, err = ldapID.Retrieve()
@ -1550,11 +1577,12 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariations(c *check) {
 	// Set policy via group and validate policy assignment.
 	actualGroupDN := mustNormalizeDN("cn=project.c,ou=groups,ou=swengg,dc=min,dc=io")
 	groupDNWithUnicodeDot := "cn=project﹒c,ou=groups,ou=swengg,dc=min,dc=io"
-	_, err = s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
+	groupReq := madmin.PolicyAssociationReq{
 		Policies: []string{policy},
 		Group:    groupDNWithUnicodeDot,
-	})
-	if err != nil {
+	}
+
+	if _, err = s.adm.AttachPolicyLDAP(ctx, groupReq); err != nil {
+		c.Fatalf("Unable to attach group policy: %v", err)
 	}
@ -1594,6 +1622,10 @@ func (s *TestSuiteIAM) TestLDAPUnicodeVariations(c *check) {
 	// Validate that the client cannot remove any objects
 	err = minioClient.RemoveObject(ctx, bucket, "someobject", minio.RemoveObjectOptions{})
 	c.Assert(err.Error(), "Access Denied.")
+
+	if _, err = s.adm.DetachPolicyLDAP(ctx, groupReq); err != nil {
+		c.Fatalf("Unable to detach group policy: %v", err)
+	}
 }

 func (s *TestSuiteIAM) TestLDAPSTSServiceAccounts(c *check) {
@ -1630,9 +1662,13 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccounts(c *check) {
 	}

 	userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
-	err = s.adm.SetPolicy(ctx, policy, userDN, false)
-	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+	userReq := madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     userDN,
+	}
+
+	if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to attach user policy: %v", err)
 	}

 	ldapID := cr.LDAPIdentity{
@ -1687,6 +1723,11 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccounts(c *check) {

 	// 6. Check that service account cannot be created for some other user.
 	c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
+
+	// Detach the policy from the user
+	if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to detach user policy: %v", err)
+	}
 }

 func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithUsername(c *check) {
@ -1707,12 +1748,12 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithUsername(c *check) {
 		{
 			"Effect": "Allow",
 			"Action": [
-				"s3:PutObject",
-				"s3:GetObject",
-				"s3:ListBucket"
+			"s3:PutObject",
+			"s3:GetObject",
+			"s3:ListBucket"
 			],
 			"Resource": [
-				"arn:aws:s3:::${ldap:username}/*"
+			"arn:aws:s3:::${ldap:username}/*"
 			]
 		}
 	]
@ -1723,9 +1764,14 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithUsername(c *check) {
 	}

 	userDN := "uid=dillon,ou=people,ou=swengg,dc=min,dc=io"
-	err = s.adm.SetPolicy(ctx, policy, userDN, false)
-	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
-	}
+
+	userReq := madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		User:     userDN,
+	}
+
+	if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to attach user policy: %v", err)
+	}

 	ldapID := cr.LDAPIdentity{
@ -1776,6 +1822,10 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithUsername(c *check) {

 	// 3. Check S3 access for download
 	c.mustDownload(ctx, svcClient, bucket)
+
+	if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to detach user policy: %v", err)
+	}
 }

 // In this test, the parent users gets their permissions from a group, rather
@ -1814,9 +1864,13 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithGroups(c *check) {
 	}

 	groupDN := "cn=projecta,ou=groups,ou=swengg,dc=min,dc=io"
-	err = s.adm.SetPolicy(ctx, policy, groupDN, true)
-	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
+	userReq := madmin.PolicyAssociationReq{
+		Policies: []string{policy},
+		Group:    groupDN,
+	}
+
+	if _, err = s.adm.AttachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to attach user policy: %v", err)
 	}

 	ldapID := cr.LDAPIdentity{
@ -1871,18 +1925,24 @@ func (s *TestSuiteIAM) TestLDAPSTSServiceAccountsWithGroups(c *check) {

 	// 6. Check that service account cannot be created for some other user.
 	c.mustNotCreateSvcAccount(ctx, globalActiveCred.AccessKey, userAdmClient)
+
+	// Detach the user policy
+	if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to detach user policy: %v", err)
+	}
 }

 func (s *TestSuiteIAM) TestLDAPCyrillicUser(c *check) {
 	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
 	defer cancel()

-	_, err := s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
+	userReq := madmin.PolicyAssociationReq{
 		Policies: []string{"readwrite"},
 		User:     "uid=Пользователь,ou=people,ou=swengg,dc=min,dc=io",
-	})
-	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
-	}
+	}
+
+	if _, err := s.adm.AttachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to attach user policy: %v", err)
+	}

 	cases := []struct {
@ -1940,6 +2000,10 @@ func (s *TestSuiteIAM) TestLDAPCyrillicUser(c *check) {
 			c.Fatalf("Test %d: unexpected dn claim: %s", i+1, dnClaim)
 		}
 	}
+
+	if _, err = s.adm.DetachPolicyLDAP(ctx, userReq); err != nil {
+		c.Fatalf("Unable to detach user policy: %v", err)
+	}
 }

 func (s *TestSuiteIAM) TestLDAPAttributesLookup(c *check) {
@ -1947,12 +2011,13 @@ func (s *TestSuiteIAM) TestLDAPAttributesLookup(c *check) {
 	defer cancel()

 	groupDN := "cn=projectb,ou=groups,ou=swengg,dc=min,dc=io"
-	_, err := s.adm.AttachPolicyLDAP(ctx, madmin.PolicyAssociationReq{
+	groupReq := madmin.PolicyAssociationReq{
 		Policies: []string{"readwrite"},
 		Group:    groupDN,
-	})
-	if err != nil {
-		c.Fatalf("Unable to set policy: %v", err)
-	}
+	}
+
+	if _, err := s.adm.AttachPolicyLDAP(ctx, groupReq); err != nil {
+		c.Fatalf("Unable to attach user policy: %v", err)
+	}

 	cases := []struct {
@ -2025,6 +2090,10 @@ func (s *TestSuiteIAM) TestLDAPAttributesLookup(c *check) {
 			c.Fatalf("Test %d: unexpected sshPublicKey type: %s", i+1, parts[0])
 		}
 	}
+
+	if _, err = s.adm.DetachPolicyLDAP(ctx, groupReq); err != nil {
+		c.Fatalf("Unable to detach group policy: %v", err)
+	}
 }

 func (s *TestSuiteIAM) TestOpenIDSTS(c *check) {
@ -43,8 +43,8 @@ unset MINIO_KMS_KES_KEY_FILE
 unset MINIO_KMS_KES_ENDPOINT
 unset MINIO_KMS_KES_KEY_NAME

-wget -q -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
-	chmod +x mc
+go install -v github.com/minio/mc@master
+cp -a $(go env GOPATH)/bin/mc ./mc

 if [ ! -f mc.RELEASE.2021-03-12T03-36-59Z ]; then
 	wget -q -O mc.RELEASE.2021-03-12T03-36-59Z https://dl.minio.io/client/mc/release/linux-amd64/archive/mc.RELEASE.2021-03-12T03-36-59Z &&
@ -74,7 +74,7 @@ pools:
 - Each pool expects a minimum of 2 nodes per pool, and unique non-repeating hosts for each argument.
 - Each pool expects each host in this pool has the same number of drives specified as any other host.
 - Mixing `local-path` and `distributed-path` is not allowed, doing so would cause MinIO to refuse starting the server.
-- Ellipses notation (e.g. `{1...10}`) or bracket notations are fully allowed (e.g. `{a,c,f}`) to have multiple entries in one line.
+- Ellipses and bracket notation (e.g. `{1...10}`) are allowed.

 > NOTE: MinIO environmental variables still take precedence over the `config.yaml` file, however `config.yaml` is preferred over MinIO internal config KV settings via `mc admin config set alias/ <sub-system>`.
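For illustration only, a `pools:` entry combining both notations might look like the sketch below; the host names and drive paths are placeholders, and the schema documented in this file remains the authoritative reference:

```
pools:
  -
    - 'http://server{1...4}-pool1:9000/mnt/disk{1...4}/'
  -
    - 'http://server{a,c,f}-pool2:9000/mnt/disk{1...4}/'
```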
@ -88,3 +88,4 @@ In subsequent releases we are planning to extend this to provide things like
 and decommissioning to provide a functionality that smaller deployments
 care about.

+- Fully allow bracket notation (e.g. `{a,c,f}`) to have multiple entries on one line.
@ -22,6 +22,12 @@ export MINIO_CI_CD=1
 if [ ! -f ./mc ]; then
+	os="$(uname -s)"
+	arch="$(uname -m)"
+	case "${arch}" in
+	"x86_64")
+		arch="amd64"
+		;;
+	esac
+
 	wget -O mc https://dl.minio.io/client/mc/release/${os,,}-${arch,,}/mc &&
 		chmod +x mc
 fi
@ -8,10 +8,8 @@ pkill minio
 pkill kes
 rm -rf /tmp/xl

-if [ ! -f ./mc ]; then
-	wget --quiet -O mc https://dl.minio.io/client/mc/release/linux-amd64/mc &&
-		chmod +x mc
-fi
+go install -v github.com/minio/mc@master
+cp -a $(go env GOPATH)/bin/mc ./mc

 if [ ! -f ./kes ]; then
 	wget --quiet -O kes https://github.com/minio/kes/releases/latest/download/kes-linux-amd64 &&
@ -39,37 +37,37 @@ export MC_HOST_myminio="http://minioadmin:minioadmin@localhost:9000/"
 (minio server http://localhost:9000/tmp/xl/{1...10}/disk{0...1} 2>&1 >/dev/null) &
 pid=$!

-./mc ready myminio
+mc ready myminio

-./mc admin user add myminio/ minio123 minio123
+mc admin user add myminio/ minio123 minio123

-./mc admin policy create myminio/ deny-non-sse-kms-pol ./docs/iam/policies/deny-non-sse-kms-objects.json
-./mc admin policy create myminio/ deny-invalid-sse-kms-pol ./docs/iam/policies/deny-objects-with-invalid-sse-kms-key-id.json
+mc admin policy create myminio/ deny-non-sse-kms-pol ./docs/iam/policies/deny-non-sse-kms-objects.json
+mc admin policy create myminio/ deny-invalid-sse-kms-pol ./docs/iam/policies/deny-objects-with-invalid-sse-kms-key-id.json

-./mc admin policy attach myminio deny-non-sse-kms-pol --user minio123
-./mc admin policy attach myminio deny-invalid-sse-kms-pol --user minio123
-./mc admin policy attach myminio consoleAdmin --user minio123
+mc admin policy attach myminio deny-non-sse-kms-pol --user minio123
+mc admin policy attach myminio deny-invalid-sse-kms-pol --user minio123
+mc admin policy attach myminio consoleAdmin --user minio123

-./mc mb -l myminio/test-bucket
-./mc mb -l myminio/multi-key-poc
+mc mb -l myminio/test-bucket
+mc mb -l myminio/multi-key-poc

 export MC_HOST_myminio1="http://minio123:minio123@localhost:9000/"

-./mc cp /etc/issue myminio1/test-bucket
+mc cp /etc/issue myminio1/test-bucket
 ret=$?
 if [ $ret -ne 0 ]; then
 	echo "BUG: PutObject to bucket: test-bucket should succeed. Failed"
 	exit 1
 fi

-./mc cp /etc/issue myminio1/multi-key-poc | grep -q "Insufficient permissions to access this path"
+mc cp /etc/issue myminio1/multi-key-poc | grep -q "Insufficient permissions to access this path"
 ret=$?
 if [ $ret -eq 0 ]; then
 	echo "BUG: PutObject to bucket: multi-key-poc without sse-kms should fail. Succedded"
 	exit 1
 fi

-./mc cp /etc/hosts myminio1/multi-key-poc/hosts --enc-kms "myminio1/multi-key-poc/hosts=minio-default-key"
+mc cp /etc/hosts myminio1/multi-key-poc/hosts --enc-kms "myminio1/multi-key-poc/hosts=minio-default-key"
 ret=$?
 if [ $ret -ne 0 ]; then
 	echo "BUG: PutObject to bucket: multi-key-poc with valid sse-kms should succeed. Failed"
@ -2,7 +2,7 @@ version: '3.7'

 # Settings and configurations that are common for all containers
 x-minio-common: &minio-common
-  image: quay.io/minio/minio:RELEASE.2024-06-22T05-26-45Z
+  image: quay.io/minio/minio:RELEASE.2024-06-28T09-06-49Z
   command: server --console-address ":9001" http://minio{1...4}/data{1...2}
   expose:
     - "9000"
docs/tuning/README.md (new file, 26 lines)
@ -0,0 +1,26 @@
# How to enable 'minio' performance profile with tuned?

## Prerequisites

Please make sure the following packages are already installed via `dnf` or `apt`:

- `tuned`
- `curl`

### Install `tuned.conf` performance profile

#### Step 1 - download `tuned.conf` from the referenced link

```
wget https://raw.githubusercontent.com/minio/minio/master/docs/tuning/tuned.conf
```

#### Step 2 - install tuned.conf as supported performance profile on all nodes

```
sudo mkdir -p /usr/lib/tuned/minio/
sudo mv tuned.conf /usr/lib/tuned/minio
```

#### Step 3 - to enable minio performance profile on all the nodes

```
sudo tuned-adm profile minio
```
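To verify the profile took effect after Step 3, something like the following should work on a standard `tuned` install (output will vary per system):

```
tuned-adm active
sudo sysctl vm.swappiness net.core.somaxconn
```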
docs/tuning/tuned.conf (new file, 81 lines)
@ -0,0 +1,81 @@
[main]
summary=Maximum server performance for MinIO

[vm]
transparent_hugepage=madvise
transparent_hugepage.defrag=defer+madvise
transparent_hugepage.khugepaged.max_ptes_none=0

[cpu]
force_latency=1
governor=performance
energy_perf_bias=performance
min_perf_pct=100

[sysctl]
fs.xfs.xfssyncd_centisecs=72000
net.core.busy_read=50
net.core.busy_poll=50
kernel.numa_balancing=1

# Do not use swap at all
vm.swappiness=0
vm.vfs_cache_pressure=50

# Start writeback at 3% memory
vm.dirty_background_ratio=3
# Force writeback at 10% memory
vm.dirty_ratio=10

# Quite a few memory map
# areas may be consumed
vm.max_map_count=524288

# Default is 500000 = 0.5ms
kernel.sched_migration_cost_ns=5000000

# stalled hdd io threads
kernel.hung_task_timeout_secs=85

# network tuning for bigger throughput
net.core.netdev_max_backlog=250000
net.core.somaxconn=16384
net.ipv4.tcp_syncookies=0
net.ipv4.tcp_max_syn_backlog=16384
net.core.wmem_max=4194304
net.core.rmem_max=4194304
net.core.rmem_default=4194304
net.core.wmem_default=4194304
net.ipv4.tcp_rmem="4096 87380 4194304"
net.ipv4.tcp_wmem="4096 65536 4194304"

# Reduce CPU utilization
net.ipv4.tcp_timestamps=0

# Increase throughput
net.ipv4.tcp_sack=1

# Low latency mode for TCP
net.ipv4.tcp_low_latency=1

# The following variable is used to tell the kernel how
# much of the socket buffer space should be used for TCP
# window size, and how much to save for an application buffer.
net.ipv4.tcp_adv_win_scale=1

# disable RFC2861 behavior
net.ipv4.tcp_slow_start_after_idle = 0

# Fix faulty network setups
net.ipv4.tcp_mtu_probing=1
net.ipv4.tcp_base_mss=1280

# Disable ipv6
net.ipv6.conf.all.disable_ipv6=1
net.ipv6.conf.default.disable_ipv6=1
net.ipv6.conf.lo.disable_ipv6=1

[bootloader]
# Avoid firing timers for all CPUs at the same time. This is irrelevant for
# full nohz systems
cmdline=skew_tick=1
go.mod (4 changed lines)
@ -45,14 +45,14 @@ require (
 	github.com/lithammer/shortuuid/v4 v4.0.0
 	github.com/miekg/dns v1.1.59
 	github.com/minio/cli v1.24.2
-	github.com/minio/console v1.6.0
+	github.com/minio/console v1.6.1
 	github.com/minio/csvparser v1.0.0
 	github.com/minio/dnscache v0.1.1
 	github.com/minio/dperf v0.5.3
 	github.com/minio/highwayhash v1.0.2
 	github.com/minio/kms-go/kes v0.3.0
 	github.com/minio/kms-go/kms v0.4.0
-	github.com/minio/madmin-go/v3 v3.0.55
+	github.com/minio/madmin-go/v3 v3.0.57
 	github.com/minio/minio-go/v7 v7.0.72-0.20240610154810-fa174cbf14b0
 	github.com/minio/mux v1.9.0
 	github.com/minio/pkg/v3 v3.0.2
go.sum (8 changed lines)
@ -440,8 +440,8 @@ github.com/minio/cli v1.24.2 h1:J+fCUh9mhPLjN3Lj/YhklXvxj8mnyE/D6FpFduXJ2jg=
 github.com/minio/cli v1.24.2/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY=
 github.com/minio/colorjson v1.0.8 h1:AS6gEQ1dTRYHmC4xuoodPDRILHP/9Wz5wYUGDQfPLpg=
 github.com/minio/colorjson v1.0.8/go.mod h1:wrs39G/4kqNlGjwqHvPlAnXuc2tlPszo6JKdSBCLN8w=
-github.com/minio/console v1.6.0 h1:G3mjhGV2Pox1Sqjwp/jRbRY7WiKsVyCLaZkxoIOaMCU=
-github.com/minio/console v1.6.0/go.mod h1:XJ3HKHmigs1MgjaNjUwpyuOAJnwqlSMB+QnZCZ+BROY=
+github.com/minio/console v1.6.1 h1:/rlXITBdZeDcX33PCjhEdA2vufeMMFMufj1XVukbu+0=
+github.com/minio/console v1.6.1/go.mod h1:XJ3HKHmigs1MgjaNjUwpyuOAJnwqlSMB+QnZCZ+BROY=
 github.com/minio/csvparser v1.0.0 h1:xJEHcYK8ZAjeW4hNV9Zu30u+/2o4UyPnYgyjWp8b7ZU=
 github.com/minio/csvparser v1.0.0/go.mod h1:lKXskSLzPgC5WQyzP7maKH7Sl1cqvANXo9YCto8zbtM=
 github.com/minio/dnscache v0.1.1 h1:AMYLqomzskpORiUA1ciN9k7bZT1oB3YZN4cEIi88W5o=

@ -456,8 +456,8 @@ github.com/minio/kms-go/kes v0.3.0 h1:SU8VGVM/Hk9w1OiSby3OatkcojooUqIdDHl6dtM6Nk
 github.com/minio/kms-go/kes v0.3.0/go.mod h1:w6DeVT878qEOU3nUrYVy1WOT5H1Ig9hbDIh698NYJKY=
 github.com/minio/kms-go/kms v0.4.0 h1:cLPZceEp+05xHotVBaeFJrgL7JcXM4lBy6PU0idkE7I=
 github.com/minio/kms-go/kms v0.4.0/go.mod h1:q12CehiIy2qgBnDKq6Q7wmPi2PHSyRVug5DKp0HAVeE=
-github.com/minio/madmin-go/v3 v3.0.55 h1:Vm5AWS0kFoWwoJX4epskjVwmmS64xMNORMZaGR3cbK8=
-github.com/minio/madmin-go/v3 v3.0.55/go.mod h1:IFAwr0XMrdsLovxAdCcuq/eoL4nRuMVQQv0iubJANQw=
+github.com/minio/madmin-go/v3 v3.0.57 h1:fXoOnYP8/k9x0MWWowXkAQWYu59hongieCcT3urUaAQ=
+github.com/minio/madmin-go/v3 v3.0.57/go.mod h1:IFAwr0XMrdsLovxAdCcuq/eoL4nRuMVQQv0iubJANQw=
 github.com/minio/mc v0.0.0-20240612143403-e7c9a733c680 h1:Ns5mhSm86qJx6a9GJ1kzHkZMjRMZrQGsptakVRmq4QA=
 github.com/minio/mc v0.0.0-20240612143403-e7c9a733c680/go.mod h1:21/cb+wUd+lLRsdX7ACqyO8DzPNSpXftp1bOkQlIbh8=
 github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=
@ -22,23 +22,23 @@ import (
 	"time"
 )

-var errInvalidDaysDelMarkerExpiration = Errorf("Days must be a positive integer with DelMarkerExpiration")
+var errInvalidDaysDeletedObjExpiration = Errorf("Days must be a positive integer with DeletedObjectExpiration")

-// DelMarkerExpiration used to xml encode/decode ILM action by the same name
-type DelMarkerExpiration struct {
-	XMLName xml.Name `xml:"DelMarkerExpiration"`
+// DeletedObjectExpiration used to xml encode/decode ILM action by the same name
+type DeletedObjectExpiration struct {
+	XMLName xml.Name `xml:"DeletedObjectExpiration"`
 	Days    int      `xml:"Days,omitempty"`
 }

-// Empty returns if a DelMarkerExpiration XML element is empty.
-// Used to detect if lifecycle.Rule contained a DelMarkerExpiration element.
-func (de DelMarkerExpiration) Empty() bool {
+// Empty returns if a DeletedObjectExpiration XML element is empty.
+// Used to detect if lifecycle.Rule contained a DeletedObjectExpiration element.
+func (de DeletedObjectExpiration) Empty() bool {
 	return de.Days == 0
 }

-// UnmarshalXML decodes a single XML element into a DelMarkerExpiration value
-func (de *DelMarkerExpiration) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error {
-	type delMarkerExpiration DelMarkerExpiration
+// UnmarshalXML decodes a single XML element into a DeletedObjectExpiration value
+func (de *DeletedObjectExpiration) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error {
+	type delMarkerExpiration DeletedObjectExpiration
 	var dexp delMarkerExpiration
 	err := dec.DecodeElement(&dexp, &start)
 	if err != nil {

@ -46,26 +46,26 @@ func (de *DelMarkerExpiration) UnmarshalXML(dec *xml.Decoder, start xml.StartEle
 	}

 	if dexp.Days <= 0 {
-		return errInvalidDaysDelMarkerExpiration
+		return errInvalidDaysDeletedObjExpiration
 	}

-	*de = DelMarkerExpiration(dexp)
+	*de = DeletedObjectExpiration(dexp)
 	return nil
 }

-// MarshalXML encodes a DelMarkerExpiration value into an XML element
-func (de DelMarkerExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
+// MarshalXML encodes a DeletedObjectExpiration value into an XML element
+func (de DeletedObjectExpiration) MarshalXML(enc *xml.Encoder, start xml.StartElement) error {
 	if de.Empty() {
 		return nil
 	}

-	type delMarkerExpiration DelMarkerExpiration
+	type delMarkerExpiration DeletedObjectExpiration
 	return enc.EncodeElement(delMarkerExpiration(de), start)
 }

-// NextDue returns upcoming DelMarkerExpiration date for obj if
+// NextDue returns upcoming DeletedObjectExpiration date for obj if
 // applicable, returns false otherwise.
-func (de DelMarkerExpiration) NextDue(obj ObjectOpts) (time.Time, bool) {
+func (de DeletedObjectExpiration) NextDue(obj ObjectOpts) (time.Time, bool) {
 	if !obj.IsLatest || !obj.DeleteMarker {
 		return time.Time{}, false
 	}
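Worth noting for reviewers: the `type delMarkerExpiration DeletedObjectExpiration` local alias keeps the fields but drops the method set, which is the standard trick for calling `DecodeElement` inside a custom `UnmarshalXML` without infinite recursion. A self-contained sketch of the pattern (the names here are illustrative, not the package's):

```
package main

import (
	"encoding/xml"
	"errors"
	"fmt"
)

// Expiry mirrors the shape of DeletedObjectExpiration for illustration.
type Expiry struct {
	XMLName xml.Name `xml:"Expiry"`
	Days    int      `xml:"Days,omitempty"`
}

// UnmarshalXML validates Days while decoding. The local alias type keeps
// the fields but drops the method set, so DecodeElement falls back to the
// default struct decoding instead of recursing into this method forever.
func (e *Expiry) UnmarshalXML(dec *xml.Decoder, start xml.StartElement) error {
	type plain Expiry
	var p plain
	if err := dec.DecodeElement(&p, &start); err != nil {
		return err
	}
	if p.Days <= 0 {
		return errors.New("Days must be a positive integer")
	}
	*e = Expiry(p)
	return nil
}

func main() {
	var e Expiry
	err := xml.Unmarshal([]byte(`<Expiry><Days>5</Days></Expiry>`), &e)
	fmt.Println(e.Days, err) // prints: 5 <nil>
}
```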
@ -23,24 +23,24 @@ import (
 	"testing"
 )

-func TestDelMarkerExpParseAndValidate(t *testing.T) {
+func TestDelObjExpParseAndValidate(t *testing.T) {
 	tests := []struct {
 		xml string
 		err error
 	}{
 		{
-			xml: `<DelMarkerExpiration> <Days> 1 </Days> </DelMarkerExpiration>`,
+			xml: `<DeletedObjectExpiration> <Days> 1 </Days> </DeletedObjectExpiration>`,
 			err: nil,
 		},
 		{
-			xml: `<DelMarkerExpiration> <Days> -1 </Days> </DelMarkerExpiration>`,
-			err: errInvalidDaysDelMarkerExpiration,
+			xml: `<DeletedObjectExpiration> <Days> -1 </Days> </DeletedObjectExpiration>`,
+			err: errInvalidDaysDeletedObjExpiration,
 		},
 	}

 	for i, test := range tests {
 		t.Run(fmt.Sprintf("TestDelMarker-%d", i), func(t *testing.T) {
-			var dexp DelMarkerExpiration
+			var dexp DeletedObjectExpiration
 			var fail bool
 			err := xml.Unmarshal([]byte(test.xml), &dexp)
 			if test.err == nil {
@ -390,9 +390,9 @@ func (lc Lifecycle) eval(obj ObjectOpts, now time.Time) Event {
 		}
 	}

-	// DelMarkerExpiration
-	if obj.IsLatest && obj.DeleteMarker && !rule.DelMarkerExpiration.Empty() {
-		if due, ok := rule.DelMarkerExpiration.NextDue(obj); ok && (now.IsZero() || now.After(due)) {
+	// DeletedObjectExpiration
+	if obj.IsLatest && obj.DeleteMarker && !rule.DeletedObjectExpiration.Empty() {
+		if due, ok := rule.DeletedObjectExpiration.NextDue(obj); ok && (now.IsZero() || now.After(due)) {
 			events = append(events, Event{
 				Action: DelMarkerDeleteAllVersionsAction,
 				RuleID: rule.ID,

@ -401,7 +401,7 @@ func (lc Lifecycle) eval(obj ObjectOpts, now time.Time) Event {
 			}
 			// No other conflicting actions in this rule can apply to an object with current version as DEL marker
 			// Note: There could be other rules with earlier expiration which need to be considered.
-			// See TestDelMarkerExpiration
+			// See TestDeletedObjectExpiration
 			continue
 		}
@ -168,14 +168,14 @@ func TestParseAndValidateLifecycleConfig(t *testing.T) {
 		},
 		// Lifecycle with delmarker expiration
 		{
-			inputConfig:           `<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ID>rule</ID><Status>Enabled</Status><Filter></Filter><DelMarkerExpiration><Days>5</Days></DelMarkerExpiration></Rule></LifecycleConfiguration>`,
+			inputConfig:           `<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ID>rule</ID><Status>Enabled</Status><Filter></Filter><DeletedObjectExpiration><Days>5</Days></DeletedObjectExpiration></Rule></LifecycleConfiguration>`,
 			expectedParsingErr:    nil,
 			expectedValidationErr: nil,
 		},
 		// Lifecycle with empty delmarker expiration
 		{
-			inputConfig:           `<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ID>rule</ID><Status>Enabled</Status><Filter></Filter><DelMarkerExpiration><Days></Days></DelMarkerExpiration></Rule></LifecycleConfiguration>`,
-			expectedParsingErr:    errInvalidDaysDelMarkerExpiration,
+			inputConfig:           `<LifecycleConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"><Rule><ID>rule</ID><Status>Enabled</Status><Filter></Filter><DeletedObjectExpiration><Days></Days></DeletedObjectExpiration></Rule></LifecycleConfiguration>`,
+			expectedParsingErr:    errInvalidDaysDeletedObjExpiration,
 			expectedValidationErr: nil,
 		},
 	}
@ -656,15 +656,15 @@ func TestEval(t *testing.T) {
 			expectedAction: DeleteVersionAction,
 		},
 		{
-			// DelMarkerExpiration is preferred since object age is past both transition and expiration days.
+			// DeletedObjectExpiration is preferred since object age is past both transition and expiration days.
 			inputConfig: `<LifecycleConfiguration>
 			<Rule>
-			<ID>DelMarkerExpiration with Transition</ID>
+			<ID>DeletedObjectExpiration with Transition</ID>
 			<Filter></Filter>
 			<Status>Enabled</Status>
-			<DelMarkerExpiration>
+			<DeletedObjectExpiration>
 			<Days>60</Days>
-			</DelMarkerExpiration>
+			</DeletedObjectExpiration>
 			<Transition>
 			<StorageClass>WARM-1</StorageClass>
 			<Days>30</Days>
@ -677,16 +677,16 @@ func TestEval(t *testing.T) {
 			expectedAction: DelMarkerDeleteAllVersionsAction,
 		},
 		{
-			// NoneAction since object doesn't qualify for DelMarkerExpiration yet.
+			// NoneAction since object doesn't qualify for DeletedObjectExpiration yet.
 			// Note: TransitionAction doesn't apply to DEL marker
 			inputConfig: `<LifecycleConfiguration>
 			<Rule>
-			<ID>DelMarkerExpiration with Transition</ID>
+			<ID>DeletedObjectExpiration with Transition</ID>
 			<Filter></Filter>
 			<Status>Enabled</Status>
-			<DelMarkerExpiration>
+			<DeletedObjectExpiration>
 			<Days>60</Days>
-			</DelMarkerExpiration>
+			</DeletedObjectExpiration>
 			<Transition>
 			<StorageClass>WARM-1</StorageClass>
 			<Days>30</Days>
@ -701,12 +701,12 @@ func TestEval(t *testing.T) {
 		{
 			inputConfig: `<LifecycleConfiguration>
 			<Rule>
-			<ID>DelMarkerExpiration with non DEL-marker object</ID>
+			<ID>DeletedObjectExpiration with non DEL-marker object</ID>
 			<Filter></Filter>
 			<Status>Enabled</Status>
-			<DelMarkerExpiration>
+			<DeletedObjectExpiration>
 			<Days>60</Days>
-			</DelMarkerExpiration>
+			</DeletedObjectExpiration>
 			</Rule>
 			</LifecycleConfiguration>`,
 			objectName: "obj-1",
@ -716,12 +716,12 @@ func TestEval(t *testing.T) {
 		{
 			inputConfig: `<LifecycleConfiguration>
 			<Rule>
-			<ID>DelMarkerExpiration with noncurrent DEL-marker</ID>
+			<ID>DeletedObjectExpiration with noncurrent DEL-marker</ID>
 			<Filter></Filter>
 			<Status>Enabled</Status>
-			<DelMarkerExpiration>
+			<DeletedObjectExpiration>
 			<Days>60</Days>
-			</DelMarkerExpiration>
+			</DeletedObjectExpiration>
 			</Rule>
 			</LifecycleConfiguration>`,
 			objectName: "obj-1",
@ -1428,7 +1428,7 @@ func TestFilterRules(t *testing.T) {

 // TestDeleteAllVersions tests ordering among events, especially ones which
 // expire all versions like ExpiredObjectDeleteAllVersions and
-// DelMarkerExpiration
+// DeletedObjectExpiration
 func TestDeleteAllVersions(t *testing.T) {
 	// ExpiredObjectDeleteAllVersions
 	lc := Lifecycle{
@ -1471,20 +1471,20 @@ func TestDeleteAllVersions(t *testing.T) {
 		t.Fatalf("Expected due %v but got %v, ruleID=%v", exp, event.Due, event.RuleID)
 	}

-	// DelMarkerExpiration
+	// DeletedObjectExpiration
 	lc = Lifecycle{
 		Rules: []Rule{
 			{
 				ID:     "delmarker-exp-20",
 				Status: "Enabled",
-				DelMarkerExpiration: DelMarkerExpiration{
+				DeletedObjectExpiration: DeletedObjectExpiration{
 					Days: 20,
 				},
 			},
 			{
 				ID:     "delmarker-exp-10",
 				Status: "Enabled",
-				DelMarkerExpiration: DelMarkerExpiration{
+				DeletedObjectExpiration: DeletedObjectExpiration{
 					Days: 10,
 				},
 			},
@ -33,24 +33,24 @@ const (

 // Rule - a rule for lifecycle configuration.
 type Rule struct {
-	XMLName             xml.Name            `xml:"Rule"`
-	ID                  string              `xml:"ID,omitempty"`
-	Status              Status              `xml:"Status"`
-	Filter              Filter              `xml:"Filter,omitempty"`
-	Prefix              Prefix              `xml:"Prefix,omitempty"`
-	Expiration          Expiration          `xml:"Expiration,omitempty"`
-	Transition          Transition          `xml:"Transition,omitempty"`
-	DelMarkerExpiration DelMarkerExpiration `xml:"DelMarkerExpiration,omitempty"`
+	XMLName                 xml.Name                `xml:"Rule"`
+	ID                      string                  `xml:"ID,omitempty"`
+	Status                  Status                  `xml:"Status"`
+	Filter                  Filter                  `xml:"Filter,omitempty"`
+	Prefix                  Prefix                  `xml:"Prefix,omitempty"`
+	Expiration              Expiration              `xml:"Expiration,omitempty"`
+	Transition              Transition              `xml:"Transition,omitempty"`
+	DeletedObjectExpiration DeletedObjectExpiration `xml:"DeletedObjectExpiration,omitempty"`
 	// FIXME: add a type to catch unsupported AbortIncompleteMultipartUpload AbortIncompleteMultipartUpload `xml:"AbortIncompleteMultipartUpload,omitempty"`
 	NoncurrentVersionExpiration NoncurrentVersionExpiration `xml:"NoncurrentVersionExpiration,omitempty"`
 	NoncurrentVersionTransition NoncurrentVersionTransition `xml:"NoncurrentVersionTransition,omitempty"`
 }

 var (
-	errInvalidRuleID                  = Errorf("ID length is limited to 255 characters")
-	errEmptyRuleStatus                = Errorf("Status should not be empty")
-	errInvalidRuleStatus              = Errorf("Status must be set to either Enabled or Disabled")
-	errInvalidRuleDelMarkerExpiration = Errorf("Rule with DelMarkerExpiration cannot have tags based filtering")
+	errInvalidRuleID               = Errorf("ID length is limited to 255 characters")
+	errEmptyRuleStatus             = Errorf("Status should not be empty")
+	errInvalidRuleStatus           = Errorf("Status must be set to either Enabled or Disabled")
+	errInvalidRuleDelObjExpiration = Errorf("Rule with DeletedObjectExpiration cannot have tags based filtering")
 )

 // validateID - checks if ID is valid or not.
@ -160,10 +160,10 @@ func (r Rule) Validate() error {
 	if err := r.validateNoncurrentTransition(); err != nil {
 		return err
 	}
-	if (!r.Filter.Tag.IsEmpty() || len(r.Filter.And.Tags) != 0) && !r.DelMarkerExpiration.Empty() {
-		return errInvalidRuleDelMarkerExpiration
+	if (!r.Filter.Tag.IsEmpty() || len(r.Filter.And.Tags) != 0) && !r.DeletedObjectExpiration.Empty() {
+		return errInvalidRuleDelObjExpiration
 	}
-	if !r.Expiration.set && !r.Transition.set && !r.NoncurrentVersionExpiration.set && !r.NoncurrentVersionTransition.set && r.DelMarkerExpiration.Empty() {
+	if !r.Expiration.set && !r.Transition.set && !r.NoncurrentVersionExpiration.set && !r.NoncurrentVersionTransition.set && r.DeletedObjectExpiration.Empty() {
 		return errXMLNotWellFormed
 	}
 	return nil
@ -107,28 +107,28 @@ func TestInvalidRules(t *testing.T) {
 		},
 		{
 			inputXML: `<Rule>
-			<ID>Rule with a tag and DelMarkerExpiration</ID>
+			<ID>Rule with a tag and DeletedObjectExpiration</ID>
 			<Filter><Tag><Key>k1</Key><Value>v1</Value></Tag></Filter>
-			<DelMarkerExpiration>
+			<DeletedObjectExpiration>
 			<Days>365</Days>
-			</DelMarkerExpiration>
+			</DeletedObjectExpiration>
 			<Status>Enabled</Status>
 			</Rule>`,
-			expectedErr: errInvalidRuleDelMarkerExpiration,
+			expectedErr: errInvalidRuleDelObjExpiration,
 		},
 		{
 			inputXML: `<Rule>
-			<ID>Rule with multiple tags and DelMarkerExpiration</ID>
+			<ID>Rule with multiple tags and DeletedObjectExpiration</ID>
 			<Filter><And>
 			<Tag><Key>k1</Key><Value>v1</Value></Tag>
 			<Tag><Key>k2</Key><Value>v2</Value></Tag>
 			</And></Filter>
-			<DelMarkerExpiration>
+			<DeletedObjectExpiration>
 			<Days>365</Days>
-			</DelMarkerExpiration>
+			</DeletedObjectExpiration>
 			<Status>Enabled</Status>
 			</Rule>`,
-			expectedErr: errInvalidRuleDelMarkerExpiration,
+			expectedErr: errInvalidRuleDelObjExpiration,
 		},
 	}
@ -63,7 +63,7 @@ const (
 	ObjectManyVersions
 	ObjectLargeVersions
 	PrefixManyFolders
-	ILMDelMarkerExpirationDelete
+	ILMDelObjExpirationDelete

 	objectSingleTypesEnd
 	// Start Compound types that require expansion:

@ -200,8 +200,8 @@ func (name Name) String() string {
 		return "s3:ObjectRemoved:NoOP"
 	case ObjectRemovedDeleteAllVersions:
 		return "s3:ObjectRemoved:DeleteAllVersions"
-	case ILMDelMarkerExpirationDelete:
-		return "s3:LifecycleDelMarkerExpiration:Delete"
+	case ILMDelObjExpirationDelete:
+		return "s3:LifecycleDeletedObjectExpiration:Delete"
 	case ObjectReplicationAll:
 		return "s3:Replication:*"
 	case ObjectReplicationFailed:

@ -327,8 +327,8 @@ func ParseName(s string) (Name, error) {
 		return ObjectRemovedNoOP, nil
 	case "s3:ObjectRemoved:DeleteAllVersions":
 		return ObjectRemovedDeleteAllVersions, nil
-	case "s3:LifecycleDelMarkerExpiration:Delete":
-		return ILMDelMarkerExpirationDelete, nil
+	case "s3:LifecycleDeletedObjectExpiration:Delete":
+		return ILMDelObjExpirationDelete, nil
 	case "s3:Replication:*":
 		return ObjectReplicationAll, nil
 	case "s3:Replication:OperationFailedReplication":
@ -69,7 +69,7 @@ func TestNameString(t *testing.T) {
 		{ObjectRemovedAll, "s3:ObjectRemoved:*"},
 		{ObjectRemovedDelete, "s3:ObjectRemoved:Delete"},
 		{ObjectRemovedDeleteAllVersions, "s3:ObjectRemoved:DeleteAllVersions"},
-		{ILMDelMarkerExpirationDelete, "s3:LifecycleDelMarkerExpiration:Delete"},
+		{ILMDelObjExpirationDelete, "s3:LifecycleDeletedObjectExpiration:Delete"},
 		{ObjectRemovedNoOP, "s3:ObjectRemoved:NoOP"},
 		{ObjectCreatedPutRetention, "s3:ObjectCreated:PutRetention"},
 		{ObjectCreatedPutLegalHold, "s3:ObjectCreated:PutLegalHold"},

@ -221,7 +221,7 @@ func TestParseName(t *testing.T) {
 		{"s3:ObjectAccessed:*", ObjectAccessedAll, false},
 		{"s3:ObjectRemoved:Delete", ObjectRemovedDelete, false},
 		{"s3:ObjectRemoved:NoOP", ObjectRemovedNoOP, false},
-		{"s3:LifecycleDelMarkerExpiration:Delete", ILMDelMarkerExpirationDelete, false},
+		{"s3:LifecycleDeletedObjectExpiration:Delete", ILMDelObjExpirationDelete, false},
 		{"", blankName, true},
 	}
@ -90,13 +90,14 @@ type Config struct {
 // buffer is full, new logs are just ignored and an error
 // is returned to the caller.
 type Target struct {
-	totalMessages  int64
-	failedMessages int64
-	status         int32
+	totalMessages  atomic.Int64
+	failedMessages atomic.Int64
+	status         atomic.Int32

 	// Worker control
-	workers    int64
+	workers    atomic.Int64
 	maxWorkers int64
+
 	// workerStartMu sync.Mutex
 	lastStarted time.Time
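This file's change is a mechanical migration from `sync/atomic` free functions on plain integer fields to the typed `atomic.Int64`/`atomic.Int32` wrappers added in Go 1.19, which cannot be accessed non-atomically by accident and carry no 64-bit alignment caveats on 32-bit platforms. A minimal sketch of the typed API:

```
package main

import (
	"fmt"
	"sync/atomic"
)

type counters struct {
	total  atomic.Int64 // zero value is ready to use; no &field plumbing
	status atomic.Int32
}

func main() {
	var c counters
	c.total.Add(1)
	c.status.Store(2)
	fmt.Println(c.total.Load(), c.status.Load()) // prints: 1 2
}
```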
@ -157,7 +158,7 @@ func (h *Target) String() string {

 // IsOnline returns true if the target is reachable using a cached value
 func (h *Target) IsOnline(ctx context.Context) bool {
-	return atomic.LoadInt32(&h.status) == statusOnline
+	return h.status.Load() == statusOnline
 }

 // Stats returns the target statistics.
@ -166,8 +167,8 @@ func (h *Target) Stats() types.TargetStats {
 	queueLength := len(h.logCh)
 	h.logChMu.RUnlock()
 	stats := types.TargetStats{
-		TotalMessages:  atomic.LoadInt64(&h.totalMessages),
-		FailedMessages: atomic.LoadInt64(&h.failedMessages),
+		TotalMessages:  h.totalMessages.Load(),
+		FailedMessages: h.failedMessages.Load(),
 		QueueLength:    queueLength,
 	}
@ -221,9 +222,9 @@ func (h *Target) initMemoryStore(ctx context.Context) (err error) {
 func (h *Target) send(ctx context.Context, payload []byte, payloadType string, timeout time.Duration) (err error) {
 	defer func() {
 		if err != nil {
-			atomic.StoreInt32(&h.status, statusOffline)
+			h.status.Store(statusOffline)
 		} else {
-			atomic.StoreInt32(&h.status, statusOnline)
+			h.status.Store(statusOnline)
 		}
 	}()
@ -275,8 +276,8 @@ func (h *Target) startQueueProcessor(ctx context.Context, mainWorker bool) {
 	}
 	h.logChMu.RUnlock()

-	atomic.AddInt64(&h.workers, 1)
-	defer atomic.AddInt64(&h.workers, -1)
+	h.workers.Add(1)
+	defer h.workers.Add(-1)

 	h.wg.Add(1)
 	defer h.wg.Done()
@ -353,7 +354,7 @@ func (h *Target) startQueueProcessor(ctx context.Context, mainWorker bool) {
 	}

 	if !isTick {
-		atomic.AddInt64(&h.totalMessages, 1)
+		h.totalMessages.Add(1)

 		if !isDirQueue {
 			if err := enc.Encode(&entry); err != nil {
@ -362,7 +363,7 @@ func (h *Target) startQueueProcessor(ctx context.Context, mainWorker bool) {
 					fmt.Errorf("unable to encode webhook log entry, err '%w' entry: %v\n", err, entry),
 					h.Name(),
 				)
-				atomic.AddInt64(&h.failedMessages, 1)
+				h.failedMessages.Add(1)
 				continue
 			}
 		}
@ -395,7 +396,7 @@ func (h *Target) startQueueProcessor(ctx context.Context, mainWorker bool) {
 		// and when it's been at least 30 seconds since
 		// we launched a new worker.
 		if mainWorker && len(h.logCh) > cap(h.logCh)/2 {
-			nWorkers := atomic.LoadInt64(&h.workers)
+			nWorkers := h.workers.Load()
 			if nWorkers < h.maxWorkers {
 				if time.Since(h.lastStarted).Milliseconds() > 10 {
 					h.lastStarted = time.Now()
@ -493,10 +494,10 @@ func New(config Config) (*Target, error) {
 	h := &Target{
 		logCh:      make(chan interface{}, config.QueueSize),
 		config:     config,
-		status:     statusOffline,
 		batchSize:  config.BatchSize,
 		maxWorkers: int64(maxWorkers),
 	}
+	h.status.Store(statusOffline)

 	if config.BatchSize > 1 {
 		h.payloadType = ""
@ -528,10 +529,17 @@ func (h *Target) SendFromStore(key store.Key) (err error) {
 		return err
 	}

+	h.failedMessages.Add(1)
+	defer func() {
+		if err == nil {
+			h.failedMessages.Add(-1)
+		}
+	}()
+
 	if err := h.send(context.Background(), eventData, h.payloadType, webhookCallTimeout); err != nil {
-		atomic.AddInt64(&h.failedMessages, 1)
 		return err
 	}

 	// Delete the event from store.
 	return h.store.Del(key.Name)
 }
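The rewritten `SendFromStore` counts the message as failed up front and cancels that count only when the whole function (the send plus the store deletion) succeeds, so every early error return is covered without per-branch bookkeeping. The shape of that pattern, sketched with placeholder names:

```
package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

// attempt records a provisional failure before doing the work and cancels
// it only if the whole operation succeeds, so every early error return is
// counted without touching each branch.
func attempt(failed *atomic.Int64, doWork func() error) (err error) {
	failed.Add(1)
	defer func() {
		if err == nil {
			failed.Add(-1)
		}
	}()
	return doWork()
}

func main() {
	var failed atomic.Int64
	_ = attempt(&failed, func() error { return nil })
	_ = attempt(&failed, func() error { return errors.New("send failed") })
	fmt.Println(failed.Load()) // prints: 1
}
```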
@ -540,7 +548,7 @@ func (h *Target) SendFromStore(key store.Key) (err error) {
 // Messages are queued in the disk if the store is enabled
 // If Cancel has been called the message is ignored.
 func (h *Target) Send(ctx context.Context, entry interface{}) error {
-	if atomic.LoadInt32(&h.status) == statusClosed {
+	if h.status.Load() == statusClosed {
 		if h.migrateTarget != nil {
 			return h.migrateTarget.Send(ctx, entry)
 		}
@ -557,7 +565,7 @@ func (h *Target) Send(ctx context.Context, entry interface{}) error {
 retry:
 	select {
 	case h.logCh <- entry:
-		atomic.AddInt64(&h.totalMessages, 1)
+		h.totalMessages.Add(1)
 	case <-ctx.Done():
 		// return error only for context timedout.
 		if errors.Is(ctx.Err(), context.DeadlineExceeded) {
@ -565,11 +573,14 @@ retry:
 		}
 		return nil
 	default:
-		if h.workers < h.maxWorkers {
+		nWorkers := h.workers.Load()
+		if nWorkers < h.maxWorkers {
 			// Just sleep to avoid any possible hot-loops.
 			time.Sleep(50 * time.Millisecond)
 			goto retry
 		}
-		atomic.AddInt64(&h.totalMessages, 1)
-		atomic.AddInt64(&h.failedMessages, 1)
+		h.totalMessages.Add(1)
+		h.failedMessages.Add(1)
 		return errors.New("log buffer full")
 	}
@ -580,7 +591,7 @@ retry:
 // All queued messages are flushed and the function returns afterwards.
 // All messages sent to the target after this function has been called will be dropped.
 func (h *Target) Cancel() {
-	atomic.StoreInt32(&h.status, statusClosed)
+	h.status.Store(statusClosed)
 	h.storeCtxCancel()

 	// Wait for messages to be sent...