diff --git a/cmd/admin-handlers_test.go b/cmd/admin-handlers_test.go index 9f57aec97..787529bd7 100644 --- a/cmd/admin-handlers_test.go +++ b/cmd/admin-handlers_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -176,6 +176,11 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) { // Init global heal state initAllHealState(globalIsXL) + globalNotificationSys, err = NewNotificationSys(globalServerConfig, globalEndpoints) + if err != nil { + return nil, err + } + // Setup admin mgmt REST API handlers. adminRouter := router.NewRouter() registerAdminRouter(adminRouter) diff --git a/cmd/admin-rpc-client.go b/cmd/admin-rpc-client.go index ef8b1b831..b6c68b525 100644 --- a/cmd/admin-rpc-client.go +++ b/cmd/admin-rpc-client.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2014, 2015, 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2014, 2015, 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -139,11 +139,6 @@ func (lc localAdminClient) ServerInfoData() (sid ServerInfoData, e error) { } storage := objLayer.StorageInfo() - var arns []string - for queueArn := range globalEventNotifier.GetAllExternalTargets() { - arns = append(arns, queueArn) - } - return ServerInfoData{ StorageInfo: storage, ConnStats: globalConnStats.toServerConnStats(), @@ -152,7 +147,7 @@ func (lc localAdminClient) ServerInfoData() (sid ServerInfoData, e error) { Uptime: UTCNow().Sub(globalBootTime), Version: Version, CommitID: CommitID, - SQSARN: arns, + SQSARN: globalNotificationSys.GetARNList(), Region: globalServerConfig.GetRegion(), }, }, nil diff --git a/cmd/admin-rpc-server.go b/cmd/admin-rpc-server.go index 2c1e2b5b9..d8b238aae 100644 --- a/cmd/admin-rpc-server.go +++ b/cmd/admin-rpc-server.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -120,18 +120,13 @@ func (s *adminCmd) ServerInfoData(args *AuthRPCArgs, reply *ServerInfoDataReply) } storageInfo := objLayer.StorageInfo() - var arns []string - for queueArn := range globalEventNotifier.GetAllExternalTargets() { - arns = append(arns, queueArn) - } - reply.ServerInfoData = ServerInfoData{ Properties: ServerProperties{ Uptime: UTCNow().Sub(globalBootTime), Version: Version, CommitID: CommitID, Region: globalServerConfig.GetRegion(), - SQSARN: arns, + SQSARN: globalNotificationSys.GetARNList(), }, StorageInfo: storageInfo, ConnStats: globalConnStats.toServerConnStats(), diff --git a/cmd/api-errors.go b/cmd/api-errors.go index bf598f4f6..a8ff1953c 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -23,6 +23,7 @@ import ( "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/errors" + "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/hash" ) @@ -950,6 +951,28 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) { apiErr = ErrPartsSizeUnequal case BucketPolicyNotFound: apiErr = ErrNoSuchBucketPolicy + case *event.ErrInvalidEventName: + apiErr = ErrEventNotification + case *event.ErrInvalidARN: + apiErr = ErrARNNotification + case *event.ErrARNNotFound: + apiErr = ErrARNNotification + case *event.ErrUnknownRegion: + apiErr = ErrRegionNotification + case *event.ErrInvalidFilterName: + apiErr = ErrFilterNameInvalid + case *event.ErrFilterNamePrefix: + apiErr = ErrFilterNamePrefix + case *event.ErrFilterNameSuffix: + apiErr = ErrFilterNameSuffix + case *event.ErrInvalidFilterValue: + apiErr = ErrFilterValueInvalid + case *event.ErrDuplicateEventName: + apiErr = ErrOverlappingConfigs + case *event.ErrDuplicateQueueConfiguration: + apiErr = ErrOverlappingFilterNotification + case *event.ErrUnsupportedConfiguration: + apiErr = ErrUnsupportedNotification default: apiErr = ErrInternalError } diff --git a/cmd/api-resources.go b/cmd/api-resources.go index aa5dfa183..fce7a517c 100644 --- a/cmd/api-resources.go +++ b/cmd/api-resources.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -79,14 +79,6 @@ func getObjectResources(values url.Values) (uploadID string, partNumberMarker, m return } -// Parse listen bucket notification resources. 
-func getListenBucketNotificationResources(values url.Values) (prefix []string, suffix []string, events []string) { - prefix = values["prefix"] - suffix = values["suffix"] - events = values["events"] - return prefix, suffix, events -} - // Validates filter values func validateFilterValues(values []string) (err APIErrorCode) { for _, value := range values { diff --git a/cmd/api-response.go b/cmd/api-response.go index 95c01982a..5ae58e5ad 100644 --- a/cmd/api-response.go +++ b/cmd/api-response.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -27,7 +27,6 @@ import ( ) const ( - timeFormatAMZ = "2006-01-02T15:04:05Z" // Reply date format timeFormatAMZLong = "2006-01-02T15:04:05.000Z" // Reply date format with nanosecond precision. maxObjectList = 1000 // Limit number of objects in a listObjectsResponse. maxUploadsList = 1000 // Limit number of uploads in a listUploadsResponse. diff --git a/cmd/browser-peer-rpc.go b/cmd/browser-peer-rpc.go index 1a3f7d363..febf59028 100644 --- a/cmd/browser-peer-rpc.go +++ b/cmd/browser-peer-rpc.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -71,11 +71,7 @@ func (br *browserPeerAPIHandlers) SetAuthPeer(args SetAuthPeerArgs, reply *AuthR // Sends SetAuthPeer RPCs to all peers in the Minio cluster func updateCredsOnPeers(creds auth.Credentials) map[string]error { - // Get list of peer addresses (from globalS3Peers) - peers := []string{} - for _, p := range globalS3Peers { - peers = append(peers, p.addr) - } + peers := GetRemotePeers(globalEndpoints) // Array of errors for each peer errs := make([]error, len(peers)) diff --git a/cmd/bucket-handlers.go b/cmd/bucket-handlers.go index 1022d9979..f38ca5276 100644 --- a/cmd/bucket-handlers.go +++ b/cmd/bucket-handlers.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -33,6 +33,7 @@ import ( "github.com/minio/minio-go/pkg/policy" "github.com/minio/minio-go/pkg/set" "github.com/minio/minio/pkg/errors" + "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/hash" ) @@ -356,10 +357,10 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter, // Notify deleted event for objects. for _, dobj := range deletedObjects { - eventNotify(eventData{ - Type: ObjectRemovedDelete, - Bucket: bucket, - ObjInfo: ObjectInfo{ + sendEvent(eventArgs{ + EventName: event.ObjectRemovedDelete, + BucketName: bucket, + Object: ObjectInfo{ Name: dobj.ObjectName, }, ReqParams: extractReqParams(r), @@ -606,14 +607,14 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h } // Notify object created event. 
- defer eventNotify(eventData{ - Type: ObjectCreatedPost, - Bucket: objInfo.Bucket, - ObjInfo: objInfo, - ReqParams: extractReqParams(r), - UserAgent: r.UserAgent(), - Host: host, - Port: port, + defer sendEvent(eventArgs{ + EventName: event.ObjectCreatedPost, + BucketName: objInfo.Bucket, + Object: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: host, + Port: port, }) if successRedirect != "" { @@ -692,6 +693,16 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http. return } + // Notify all peers (including self) to update in-memory state + for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) { + errorIf(err, "unable to update policy change in remote peer %v", addr) + } + + globalNotificationSys.RemoveNotification(bucket) + for addr, err := range globalNotificationSys.DeleteBucket(bucket) { + errorIf(err, "unable to delete bucket in remote peer %v", addr) + } + // Write success response. writeSuccessNoContent(w) } diff --git a/cmd/bucket-handlers_test.go b/cmd/bucket-handlers_test.go index c1be89bc8..586b1c586 100644 --- a/cmd/bucket-handlers_test.go +++ b/cmd/bucket-handlers_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -617,11 +617,6 @@ func testAPIDeleteMultipleObjectsHandler(obj ObjectLayer, instanceType, bucketNa credentials auth.Credentials, t *testing.T) { var err error - // register event notifier. 
- err = initEventNotifier(obj) - if err != nil { - t.Fatal("Notifier initialization failed.") - } contentBytes := []byte("hello") sha256sum := "" diff --git a/cmd/bucket-metadata.go b/cmd/bucket-metadata.go deleted file mode 100644 index 452d473de..000000000 --- a/cmd/bucket-metadata.go +++ /dev/null @@ -1,126 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -// BucketMetaState - Interface to update bucket metadata in-memory -// state. -type BucketMetaState interface { - // Updates bucket notification - UpdateBucketNotification(args *SetBucketNotificationPeerArgs) error - - // Updates bucket listener - UpdateBucketListener(args *SetBucketListenerPeerArgs) error - - // Updates bucket policy - UpdateBucketPolicy(args *SetBucketPolicyPeerArgs) error - - // Sends event - SendEvent(args *EventArgs) error -} - -// BucketUpdater - Interface implementer calls one of BucketMetaState's methods. -type BucketUpdater interface { - BucketUpdate(client BucketMetaState) error -} - -// Type that implements BucketMetaState for local node. -type localBucketMetaState struct { - ObjectAPI func() ObjectLayer -} - -// localBucketMetaState.UpdateBucketNotification - updates in-memory global bucket -// notification info. -func (lc *localBucketMetaState) UpdateBucketNotification(args *SetBucketNotificationPeerArgs) error { - // check if object layer is available. 
- objAPI := lc.ObjectAPI() - if objAPI == nil { - return errServerNotInitialized - } - - globalEventNotifier.SetBucketNotificationConfig(args.Bucket, args.NCfg) - - return nil -} - -// localBucketMetaState.UpdateBucketListener - updates in-memory global bucket -// listeners info. -func (lc *localBucketMetaState) UpdateBucketListener(args *SetBucketListenerPeerArgs) error { - // check if object layer is available. - objAPI := lc.ObjectAPI() - if objAPI == nil { - return errServerNotInitialized - } - - // Update in-memory notification config. - return globalEventNotifier.SetBucketListenerConfig(args.Bucket, args.LCfg) -} - -// localBucketMetaState.UpdateBucketPolicy - updates in-memory global bucket -// policy info. -func (lc *localBucketMetaState) UpdateBucketPolicy(args *SetBucketPolicyPeerArgs) error { - // check if object layer is available. - objAPI := lc.ObjectAPI() - if objAPI == nil { - return errServerNotInitialized - } - return objAPI.RefreshBucketPolicy(args.Bucket) -} - -// localBucketMetaState.SendEvent - sends event to local event notifier via -// `globalEventNotifier` -func (lc *localBucketMetaState) SendEvent(args *EventArgs) error { - // check if object layer is available. - objAPI := lc.ObjectAPI() - if objAPI == nil { - return errServerNotInitialized - } - - return globalEventNotifier.SendListenerEvent(args.Arn, args.Event) -} - -// Type that implements BucketMetaState for remote node. -type remoteBucketMetaState struct { - *AuthRPCClient -} - -// remoteBucketMetaState.UpdateBucketNotification - sends bucket notification -// change to remote peer via RPC call. -func (rc *remoteBucketMetaState) UpdateBucketNotification(args *SetBucketNotificationPeerArgs) error { - reply := AuthRPCReply{} - return rc.Call("S3.SetBucketNotificationPeer", args, &reply) -} - -// remoteBucketMetaState.UpdateBucketListener - sends bucket listener change to -// remote peer via RPC call. 
-func (rc *remoteBucketMetaState) UpdateBucketListener(args *SetBucketListenerPeerArgs) error { - reply := AuthRPCReply{} - return rc.Call("S3.SetBucketListenerPeer", args, &reply) -} - -// remoteBucketMetaState.UpdateBucketPolicy - sends bucket policy change to remote -// peer via RPC call. -func (rc *remoteBucketMetaState) UpdateBucketPolicy(args *SetBucketPolicyPeerArgs) error { - reply := AuthRPCReply{} - return rc.Call("S3.SetBucketPolicyPeer", args, &reply) -} - -// remoteBucketMetaState.SendEvent - sends event for bucket listener to remote -// peer via RPC call. -func (rc *remoteBucketMetaState) SendEvent(args *EventArgs) error { - reply := AuthRPCReply{} - return rc.Call("S3.Event", args, &reply) -} diff --git a/cmd/bucket-notification-datatypes.go b/cmd/bucket-notification-datatypes.go deleted file mode 100644 index 4529bc437..000000000 --- a/cmd/bucket-notification-datatypes.go +++ /dev/null @@ -1,213 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "encoding/xml" - "errors" -) - -// Represents the criteria for the filter rule. -type filterRule struct { - Name string `xml:"Name"` - Value string `xml:"Value"` -} - -// Collection of filter rules per service config. 
-type keyFilter struct { - FilterRules []filterRule `xml:"FilterRule,omitempty"` -} - -type filterStruct struct { - Key keyFilter `xml:"S3Key,omitempty" json:"S3Key,omitempty"` -} - -// ServiceConfig - Common elements of service notification. -type ServiceConfig struct { - Events []string `xml:"Event" json:"Event"` - Filter filterStruct `xml:"Filter" json:"Filter"` - ID string `xml:"Id" json:"Id"` -} - -// Queue SQS configuration. -type queueConfig struct { - ServiceConfig - QueueARN string `xml:"Queue"` -} - -// Topic SNS configuration, this is a compliance field not used by minio yet. -type topicConfig struct { - ServiceConfig - TopicARN string `xml:"Topic" json:"Topic"` -} - -// Lambda function configuration, this is a compliance field not used by minio yet. -type lambdaConfig struct { - ServiceConfig - LambdaARN string `xml:"CloudFunction"` -} - -// Notification configuration structure represents the XML format of -// notification configuration of buckets. -type notificationConfig struct { - XMLName xml.Name `xml:"NotificationConfiguration"` - QueueConfigs []queueConfig `xml:"QueueConfiguration"` - LambdaConfigs []lambdaConfig `xml:"CloudFunctionConfiguration"` - TopicConfigs []topicConfig `xml:"TopicConfiguration"` -} - -// listenerConfig structure represents run-time notification -// configuration for live listeners -type listenerConfig struct { - TopicConfig topicConfig `json:"TopicConfiguration"` - TargetServer string `json:"TargetServer"` -} - -// Internal error used to signal notifications not set. 
-var errNoSuchNotifications = errors.New("The specified bucket does not have bucket notifications") - -// EventName is AWS S3 event type: -// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html -type EventName int - -const ( - // ObjectCreatedPut is s3:ObjectCreated:Put - ObjectCreatedPut EventName = iota - // ObjectCreatedPost is s3:ObjectCreated:Post - ObjectCreatedPost - // ObjectCreatedCopy is s3:ObjectCreated:Copy - ObjectCreatedCopy - // ObjectCreatedCompleteMultipartUpload is s3:ObjectCreated:CompleteMultipartUpload - ObjectCreatedCompleteMultipartUpload - // ObjectRemovedDelete is s3:ObjectRemoved:Delete - ObjectRemovedDelete - // ObjectAccessedGet is s3:ObjectAccessed:Get - ObjectAccessedGet - // ObjectAccessedHead is s3:ObjectAccessed:Head - ObjectAccessedHead -) - -// Stringer interface for event name. -func (eventName EventName) String() string { - switch eventName { - case ObjectCreatedPut: - return "s3:ObjectCreated:Put" - case ObjectCreatedPost: - return "s3:ObjectCreated:Post" - case ObjectCreatedCopy: - return "s3:ObjectCreated:Copy" - case ObjectCreatedCompleteMultipartUpload: - return "s3:ObjectCreated:CompleteMultipartUpload" - case ObjectRemovedDelete: - return "s3:ObjectRemoved:Delete" - case ObjectAccessedGet: - return "s3:ObjectAccessed:Get" - case ObjectAccessedHead: - return "s3:ObjectAccessed:Head" - default: - return "s3:Unknown" - } -} - -// Indentity represents the accessKey who caused the event. -type identity struct { - PrincipalID string `json:"principalId"` -} - -// Notification event bucket metadata. -type bucketMeta struct { - Name string `json:"name"` - OwnerIdentity identity `json:"ownerIdentity"` - ARN string `json:"arn"` -} - -// Notification event object metadata. 
-type objectMeta struct { - Key string `json:"key"` - Size int64 `json:"size,omitempty"` - ETag string `json:"eTag,omitempty"` - ContentType string `json:"contentType,omitempty"` - UserMetadata map[string]string `json:"userMetadata,omitempty"` - VersionID string `json:"versionId,omitempty"` - Sequencer string `json:"sequencer"` -} - -const ( - // Event schema version number defaulting to the value in S3 spec. - // ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html - eventSchemaVersion = "1.0" - - // Default ID found in bucket notification configuration. - // ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html - eventConfigID = "Config" -) - -const ( - // Response element origin endpoint key. - responseOriginEndpointKey = "x-minio-origin-endpoint" -) - -// Notification event server specific metadata. -type eventMeta struct { - SchemaVersion string `json:"s3SchemaVersion"` - ConfigurationID string `json:"configurationId"` - Bucket bucketMeta `json:"bucket"` - Object objectMeta `json:"object"` -} - -const ( - // Event source static value defaulting to the value in S3 spec. - // ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html - eventSource = "aws:s3" - - // Event version number defaulting to the value in S3 spec. - // ref: http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html - eventVersion = "2.0" -) - -// sourceInfo represents information on the client that triggered the -// event notification. -type sourceInfo struct { - Host string `json:"host"` - Port string `json:"port"` - UserAgent string `json:"userAgent"` -} - -// NotificationEvent represents an Amazon an S3 bucket notification event. 
-type NotificationEvent struct { - EventVersion string `json:"eventVersion"` - EventSource string `json:"eventSource"` - AwsRegion string `json:"awsRegion"` - EventTime string `json:"eventTime"` - EventName string `json:"eventName"` - UserIdentity identity `json:"userIdentity"` - RequestParameters map[string]string `json:"requestParameters"` - ResponseElements map[string]string `json:"responseElements"` - S3 eventMeta `json:"s3"` - Source sourceInfo `json:"source"` -} - -// Represents the minio sqs type and account id's. -type arnSQS struct { - Type string - AccountID string -} - -// Stringer for constructing AWS ARN compatible string. -func (m arnSQS) String() string { - return minioSqs + globalServerConfig.GetRegion() + ":" + m.AccountID + ":" + m.Type -} diff --git a/cmd/bucket-notification-handlers.go b/cmd/bucket-notification-handlers.go index a0040875b..13dc0f77f 100644 --- a/cmd/bucket-notification-handlers.go +++ b/cmd/bucket-notification-handlers.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -17,18 +17,16 @@ package cmd import ( - "bytes" - "encoding/json" "encoding/xml" - "fmt" + "errors" "io" - "net" "net/http" - "syscall" - "time" "github.com/gorilla/mux" - "github.com/minio/minio/pkg/errors" + xerrors "github.com/minio/minio/pkg/errors" + "github.com/minio/minio/pkg/event" + "github.com/minio/minio/pkg/event/target" + xnet "github.com/minio/minio/pkg/net" ) const ( @@ -37,11 +35,11 @@ const ( bucketListenerConfig = "listener.json" ) -// GetBucketNotificationHandler - This implementation of the GET -// operation uses the notification subresource to return the -// notification configuration of a bucket. If notifications are -// not enabled on the bucket, the operation returns an empty -// NotificationConfiguration element. 
+var errNoSuchNotifications = errors.New("The specified bucket does not have bucket notifications") + +// GetBucketNotificationHandler - This HTTP handler returns event notification configuration +// as per http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html. +// It returns empty configuration if its not set. func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, r *http.Request) { objAPI := api.ObjectAPI() @@ -60,46 +58,40 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, } vars := mux.Vars(r) - bucket := vars["bucket"] + bucketName := vars["bucket"] - _, err := objAPI.GetBucketInfo(bucket) + _, err := objAPI.GetBucketInfo(bucketName) if err != nil { + errorIf(err, "Unable to find bucket info.") writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } // Attempt to successfully load notification config. - nConfig, err := loadNotificationConfig(bucket, objAPI) - if err != nil && errors.Cause(err) != errNoSuchNotifications { - errorIf(err, "Unable to read notification configuration.") - writeErrorResponse(w, toAPIErrorCode(err), r.URL) - return - } - // For no notifications we write a dummy XML. - if errors.Cause(err) == errNoSuchNotifications { - // Complies with the s3 behavior in this regard. - nConfig = ¬ificationConfig{} + nConfig, err := readNotificationConfig(objAPI, bucketName) + if err != nil { + // Ignore errNoSuchNotifications to comply with AWS S3. + if xerrors.Cause(err) != errNoSuchNotifications { + errorIf(err, "Unable to read notification configuration.") + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + nConfig = &event.Config{} } + notificationBytes, err := xml.Marshal(nConfig) if err != nil { - // For any marshalling failure. errorIf(err, "Unable to marshal notification configuration into XML.", err) writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } - // Success. 
writeSuccessResponseXML(w, notificationBytes) } -// PutBucketNotificationHandler - Minio notification feature enables -// you to receive notifications when certain events happen in your bucket. -// Using this API, you can replace an existing notification configuration. -// The configuration is an XML file that defines the event types that you -// want Minio to publish and the destination where you want Minio to publish -// an event notification when it detects an event of the specified type. -// By default, your bucket has no event notifications configured. That is, -// the notification configuration will be an empty NotificationConfiguration. +// PutBucketNotificationHandler - This HTTP handler stores given notification configuration as per +// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html. func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, r *http.Request) { objectAPI := api.ObjectAPI() @@ -118,185 +110,56 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, } vars := mux.Vars(r) - bucket := vars["bucket"] + bucketName := vars["bucket"] - _, err := objectAPI.GetBucketInfo(bucket) + _, err := objectAPI.GetBucketInfo(bucketName) if err != nil { writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } - // If Content-Length is unknown or zero, deny the request. // PutBucketNotification always needs a Content-Length. - if r.ContentLength == -1 || r.ContentLength == 0 { + if r.ContentLength <= 0 { writeErrorResponse(w, ErrMissingContentLength, r.URL) return } - // Reads the incoming notification configuration. 
- var buffer bytes.Buffer - if r.ContentLength >= 0 { - _, err = io.CopyN(&buffer, r.Body, r.ContentLength) - } else { - _, err = io.Copy(&buffer, r.Body) - } + var config *event.Config + config, err = event.ParseConfig(io.LimitReader(r.Body, r.ContentLength), globalServerConfig.GetRegion(), globalNotificationSys.targetList) if err != nil { - errorIf(err, "Unable to read incoming body.") + apiErr := ErrMalformedXML + if event.IsEventError(err) { + apiErr = toAPIErrorCode(err) + } + + writeErrorResponse(w, apiErr, r.URL) + return + } + + // Acquire a write lock on bucket before modifying its configuration. + bucketLock := globalNSMutex.NewNSLock(bucketName, "") + if err = bucketLock.GetLock(globalOperationTimeout); err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + defer bucketLock.Unlock() + + if err = saveNotificationConfig(objectAPI, bucketName, config); err != nil { writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } - var notificationCfg notificationConfig - // Unmarshal notification bytes. - notificationConfigBytes := buffer.Bytes() - if err = xml.Unmarshal(notificationConfigBytes, ¬ificationCfg); err != nil { - errorIf(err, "Unable to parse notification configuration XML.") - writeErrorResponse(w, ErrMalformedXML, r.URL) - return - } // Successfully marshalled notification configuration. - - // Validate unmarshalled bucket notification configuration. - if s3Error := validateNotificationConfig(notificationCfg); s3Error != ErrNone { - writeErrorResponse(w, s3Error, r.URL) - return + rulesMap := config.ToRulesMap() + globalNotificationSys.AddRulesMap(bucketName, rulesMap) + for addr, err := range globalNotificationSys.PutBucketNotification(bucketName, rulesMap) { + errorIf(err, "unable to put bucket notification to remote peer %v", addr) } - // Convert the incoming ARNs properly to the GetRegion(). 
- for i, queueConfig := range notificationCfg.QueueConfigs { - queueConfig.QueueARN = unmarshalSqsARN(queueConfig.QueueARN).String() - notificationCfg.QueueConfigs[i] = queueConfig - } - - // Put bucket notification config. - err = PutBucketNotificationConfig(bucket, ¬ificationCfg, objectAPI) - if err != nil { - writeErrorResponse(w, toAPIErrorCode(err), r.URL) - return - } - - // Success. writeSuccessResponseHeadersOnly(w) } -// PutBucketNotificationConfig - Put a new notification config for a -// bucket (overwrites any previous config) persistently, updates -// global in-memory state, and notify other nodes in the cluster (if -// any) -func PutBucketNotificationConfig(bucket string, ncfg *notificationConfig, objAPI ObjectLayer) error { - if ncfg == nil { - return errInvalidArgument - } - - // Acquire a write lock on bucket before modifying its - // configuration. - bucketLock := globalNSMutex.NewNSLock(bucket, "") - if err := bucketLock.GetLock(globalOperationTimeout); err != nil { - return err - } - // Release lock after notifying peers - defer bucketLock.Unlock() - - // persist config to disk - err := persistNotificationConfig(bucket, ncfg, objAPI) - if err != nil { - return fmt.Errorf("Unable to persist Bucket notification config to object layer - config=%v errMsg=%v", *ncfg, err) - } - - // All servers (including local) are told to update in-memory config - S3PeersUpdateBucketNotification(bucket, ncfg) - - return nil -} - -// writeNotification marshals notification message before writing to client. -func writeNotification(w http.ResponseWriter, notification map[string][]NotificationEvent) error { - // Invalid response writer. - if w == nil { - return errInvalidArgument - } - // Invalid notification input. - if notification == nil { - return errInvalidArgument - } - // Marshal notification data into JSON and write to client. 
- notificationBytes, err := json.Marshal(¬ification) - if err != nil { - return err - } - - // Add additional CRLF characters for client to - // differentiate the individual events properly. - _, err = w.Write(append(notificationBytes, crlf...)) - // Make sure we have flushed, this would set Transfer-Encoding: chunked. - w.(http.Flusher).Flush() - return err -} - -// CRLF character used for chunked transfer in accordance with HTTP standards. -var crlf = []byte("\r\n") - -// listenChan A `listenChan` provides a data channel to send event -// notifications on and `doneCh` to signal that events are no longer -// being received. It also sends empty events (whitespace) to keep the -// underlying connection alive. -type listenChan struct { - doneCh chan struct{} - dataCh chan []NotificationEvent -} - -// newListenChan returns a listenChan with properly initialized -// unbuffered channels. -func newListenChan() *listenChan { - return &listenChan{ - doneCh: make(chan struct{}), - dataCh: make(chan []NotificationEvent), - } -} - -// sendNotificationEvent sends notification events on the data channel -// unless doneCh is not closed -func (l *listenChan) sendNotificationEvent(events []NotificationEvent) { - select { - // Returns immediately if receiver has quit. - case <-l.doneCh: - // Blocks until receiver is available. - case l.dataCh <- events: - } -} - -// waitForListener writes event notification OR whitespaces on -// ResponseWriter until client closes connection -func (l *listenChan) waitForListener(w http.ResponseWriter) { - - // Logs errors other than EPIPE and ECONNRESET. - // EPIPE and ECONNRESET indicate that the client stopped - // listening to notification events. 
- logClientError := func(err error, msg string) { - if oe, ok := err.(*net.OpError); ok && (oe.Err == syscall.EPIPE || oe.Err == - syscall.ECONNRESET) { - errorIf(err, msg) - } - } - - emptyEvent := map[string][]NotificationEvent{"Records": nil} - defer close(l.doneCh) - for { - select { - case events := <-l.dataCh: - if err := writeNotification(w, map[string][]NotificationEvent{"Records": events}); err != nil { - logClientError(err, "Unable to write notification") - return - } - case <-time.After(globalSNSConnAlive): - if err := writeNotification(w, emptyEvent); err != nil { - logClientError(err, "Unable to write empty notification") - return - } - } - } -} - -// ListenBucketNotificationHandler - list bucket notifications. +// ListenBucketNotificationHandler - This HTTP handler sends events to the connected HTTP client. +// Client should send prefix/suffix object name to match and events to watch as query parameters. func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWriter, r *http.Request) { // Validate if bucket exists. objAPI := api.ObjectAPI() @@ -314,195 +177,84 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit } vars := mux.Vars(r) - bucket := vars["bucket"] + bucketName := vars["bucket"] - // Parse listen bucket notification resources. - prefixes, suffixes, events := getListenBucketNotificationResources(r.URL.Query()) + values := r.URL.Query() - if err := validateFilterValues(prefixes); err != ErrNone { - writeErrorResponse(w, err, r.URL) - return + var prefix string + if len(values["prefix"]) > 1 { + writeErrorResponse(w, ErrFilterNamePrefix, r.URL) } - - if err := validateFilterValues(suffixes); err != ErrNone { - writeErrorResponse(w, err, r.URL) - return - } - - // Validate all the resource events. 
- for _, event := range events { - if errCode := checkEvent(event); errCode != ErrNone { - writeErrorResponse(w, errCode, r.URL) + if len(values["prefix"]) == 1 { + if err := event.ValidateFilterRuleValue(values["prefix"][0]); err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } + + prefix = values["prefix"][0] } - _, err := objAPI.GetBucketInfo(bucket) - if err != nil { - writeErrorResponse(w, toAPIErrorCode(err), r.URL) - return + var suffix string + if len(values["suffix"]) > 1 { + writeErrorResponse(w, ErrFilterNameSuffix, r.URL) } - - targetServer := GetLocalPeer(globalEndpoints) - accountID := fmt.Sprintf("%d", UTCNow().UnixNano()) - accountARN := fmt.Sprintf( - "%s:%s:%s:%s-%s", - minioTopic, - globalServerConfig.GetRegion(), - accountID, - snsTypeMinio, - targetServer, - ) - - var filterRules []filterRule - - for _, prefix := range prefixes { - filterRules = append(filterRules, filterRule{ - Name: "prefix", - Value: prefix, - }) - } - - for _, suffix := range suffixes { - filterRules = append(filterRules, filterRule{ - Name: "suffix", - Value: suffix, - }) - } - - // Make topic configuration corresponding to this - // ListenBucketNotification request. - topicCfg := &topicConfig{ - TopicARN: accountARN, - ServiceConfig: ServiceConfig{ - Events: events, - Filter: struct { - Key keyFilter `xml:"S3Key,omitempty" json:"S3Key,omitempty"` - }{ - Key: keyFilter{ - FilterRules: filterRules, - }, - }, - ID: "sns-" + accountID, - }, - } - - // Setup a listen channel to receive notifications like - // s3:ObjectCreated, s3:ObjectDeleted etc. - nListenCh := newListenChan() - // Add channel for listener events - if err = globalEventNotifier.AddListenerChan(accountARN, nListenCh); err != nil { - errorIf(err, "Error adding a listener!") - writeErrorResponse(w, toAPIErrorCode(err), r.URL) - return - } - // Remove listener channel after the writer has closed or the - // client disconnected. 
- defer globalEventNotifier.RemoveListenerChan(accountARN) - - // Update topic config to bucket config and persist - as soon - // as this call completes, events may start appearing in - // nListenCh - lc := listenerConfig{ - TopicConfig: *topicCfg, - TargetServer: targetServer, - } - - err = AddBucketListenerConfig(bucket, &lc, objAPI) - if err != nil { - writeErrorResponse(w, toAPIErrorCode(err), r.URL) - return - } - defer RemoveBucketListenerConfig(bucket, &lc, objAPI) - - // Add all common headers. - setCommonHeaders(w) - - // https://github.com/containous/traefik/issues/560 - // https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events - // - // Proxies might buffer the connection to avoid this we - // need the proper MIME type before writing to client. - // This MIME header tells the proxies to avoid buffering - w.Header().Set("Content-Type", "text/event-stream") - - // Start writing bucket notifications to ResponseWriter. - nListenCh.waitForListener(w) -} - -// AddBucketListenerConfig - Updates on disk state of listeners, and -// updates all peers with the change in listener config. -func AddBucketListenerConfig(bucket string, lcfg *listenerConfig, objAPI ObjectLayer) error { - if lcfg == nil { - return errInvalidArgument - } - listenerCfgs := globalEventNotifier.GetBucketListenerConfig(bucket) - - // add new lid to listeners and persist to object layer. - listenerCfgs = append(listenerCfgs, *lcfg) - - // Acquire a write lock on bucket before modifying its - // configuration. 
- bucketLock := globalNSMutex.NewNSLock(bucket, "") - if err := bucketLock.GetLock(globalOperationTimeout); err != nil { - return err - } - // Release lock after notifying peers - defer bucketLock.Unlock() - - // update persistent config if dist XL - if globalIsDistXL { - err := persistListenerConfig(bucket, listenerCfgs, objAPI) - if err != nil { - errorIf(err, "Error persisting listener config when adding a listener.") - return err - } - } - - // persistence success - now update in-memory globals on all - // peers (including local) - S3PeersUpdateBucketListener(bucket, listenerCfgs) - return nil -} - -// RemoveBucketListenerConfig - removes a given bucket notification config -func RemoveBucketListenerConfig(bucket string, lcfg *listenerConfig, objAPI ObjectLayer) { - listenerCfgs := globalEventNotifier.GetBucketListenerConfig(bucket) - - // remove listener with matching ARN - if not found ignore and exit. - var updatedLcfgs []listenerConfig - found := false - for k, configuredLcfg := range listenerCfgs { - if configuredLcfg.TopicConfig.TopicARN == lcfg.TopicConfig.TopicARN { - updatedLcfgs = append(listenerCfgs[:k], - listenerCfgs[k+1:]...) - found = true - break - } - } - if !found { - return - } - - // Acquire a write lock on bucket before modifying its - // configuration. 
- bucketLock := globalNSMutex.NewNSLock(bucket, "") - if bucketLock.GetLock(globalOperationTimeout) != nil { - return - } - // Release lock after notifying peers - defer bucketLock.Unlock() - - // update persistent config if dist XL - if globalIsDistXL { - err := persistListenerConfig(bucket, updatedLcfgs, objAPI) - if err != nil { - errorIf(err, "Error persisting listener config when removing a listener.") + if len(values["suffix"]) == 1 { + if err := event.ValidateFilterRuleValue(values["suffix"][0]); err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) return } + + suffix = values["suffix"][0] } - // persistence success - now update in-memory globals on all - // peers (including local) - S3PeersUpdateBucketListener(bucket, updatedLcfgs) + pattern := event.NewPattern(prefix, suffix) + + eventNames := []event.Name{} + for _, s := range values["events"] { + eventName, err := event.ParseName(s) + if err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + eventNames = append(eventNames, eventName) + } + + if _, err := objAPI.GetBucketInfo(bucketName); err != nil { + errorIf(err, "Unable to get bucket info.") + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + host := xnet.MustParseHost(r.RemoteAddr) + target := target.NewHTTPClientTarget(*host, w) + rulesMap := event.NewRulesMap(eventNames, pattern, target.ID()) + + if err := globalNotificationSys.AddRemoteTarget(bucketName, target, rulesMap); err != nil { + errorIf(err, "Unable to add httpclient target %v to globalNotificationSys.targetList.", target) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + defer globalNotificationSys.RemoveRemoteTarget(bucketName, target.ID()) + defer globalNotificationSys.RemoveRulesMap(bucketName, rulesMap) + + thisAddr := xnet.MustParseHost(GetLocalPeer(globalEndpoints)) + if err := SaveListener(objAPI, bucketName, eventNames, pattern, target.ID(), *thisAddr); err != nil { + errorIf(err, "Unable to save HTTP 
listener %v", target) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + + errors := globalNotificationSys.ListenBucketNotification(bucketName, eventNames, pattern, target.ID(), *thisAddr) + for addr, err := range errors { + errorIf(err, "unable to call listen bucket notification to remote peer %v", addr) + } + + <-target.DoneCh + + if err := RemoveListener(objAPI, bucketName, target.ID(), *thisAddr); err != nil { + errorIf(err, "Unable to save HTTP listener %v", target) + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } } diff --git a/cmd/bucket-notification-handlers_test.go b/cmd/bucket-notification-handlers_test.go deleted file mode 100644 index 08c20e4ff..000000000 --- a/cmd/bucket-notification-handlers_test.go +++ /dev/null @@ -1,483 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "bufio" - "bytes" - "encoding/json" - "encoding/xml" - "errors" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "reflect" - "sync" - "testing" - - "time" - - "github.com/minio/minio/pkg/auth" -) - -// Implement a dummy flush writer. -type flushWriter struct { - io.Writer -} - -// Flush writer is a dummy writer compatible with http.Flusher and http.ResponseWriter. 
-func (f *flushWriter) Flush() {} -func (f *flushWriter) Write(b []byte) (n int, err error) { return f.Writer.Write(b) } -func (f *flushWriter) Header() http.Header { return http.Header{} } -func (f *flushWriter) WriteHeader(code int) {} - -func newFlushWriter(writer io.Writer) http.ResponseWriter { - return &flushWriter{writer} -} - -// Tests write notification code. -func TestWriteNotification(t *testing.T) { - // Initialize a new test config. - root, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatalf("Unable to initialize test config %s", err) - } - defer os.RemoveAll(root) - - var buffer bytes.Buffer - // Collection of test cases for each event writer. - testCases := []struct { - writer http.ResponseWriter - event map[string][]NotificationEvent - err error - }{ - // Invalid input argument with writer `nil` - Test - 1 - { - writer: nil, - event: nil, - err: errInvalidArgument, - }, - // Invalid input argument with event `nil` - Test - 2 - { - writer: newFlushWriter(ioutil.Discard), - event: nil, - err: errInvalidArgument, - }, - // Unmarshal and write, validate last 5 bytes. - Test - 3 - { - writer: newFlushWriter(&buffer), - event: map[string][]NotificationEvent{ - "Records": {newNotificationEvent(eventData{ - Type: ObjectCreatedPut, - Bucket: "testbucket", - ObjInfo: ObjectInfo{ - Name: "key", - }, - ReqParams: map[string]string{ - "ip": "10.1.10.1", - }}), - }, - }, - err: nil, - }, - } - // Validates all the testcases for writing notification. 
- for _, testCase := range testCases { - err := writeNotification(testCase.writer, testCase.event) - if err != testCase.err { - t.Errorf("Unable to write notification %s", err) - } - // Validates if the ending string has 'crlf' - if err == nil && !bytes.HasSuffix(buffer.Bytes(), crlf) { - buf := buffer.Bytes()[buffer.Len()-5 : 0] - t.Errorf("Invalid suffix found from the writer last 5 bytes %s, expected `\r\n`", string(buf)) - } - // Not printing 'buf' on purpose, validates look for string '10.1.10.1'. - if err == nil && !bytes.Contains(buffer.Bytes(), []byte("10.1.10.1")) { - // Enable when debugging) - // fmt.Println(string(buffer.Bytes())) - t.Errorf("Requested content couldn't be found, expected `10.1.10.1`") - } - } -} - -// testResponseWriter implements `http.ResponseWriter` that buffers -// response body in a `bytes.Buffer` and returns error after `failCount` -// calls to `Write` method -type testResponseWriter struct { - mu sync.Mutex - failCount int - buf *bytes.Buffer - m http.Header -} - -func newTestResponseWriter(failAt int) *testResponseWriter { - return &testResponseWriter{ - buf: new(bytes.Buffer), - m: make(http.Header), - failCount: failAt, - } -} - -func (trw *testResponseWriter) Flush() { -} - -func (trw *testResponseWriter) Write(p []byte) (int, error) { - trw.mu.Lock() - defer trw.mu.Unlock() - - if trw.failCount == 0 { - return 0, errors.New("Custom error") - } - trw.failCount-- - - return trw.buf.Write(p) -} - -func (trw *testResponseWriter) Header() http.Header { - return trw.m -} - -func (trw *testResponseWriter) WriteHeader(i int) { -} - -func TestListenChan(t *testing.T) { - // Initialize a new test config. - root, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatalf("Unable to initialize test config %s", err) - } - defer os.RemoveAll(root) - - // Create a listen channel to manage notifications - nListenCh := newListenChan() - - // Construct notification events to be passed on the events channel. 
- var events []NotificationEvent - evTypes := []EventName{ - ObjectCreatedPut, - ObjectCreatedPost, - ObjectCreatedCopy, - ObjectCreatedCompleteMultipartUpload, - } - - for _, evType := range evTypes { - events = append(events, newNotificationEvent(eventData{ - Type: evType, - })) - } - - // Send notification events one-by-one - go func() { - for _, event := range events { - nListenCh.sendNotificationEvent([]NotificationEvent{event}) - } - }() - - // Create a http.ResponseWriter that fails after len(events) - // number of times - trw := newTestResponseWriter(len(events)) - - // Wait for all (4) notification events to be received - nListenCh.waitForListener(trw) - - // Used to read JSON-formatted event stream line-by-line - scanner := bufio.NewScanner(trw.buf) - var records map[string][]NotificationEvent - for i := 0; scanner.Scan(); i++ { - err = json.Unmarshal(scanner.Bytes(), &records) - if err != nil { - t.Fatalf("Failed to unmarshal json %v", err) - } - - nEvent := records["Records"][0] - if nEvent.EventName != evTypes[i].String() { - t.Errorf("notification event name mismatch, expected %s but got %s", evTypes[i], nEvent.EventName) - } - } -} - -func TestSendNotificationEvent(t *testing.T) { - // This test verifies that sendNotificationEvent function - // returns once listenChan.doneCh is closed - - l := newListenChan() - testCh := make(chan struct{}) - timeout := 5 * time.Second - - go func() { - // Send one empty notification event on listenChan - events := []NotificationEvent{{}} - l.sendNotificationEvent(events) - testCh <- struct{}{} - }() - - // close l.doneCh to signal client exiting from - // ListenBucketNotification API call - close(l.doneCh) - - select { - case <-time.After(timeout): - t.Fatalf("sendNotificationEvent didn't return after %v seconds", timeout) - case <-testCh: - // If we reach this case, sendNotificationEvent - // returned on closing l.doneCh - } -} - -func TestGetBucketNotificationHandler(t *testing.T) { - ExecObjectLayerAPITest(t, 
testGetBucketNotificationHandler, []string{ - "GetBucketNotification", - }) -} - -func testGetBucketNotificationHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - // declare sample configs - filterRules := []filterRule{ - { - Name: "prefix", - Value: "minio", - }, - { - Name: "suffix", - Value: "*.jpg", - }, - } - sampleSvcCfg := ServiceConfig{ - []string{"s3:ObjectRemoved:*", "s3:ObjectCreated:*"}, - filterStruct{ - keyFilter{filterRules}, - }, - "1", - } - sampleNotifCfg := notificationConfig{ - QueueConfigs: []queueConfig{ - { - ServiceConfig: sampleSvcCfg, - QueueARN: "testqARN", - }, - }, - } - rec := httptest.NewRecorder() - req, err := newTestSignedRequestV4("GET", getGetBucketNotificationURL("", bucketName), - 0, nil, credentials.AccessKey, credentials.SecretKey) - if err != nil { - t.Fatalf("%s: Failed to create HTTP testRequest for ListenBucketNotification: %v", instanceType, err) - } - apiRouter.ServeHTTP(rec, req) - if rec.Code != http.StatusOK { - t.Fatalf("Unexpected http response %d", rec.Code) - } - if err = persistNotificationConfig(bucketName, &sampleNotifCfg, obj); err != nil { - t.Fatalf("Unable to save notification config %s", err) - } - rec = httptest.NewRecorder() - req, err = newTestSignedRequestV4("GET", getGetBucketNotificationURL("", bucketName), - 0, nil, credentials.AccessKey, credentials.SecretKey) - if err != nil { - t.Fatalf("%s: Failed to create HTTP testRequest for ListenBucketNotification: %v", instanceType, err) - } - apiRouter.ServeHTTP(rec, req) - if rec.Code != http.StatusOK { - t.Fatalf("Unexpected http response %d", rec.Code) - } - notificationBytes, err := ioutil.ReadAll(rec.Body) - if err != nil { - t.Fatalf("Unexpected error %s", err) - } - nConfig := notificationConfig{} - if err = xml.Unmarshal(notificationBytes, &nConfig); err != nil { - t.Fatalf("Unexpected XML received %s", err) - } - if sampleNotifCfg.QueueConfigs[0].QueueARN != 
nConfig.QueueConfigs[0].QueueARN { - t.Fatalf("Uexpected notification configs expected %#v, got %#v", sampleNotifCfg, nConfig) - } - if !reflect.DeepEqual(sampleNotifCfg.QueueConfigs[0].Events, nConfig.QueueConfigs[0].Events) { - t.Fatalf("Uexpected notification configs expected %#v, got %#v", sampleNotifCfg, nConfig) - } -} - -func TestPutBucketNotificationHandler(t *testing.T) { - ExecObjectLayerAPITest(t, testPutBucketNotificationHandler, []string{ - "PutBucketNotification", - }) -} - -func testPutBucketNotificationHandler(obj ObjectLayer, instanceType, - bucketName string, apiRouter http.Handler, credentials auth.Credentials, - t *testing.T) { - - // declare sample configs - filterRules := []filterRule{ - { - Name: "prefix", - Value: "minio", - }, - { - Name: "suffix", - Value: "*.jpg", - }, - } - sampleSvcCfg := ServiceConfig{ - []string{"s3:ObjectRemoved:*", "s3:ObjectCreated:*"}, - filterStruct{ - keyFilter{filterRules}, - }, - "1", - } - sampleNotifCfg := notificationConfig{ - QueueConfigs: []queueConfig{ - { - ServiceConfig: sampleSvcCfg, - QueueARN: "testqARN", - }, - }, - } - - { - sampleNotifCfg.LambdaConfigs = []lambdaConfig{ - { - sampleSvcCfg, "testLARN", - }, - } - xmlBytes, err := xml.Marshal(sampleNotifCfg) - if err != nil { - t.Fatalf("%s: Unexpected err: %#v", instanceType, err) - } - rec := httptest.NewRecorder() - req, err := newTestSignedRequestV4("PUT", - getPutBucketNotificationURL("", bucketName), - int64(len(xmlBytes)), bytes.NewReader(xmlBytes), - credentials.AccessKey, credentials.SecretKey) - if err != nil { - t.Fatalf("%s: Failed to create HTTP testRequest for PutBucketNotification: %v", - instanceType, err) - } - apiRouter.ServeHTTP(rec, req) - if rec.Code != http.StatusBadRequest { - t.Fatalf("Unexpected http response %d", rec.Code) - } - } - - { - sampleNotifCfg.LambdaConfigs = nil - sampleNotifCfg.TopicConfigs = []topicConfig{ - { - sampleSvcCfg, "testTARN", - }, - } - xmlBytes, err := xml.Marshal(sampleNotifCfg) - if err != nil { 
- t.Fatalf("%s: Unexpected err: %#v", instanceType, err) - } - rec := httptest.NewRecorder() - req, err := newTestSignedRequestV4("PUT", - getPutBucketNotificationURL("", bucketName), - int64(len(xmlBytes)), bytes.NewReader(xmlBytes), - credentials.AccessKey, credentials.SecretKey) - if err != nil { - t.Fatalf("%s: Failed to create HTTP testRequest for PutBucketNotification: %v", - instanceType, err) - } - apiRouter.ServeHTTP(rec, req) - if rec.Code != http.StatusBadRequest { - t.Fatalf("Unexpected http response %d", rec.Code) - } - } -} - -func TestListenBucketNotificationNilHandler(t *testing.T) { - ExecObjectLayerAPITest(t, testListenBucketNotificationNilHandler, []string{ - "ListenBucketNotification", - "PutObject", - }) -} - -func testListenBucketNotificationNilHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, - credentials auth.Credentials, t *testing.T) { - // get random bucket name. - randBucket := getRandomBucketName() - - // Nil Object layer - nilAPIRouter := initTestAPIEndPoints(nil, []string{ - "ListenBucketNotification", - }) - testRec := httptest.NewRecorder() - testReq, tErr := newTestSignedRequestV4("GET", - getListenBucketNotificationURL("", randBucket, []string{}, - []string{"*.jpg"}, []string{ - "s3:ObjectCreated:*", - "s3:ObjectRemoved:*", - "s3:ObjectAccessed:*", - }), 0, nil, credentials.AccessKey, credentials.SecretKey) - if tErr != nil { - t.Fatalf("%s: Failed to create HTTP testRequest for ListenBucketNotification: %v", instanceType, tErr) - } - nilAPIRouter.ServeHTTP(testRec, testReq) - if testRec.Code != http.StatusServiceUnavailable { - t.Fatalf("Test 1: %s: expected HTTP code %d, but received %d: %v", - instanceType, http.StatusServiceUnavailable, testRec.Code, tErr) - } -} - -func testRemoveNotificationConfig(obj ObjectLayer, instanceType, - bucketName string, apiRouter http.Handler, credentials auth.Credentials, - t *testing.T) { - - invalidBucket := "Invalid\\Bucket" - // get random bucket name. 
- randBucket := bucketName - - nCfg := notificationConfig{ - QueueConfigs: []queueConfig{ - { - ServiceConfig: ServiceConfig{ - Events: []string{"s3:ObjectRemoved:*", - "s3:ObjectCreated:*"}, - }, - QueueARN: "testqARN", - }, - }, - } - if err := persistNotificationConfig(randBucket, &nCfg, obj); err != nil { - t.Fatalf("Unexpected error: %#v", err) - } - - testCases := []struct { - bucketName string - expectedErr error - }{ - {invalidBucket, BucketNameInvalid{Bucket: invalidBucket}}, - {randBucket, nil}, - } - for i, test := range testCases { - tErr := removeNotificationConfig(test.bucketName, obj) - if tErr != test.expectedErr { - t.Errorf("Test %d: %s expected error %v, but received %v", i+1, instanceType, test.expectedErr, tErr) - } - } -} - -func TestRemoveNotificationConfig(t *testing.T) { - ExecObjectLayerAPITest(t, testRemoveNotificationConfig, []string{ - "PutBucketNotification", - "ListenBucketNotification", - }) -} diff --git a/cmd/bucket-notification-utils.go b/cmd/bucket-notification-utils.go deleted file mode 100644 index 005c7823a..000000000 --- a/cmd/bucket-notification-utils.go +++ /dev/null @@ -1,317 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "errors" - "strings" - - "github.com/minio/minio-go/pkg/set" -) - -// List of valid event types. -var suppportedEventTypes = map[string]struct{}{ - // Object created event types. 
- "s3:ObjectCreated:*": {}, - "s3:ObjectCreated:Put": {}, - "s3:ObjectCreated:Post": {}, - "s3:ObjectCreated:Copy": {}, - "s3:ObjectCreated:CompleteMultipartUpload": {}, - // Object removed event types. - "s3:ObjectRemoved:*": {}, - "s3:ObjectRemoved:Delete": {}, - "s3:ObjectAccessed:Get": {}, - "s3:ObjectAccessed:Head": {}, - "s3:ObjectAccessed:*": {}, -} - -// checkEvent - checks if an event is supported. -func checkEvent(event string) APIErrorCode { - _, ok := suppportedEventTypes[event] - if !ok { - return ErrEventNotification - } - return ErrNone -} - -// checkEvents - checks given list of events if all of them are valid. -// given if one of them is invalid, this function returns an error. -func checkEvents(events []string) APIErrorCode { - for _, event := range events { - if s3Error := checkEvent(event); s3Error != ErrNone { - return s3Error - } - } - return ErrNone -} - -// Valid if filterName is 'prefix'. -func isValidFilterNamePrefix(filterName string) bool { - return "prefix" == filterName -} - -// Valid if filterName is 'suffix'. -func isValidFilterNameSuffix(filterName string) bool { - return "suffix" == filterName -} - -// Is this a valid filterName? - returns true if valid. -func isValidFilterName(filterName string) bool { - return isValidFilterNamePrefix(filterName) || isValidFilterNameSuffix(filterName) -} - -// checkFilterRules - checks given list of filter rules if all of them are valid. -func checkFilterRules(filterRules []filterRule) APIErrorCode { - ruleSetMap := make(map[string]string) - // Validate all filter rules. - for _, filterRule := range filterRules { - // Unknown filter rule name found, returns an appropriate error. - if !isValidFilterName(filterRule.Name) { - return ErrFilterNameInvalid - } - - // Filter names should not be set twice per notification service - // configuration, if found return an appropriate error. 
- if _, ok := ruleSetMap[filterRule.Name]; ok { - if isValidFilterNamePrefix(filterRule.Name) { - return ErrFilterNamePrefix - } else if isValidFilterNameSuffix(filterRule.Name) { - return ErrFilterNameSuffix - } else { - return ErrFilterNameInvalid - } - } - - if !IsValidObjectPrefix(filterRule.Value) { - return ErrFilterValueInvalid - } - - // Set the new rule name to keep track of duplicates. - ruleSetMap[filterRule.Name] = filterRule.Value - } - // Success all prefixes validated. - return ErrNone -} - -// Checks validity of input ARN for a given arnType. -func checkARN(arn, arnType string) APIErrorCode { - if !strings.HasPrefix(arn, arnType) { - return ErrARNNotification - } - strs := strings.SplitN(arn, ":", -1) - if len(strs) != 6 { - return ErrARNNotification - } - - // Server region is allowed to be empty by default, - // in such a scenario ARN region is not validating - // allowing all regions. - if sregion := globalServerConfig.GetRegion(); sregion != "" { - region := strs[3] - if region != sregion { - return ErrRegionNotification - } - } - accountID := strs[4] - resource := strs[5] - if accountID == "" || resource == "" { - return ErrARNNotification - } - return ErrNone -} - -// checkQueueARN - check if the queue arn is valid. -func checkQueueARN(queueARN string) APIErrorCode { - return checkARN(queueARN, minioSqs) -} - -// Validates account id for input queue ARN. -func isValidQueueID(queueARN string) bool { - // Unmarshals QueueARN into structured object. - sqsARN := unmarshalSqsARN(queueARN) - // Is Queue identifier valid?. - - if isAMQPQueue(sqsARN) { // AMQP eueue. 
- amqpN := globalServerConfig.Notify.GetAMQPByID(sqsARN.AccountID) - return amqpN.Enable && amqpN.URL != "" - } else if isMQTTQueue(sqsARN) { - mqttN := globalServerConfig.Notify.GetMQTTByID(sqsARN.AccountID) - return mqttN.Enable && mqttN.Broker != "" - } else if isNATSQueue(sqsARN) { - natsN := globalServerConfig.Notify.GetNATSByID(sqsARN.AccountID) - return natsN.Enable && natsN.Address != "" - } else if isElasticQueue(sqsARN) { // Elastic queue. - elasticN := globalServerConfig.Notify.GetElasticSearchByID(sqsARN.AccountID) - return elasticN.Enable && elasticN.URL != "" - } else if isRedisQueue(sqsARN) { // Redis queue. - redisN := globalServerConfig.Notify.GetRedisByID(sqsARN.AccountID) - return redisN.Enable && redisN.Addr != "" - } else if isPostgreSQLQueue(sqsARN) { - pgN := globalServerConfig.Notify.GetPostgreSQLByID(sqsARN.AccountID) - // Postgres can work with only default conn. info. - return pgN.Enable - } else if isMySQLQueue(sqsARN) { - msqlN := globalServerConfig.Notify.GetMySQLByID(sqsARN.AccountID) - // Mysql can work with only default conn. info. - return msqlN.Enable - } else if isKafkaQueue(sqsARN) { - kafkaN := globalServerConfig.Notify.GetKafkaByID(sqsARN.AccountID) - return (kafkaN.Enable && len(kafkaN.Brokers) > 0 && - kafkaN.Topic != "") - } else if isWebhookQueue(sqsARN) { - webhookN := globalServerConfig.Notify.GetWebhookByID(sqsARN.AccountID) - return webhookN.Enable && webhookN.Endpoint != "" - } - return false -} - -// Check - validates queue configuration and returns error if any. -func checkQueueConfig(qConfig queueConfig) APIErrorCode { - // Check queue arn is valid. - if s3Error := checkQueueARN(qConfig.QueueARN); s3Error != ErrNone { - return s3Error - } - - // Validate if the account ID is correct. - if !isValidQueueID(qConfig.QueueARN) { - return ErrARNNotification - } - - // Check if valid events are set in queue config. 
- if s3Error := checkEvents(qConfig.Events); s3Error != ErrNone { - return s3Error - } - - // Check if valid filters are set in queue config. - if s3Error := checkFilterRules(qConfig.Filter.Key.FilterRules); s3Error != ErrNone { - return s3Error - } - - // Success. - return ErrNone -} - -// Validates all incoming queue configs, checkQueueConfig validates if the -// input fields for each queues is not malformed and has valid configuration -// information. If validation fails bucket notifications are not enabled. -func validateQueueConfigs(queueConfigs []queueConfig) APIErrorCode { - for _, qConfig := range queueConfigs { - if s3Error := checkQueueConfig(qConfig); s3Error != ErrNone { - return s3Error - } - } - // Success. - return ErrNone -} - -// Check all the queue configs for any duplicates. -func checkDuplicateQueueConfigs(configs []queueConfig) APIErrorCode { - queueConfigARNS := set.NewStringSet() - - // Navigate through each configs and count the entries. - for _, config := range configs { - queueConfigARNS.Add(config.QueueARN) - } - - if len(queueConfigARNS) != len(configs) { - return ErrOverlappingConfigs - } - - // Success. - return ErrNone -} - -// Validates all the bucket notification configuration for their validity, -// if one of the config is malformed or has invalid data it is rejected. -// Configuration is never applied partially. -func validateNotificationConfig(nConfig notificationConfig) APIErrorCode { - // Minio server does not support lambda/topic configurations - // currently. Such configuration is rejected. - if len(nConfig.LambdaConfigs) > 0 || len(nConfig.TopicConfigs) > 0 { - return ErrUnsupportedNotification - } - - // Validate all queue configs. - if s3Error := validateQueueConfigs(nConfig.QueueConfigs); s3Error != ErrNone { - return s3Error - } - - // Check for duplicate queue configs. 
- if len(nConfig.QueueConfigs) > 1 { - if s3Error := checkDuplicateQueueConfigs(nConfig.QueueConfigs); s3Error != ErrNone { - return s3Error - } - } - - // Add validation for other configurations. - return ErrNone -} - -// Unmarshals input value of AWS ARN format into minioSqs object. -// Returned value represents minio sqs types, currently supported are -// - amqp -// - mqtt -// - nats -// - elasticsearch -// - redis -// - postgresql -// - mysql -// - kafka -// - webhook -func unmarshalSqsARN(queueARN string) (mSqs arnSQS) { - strs := strings.SplitN(queueARN, ":", -1) - if len(strs) != 6 { - return - } - - // Server region is allowed to be empty by default, - // in such a scenario ARN region is not validating - // allowing all regions. - if sregion := globalServerConfig.GetRegion(); sregion != "" { - region := strs[3] - if region != sregion { - return - } - } - sqsType := strs[5] - switch sqsType { - case queueTypeAMQP: - mSqs.Type = queueTypeAMQP - case queueTypeMQTT: - mSqs.Type = queueTypeMQTT - case queueTypeNATS: - mSqs.Type = queueTypeNATS - case queueTypeElastic: - mSqs.Type = queueTypeElastic - case queueTypeRedis: - mSqs.Type = queueTypeRedis - case queueTypePostgreSQL: - mSqs.Type = queueTypePostgreSQL - case queueTypeMySQL: - mSqs.Type = queueTypeMySQL - case queueTypeKafka: - mSqs.Type = queueTypeKafka - case queueTypeWebhook: - mSqs.Type = queueTypeWebhook - default: - errorIf(errors.New("invalid SQS type"), "SQS type: %s", sqsType) - } // Add more queues here. - - mSqs.AccountID = strs[4] - - return -} diff --git a/cmd/bucket-notification-utils_test.go b/cmd/bucket-notification-utils_test.go deleted file mode 100644 index b02c9d03d..000000000 --- a/cmd/bucket-notification-utils_test.go +++ /dev/null @@ -1,420 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "os" - "strings" - "testing" -) - -// Test validates for duplicate configs. -func TestCheckDuplicateConfigs(t *testing.T) { - testCases := []struct { - qConfigs []queueConfig - expectedErrCode APIErrorCode - }{ - // Error for duplicate queue configs. - { - qConfigs: []queueConfig{ - { - QueueARN: "arn:minio:sqs:us-east-1:1:redis", - }, - { - QueueARN: "arn:minio:sqs:us-east-1:1:redis", - }, - }, - expectedErrCode: ErrOverlappingConfigs, - }, - // Valid queue configs. - { - qConfigs: []queueConfig{ - { - QueueARN: "arn:minio:sqs:us-east-1:1:redis", - }, - }, - expectedErrCode: ErrNone, - }, - } - - // ... validate for duplicate queue configs. - for i, testCase := range testCases { - errCode := checkDuplicateQueueConfigs(testCase.qConfigs) - if errCode != testCase.expectedErrCode { - t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.expectedErrCode, errCode) - } - } -} - -// Tests for validating filter rules. -func TestCheckFilterRules(t *testing.T) { - testCases := []struct { - rules []filterRule - expectedErrCode APIErrorCode - }{ - // Valid prefix and suffix values. - { - rules: []filterRule{ - { - Name: "prefix", - Value: "test/test1", - }, - { - Name: "suffix", - Value: ".jpg", - }, - }, - expectedErrCode: ErrNone, - }, - // Invalid filter name. - { - rules: []filterRule{ - { - Name: "unknown", - Value: "test/test1", - }, - }, - expectedErrCode: ErrFilterNameInvalid, - }, - // Cannot have duplicate prefixes. 
- { - rules: []filterRule{ - { - Name: "prefix", - Value: "test/test1", - }, - { - Name: "prefix", - Value: "test/test1", - }, - }, - expectedErrCode: ErrFilterNamePrefix, - }, - // Cannot have duplicate suffixes. - { - rules: []filterRule{ - { - Name: "suffix", - Value: ".jpg", - }, - { - Name: "suffix", - Value: ".txt", - }, - }, - expectedErrCode: ErrFilterNameSuffix, - }, - // Filter value cannot be bigger than > 1024. - { - rules: []filterRule{ - { - Name: "prefix", - Value: strings.Repeat("a", 1025), - }, - }, - expectedErrCode: ErrFilterValueInvalid, - }, - } - - for i, testCase := range testCases { - errCode := checkFilterRules(testCase.rules) - if errCode != testCase.expectedErrCode { - t.Errorf("Test %d: Expected %d, got %d", i+1, testCase.expectedErrCode, errCode) - } - } -} - -// Tests filter name validation. -func TestIsValidFilterName(t *testing.T) { - testCases := []struct { - filterName string - status bool - }{ - // Validate if 'prefix' is correct. - { - filterName: "prefix", - status: true, - }, - // Validate if 'suffix' is correct. - { - filterName: "suffix", - status: true, - }, - // Invalid filter name empty string should return false. - { - filterName: "", - status: false, - }, - // Invalid filter name random character should return false. - { - filterName: "unknown", - status: false, - }, - } - - for i, testCase := range testCases { - status := isValidFilterName(testCase.filterName) - if testCase.status != status { - t.Errorf("Test %d: Expected \"%t\", got \"%t\"", i+1, testCase.status, status) - } - } -} - -// Tests list of valid and invalid events. -func TestValidEvents(t *testing.T) { - testCases := []struct { - events []string - errCode APIErrorCode - }{ - // Return error for unknown event element. - { - events: []string{ - "s3:UnknownAPI", - }, - errCode: ErrEventNotification, - }, - // Return success for supported event. 
- { - events: []string{ - "s3:ObjectCreated:Put", - }, - errCode: ErrNone, - }, - // Return success for supported events. - { - events: []string{ - "s3:ObjectCreated:*", - "s3:ObjectRemoved:*", - }, - errCode: ErrNone, - }, - // Return error for empty event list. - { - events: []string{""}, - errCode: ErrEventNotification, - }, - } - - for i, testCase := range testCases { - errCode := checkEvents(testCase.events) - if testCase.errCode != errCode { - t.Errorf("Test %d: Expected \"%d\", got \"%d\"", i+1, testCase.errCode, errCode) - } - } -} - -// Tests queue arn validation. -func TestQueueARN(t *testing.T) { - rootPath, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatalf("unable initialize config file, %s", err) - } - defer os.RemoveAll(rootPath) - - testCases := []struct { - queueARN string - errCode APIErrorCode - }{ - - // Valid webhook queue arn. - { - queueARN: "arn:minio:sqs:us-east-1:1:webhook", - errCode: ErrNone, - }, - // Valid redis queue arn. - { - queueARN: "arn:minio:sqs:us-east-1:1:redis", - errCode: ErrNone, - }, - // Valid elasticsearch queue arn. - { - queueARN: "arn:minio:sqs:us-east-1:1:elasticsearch", - errCode: ErrNone, - }, - // Valid amqp queue arn. - { - queueARN: "arn:minio:sqs:us-east-1:1:amqp", - errCode: ErrNone, - }, - // Invalid empty queue arn. - { - queueARN: "", - errCode: ErrARNNotification, - }, - // Invalid notification service type. - { - queueARN: "arn:minio:sns:us-east-1:1:listen", - errCode: ErrARNNotification, - }, - // Invalid queue name empty in queue arn. - { - queueARN: "arn:minio:sqs:us-east-1:1:", - errCode: ErrARNNotification, - }, - // Invalid queue id empty in queue arn. - { - queueARN: "arn:minio:sqs:us-east-1::redis", - errCode: ErrARNNotification, - }, - // Invalid queue id and queue name empty in queue arn. - { - queueARN: "arn:minio:sqs:us-east-1::", - errCode: ErrARNNotification, - }, - // Missing queue id and separator missing at the end in queue arn. 
- { - queueARN: "arn:minio:sqs:us-east-1:amqp", - errCode: ErrARNNotification, - }, - // Missing queue id and empty string at the end in queue arn. - { - queueARN: "arn:minio:sqs:us-east-1:", - errCode: ErrARNNotification, - }, - } - - // Validate all tests for queue arn. - for i, testCase := range testCases { - errCode := checkQueueARN(testCase.queueARN) - if testCase.errCode != errCode { - t.Errorf("Test %d: Expected \"%d\", got \"%d\"", i+1, testCase.errCode, errCode) - } - } - - // Test when server region is set. - rootPath, err = newTestConfig("us-east-1") - if err != nil { - t.Fatalf("unable initialize config file, %s", err) - } - defer os.RemoveAll(rootPath) - - testCases = []struct { - queueARN string - errCode APIErrorCode - }{ - // Incorrect region should produce error. - { - queueARN: "arn:minio:sqs:us-west-1:1:webhook", - errCode: ErrRegionNotification, - }, - // Correct region should not produce error. - { - queueARN: "arn:minio:sqs:us-east-1:1:webhook", - errCode: ErrNone, - }, - } - - // Validate all tests for queue arn. - for i, testCase := range testCases { - errCode := checkQueueARN(testCase.queueARN) - if testCase.errCode != errCode { - t.Errorf("Test %d: Expected \"%d\", got \"%d\"", i+1, testCase.errCode, errCode) - } - } -} - -// Test unmarshal queue arn. -func TestUnmarshalSQSARN(t *testing.T) { - rootPath, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatalf("unable initialize config file, %s", err) - } - defer os.RemoveAll(rootPath) - - testCases := []struct { - queueARN string - Type string - }{ - // Valid webhook queue arn. - { - queueARN: "arn:minio:sqs:us-east-1:1:webhook", - Type: "webhook", - }, - // Valid redis queue arn. - { - queueARN: "arn:minio:sqs:us-east-1:1:redis", - Type: "redis", - }, - // Valid elasticsearch queue arn. - { - queueARN: "arn:minio:sqs:us-east-1:1:elasticsearch", - Type: "elasticsearch", - }, - // Valid amqp queue arn. 
- { - queueARN: "arn:minio:sqs:us-east-1:1:amqp", - Type: "amqp", - }, - // Valid mqtt queue arn. - { - queueARN: "arn:minio:sqs:us-east-1:1:mqtt", - Type: "mqtt", - }, - // Invalid empty queue arn. - { - queueARN: "", - Type: "", - }, - // Partial queue arn. - { - queueARN: "arn:minio:sqs:", - Type: "", - }, - // Invalid queue service value. - { - queueARN: "arn:minio:sqs:us-east-1:1:*", - Type: "", - }, - } - - for i, testCase := range testCases { - mSqs := unmarshalSqsARN(testCase.queueARN) - if testCase.Type != mSqs.Type { - t.Errorf("Test %d: Expected \"%s\", got \"%s\"", i+1, testCase.Type, mSqs.Type) - } - } - - // Test when the server region is set. - rootPath, err = newTestConfig("us-east-1") - if err != nil { - t.Fatalf("unable initialize config file, %s", err) - } - defer os.RemoveAll(rootPath) - - testCases = []struct { - queueARN string - Type string - }{ - // Incorrect region in ARN returns empty mSqs.Type - { - queueARN: "arn:minio:sqs:us-west-1:1:webhook", - Type: "", - }, - // Correct regionin ARN returns valid mSqs.Type - { - queueARN: "arn:minio:sqs:us-east-1:1:webhook", - Type: "webhook", - }, - } - - for i, testCase := range testCases { - mSqs := unmarshalSqsARN(testCase.queueARN) - if testCase.Type != mSqs.Type { - t.Errorf("Test %d: Expected \"%s\", got \"%s\"", i+1, testCase.Type, mSqs.Type) - } - } -} diff --git a/cmd/bucket-policy-handlers.go b/cmd/bucket-policy-handlers.go index fbad15cdd..8bc833f3e 100644 --- a/cmd/bucket-policy-handlers.go +++ b/cmd/bucket-policy-handlers.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -285,6 +285,10 @@ func (api objectAPIHandlers) PutBucketPolicyHandler(w http.ResponseWriter, r *ht return } + for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) { + errorIf(err, "unable to update policy change in remote peer %v", addr) + } + // Success. writeSuccessNoContent(w) } @@ -322,6 +326,10 @@ func (api objectAPIHandlers) DeleteBucketPolicyHandler(w http.ResponseWriter, r return } + for addr, err := range globalNotificationSys.UpdateBucketPolicy(bucket) { + errorIf(err, "unable to update policy change in remote peer %v", addr) + } + // Success. writeSuccessNoContent(w) } diff --git a/cmd/bucket-policy.go b/cmd/bucket-policy.go index eaddd5c28..5ec17e4ee 100644 --- a/cmd/bucket-policy.go +++ b/cmd/bucket-policy.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -31,7 +31,7 @@ import ( const ( // Static prefix to be used while constructing bucket ARN. // refer to S3 docs for more info. - bucketARNPrefix = "arn:" + eventSource + ":::" + bucketARNPrefix = "arn:aws:s3:::" // Bucket policy config name. bucketPolicyConfig = "policy.json" @@ -202,7 +202,5 @@ func persistAndNotifyBucketPolicyChange(bucket string, isRemove bool, bktPolicy } } - // Notify all peers (including self) to update in-memory state - S3PeersUpdateBucketPolicy(bucket) return nil } diff --git a/cmd/config-current.go b/cmd/config-current.go index b242e3fa8..29934b025 100644 --- a/cmd/config-current.go +++ b/cmd/config-current.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -24,6 +24,8 @@ import ( "sync" "github.com/minio/minio/pkg/auth" + "github.com/minio/minio/pkg/event" + "github.com/minio/minio/pkg/event/target" "github.com/minio/minio/pkg/quick" "github.com/tidwall/gjson" ) @@ -129,7 +131,7 @@ func (s *serverConfig) ConfigDiff(t *serverConfig) string { return "AMQP Notification configuration differs" case !reflect.DeepEqual(s.Notify.NATS, t.Notify.NATS): return "NATS Notification configuration differs" - case !reflect.DeepEqual(s.Notify.ElasticSearch, t.Notify.ElasticSearch): + case !reflect.DeepEqual(s.Notify.Elasticsearch, t.Notify.Elasticsearch): return "ElasticSearch Notification configuration differs" case !reflect.DeepEqual(s.Notify.Redis, t.Notify.Redis): return "Redis Notification configuration differs" @@ -166,24 +168,24 @@ func newServerConfig() *serverConfig { } // Make sure to initialize notification configs. - srvCfg.Notify.AMQP = make(map[string]amqpNotify) - srvCfg.Notify.AMQP["1"] = amqpNotify{} - srvCfg.Notify.MQTT = make(map[string]mqttNotify) - srvCfg.Notify.MQTT["1"] = mqttNotify{} - srvCfg.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvCfg.Notify.ElasticSearch["1"] = elasticSearchNotify{} - srvCfg.Notify.Redis = make(map[string]redisNotify) - srvCfg.Notify.Redis["1"] = redisNotify{} - srvCfg.Notify.NATS = make(map[string]natsNotify) - srvCfg.Notify.NATS["1"] = natsNotify{} - srvCfg.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvCfg.Notify.PostgreSQL["1"] = postgreSQLNotify{} - srvCfg.Notify.MySQL = make(map[string]mySQLNotify) - srvCfg.Notify.MySQL["1"] = mySQLNotify{} - srvCfg.Notify.Kafka = make(map[string]kafkaNotify) - srvCfg.Notify.Kafka["1"] = kafkaNotify{} - srvCfg.Notify.Webhook = make(map[string]webhookNotify) - srvCfg.Notify.Webhook["1"] = webhookNotify{} + srvCfg.Notify.AMQP = make(map[string]target.AMQPArgs) + srvCfg.Notify.AMQP["1"] = target.AMQPArgs{} + srvCfg.Notify.MQTT = make(map[string]target.MQTTArgs) + srvCfg.Notify.MQTT["1"] = target.MQTTArgs{} + 
srvCfg.Notify.Elasticsearch = make(map[string]target.ElasticsearchArgs) + srvCfg.Notify.Elasticsearch["1"] = target.ElasticsearchArgs{} + srvCfg.Notify.Redis = make(map[string]target.RedisArgs) + srvCfg.Notify.Redis["1"] = target.RedisArgs{} + srvCfg.Notify.NATS = make(map[string]target.NATSArgs) + srvCfg.Notify.NATS["1"] = target.NATSArgs{} + srvCfg.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvCfg.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{} + srvCfg.Notify.MySQL = make(map[string]target.MySQLArgs) + srvCfg.Notify.MySQL["1"] = target.MySQLArgs{} + srvCfg.Notify.Kafka = make(map[string]target.KafkaArgs) + srvCfg.Notify.Kafka["1"] = target.KafkaArgs{} + srvCfg.Notify.Webhook = make(map[string]target.WebhookArgs) + srvCfg.Notify.Webhook["1"] = target.WebhookArgs{} return srvCfg } @@ -310,11 +312,6 @@ func getValidConfig() (*serverConfig, error) { return nil, errors.New("invalid credential in config file " + configFile) } - // Validate notify field - if err = srvCfg.Notify.Validate(); err != nil { - return nil, err - } - return srvCfg, nil } @@ -369,3 +366,119 @@ func loadConfig() error { return nil } + +// getNotificationTargets - returns TargetList which contains enabled targets in serverConfig. +// A new notification target is added like below +// * Add a new target in pkg/event/target package. +// * Add newly added target configuration to serverConfig.Notify.. +// * Handle the configuration in this function to create/add into TargetList. 
+func getNotificationTargets(config *serverConfig) (*event.TargetList, error) { + targetList := event.NewTargetList() + + for id, args := range config.Notify.AMQP { + if args.Enable { + newTarget, err := target.NewAMQPTarget(id, args) + if err != nil { + return nil, err + } + if err = targetList.Add(newTarget); err != nil { + return nil, err + } + } + } + + for id, args := range config.Notify.Elasticsearch { + if args.Enable { + newTarget, err := target.NewElasticsearchTarget(id, args) + if err != nil { + return nil, err + } + if err = targetList.Add(newTarget); err != nil { + return nil, err + } + } + } + + for id, args := range config.Notify.Kafka { + if args.Enable { + newTarget, err := target.NewKafkaTarget(id, args) + if err != nil { + return nil, err + } + if err = targetList.Add(newTarget); err != nil { + return nil, err + } + } + } + + for id, args := range config.Notify.MQTT { + if args.Enable { + newTarget, err := target.NewMQTTTarget(id, args) + if err != nil { + return nil, err + } + if err = targetList.Add(newTarget); err != nil { + return nil, err + } + } + } + + for id, args := range config.Notify.MySQL { + if args.Enable { + newTarget, err := target.NewMySQLTarget(id, args) + if err != nil { + return nil, err + } + if err = targetList.Add(newTarget); err != nil { + return nil, err + } + } + } + + for id, args := range config.Notify.NATS { + if args.Enable { + newTarget, err := target.NewNATSTarget(id, args) + if err != nil { + return nil, err + } + if err = targetList.Add(newTarget); err != nil { + return nil, err + } + } + } + + for id, args := range config.Notify.PostgreSQL { + if args.Enable { + newTarget, err := target.NewPostgreSQLTarget(id, args) + if err != nil { + return nil, err + } + if err = targetList.Add(newTarget); err != nil { + return nil, err + } + } + } + + for id, args := range config.Notify.Redis { + if args.Enable { + newTarget, err := target.NewRedisTarget(id, args) + if err != nil { + return nil, err + } + if err = 
targetList.Add(newTarget); err != nil { + return nil, err + } + } + } + + for id, args := range config.Notify.Webhook { + if args.Enable { + newTarget := target.NewWebhookTarget(id, args) + if err := targetList.Add(newTarget); err != nil { + return nil, err + } + } + } + + return targetList, nil +} diff --git a/cmd/config-current_test.go b/cmd/config-current_test.go index 257a55cc2..cab4d08f6 100644 --- a/cmd/config-current_test.go +++ b/cmd/config-current_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,10 +20,10 @@ import ( "io/ioutil" "os" "path/filepath" - "reflect" "testing" "github.com/minio/minio/pkg/auth" + "github.com/minio/minio/pkg/event/target" "github.com/tidwall/gjson" ) @@ -45,55 +45,6 @@ func TestServerConfig(t *testing.T) { t.Errorf("Expecting region `us-west-1` found %s", globalServerConfig.GetRegion()) } - // Set new amqp notification id. - globalServerConfig.Notify.SetAMQPByID("2", amqpNotify{}) - savedNotifyCfg1 := globalServerConfig.Notify.GetAMQPByID("2") - if !reflect.DeepEqual(savedNotifyCfg1, amqpNotify{}) { - t.Errorf("Expecting AMQP config %#v found %#v", amqpNotify{}, savedNotifyCfg1) - } - - // Set new elastic search notification id. - globalServerConfig.Notify.SetElasticSearchByID("2", elasticSearchNotify{}) - savedNotifyCfg2 := globalServerConfig.Notify.GetElasticSearchByID("2") - if !reflect.DeepEqual(savedNotifyCfg2, elasticSearchNotify{}) { - t.Errorf("Expecting Elasticsearch config %#v found %#v", elasticSearchNotify{}, savedNotifyCfg2) - } - - // Set new redis notification id. 
- globalServerConfig.Notify.SetRedisByID("2", redisNotify{}) - savedNotifyCfg3 := globalServerConfig.Notify.GetRedisByID("2") - if !reflect.DeepEqual(savedNotifyCfg3, redisNotify{}) { - t.Errorf("Expecting Redis config %#v found %#v", redisNotify{}, savedNotifyCfg3) - } - - // Set new kafka notification id. - globalServerConfig.Notify.SetKafkaByID("2", kafkaNotify{}) - savedNotifyCfg4 := globalServerConfig.Notify.GetKafkaByID("2") - if !reflect.DeepEqual(savedNotifyCfg4, kafkaNotify{}) { - t.Errorf("Expecting Kafka config %#v found %#v", kafkaNotify{}, savedNotifyCfg4) - } - - // Set new Webhook notification id. - globalServerConfig.Notify.SetWebhookByID("2", webhookNotify{}) - savedNotifyCfg5 := globalServerConfig.Notify.GetWebhookByID("2") - if !reflect.DeepEqual(savedNotifyCfg5, webhookNotify{}) { - t.Errorf("Expecting Webhook config %#v found %#v", webhookNotify{}, savedNotifyCfg5) - } - - // Set new MySQL notification id. - globalServerConfig.Notify.SetMySQLByID("2", mySQLNotify{}) - savedNotifyCfg6 := globalServerConfig.Notify.GetMySQLByID("2") - if !reflect.DeepEqual(savedNotifyCfg6, mySQLNotify{}) { - t.Errorf("Expecting Webhook config %#v found %#v", mySQLNotify{}, savedNotifyCfg6) - } - - // Set new MQTT notification id. - globalServerConfig.Notify.SetMQTTByID("2", mqttNotify{}) - savedNotifyCfg7 := globalServerConfig.Notify.GetMQTTByID("2") - if !reflect.DeepEqual(savedNotifyCfg7, mqttNotify{}) { - t.Errorf("Expecting Webhook config %#v found %#v", mqttNotify{}, savedNotifyCfg7) - } - // Match version. 
if globalServerConfig.GetVersion() != serverConfigVersion { t.Errorf("Expecting version %s found %s", globalServerConfig.GetVersion(), serverConfigVersion) @@ -252,55 +203,55 @@ func TestValidateConfig(t *testing.T) { {`{"version": "` + v + `", "browser": "on", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, false}, // Test 11 - Test AMQP - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, true}, // Test 12 - Test NATS - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "clientID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "clientID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, true}, // Test 
13 - Test ElasticSearch - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, true}, // Test 14 - Test Redis - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, true}, // Test 15 - Test PostgreSQL - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, true}, // Test 16 - Test Kafka - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", 
"notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, true}, // Test 17 - Test Webhook - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, true}, // Test 18 - Test MySQL - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, true}, // Test 19 - Test Format for MySQL - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true}, // Test 20 - Test valid Format for MySQL {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, 
"region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "namespace", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true}, // Test 21 - Test Format for PostgreSQL - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "invalid", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "invalid", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true}, // Test 22 - Test valid Format for PostgreSQL {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "format": "namespace", "table": "xxx", "host": "myhost", "port": "5432", "user": "abc", "password": "pqr", "database": "test1" }}}}`, true}, // Test 23 - Test Format for ElasticSearch - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "invalid", "url": "example.com", "index": "myindex" } }}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "invalid", "url": "example.com", "index": "myindex" } }}}`, true}, // Test 24 - Test valid Format for ElasticSearch 
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "namespace", "url": "example.com", "index": "myindex" } }}}`, true}, // Test 25 - Test Format for Redis - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true}, // Test 26 - Test valid Format for Redis {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true}, // Test 27 - Test MQTT - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "clientId": "", "username": "", "password": ""}}}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mqtt": { "1": { "enable": true, "broker": "", "topic": "", "qos": 0, "clientId": "", "username": "", "password": ""}}}}`, true}, } for i, testCase := range testCases { @@ -345,56 +296,56 @@ func TestConfigDiff(t *testing.T) { }, // 7 { - &serverConfig{Notify: notifier{AMQP: map[string]amqpNotify{"1": {Enable: true}}}}, - &serverConfig{Notify: notifier{AMQP: 
map[string]amqpNotify{"1": {Enable: false}}}}, + &serverConfig{Notify: notifier{AMQP: map[string]target.AMQPArgs{"1": {Enable: true}}}}, + &serverConfig{Notify: notifier{AMQP: map[string]target.AMQPArgs{"1": {Enable: false}}}}, "AMQP Notification configuration differs", }, // 8 { - &serverConfig{Notify: notifier{NATS: map[string]natsNotify{"1": {Enable: true}}}}, - &serverConfig{Notify: notifier{NATS: map[string]natsNotify{"1": {Enable: false}}}}, + &serverConfig{Notify: notifier{NATS: map[string]target.NATSArgs{"1": {Enable: true}}}}, + &serverConfig{Notify: notifier{NATS: map[string]target.NATSArgs{"1": {Enable: false}}}}, "NATS Notification configuration differs", }, // 9 { - &serverConfig{Notify: notifier{ElasticSearch: map[string]elasticSearchNotify{"1": {Enable: true}}}}, - &serverConfig{Notify: notifier{ElasticSearch: map[string]elasticSearchNotify{"1": {Enable: false}}}}, + &serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: true}}}}, + &serverConfig{Notify: notifier{Elasticsearch: map[string]target.ElasticsearchArgs{"1": {Enable: false}}}}, "ElasticSearch Notification configuration differs", }, // 10 { - &serverConfig{Notify: notifier{Redis: map[string]redisNotify{"1": {Enable: true}}}}, - &serverConfig{Notify: notifier{Redis: map[string]redisNotify{"1": {Enable: false}}}}, + &serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: true}}}}, + &serverConfig{Notify: notifier{Redis: map[string]target.RedisArgs{"1": {Enable: false}}}}, "Redis Notification configuration differs", }, // 11 { - &serverConfig{Notify: notifier{PostgreSQL: map[string]postgreSQLNotify{"1": {Enable: true}}}}, - &serverConfig{Notify: notifier{PostgreSQL: map[string]postgreSQLNotify{"1": {Enable: false}}}}, + &serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: true}}}}, + &serverConfig{Notify: notifier{PostgreSQL: map[string]target.PostgreSQLArgs{"1": {Enable: false}}}}, "PostgreSQL 
Notification configuration differs", }, // 12 { - &serverConfig{Notify: notifier{Kafka: map[string]kafkaNotify{"1": {Enable: true}}}}, - &serverConfig{Notify: notifier{Kafka: map[string]kafkaNotify{"1": {Enable: false}}}}, + &serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: true}}}}, + &serverConfig{Notify: notifier{Kafka: map[string]target.KafkaArgs{"1": {Enable: false}}}}, "Kafka Notification configuration differs", }, // 13 { - &serverConfig{Notify: notifier{Webhook: map[string]webhookNotify{"1": {Enable: true}}}}, - &serverConfig{Notify: notifier{Webhook: map[string]webhookNotify{"1": {Enable: false}}}}, + &serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: true}}}}, + &serverConfig{Notify: notifier{Webhook: map[string]target.WebhookArgs{"1": {Enable: false}}}}, "Webhook Notification configuration differs", }, // 14 { - &serverConfig{Notify: notifier{MySQL: map[string]mySQLNotify{"1": {Enable: true}}}}, - &serverConfig{Notify: notifier{MySQL: map[string]mySQLNotify{"1": {Enable: false}}}}, + &serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: true}}}}, + &serverConfig{Notify: notifier{MySQL: map[string]target.MySQLArgs{"1": {Enable: false}}}}, "MySQL Notification configuration differs", }, // 15 { - &serverConfig{Notify: notifier{MQTT: map[string]mqttNotify{"1": {Enable: true}}}}, - &serverConfig{Notify: notifier{MQTT: map[string]mqttNotify{"1": {Enable: false}}}}, + &serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: true}}}}, + &serverConfig{Notify: notifier{MQTT: map[string]target.MQTTArgs{"1": {Enable: false}}}}, "MQTT Notification configuration differs", }, } diff --git a/cmd/config-migrate.go b/cmd/config-migrate.go index ac17fcbcf..6247da174 100644 --- a/cmd/config-migrate.go +++ b/cmd/config-migrate.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -22,6 +22,9 @@ import ( "path/filepath" "github.com/minio/minio/pkg/auth" + "github.com/minio/minio/pkg/event" + "github.com/minio/minio/pkg/event/target" + xnet "github.com/minio/minio/pkg/net" "github.com/minio/minio/pkg/quick" ) @@ -356,34 +359,55 @@ func migrateV5ToV6() error { srvConfig.Logger.File = cv5.Logger.File srvConfig.Logger.Syslog = cv5.Logger.Syslog - srvConfig.Notify.AMQP = map[string]amqpNotify{ - "1": { - Enable: cv5.Logger.AMQP.Enable, - URL: cv5.Logger.AMQP.URL, - Exchange: cv5.Logger.AMQP.Exchange, - RoutingKey: cv5.Logger.AMQP.RoutingKey, - Mandatory: cv5.Logger.AMQP.Mandatory, - Immediate: cv5.Logger.AMQP.Immediate, - Durable: cv5.Logger.AMQP.Durable, - Internal: cv5.Logger.AMQP.Internal, - NoWait: cv5.Logger.AMQP.NoWait, - AutoDeleted: cv5.Logger.AMQP.AutoDeleted, - }, + if cv5.Logger.AMQP.URL != "" { + var url *xnet.URL + if url, err = xnet.ParseURL(cv5.Logger.AMQP.URL); err != nil { + return err + } + srvConfig.Notify.AMQP = map[string]target.AMQPArgs{ + "1": { + Enable: cv5.Logger.AMQP.Enable, + URL: *url, + Exchange: cv5.Logger.AMQP.Exchange, + RoutingKey: cv5.Logger.AMQP.RoutingKey, + Mandatory: cv5.Logger.AMQP.Mandatory, + Immediate: cv5.Logger.AMQP.Immediate, + Durable: cv5.Logger.AMQP.Durable, + Internal: cv5.Logger.AMQP.Internal, + NoWait: cv5.Logger.AMQP.NoWait, + AutoDeleted: cv5.Logger.AMQP.AutoDeleted, + }, + } } - srvConfig.Notify.ElasticSearch = map[string]elasticSearchNotify{ - "1": { - Enable: cv5.Logger.ElasticSearch.Enable, - URL: cv5.Logger.ElasticSearch.URL, - Index: cv5.Logger.ElasticSearch.Index, - }, + + if cv5.Logger.ElasticSearch.URL != "" { + var url *xnet.URL + url, err = xnet.ParseURL(cv5.Logger.ElasticSearch.URL) + if err != nil { + return err + } + srvConfig.Notify.ElasticSearch = map[string]target.ElasticsearchArgs{ + "1": { + Enable: cv5.Logger.ElasticSearch.Enable, + URL: 
*url, + Index: cv5.Logger.ElasticSearch.Index, + }, + } } - srvConfig.Notify.Redis = map[string]redisNotify{ - "1": { - Enable: cv5.Logger.Redis.Enable, - Addr: cv5.Logger.Redis.Addr, - Password: cv5.Logger.Redis.Password, - Key: cv5.Logger.Redis.Key, - }, + + if cv5.Logger.Redis.Addr != "" { + var addr *xnet.Host + if addr, err = xnet.ParseHost(cv5.Logger.Redis.Addr); err != nil { + return err + } + srvConfig.Notify.Redis = map[string]target.RedisArgs{ + "1": { + Enable: cv5.Logger.Redis.Enable, + Addr: *addr, + Password: cv5.Logger.Redis.Password, + Key: cv5.Logger.Redis.Key, + }, + } } if err = quick.Save(configFile, srvConfig); err != nil { @@ -423,21 +447,21 @@ func migrateV6ToV7() error { srvConfig.Logger.Console = cv6.Logger.Console srvConfig.Logger.File = cv6.Logger.File srvConfig.Logger.Syslog = cv6.Logger.Syslog - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.Redis = make(map[string]redisNotify) + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.ElasticSearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) if len(cv6.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { srvConfig.Notify.AMQP = cv6.Notify.AMQP } if len(cv6.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + srvConfig.Notify.ElasticSearch["1"] = target.ElasticsearchArgs{} } else { srvConfig.Notify.ElasticSearch = cv6.Notify.ElasticSearch } if len(cv6.Notify.Redis) == 0 { - srvConfig.Notify.Redis["1"] = redisNotify{} + srvConfig.Notify.Redis["1"] = target.RedisArgs{} } else { srvConfig.Notify.Redis = cv6.Notify.Redis } @@ -479,13 +503,13 @@ func migrateV7ToV8() error { srvConfig.Logger.Console = cv7.Logger.Console srvConfig.Logger.File = cv7.Logger.File srvConfig.Logger.Syslog = cv7.Logger.Syslog - 
srvConfig.Notify.AMQP = make(map[string]amqpNotify) + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) srvConfig.Notify.NATS = make(map[string]natsNotifyV1) - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) + srvConfig.Notify.ElasticSearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) if len(cv7.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { srvConfig.Notify.AMQP = cv7.Notify.AMQP } @@ -495,12 +519,12 @@ func migrateV7ToV8() error { srvConfig.Notify.NATS = cv7.Notify.NATS } if len(cv7.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + srvConfig.Notify.ElasticSearch["1"] = target.ElasticsearchArgs{} } else { srvConfig.Notify.ElasticSearch = cv7.Notify.ElasticSearch } if len(cv7.Notify.Redis) == 0 { - srvConfig.Notify.Redis["1"] = redisNotify{} + srvConfig.Notify.Redis["1"] = target.RedisArgs{} } else { srvConfig.Notify.Redis = cv7.Notify.Redis } @@ -545,8 +569,8 @@ func migrateV8ToV9() error { // check and set notifiers config if len(cv8.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { srvConfig.Notify.AMQP = cv8.Notify.AMQP } @@ -557,20 +581,20 @@ func migrateV8ToV9() error { srvConfig.Notify.NATS = cv8.Notify.NATS } if len(cv8.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + srvConfig.Notify.ElasticSearch = make(map[string]target.ElasticsearchArgs) + 
srvConfig.Notify.ElasticSearch["1"] = target.ElasticsearchArgs{} } else { srvConfig.Notify.ElasticSearch = cv8.Notify.ElasticSearch } if len(cv8.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{} + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.Redis["1"] = target.RedisArgs{} } else { srvConfig.Notify.Redis = cv8.Notify.Redis } if len(cv8.Notify.PostgreSQL) == 0 { - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{} + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{} } else { srvConfig.Notify.PostgreSQL = cv8.Notify.PostgreSQL } @@ -613,8 +637,8 @@ func migrateV9ToV10() error { // check and set notifiers config if len(cv9.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { srvConfig.Notify.AMQP = cv9.Notify.AMQP } @@ -625,20 +649,20 @@ func migrateV9ToV10() error { srvConfig.Notify.NATS = cv9.Notify.NATS } if len(cv9.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + srvConfig.Notify.ElasticSearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.ElasticSearch["1"] = target.ElasticsearchArgs{} } else { srvConfig.Notify.ElasticSearch = cv9.Notify.ElasticSearch } if len(cv9.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{} + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.Redis["1"] = target.RedisArgs{} } else { srvConfig.Notify.Redis = cv9.Notify.Redis } if len(cv9.Notify.PostgreSQL) == 0 { - 
srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{} + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{} } else { srvConfig.Notify.PostgreSQL = cv9.Notify.PostgreSQL } @@ -681,8 +705,8 @@ func migrateV10ToV11() error { // check and set notifiers config if len(cv10.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { srvConfig.Notify.AMQP = cv10.Notify.AMQP } @@ -693,26 +717,26 @@ func migrateV10ToV11() error { srvConfig.Notify.NATS = cv10.Notify.NATS } if len(cv10.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + srvConfig.Notify.ElasticSearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.ElasticSearch["1"] = target.ElasticsearchArgs{} } else { srvConfig.Notify.ElasticSearch = cv10.Notify.ElasticSearch } if len(cv10.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{} + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.Redis["1"] = target.RedisArgs{} } else { srvConfig.Notify.Redis = cv10.Notify.Redis } if len(cv10.Notify.PostgreSQL) == 0 { - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{} + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{} } else { srvConfig.Notify.PostgreSQL = cv10.Notify.PostgreSQL } // V10 will not have a Kafka config. So we initialize one here. 
- srvConfig.Notify.Kafka = make(map[string]kafkaNotify) - srvConfig.Notify.Kafka["1"] = kafkaNotify{} + srvConfig.Notify.Kafka = make(map[string]target.KafkaArgs) + srvConfig.Notify.Kafka["1"] = target.KafkaArgs{} if err = quick.Save(configFile, srvConfig); err != nil { return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv10.Version, srvConfig.Version, err) @@ -752,32 +776,32 @@ func migrateV11ToV12() error { // check and set notifiers config if len(cv11.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { srvConfig.Notify.AMQP = cv11.Notify.AMQP } if len(cv11.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + srvConfig.Notify.ElasticSearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.ElasticSearch["1"] = target.ElasticsearchArgs{} } else { srvConfig.Notify.ElasticSearch = cv11.Notify.ElasticSearch } if len(cv11.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{} + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.Redis["1"] = target.RedisArgs{} } else { srvConfig.Notify.Redis = cv11.Notify.Redis } if len(cv11.Notify.PostgreSQL) == 0 { - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{} + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{} } else { srvConfig.Notify.PostgreSQL = cv11.Notify.PostgreSQL } if len(cv11.Notify.Kafka) == 0 { - srvConfig.Notify.Kafka = make(map[string]kafkaNotify) - srvConfig.Notify.Kafka["1"] = kafkaNotify{} + srvConfig.Notify.Kafka = make(map[string]target.KafkaArgs) + 
srvConfig.Notify.Kafka["1"] = target.KafkaArgs{} } else { srvConfig.Notify.Kafka = cv11.Notify.Kafka } @@ -785,14 +809,23 @@ func migrateV11ToV12() error { // V12 will have an updated config of nats. So we create a new one or we // update the old one if found. if len(cv11.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = natsNotify{} + srvConfig.Notify.NATS = make(map[string]target.NATSArgs) + srvConfig.Notify.NATS["1"] = target.NATSArgs{} } else { - srvConfig.Notify.NATS = make(map[string]natsNotify) + srvConfig.Notify.NATS = make(map[string]target.NATSArgs) for k, v := range cv11.Notify.NATS { - n := natsNotify{} + if v.Address == "" { + continue + } + + var addr *xnet.Host + addr, err = xnet.ParseHost(v.Address) + if err != nil { + return err + } + n := target.NATSArgs{} n.Enable = v.Enable - n.Address = v.Address + n.Address = *addr n.Subject = v.Subject n.Username = v.Username n.Password = v.Password @@ -843,45 +876,45 @@ func migrateV12ToV13() error { // check and set notifiers config if len(cv12.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { srvConfig.Notify.AMQP = cv12.Notify.AMQP } if len(cv12.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + srvConfig.Notify.Elasticsearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.Elasticsearch["1"] = target.ElasticsearchArgs{} } else { - srvConfig.Notify.ElasticSearch = cv12.Notify.ElasticSearch + srvConfig.Notify.Elasticsearch = cv12.Notify.ElasticSearch } if len(cv12.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{} + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) 
+ srvConfig.Notify.Redis["1"] = target.RedisArgs{} } else { srvConfig.Notify.Redis = cv12.Notify.Redis } if len(cv12.Notify.PostgreSQL) == 0 { - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{} + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{} } else { srvConfig.Notify.PostgreSQL = cv12.Notify.PostgreSQL } if len(cv12.Notify.Kafka) == 0 { - srvConfig.Notify.Kafka = make(map[string]kafkaNotify) - srvConfig.Notify.Kafka["1"] = kafkaNotify{} + srvConfig.Notify.Kafka = make(map[string]target.KafkaArgs) + srvConfig.Notify.Kafka["1"] = target.KafkaArgs{} } else { srvConfig.Notify.Kafka = cv12.Notify.Kafka } if len(cv12.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = natsNotify{} + srvConfig.Notify.NATS = make(map[string]target.NATSArgs) + srvConfig.Notify.NATS["1"] = target.NATSArgs{} } else { srvConfig.Notify.NATS = cv12.Notify.NATS } // V12 will not have a webhook config. So we initialize one here. - srvConfig.Notify.Webhook = make(map[string]webhookNotify) - srvConfig.Notify.Webhook["1"] = webhookNotify{} + srvConfig.Notify.Webhook = make(map[string]target.WebhookArgs) + srvConfig.Notify.Webhook["1"] = target.WebhookArgs{} if err = quick.Save(configFile, srvConfig); err != nil { return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. 
%v", cv12.Version, srvConfig.Version, err) @@ -923,44 +956,44 @@ func migrateV13ToV14() error { // check and set notifiers config if len(cv13.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { srvConfig.Notify.AMQP = cv13.Notify.AMQP } - if len(cv13.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + if len(cv13.Notify.Elasticsearch) == 0 { + srvConfig.Notify.Elasticsearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.Elasticsearch["1"] = target.ElasticsearchArgs{} } else { - srvConfig.Notify.ElasticSearch = cv13.Notify.ElasticSearch + srvConfig.Notify.Elasticsearch = cv13.Notify.Elasticsearch } if len(cv13.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{} + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.Redis["1"] = target.RedisArgs{} } else { srvConfig.Notify.Redis = cv13.Notify.Redis } if len(cv13.Notify.PostgreSQL) == 0 { - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{} + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{} } else { srvConfig.Notify.PostgreSQL = cv13.Notify.PostgreSQL } if len(cv13.Notify.Kafka) == 0 { - srvConfig.Notify.Kafka = make(map[string]kafkaNotify) - srvConfig.Notify.Kafka["1"] = kafkaNotify{} + srvConfig.Notify.Kafka = make(map[string]target.KafkaArgs) + srvConfig.Notify.Kafka["1"] = target.KafkaArgs{} } else { srvConfig.Notify.Kafka = cv13.Notify.Kafka } if len(cv13.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = 
natsNotify{} + srvConfig.Notify.NATS = make(map[string]target.NATSArgs) + srvConfig.Notify.NATS["1"] = target.NATSArgs{} } else { srvConfig.Notify.NATS = cv13.Notify.NATS } if len(cv13.Notify.Webhook) == 0 { - srvConfig.Notify.Webhook = make(map[string]webhookNotify) - srvConfig.Notify.Webhook["1"] = webhookNotify{} + srvConfig.Notify.Webhook = make(map[string]target.WebhookArgs) + srvConfig.Notify.Webhook["1"] = target.WebhookArgs{} } else { srvConfig.Notify.Webhook = cv13.Notify.Webhook } @@ -1008,51 +1041,51 @@ func migrateV14ToV15() error { // check and set notifiers config if len(cv14.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { srvConfig.Notify.AMQP = cv14.Notify.AMQP } - if len(cv14.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + if len(cv14.Notify.Elasticsearch) == 0 { + srvConfig.Notify.Elasticsearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.Elasticsearch["1"] = target.ElasticsearchArgs{} } else { - srvConfig.Notify.ElasticSearch = cv14.Notify.ElasticSearch + srvConfig.Notify.Elasticsearch = cv14.Notify.Elasticsearch } if len(cv14.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{} + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.Redis["1"] = target.RedisArgs{} } else { srvConfig.Notify.Redis = cv14.Notify.Redis } if len(cv14.Notify.PostgreSQL) == 0 { - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{} + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{} } else { 
srvConfig.Notify.PostgreSQL = cv14.Notify.PostgreSQL } if len(cv14.Notify.Kafka) == 0 { - srvConfig.Notify.Kafka = make(map[string]kafkaNotify) - srvConfig.Notify.Kafka["1"] = kafkaNotify{} + srvConfig.Notify.Kafka = make(map[string]target.KafkaArgs) + srvConfig.Notify.Kafka["1"] = target.KafkaArgs{} } else { srvConfig.Notify.Kafka = cv14.Notify.Kafka } if len(cv14.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = natsNotify{} + srvConfig.Notify.NATS = make(map[string]target.NATSArgs) + srvConfig.Notify.NATS["1"] = target.NATSArgs{} } else { srvConfig.Notify.NATS = cv14.Notify.NATS } if len(cv14.Notify.Webhook) == 0 { - srvConfig.Notify.Webhook = make(map[string]webhookNotify) - srvConfig.Notify.Webhook["1"] = webhookNotify{} + srvConfig.Notify.Webhook = make(map[string]target.WebhookArgs) + srvConfig.Notify.Webhook["1"] = target.WebhookArgs{} } else { srvConfig.Notify.Webhook = cv14.Notify.Webhook } // V14 will not have mysql support, so we add that here. - srvConfig.Notify.MySQL = make(map[string]mySQLNotify) - srvConfig.Notify.MySQL["1"] = mySQLNotify{} + srvConfig.Notify.MySQL = make(map[string]target.MySQLArgs) + srvConfig.Notify.MySQL["1"] = target.MySQLArgs{} // Load browser config from existing config in the file. 
srvConfig.Browser = cv14.Browser @@ -1096,50 +1129,50 @@ func migrateV15ToV16() error { // check and set notifiers config if len(cv15.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { srvConfig.Notify.AMQP = cv15.Notify.AMQP } - if len(cv15.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + if len(cv15.Notify.Elasticsearch) == 0 { + srvConfig.Notify.Elasticsearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.Elasticsearch["1"] = target.ElasticsearchArgs{} } else { - srvConfig.Notify.ElasticSearch = cv15.Notify.ElasticSearch + srvConfig.Notify.Elasticsearch = cv15.Notify.Elasticsearch } if len(cv15.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{} + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.Redis["1"] = target.RedisArgs{} } else { srvConfig.Notify.Redis = cv15.Notify.Redis } if len(cv15.Notify.PostgreSQL) == 0 { - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{} + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{} } else { srvConfig.Notify.PostgreSQL = cv15.Notify.PostgreSQL } if len(cv15.Notify.Kafka) == 0 { - srvConfig.Notify.Kafka = make(map[string]kafkaNotify) - srvConfig.Notify.Kafka["1"] = kafkaNotify{} + srvConfig.Notify.Kafka = make(map[string]target.KafkaArgs) + srvConfig.Notify.Kafka["1"] = target.KafkaArgs{} } else { srvConfig.Notify.Kafka = cv15.Notify.Kafka } if len(cv15.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = natsNotify{} + 
srvConfig.Notify.NATS = make(map[string]target.NATSArgs) + srvConfig.Notify.NATS["1"] = target.NATSArgs{} } else { srvConfig.Notify.NATS = cv15.Notify.NATS } if len(cv15.Notify.Webhook) == 0 { - srvConfig.Notify.Webhook = make(map[string]webhookNotify) - srvConfig.Notify.Webhook["1"] = webhookNotify{} + srvConfig.Notify.Webhook = make(map[string]target.WebhookArgs) + srvConfig.Notify.Webhook["1"] = target.WebhookArgs{} } else { srvConfig.Notify.Webhook = cv15.Notify.Webhook } if len(cv15.Notify.MySQL) == 0 { - srvConfig.Notify.MySQL = make(map[string]mySQLNotify) - srvConfig.Notify.MySQL["1"] = mySQLNotify{} + srvConfig.Notify.MySQL = make(map[string]target.MySQLArgs) + srvConfig.Notify.MySQL["1"] = target.MySQLArgs{} } else { srvConfig.Notify.MySQL = cv15.Notify.MySQL } @@ -1189,80 +1222,80 @@ func migrateV16ToV17() error { // check and set notifiers config if len(cv16.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { srvConfig.Notify.AMQP = cv16.Notify.AMQP } - if len(cv16.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{} + if len(cv16.Notify.Elasticsearch) == 0 { + srvConfig.Notify.Elasticsearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.Elasticsearch["1"] = target.ElasticsearchArgs{} } else { // IMPORTANT NOTE: Future migrations should remove // this as existing configuration will already contain // a value for the "format" parameter. 
- for k, v := range cv16.Notify.ElasticSearch.Clone() { - v.Format = formatNamespace - cv16.Notify.ElasticSearch[k] = v + srvConfig.Notify.Elasticsearch = cv16.Notify.Elasticsearch + for k, v := range srvConfig.Notify.Elasticsearch { + v.Format = event.NamespaceFormat + srvConfig.Notify.Elasticsearch[k] = v } - srvConfig.Notify.ElasticSearch = cv16.Notify.ElasticSearch } if len(cv16.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{} + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.Redis["1"] = target.RedisArgs{} } else { // IMPORTANT NOTE: Future migrations should remove // this as existing configuration will already contain // a value for the "format" parameter. - for k, v := range cv16.Notify.Redis.Clone() { - v.Format = formatNamespace - cv16.Notify.Redis[k] = v - } srvConfig.Notify.Redis = cv16.Notify.Redis + for k, v := range srvConfig.Notify.Redis { + v.Format = event.NamespaceFormat + srvConfig.Notify.Redis[k] = v + } } if len(cv16.Notify.PostgreSQL) == 0 { - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{} + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{} } else { // IMPORTANT NOTE: Future migrations should remove // this as existing configuration will already contain // a value for the "format" parameter. 
- for k, v := range cv16.Notify.PostgreSQL.Clone() { - v.Format = formatNamespace - cv16.Notify.PostgreSQL[k] = v - } srvConfig.Notify.PostgreSQL = cv16.Notify.PostgreSQL + for k, v := range srvConfig.Notify.PostgreSQL { + v.Format = event.NamespaceFormat + srvConfig.Notify.PostgreSQL[k] = v + } } if len(cv16.Notify.Kafka) == 0 { - srvConfig.Notify.Kafka = make(map[string]kafkaNotify) - srvConfig.Notify.Kafka["1"] = kafkaNotify{} + srvConfig.Notify.Kafka = make(map[string]target.KafkaArgs) + srvConfig.Notify.Kafka["1"] = target.KafkaArgs{} } else { srvConfig.Notify.Kafka = cv16.Notify.Kafka } if len(cv16.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = natsNotify{} + srvConfig.Notify.NATS = make(map[string]target.NATSArgs) + srvConfig.Notify.NATS["1"] = target.NATSArgs{} } else { srvConfig.Notify.NATS = cv16.Notify.NATS } if len(cv16.Notify.Webhook) == 0 { - srvConfig.Notify.Webhook = make(map[string]webhookNotify) - srvConfig.Notify.Webhook["1"] = webhookNotify{} + srvConfig.Notify.Webhook = make(map[string]target.WebhookArgs) + srvConfig.Notify.Webhook["1"] = target.WebhookArgs{} } else { srvConfig.Notify.Webhook = cv16.Notify.Webhook } if len(cv16.Notify.MySQL) == 0 { - srvConfig.Notify.MySQL = make(map[string]mySQLNotify) - srvConfig.Notify.MySQL["1"] = mySQLNotify{} + srvConfig.Notify.MySQL = make(map[string]target.MySQLArgs) + srvConfig.Notify.MySQL["1"] = target.MySQLArgs{} } else { // IMPORTANT NOTE: Future migrations should remove // this as existing configuration will already contain // a value for the "format" parameter. - for k, v := range cv16.Notify.MySQL.Clone() { - v.Format = formatNamespace - cv16.Notify.MySQL[k] = v - } srvConfig.Notify.MySQL = cv16.Notify.MySQL + for k, v := range srvConfig.Notify.MySQL { + v.Format = event.NamespaceFormat + srvConfig.Notify.MySQL[k] = v + } } // Load browser config from existing config in the file. 
@@ -1310,60 +1343,60 @@ func migrateV17ToV18() error { // check and set notifiers config if len(cv17.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { // New deliveryMode parameter is added for AMQP, // default value is already 0, so nothing to // explicitly migrate here. srvConfig.Notify.AMQP = cv17.Notify.AMQP } - if len(cv17.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{ - Format: formatNamespace, + if len(cv17.Notify.Elasticsearch) == 0 { + srvConfig.Notify.Elasticsearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.Elasticsearch["1"] = target.ElasticsearchArgs{ + Format: event.NamespaceFormat, } } else { - srvConfig.Notify.ElasticSearch = cv17.Notify.ElasticSearch + srvConfig.Notify.Elasticsearch = cv17.Notify.Elasticsearch } if len(cv17.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{ - Format: formatNamespace, + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.Redis["1"] = target.RedisArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.Redis = cv17.Notify.Redis } if len(cv17.Notify.PostgreSQL) == 0 { - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{ - Format: formatNamespace, + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.PostgreSQL = cv17.Notify.PostgreSQL } if len(cv17.Notify.Kafka) == 0 { - srvConfig.Notify.Kafka = make(map[string]kafkaNotify) - srvConfig.Notify.Kafka["1"] = kafkaNotify{} + srvConfig.Notify.Kafka = 
make(map[string]target.KafkaArgs) + srvConfig.Notify.Kafka["1"] = target.KafkaArgs{} } else { srvConfig.Notify.Kafka = cv17.Notify.Kafka } if len(cv17.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = natsNotify{} + srvConfig.Notify.NATS = make(map[string]target.NATSArgs) + srvConfig.Notify.NATS["1"] = target.NATSArgs{} } else { srvConfig.Notify.NATS = cv17.Notify.NATS } if len(cv17.Notify.Webhook) == 0 { - srvConfig.Notify.Webhook = make(map[string]webhookNotify) - srvConfig.Notify.Webhook["1"] = webhookNotify{} + srvConfig.Notify.Webhook = make(map[string]target.WebhookArgs) + srvConfig.Notify.Webhook["1"] = target.WebhookArgs{} } else { srvConfig.Notify.Webhook = cv17.Notify.Webhook } if len(cv17.Notify.MySQL) == 0 { - srvConfig.Notify.MySQL = make(map[string]mySQLNotify) - srvConfig.Notify.MySQL["1"] = mySQLNotify{ - Format: formatNamespace, + srvConfig.Notify.MySQL = make(map[string]target.MySQLArgs) + srvConfig.Notify.MySQL["1"] = target.MySQLArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.MySQL = cv17.Notify.MySQL @@ -1412,68 +1445,68 @@ func migrateV18ToV19() error { // check and set notifiers config if len(cv18.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { // New deliveryMode parameter is added for AMQP, // default value is already 0, so nothing to // explicitly migrate here. 
srvConfig.Notify.AMQP = cv18.Notify.AMQP } - if len(cv18.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{ - Format: formatNamespace, + if len(cv18.Notify.Elasticsearch) == 0 { + srvConfig.Notify.Elasticsearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.Elasticsearch["1"] = target.ElasticsearchArgs{ + Format: event.NamespaceFormat, } } else { - srvConfig.Notify.ElasticSearch = cv18.Notify.ElasticSearch + srvConfig.Notify.Elasticsearch = cv18.Notify.Elasticsearch } if len(cv18.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{ - Format: formatNamespace, + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.Redis["1"] = target.RedisArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.Redis = cv18.Notify.Redis } if len(cv18.Notify.PostgreSQL) == 0 { - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{ - Format: formatNamespace, + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.PostgreSQL = cv18.Notify.PostgreSQL } if len(cv18.Notify.Kafka) == 0 { - srvConfig.Notify.Kafka = make(map[string]kafkaNotify) - srvConfig.Notify.Kafka["1"] = kafkaNotify{} + srvConfig.Notify.Kafka = make(map[string]target.KafkaArgs) + srvConfig.Notify.Kafka["1"] = target.KafkaArgs{} } else { srvConfig.Notify.Kafka = cv18.Notify.Kafka } if len(cv18.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = natsNotify{} + srvConfig.Notify.NATS = make(map[string]target.NATSArgs) + srvConfig.Notify.NATS["1"] = target.NATSArgs{} } else { srvConfig.Notify.NATS = cv18.Notify.NATS } if len(cv18.Notify.Webhook) == 
0 { - srvConfig.Notify.Webhook = make(map[string]webhookNotify) - srvConfig.Notify.Webhook["1"] = webhookNotify{} + srvConfig.Notify.Webhook = make(map[string]target.WebhookArgs) + srvConfig.Notify.Webhook["1"] = target.WebhookArgs{} } else { srvConfig.Notify.Webhook = cv18.Notify.Webhook } if len(cv18.Notify.MySQL) == 0 { - srvConfig.Notify.MySQL = make(map[string]mySQLNotify) - srvConfig.Notify.MySQL["1"] = mySQLNotify{ - Format: formatNamespace, + srvConfig.Notify.MySQL = make(map[string]target.MySQLArgs) + srvConfig.Notify.MySQL["1"] = target.MySQLArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.MySQL = cv18.Notify.MySQL } // V18 will not have mqtt support, so we add that here. - srvConfig.Notify.MQTT = make(map[string]mqttNotify) - srvConfig.Notify.MQTT["1"] = mqttNotify{} + srvConfig.Notify.MQTT = make(map[string]target.MQTTArgs) + srvConfig.Notify.MQTT["1"] = target.MQTTArgs{} // Load browser config from existing config in the file. srvConfig.Browser = cv18.Browser @@ -1516,69 +1549,66 @@ func migrateV19ToV20() error { srvConfig.Logger.Console = cv19.Logger.Console srvConfig.Logger.File = cv19.Logger.File - // check and set notifiers config if len(cv19.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { - // New deliveryMode parameter is added for AMQP, - // default value is already 0, so nothing to - // explicitly migrate here. 
srvConfig.Notify.AMQP = cv19.Notify.AMQP } - if len(cv19.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{ - Format: formatNamespace, + if len(cv19.Notify.Elasticsearch) == 0 { + srvConfig.Notify.Elasticsearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.Elasticsearch["1"] = target.ElasticsearchArgs{ + Format: event.NamespaceFormat, } } else { - srvConfig.Notify.ElasticSearch = cv19.Notify.ElasticSearch + srvConfig.Notify.Elasticsearch = cv19.Notify.Elasticsearch } if len(cv19.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{ - Format: formatNamespace, + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.Redis["1"] = target.RedisArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.Redis = cv19.Notify.Redis } if len(cv19.Notify.PostgreSQL) == 0 { - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{ - Format: formatNamespace, + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.PostgreSQL = cv19.Notify.PostgreSQL } if len(cv19.Notify.Kafka) == 0 { - srvConfig.Notify.Kafka = make(map[string]kafkaNotify) - srvConfig.Notify.Kafka["1"] = kafkaNotify{} + srvConfig.Notify.Kafka = make(map[string]target.KafkaArgs) + srvConfig.Notify.Kafka["1"] = target.KafkaArgs{} } else { srvConfig.Notify.Kafka = cv19.Notify.Kafka } if len(cv19.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = natsNotify{} + srvConfig.Notify.NATS = make(map[string]target.NATSArgs) + srvConfig.Notify.NATS["1"] = target.NATSArgs{} } else { srvConfig.Notify.NATS = cv19.Notify.NATS } if len(cv19.Notify.Webhook) == 
0 { - srvConfig.Notify.Webhook = make(map[string]webhookNotify) - srvConfig.Notify.Webhook["1"] = webhookNotify{} + srvConfig.Notify.Webhook = make(map[string]target.WebhookArgs) + srvConfig.Notify.Webhook["1"] = target.WebhookArgs{} } else { srvConfig.Notify.Webhook = cv19.Notify.Webhook } if len(cv19.Notify.MySQL) == 0 { - srvConfig.Notify.MySQL = make(map[string]mySQLNotify) - srvConfig.Notify.MySQL["1"] = mySQLNotify{ - Format: formatNamespace, + srvConfig.Notify.MySQL = make(map[string]target.MySQLArgs) + srvConfig.Notify.MySQL["1"] = target.MySQLArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.MySQL = cv19.Notify.MySQL } + if len(cv19.Notify.MQTT) == 0 { - srvConfig.Notify.MQTT = make(map[string]mqttNotify) - srvConfig.Notify.MQTT["1"] = mqttNotify{} + srvConfig.Notify.MQTT = make(map[string]target.MQTTArgs) + srvConfig.Notify.MQTT["1"] = target.MQTTArgs{} } else { srvConfig.Notify.MQTT = cv19.Notify.MQTT } @@ -1620,69 +1650,66 @@ func migrateV20ToV21() error { srvConfig.Region = globalMinioDefaultRegion } - // check and set notifiers config if len(cv20.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { - // New deliveryMode parameter is added for AMQP, - // default value is already 0, so nothing to - // explicitly migrate here. 
srvConfig.Notify.AMQP = cv20.Notify.AMQP } - if len(cv20.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{ - Format: formatNamespace, + if len(cv20.Notify.Elasticsearch) == 0 { + srvConfig.Notify.Elasticsearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.Elasticsearch["1"] = target.ElasticsearchArgs{ + Format: event.NamespaceFormat, } } else { - srvConfig.Notify.ElasticSearch = cv20.Notify.ElasticSearch + srvConfig.Notify.Elasticsearch = cv20.Notify.Elasticsearch } if len(cv20.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{ - Format: formatNamespace, + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.Redis["1"] = target.RedisArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.Redis = cv20.Notify.Redis } if len(cv20.Notify.PostgreSQL) == 0 { - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{ - Format: formatNamespace, + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.PostgreSQL = cv20.Notify.PostgreSQL } if len(cv20.Notify.Kafka) == 0 { - srvConfig.Notify.Kafka = make(map[string]kafkaNotify) - srvConfig.Notify.Kafka["1"] = kafkaNotify{} + srvConfig.Notify.Kafka = make(map[string]target.KafkaArgs) + srvConfig.Notify.Kafka["1"] = target.KafkaArgs{} } else { srvConfig.Notify.Kafka = cv20.Notify.Kafka } if len(cv20.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = natsNotify{} + srvConfig.Notify.NATS = make(map[string]target.NATSArgs) + srvConfig.Notify.NATS["1"] = target.NATSArgs{} } else { srvConfig.Notify.NATS = cv20.Notify.NATS } if len(cv20.Notify.Webhook) == 
0 { - srvConfig.Notify.Webhook = make(map[string]webhookNotify) - srvConfig.Notify.Webhook["1"] = webhookNotify{} + srvConfig.Notify.Webhook = make(map[string]target.WebhookArgs) + srvConfig.Notify.Webhook["1"] = target.WebhookArgs{} } else { srvConfig.Notify.Webhook = cv20.Notify.Webhook } if len(cv20.Notify.MySQL) == 0 { - srvConfig.Notify.MySQL = make(map[string]mySQLNotify) - srvConfig.Notify.MySQL["1"] = mySQLNotify{ - Format: formatNamespace, + srvConfig.Notify.MySQL = make(map[string]target.MySQLArgs) + srvConfig.Notify.MySQL["1"] = target.MySQLArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.MySQL = cv20.Notify.MySQL } + if len(cv20.Notify.MQTT) == 0 { - srvConfig.Notify.MQTT = make(map[string]mqttNotify) - srvConfig.Notify.MQTT["1"] = mqttNotify{} + srvConfig.Notify.MQTT = make(map[string]target.MQTTArgs) + srvConfig.Notify.MQTT["1"] = target.MQTTArgs{} } else { srvConfig.Notify.MQTT = cv20.Notify.MQTT } @@ -1727,69 +1754,66 @@ func migrateV21ToV22() error { srvConfig.Region = globalMinioDefaultRegion } - // check and set notifiers config if len(cv21.Notify.AMQP) == 0 { - srvConfig.Notify.AMQP = make(map[string]amqpNotify) - srvConfig.Notify.AMQP["1"] = amqpNotify{} + srvConfig.Notify.AMQP = make(map[string]target.AMQPArgs) + srvConfig.Notify.AMQP["1"] = target.AMQPArgs{} } else { - // New deliveryMode parameter is added for AMQP, - // default value is already 0, so nothing to - // explicitly migrate here. 
srvConfig.Notify.AMQP = cv21.Notify.AMQP } - if len(cv21.Notify.ElasticSearch) == 0 { - srvConfig.Notify.ElasticSearch = make(map[string]elasticSearchNotify) - srvConfig.Notify.ElasticSearch["1"] = elasticSearchNotify{ - Format: formatNamespace, + if len(cv21.Notify.Elasticsearch) == 0 { + srvConfig.Notify.Elasticsearch = make(map[string]target.ElasticsearchArgs) + srvConfig.Notify.Elasticsearch["1"] = target.ElasticsearchArgs{ + Format: event.NamespaceFormat, } } else { - srvConfig.Notify.ElasticSearch = cv21.Notify.ElasticSearch + srvConfig.Notify.Elasticsearch = cv21.Notify.Elasticsearch } if len(cv21.Notify.Redis) == 0 { - srvConfig.Notify.Redis = make(map[string]redisNotify) - srvConfig.Notify.Redis["1"] = redisNotify{ - Format: formatNamespace, + srvConfig.Notify.Redis = make(map[string]target.RedisArgs) + srvConfig.Notify.Redis["1"] = target.RedisArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.Redis = cv21.Notify.Redis } if len(cv21.Notify.PostgreSQL) == 0 { - srvConfig.Notify.PostgreSQL = make(map[string]postgreSQLNotify) - srvConfig.Notify.PostgreSQL["1"] = postgreSQLNotify{ - Format: formatNamespace, + srvConfig.Notify.PostgreSQL = make(map[string]target.PostgreSQLArgs) + srvConfig.Notify.PostgreSQL["1"] = target.PostgreSQLArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.PostgreSQL = cv21.Notify.PostgreSQL } if len(cv21.Notify.Kafka) == 0 { - srvConfig.Notify.Kafka = make(map[string]kafkaNotify) - srvConfig.Notify.Kafka["1"] = kafkaNotify{} + srvConfig.Notify.Kafka = make(map[string]target.KafkaArgs) + srvConfig.Notify.Kafka["1"] = target.KafkaArgs{} } else { srvConfig.Notify.Kafka = cv21.Notify.Kafka } if len(cv21.Notify.NATS) == 0 { - srvConfig.Notify.NATS = make(map[string]natsNotify) - srvConfig.Notify.NATS["1"] = natsNotify{} + srvConfig.Notify.NATS = make(map[string]target.NATSArgs) + srvConfig.Notify.NATS["1"] = target.NATSArgs{} } else { srvConfig.Notify.NATS = cv21.Notify.NATS } if len(cv21.Notify.Webhook) == 
0 { - srvConfig.Notify.Webhook = make(map[string]webhookNotify) - srvConfig.Notify.Webhook["1"] = webhookNotify{} + srvConfig.Notify.Webhook = make(map[string]target.WebhookArgs) + srvConfig.Notify.Webhook["1"] = target.WebhookArgs{} } else { srvConfig.Notify.Webhook = cv21.Notify.Webhook } if len(cv21.Notify.MySQL) == 0 { - srvConfig.Notify.MySQL = make(map[string]mySQLNotify) - srvConfig.Notify.MySQL["1"] = mySQLNotify{ - Format: formatNamespace, + srvConfig.Notify.MySQL = make(map[string]target.MySQLArgs) + srvConfig.Notify.MySQL["1"] = target.MySQLArgs{ + Format: event.NamespaceFormat, } } else { srvConfig.Notify.MySQL = cv21.Notify.MySQL } + if len(cv21.Notify.MQTT) == 0 { - srvConfig.Notify.MQTT = make(map[string]mqttNotify) - srvConfig.Notify.MQTT["1"] = mqttNotify{} + srvConfig.Notify.MQTT = make(map[string]target.MQTTArgs) + srvConfig.Notify.MQTT["1"] = target.MQTTArgs{} } else { srvConfig.Notify.MQTT = cv21.Notify.MQTT } diff --git a/cmd/config-versions.go b/cmd/config-versions.go index 831f2d80b..acd511c5f 100644 --- a/cmd/config-versions.go +++ b/cmd/config-versions.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ import ( "sync" "github.com/minio/minio/pkg/auth" + "github.com/minio/minio/pkg/event/target" ) /////////////////// Config V1 /////////////////// @@ -226,23 +227,23 @@ type configV6 struct { // Notifier represents collection of supported notification queues in version // 1 without NATS streaming. 
type notifierV1 struct { - AMQP map[string]amqpNotify `json:"amqp"` - NATS map[string]natsNotifyV1 `json:"nats"` - ElasticSearch map[string]elasticSearchNotify `json:"elasticsearch"` - Redis map[string]redisNotify `json:"redis"` - PostgreSQL map[string]postgreSQLNotify `json:"postgresql"` - Kafka map[string]kafkaNotify `json:"kafka"` + AMQP map[string]target.AMQPArgs `json:"amqp"` + NATS map[string]natsNotifyV1 `json:"nats"` + ElasticSearch map[string]target.ElasticsearchArgs `json:"elasticsearch"` + Redis map[string]target.RedisArgs `json:"redis"` + PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"` + Kafka map[string]target.KafkaArgs `json:"kafka"` } // Notifier represents collection of supported notification queues in version 2 // with NATS streaming but without webhook. type notifierV2 struct { - AMQP map[string]amqpNotify `json:"amqp"` - NATS map[string]natsNotify `json:"nats"` - ElasticSearch map[string]elasticSearchNotify `json:"elasticsearch"` - Redis map[string]redisNotify `json:"redis"` - PostgreSQL map[string]postgreSQLNotify `json:"postgresql"` - Kafka map[string]kafkaNotify `json:"kafka"` + AMQP map[string]target.AMQPArgs `json:"amqp"` + NATS map[string]target.NATSArgs `json:"nats"` + ElasticSearch map[string]target.ElasticsearchArgs `json:"elasticsearch"` + Redis map[string]target.RedisArgs `json:"redis"` + PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"` + Kafka map[string]target.KafkaArgs `json:"kafka"` } // configV7 server configuration version '7'. 
@@ -368,6 +369,18 @@ type serverConfigV12 struct { Notify notifierV2 `json:"notify"` } +type notifier struct { + AMQP map[string]target.AMQPArgs `json:"amqp"` + Elasticsearch map[string]target.ElasticsearchArgs `json:"elasticsearch"` + Kafka map[string]target.KafkaArgs `json:"kafka"` + MQTT map[string]target.MQTTArgs `json:"mqtt"` + MySQL map[string]target.MySQLArgs `json:"mysql"` + NATS map[string]target.NATSArgs `json:"nats"` + PostgreSQL map[string]target.PostgreSQLArgs `json:"postgresql"` + Redis map[string]target.RedisArgs `json:"redis"` + Webhook map[string]target.WebhookArgs `json:"webhook"` +} + // serverConfigV13 server configuration version '13' which is like // version '12' except it adds support for webhook notification. type serverConfigV13 struct { diff --git a/cmd/endpoint.go b/cmd/endpoint.go index 4c584b7b7..ce3e8374e 100644 --- a/cmd/endpoint.go +++ b/cmd/endpoint.go @@ -462,9 +462,13 @@ func GetLocalPeer(endpoints EndpointList) (localPeer string) { } } if peerSet.IsEmpty() { - // If local peer is empty can happen in FS or Erasure coded mode. - // then set the value to globalMinioAddr instead. - return globalMinioAddr + // Local peer can be empty in FS or Erasure coded mode. + // If so, return globalMinioHost + globalMinioPort value. + if globalMinioHost != "" { + return globalMinioHost + ":" + globalMinioPort + } + + return "127.0.0.1:" + globalMinioPort } return peerSet.ToSlice()[0] } diff --git a/cmd/endpoint_test.go b/cmd/endpoint_test.go index 9355515c8..125241f3f 100644 --- a/cmd/endpoint_test.go +++ b/cmd/endpoint_test.go @@ -334,16 +334,19 @@ func TestCreateEndpoints(t *testing.T) { // is considered a remote service from localhost:9000 perspective. 
func TestGetLocalPeer(t *testing.T) { tempGlobalMinioAddr := globalMinioAddr + tempGlobalMinioPort := globalMinioPort defer func() { globalMinioAddr = tempGlobalMinioAddr + globalMinioPort = tempGlobalMinioPort }() globalMinioAddr = ":9000" + globalMinioPort = "9000" testCases := []struct { endpointArgs []string expectedResult string }{ - {[]string{"/d1", "/d2", "d3", "d4"}, ":9000"}, + {[]string{"/d1", "/d2", "d3", "d4"}, "127.0.0.1:9000"}, {[]string{"http://localhost:9000/d1", "http://localhost:9000/d2", "http://example.org:9000/d3", "http://example.com:9000/d4"}, "localhost:9000"}, {[]string{"http://localhost:9000/d1", "http://example.org:9000/d2", "http://example.com:9000/d3", "http://example.net:9000/d4"}, diff --git a/cmd/event-notifier.go b/cmd/event-notifier.go deleted file mode 100644 index 3d5ec1ac2..000000000 --- a/cmd/event-notifier.go +++ /dev/null @@ -1,804 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "bytes" - "encoding/json" - "encoding/xml" - "fmt" - "net" - "net/url" - "path" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/minio/minio/pkg/errors" - "github.com/minio/minio/pkg/hash" -) - -const ( - minioEventSource = "minio:s3" -) - -type externalNotifier struct { - // Per-bucket notification config. This is updated via - // PutBucketNotification API. 
- notificationConfigs map[string]*notificationConfig - - // An external target keeps a connection to an external - // service to which events are to be sent. It is a mapping - // from an ARN to a log object - targets map[string]*logrus.Logger - - rwMutex *sync.RWMutex -} - -type internalNotifier struct { - // per-bucket listener configuration. This is updated - // when listeners connect or disconnect. - listenerConfigs map[string][]listenerConfig - - // An internal target is a peer Minio server, that is - // connected to a listening client. Here, targets is a map of - // listener ARN to log object. - targets map[string]*listenerLogger - - // Connected listeners is a map of listener ARNs to channels - // on which the ListenBucket API handler go routine is waiting - // for events to send to a client. - connectedListeners map[string]*listenChan - - rwMutex *sync.RWMutex -} - -// Global event notification configuration. This structure has state -// about configured external notifications, and run-time configuration -// for listener notifications. -type eventNotifier struct { - - // `external` here refers to notification configuration to - // send events to supported external systems - external externalNotifier - - // `internal` refers to notification configuration for live - // listening clients. Events for a client are send from all - // servers, internally to a particular server that is - // connected to the client. - internal internalNotifier -} - -// Represents data to be sent with notification event. -type eventData struct { - Type EventName - Bucket string - ObjInfo ObjectInfo - ReqParams map[string]string - Host string - Port string - UserAgent string -} - -// New notification event constructs a new notification event message from -// input request metadata which completed successfully. 
-func newNotificationEvent(event eventData) NotificationEvent { - getResponseOriginEndpointKey := func() string { - host := globalMinioHost - if host == "" { - // FIXME: Send FQDN or hostname of this machine than sending IP address. - host = localIP4.ToSlice()[0] - } - - return fmt.Sprintf("%s://%s:%s", getURLScheme(globalIsSSL), host, globalMinioPort) - } - - // Fetch the region. - region := globalServerConfig.GetRegion() - - // Fetch the credentials. - creds := globalServerConfig.GetCredential() - - // Time when Minio finished processing the request. - eventTime := UTCNow() - - // Fetch a hexadecimal representation of event time in nano seconds. - uniqueID := mustGetRequestID(eventTime) - - /// Construct a new object created event. - - // Following blocks fills in all the necessary details of s3 - // event message structure. - // http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html - nEvent := NotificationEvent{ - EventVersion: eventVersion, - EventSource: minioEventSource, - AwsRegion: region, - EventTime: eventTime.Format(timeFormatAMZ), - EventName: event.Type.String(), - UserIdentity: identity{creds.AccessKey}, - RequestParameters: event.ReqParams, - ResponseElements: map[string]string{ - responseRequestIDKey: uniqueID, - // Following is a custom response element to indicate - // event origin server endpoint. - responseOriginEndpointKey: getResponseOriginEndpointKey(), - }, - S3: eventMeta{ - SchemaVersion: eventSchemaVersion, - ConfigurationID: eventConfigID, - Bucket: bucketMeta{ - Name: event.Bucket, - OwnerIdentity: identity{creds.AccessKey}, - ARN: bucketARNPrefix + event.Bucket, - }, - }, - Source: sourceInfo{ - Host: event.Host, - Port: event.Port, - UserAgent: event.UserAgent, - }, - } - - // Escape the object name. For example "red flower.jpg" becomes "red+flower.jpg". - escapedObj := url.QueryEscape(event.ObjInfo.Name) - - // For delete object event type, we do not need to set ETag and Size. 
- if event.Type == ObjectRemovedDelete { - nEvent.S3.Object = objectMeta{ - Key: escapedObj, - VersionID: "1", - Sequencer: uniqueID, - } - return nEvent - } - - // For all other events we should set ETag and Size. - nEvent.S3.Object = objectMeta{ - Key: escapedObj, - ETag: event.ObjInfo.ETag, - Size: event.ObjInfo.Size, - ContentType: event.ObjInfo.ContentType, - UserMetadata: event.ObjInfo.UserDefined, - VersionID: "1", - Sequencer: uniqueID, - } - - // Success. - return nEvent -} - -// Fetch all external targets. This returns a copy of the current map of -// external notification targets. -func (en eventNotifier) GetAllExternalTargets() map[string]*logrus.Logger { - en.external.rwMutex.RLock() - defer en.external.rwMutex.RUnlock() - targetsCopy := make(map[string]*logrus.Logger) - for k, v := range en.external.targets { - targetsCopy[k] = v - } - return targetsCopy -} - -// Fetch the external target. -func (en eventNotifier) GetExternalTarget(queueARN string) *logrus.Logger { - en.external.rwMutex.RLock() - defer en.external.rwMutex.RUnlock() - return en.external.targets[queueARN] -} - -func (en eventNotifier) GetInternalTarget(arn string) *listenerLogger { - en.internal.rwMutex.RLock() - defer en.internal.rwMutex.RUnlock() - return en.internal.targets[arn] -} - -// Set a new sns target for an input sns ARN. -func (en *eventNotifier) AddListenerChan(snsARN string, listenerCh *listenChan) error { - if listenerCh == nil { - return errInvalidArgument - } - en.internal.rwMutex.Lock() - defer en.internal.rwMutex.Unlock() - en.internal.connectedListeners[snsARN] = listenerCh - return nil -} - -// Remove sns target for an input sns ARN. 
-func (en *eventNotifier) RemoveListenerChan(snsARN string) { - en.internal.rwMutex.Lock() - defer en.internal.rwMutex.Unlock() - if en.internal.connectedListeners != nil { - delete(en.internal.connectedListeners, snsARN) - } -} - -func (en *eventNotifier) SendListenerEvent(arn string, event []NotificationEvent) error { - en.internal.rwMutex.Lock() - defer en.internal.rwMutex.Unlock() - - listenChan, ok := en.internal.connectedListeners[arn] - if ok { - listenChan.sendNotificationEvent(event) - } - // If the channel is not present we ignore the event. - return nil -} - -// Fetch bucket notification config for an input bucket. -func (en eventNotifier) GetBucketNotificationConfig(bucket string) *notificationConfig { - en.external.rwMutex.RLock() - defer en.external.rwMutex.RUnlock() - return en.external.notificationConfigs[bucket] -} - -func (en *eventNotifier) SetBucketNotificationConfig(bucket string, ncfg *notificationConfig) { - en.external.rwMutex.Lock() - if ncfg == nil { - delete(en.external.notificationConfigs, bucket) - } else { - en.external.notificationConfigs[bucket] = ncfg - } - en.external.rwMutex.Unlock() -} - -func (en *eventNotifier) GetBucketListenerConfig(bucket string) []listenerConfig { - en.internal.rwMutex.RLock() - defer en.internal.rwMutex.RUnlock() - return en.internal.listenerConfigs[bucket] -} - -func (en *eventNotifier) SetBucketListenerConfig(bucket string, lcfg []listenerConfig) error { - en.internal.rwMutex.Lock() - defer en.internal.rwMutex.Unlock() - if len(lcfg) == 0 { - delete(en.internal.listenerConfigs, bucket) - } else { - en.internal.listenerConfigs[bucket] = lcfg - } - for _, elcArr := range en.internal.listenerConfigs { - for _, elcElem := range elcArr { - currArn := elcElem.TopicConfig.TopicARN - logger, err := newListenerLogger(currArn, elcElem.TargetServer) - if err != nil { - return err - } - en.internal.targets[currArn] = logger - } - } - return nil -} - -func eventNotifyForBucketNotifications(eventType, objectName, 
bucketName string, nEvent []NotificationEvent) { - nConfig := globalEventNotifier.GetBucketNotificationConfig(bucketName) - if nConfig == nil { - return - } - // Validate if the event and object match the queue configs. - for _, qConfig := range nConfig.QueueConfigs { - eventMatch := eventMatch(eventType, qConfig.Events) - ruleMatch := filterRuleMatch(objectName, qConfig.Filter.Key.FilterRules) - if eventMatch && ruleMatch { - targetLog := globalEventNotifier.GetExternalTarget(qConfig.QueueARN) - if targetLog != nil { - targetLog.WithFields(logrus.Fields{ - "Key": path.Join(bucketName, objectName), - "EventType": eventType, - "Records": nEvent, - }).Info() - } - } - } -} - -func eventNotifyForBucketListeners(eventType, objectName, bucketName string, - nEvent []NotificationEvent) { - lCfgs := globalEventNotifier.GetBucketListenerConfig(bucketName) - if lCfgs == nil { - return - } - // Validate if the event and object match listener configs - for _, lcfg := range lCfgs { - ruleMatch := filterRuleMatch(objectName, lcfg.TopicConfig.Filter.Key.FilterRules) - eventMatch := eventMatch(eventType, lcfg.TopicConfig.Events) - if eventMatch && ruleMatch { - targetLog := globalEventNotifier.GetInternalTarget( - lcfg.TopicConfig.TopicARN) - if targetLog != nil && targetLog.log != nil { - targetLog.log.WithFields(logrus.Fields{ - "Key": path.Join(bucketName, objectName), - "EventType": eventType, - "Records": nEvent, - }).Info() - } - } - } - -} - -// eventNotify notifies an event to relevant targets based on their -// bucket configuration (notifications and listeners). -func eventNotify(event eventData) { - if globalEventNotifier == nil { - return - } - // Notifies a new event. - // List of events reported through this function are - // - s3:ObjectCreated:Put - // - s3:ObjectCreated:Post - // - s3:ObjectCreated:Copy - // - s3:ObjectCreated:CompleteMultipartUpload - // - s3:ObjectRemoved:Delete - - // Event type. - eventType := event.Type.String() - - // Object name. 
- objectName := event.ObjInfo.Name - - // Save the notification event to be sent. - notificationEvent := []NotificationEvent{newNotificationEvent(event)} - - // Notify external targets. - eventNotifyForBucketNotifications(eventType, objectName, event.Bucket, notificationEvent) - - // Notify internal targets. - eventNotifyForBucketListeners(eventType, objectName, event.Bucket, notificationEvent) -} - -// loads notification config if any for a given bucket, returns -// structured notification config. -func loadNotificationConfig(bucket string, objAPI ObjectLayer) (*notificationConfig, error) { - // Construct the notification config path. - ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig) - - var buffer bytes.Buffer - err := objAPI.GetObject(minioMetaBucket, ncPath, 0, -1, &buffer, "") // Read everything. - if err != nil { - // 'notification.xml' not found return - // 'errNoSuchNotifications'. This is default when no - // bucket notifications are found on the bucket. - if isErrObjectNotFound(err) || isErrIncompleteBody(err) { - return nil, errors.Trace(errNoSuchNotifications) - } - errorIf(err, "Unable to load bucket-notification for bucket %s", bucket) - // Returns error for other errors. - return nil, err - } - - // if `notifications.xml` is empty we should return NoSuchNotifications. - if buffer.Len() == 0 { - return nil, errors.Trace(errNoSuchNotifications) - } - - // Unmarshal notification bytes. - notificationConfigBytes := buffer.Bytes() - notificationCfg := ¬ificationConfig{} - // Unmarshal notification bytes only if we read data. - if err = xml.Unmarshal(notificationConfigBytes, notificationCfg); err != nil { - return nil, errors.Trace(err) - } - - // Return success. - return notificationCfg, nil -} - -// loads notification config if any for a given bucket, returns -// structured notification config. 
-func loadListenerConfig(bucket string, objAPI ObjectLayer) ([]listenerConfig, error) { - // in single node mode, there are no peers, so in this case - // there is no configuration to load, as any previously - // connected listen clients have been disconnected - if !globalIsDistXL { - return nil, nil - } - - // Construct the notification config path. - lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig) - - var buffer bytes.Buffer - err := objAPI.GetObject(minioMetaBucket, lcPath, 0, -1, &buffer, "") - if err != nil { - // 'listener.json' not found return - // 'errNoSuchNotifications'. This is default when no - // bucket listeners are found on the bucket - if isErrObjectNotFound(err) || isErrIncompleteBody(err) { - return nil, errors.Trace(errNoSuchNotifications) - } - errorIf(err, "Unable to load bucket-listeners for bucket %s", bucket) - // Returns error for other errors. - return nil, err - } - - // if `listener.json` is empty we should return NoSuchNotifications. - if buffer.Len() == 0 { - return nil, errors.Trace(errNoSuchNotifications) - } - - var lCfg []listenerConfig - lConfigBytes := buffer.Bytes() - if err = json.Unmarshal(lConfigBytes, &lCfg); err != nil { - errorIf(err, "Unable to unmarshal listener config from JSON.") - return nil, errors.Trace(err) - } - - // Return success. 
- return lCfg, nil -} - -func persistNotificationConfig(bucket string, ncfg *notificationConfig, obj ObjectLayer) error { - // marshal to xml - buf, err := xml.Marshal(ncfg) - if err != nil { - errorIf(err, "Unable to marshal notification configuration into XML") - return err - } - - // build path - ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig) - - // write object to path - hashReader, err := hash.NewReader(bytes.NewReader(buf), int64(len(buf)), "", getSHA256Hash(buf)) - if err != nil { - errorIf(err, "Unable to write bucket notification configuration.") - return err - } - _, err = obj.PutObject(minioMetaBucket, ncPath, hashReader, nil) - if err != nil { - errorIf(err, "Unable to write bucket notification configuration.") - return err - } - return nil -} - -// Persists validated listener config to object layer. -func persistListenerConfig(bucket string, lcfg []listenerConfig, obj ObjectLayer) error { - buf, err := json.Marshal(lcfg) - if err != nil { - errorIf(err, "Unable to marshal listener config to JSON.") - return err - } - - // build path - lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig) - - // write object to path - hashReader, err := hash.NewReader(bytes.NewReader(buf), int64(len(buf)), "", getSHA256Hash(buf)) - if err != nil { - errorIf(err, "Unable to write bucket listener configuration to object layer.") - return err - } - - // write object to path - _, err = obj.PutObject(minioMetaBucket, lcPath, hashReader, nil) - if err != nil { - errorIf(err, "Unable to write bucket listener configuration to object layer.") - return err - } - return nil -} - -// Removes notification.xml for a given bucket, only used during DeleteBucket. -func removeNotificationConfig(bucket string, objAPI ObjectLayer) error { - // Verify bucket is valid. 
- if !IsValidBucketName(bucket) { - return BucketNameInvalid{Bucket: bucket} - } - - ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig) - - return objAPI.DeleteObject(minioMetaBucket, ncPath) -} - -// Remove listener configuration from storage layer. Used when a bucket is deleted. -func removeListenerConfig(bucket string, objAPI ObjectLayer) error { - // make the path - lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig) - - return objAPI.DeleteObject(minioMetaBucket, lcPath) -} - -// Loads both notification and listener config. -func loadNotificationAndListenerConfig(bucketName string, objAPI ObjectLayer) (nCfg *notificationConfig, lCfg []listenerConfig, err error) { - // Loads notification config if any. - nCfg, err = loadNotificationConfig(bucketName, objAPI) - if err != nil && !errors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) { - return nil, nil, err - } - - // Loads listener config if any. - lCfg, err = loadListenerConfig(bucketName, objAPI) - if err != nil && !errors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) { - return nil, nil, err - } - return nCfg, lCfg, nil -} - -// loads all bucket notifications if present. -func loadAllBucketNotifications(objAPI ObjectLayer) (map[string]*notificationConfig, map[string][]listenerConfig, error) { - // List buckets to proceed loading all notification configuration. - buckets, err := objAPI.ListBuckets() - if err != nil { - return nil, nil, err - } - - nConfigs := make(map[string]*notificationConfig) - lConfigs := make(map[string][]listenerConfig) - - // Loads all bucket notifications. - for _, bucket := range buckets { - // Load persistent notification and listener configurations - // a given bucket name. - nConfigs[bucket.Name], lConfigs[bucket.Name], err = loadNotificationAndListenerConfig(bucket.Name, objAPI) - if err != nil { - return nil, nil, err - } - } - - // Success. 
- return nConfigs, lConfigs, nil -} - -// addQueueTarget - calls newTargetFunc function and adds its returned value to queueTargets -func addQueueTarget(queueTargets map[string]*logrus.Logger, - accountID, queueType string, - newTargetFunc func(string) (*logrus.Logger, error)) (string, error) { - - // Construct the queue ARN for AMQP. - queueARN := minioSqs + globalServerConfig.GetRegion() + ":" + accountID + ":" + queueType - - // Queue target if already initialized we move to the next ARN. - if _, ok := queueTargets[queueARN]; ok { - return queueARN, nil - } - - // Using accountID we can now initialize a new AMQP logrus instance. - logger, err := newTargetFunc(accountID) - if err == nil { - queueTargets[queueARN] = logger - } - - return queueARN, err -} - -// Loads all queue targets, initializes each queueARNs depending on their config. -// Each instance of queueARN registers its own logrus to communicate with the -// queue service. QueueARN once initialized is not initialized again for the -// same queueARN, instead previous connection is used. -func loadAllQueueTargets() (map[string]*logrus.Logger, error) { - queueTargets := make(map[string]*logrus.Logger) - // Load all amqp targets, initialize their respective loggers. - for accountID, amqpN := range globalServerConfig.Notify.GetAMQP() { - if !amqpN.Enable { - continue - } - - if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeAMQP, newAMQPNotify); err != nil { - if _, ok := err.(net.Error); ok { - err = &net.OpError{ - Op: "Connecting to " + queueARN, - Net: "tcp", - Err: err, - } - } - - return nil, err - } - } - - // Load all mqtt targets, initialize their respective loggers. 
- for accountID, mqttN := range globalServerConfig.Notify.GetMQTT() { - if !mqttN.Enable { - continue - } - - if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeMQTT, newMQTTNotify); err != nil { - if _, ok := err.(net.Error); ok { - err = &net.OpError{ - Op: "Connecting to " + queueARN, - Net: "tcp", - Err: err, - } - } - - return nil, err - } - } - - // Load all nats targets, initialize their respective loggers. - for accountID, natsN := range globalServerConfig.Notify.GetNATS() { - if !natsN.Enable { - continue - } - - if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeNATS, newNATSNotify); err != nil { - if _, ok := err.(net.Error); ok { - err = &net.OpError{ - Op: "Connecting to " + queueARN, - Net: "tcp", - Err: err, - } - } - - return nil, err - } - } - - // Load redis targets, initialize their respective loggers. - for accountID, redisN := range globalServerConfig.Notify.GetRedis() { - if !redisN.Enable { - continue - } - - if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeRedis, newRedisNotify); err != nil { - if _, ok := err.(net.Error); ok { - err = &net.OpError{ - Op: "Connecting to " + queueARN, - Net: "tcp", - Err: err, - } - } - - return nil, err - } - } - - // Load Webhook targets, initialize their respective loggers. - for accountID, webhookN := range globalServerConfig.Notify.GetWebhook() { - if !webhookN.Enable { - continue - } - if _, err := addQueueTarget(queueTargets, accountID, queueTypeWebhook, newWebhookNotify); err != nil { - return nil, err - } - } - - // Load elastic targets, initialize their respective loggers. 
- for accountID, elasticN := range globalServerConfig.Notify.GetElasticSearch() { - if !elasticN.Enable { - continue - } - - if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeElastic, newElasticNotify); err != nil { - if _, ok := err.(net.Error); ok { - err = &net.OpError{ - Op: "Connecting to " + queueARN, - Net: "tcp", - Err: err, - } - } - - return nil, err - } - } - - // Load PostgreSQL targets, initialize their respective loggers. - for accountID, pgN := range globalServerConfig.Notify.GetPostgreSQL() { - if !pgN.Enable { - continue - } - - if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypePostgreSQL, newPostgreSQLNotify); err != nil { - if _, ok := err.(net.Error); ok { - err = &net.OpError{ - Op: "Connecting to " + queueARN, - Net: "tcp", - Err: err, - } - } - - return nil, err - } - } - - // Load MySQL targets, initialize their respective loggers. - for accountID, msqlN := range globalServerConfig.Notify.GetMySQL() { - if !msqlN.Enable { - continue - } - - if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeMySQL, newMySQLNotify); err != nil { - if _, ok := err.(net.Error); ok { - err = &net.OpError{ - Op: "Connecting to " + queueARN, - Net: "tcp", - Err: err, - } - } - - return nil, err - } - } - - // Load Kafka targets, initialize their respective loggers. - for accountID, kafkaN := range globalServerConfig.Notify.GetKafka() { - if !kafkaN.Enable { - continue - } - - if queueARN, err := addQueueTarget(queueTargets, accountID, queueTypeKafka, newKafkaNotify); err != nil { - if _, ok := err.(net.Error); ok { - err = &net.OpError{ - Op: "Connecting to " + queueARN, - Net: "tcp", - Err: err, - } - } - - return nil, err - } - } - - // Successfully initialized queue targets. - return queueTargets, nil -} - -// Global instance of event notification queue. -var globalEventNotifier *eventNotifier - -// Initialize event notifier. 
-func initEventNotifier(objAPI ObjectLayer) error { - if objAPI == nil { - return errInvalidArgument - } - - // Read all saved bucket notifications. - nConfigs, lConfigs, err := loadAllBucketNotifications(objAPI) - if err != nil { - errorIf(err, "Error loading bucket notifications - %v", err) - return err - } - - // Initializes all queue targets. - queueTargets, err := loadAllQueueTargets() - if err != nil { - return err - } - - // Initialize internal listener targets - listenTargets := make(map[string]*listenerLogger) - for _, listeners := range lConfigs { - for _, listener := range listeners { - ln, err := newListenerLogger( - listener.TopicConfig.TopicARN, - listener.TargetServer, - ) - if err != nil { - errorIf(err, "Unable to initialize listener target logger.") - //TODO: improve error - return fmt.Errorf("Error initializing listner target logger - %v", err) - } - listenTargets[listener.TopicConfig.TopicARN] = ln - } - } - - // Initialize event notifier queue. - globalEventNotifier = &eventNotifier{ - external: externalNotifier{ - notificationConfigs: nConfigs, - targets: queueTargets, - rwMutex: &sync.RWMutex{}, - }, - internal: internalNotifier{ - rwMutex: &sync.RWMutex{}, - targets: listenTargets, - listenerConfigs: lConfigs, - connectedListeners: make(map[string]*listenChan), - }, - } - - return nil -} diff --git a/cmd/event-notifier_test.go b/cmd/event-notifier_test.go deleted file mode 100644 index 02496a389..000000000 --- a/cmd/event-notifier_test.go +++ /dev/null @@ -1,586 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "bytes" - "fmt" - "os" - "reflect" - "testing" - "time" - - "github.com/minio/minio/pkg/errors" -) - -// Test InitEventNotifier with faulty disks -func TestInitEventNotifierFaultyDisks(t *testing.T) { - // Prepare for tests - rootPath, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatalf("Init Test config failed") - } - // remove the root directory after the test ends. - defer os.RemoveAll(rootPath) - - disks, err := getRandomDisks(16) - if err != nil { - t.Fatal("Unable to create directories for FS backend. ", err) - } - defer removeRoots(disks) - obj, _, err := initObjectLayer(mustGetNewEndpointList(disks...)) - if err != nil { - t.Fatal("Unable to initialize FS backend.", err) - } - - bucketName := "bucket" - if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil { - t.Fatal("Unexpected error:", err) - } - - xl := obj.(*xlObjects) - - listenARN := "arn:minio:sns:us-east-1:1:listen" - queueARN := "arn:minio:sqs:us-east-1:1:redis" - - // Write a notification.xml in the disk - notificationXML := "" - notificationXML += "s3:ObjectRemoved:*s3:ObjectRemoved:*" + listenARN + "" - notificationXML += "s3:ObjectRemoved:*s3:ObjectRemoved:*" + queueARN + "" - notificationXML += "" - size := int64(len([]byte(notificationXML))) - reader := bytes.NewReader([]byte(notificationXML)) - bucketConfigPath := bucketConfigPrefix + "/" + bucketName + "/" + bucketNotificationConfig - if _, err := xl.PutObject(minioMetaBucket, bucketConfigPath, mustGetHashReader(t, reader, size, "", ""), nil); err != nil 
{ - t.Fatal("Unexpected error:", err) - } - - for i, d := range xl.storageDisks { - xl.storageDisks[i] = newNaughtyDisk(d, nil, errFaultyDisk) - } - // Test initEventNotifier() with faulty disks - for i := 1; i <= 3; i++ { - if err := initEventNotifier(xl); errors.Cause(err) != errFaultyDisk { - t.Fatal("Unexpected error:", err) - } - } -} - -// InitEventNotifierWithPostgreSQL - tests InitEventNotifier when PostgreSQL is not prepared -func TestInitEventNotifierWithPostgreSQL(t *testing.T) { - // initialize the server and obtain the credentials and root. - // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatalf("Init Test config failed") - } - // remove the root directory after the test ends. - defer os.RemoveAll(rootPath) - - disks, err := getRandomDisks(1) - defer os.RemoveAll(disks[0]) - if err != nil { - t.Fatal("Unable to create directories for FS backend. ", err) - } - fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...)) - if err != nil { - t.Fatal("Unable to initialize FS backend.", err) - } - - globalServerConfig.Notify.SetPostgreSQLByID("1", postgreSQLNotify{Enable: true}) - if err := initEventNotifier(fs); err == nil { - t.Fatal("PostgreSQL config didn't fail.") - } -} - -// InitEventNotifierWithNATS - tests InitEventNotifier when NATS is not prepared -func TestInitEventNotifierWithNATS(t *testing.T) { - // initialize the server and obtain the credentials and root. - // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatalf("Init Test config failed") - } - // remove the root directory after the test ends. - defer os.RemoveAll(rootPath) - - disks, err := getRandomDisks(1) - defer os.RemoveAll(disks[0]) - if err != nil { - t.Fatal("Unable to create directories for FS backend. 
", err) - } - fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...)) - if err != nil { - t.Fatal("Unable to initialize FS backend.", err) - } - - globalServerConfig.Notify.SetNATSByID("1", natsNotify{Enable: true}) - if err := initEventNotifier(fs); err == nil { - t.Fatal("NATS config didn't fail.") - } -} - -// InitEventNotifierWithWebHook - tests InitEventNotifier when WebHook is not prepared -func TestInitEventNotifierWithWebHook(t *testing.T) { - // initialize the server and obtain the credentials and root. - // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatalf("Init Test config failed") - } - // remove the root directory after the test ends. - defer os.RemoveAll(rootPath) - - disks, err := getRandomDisks(1) - defer os.RemoveAll(disks[0]) - if err != nil { - t.Fatal("Unable to create directories for FS backend. ", err) - } - fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...)) - if err != nil { - t.Fatal("Unable to initialize FS backend.", err) - } - - globalServerConfig.Notify.SetWebhookByID("1", webhookNotify{Enable: true}) - if err := initEventNotifier(fs); err == nil { - t.Fatal("WebHook config didn't fail.") - } -} - -// InitEventNotifierWithAMQP - tests InitEventNotifier when AMQP is not prepared -func TestInitEventNotifierWithAMQP(t *testing.T) { - // initialize the server and obtain the credentials and root. - // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatalf("Init Test config failed") - } - // remove the root directory after the test ends. - defer os.RemoveAll(rootPath) - - disks, err := getRandomDisks(1) - defer os.RemoveAll(disks[0]) - if err != nil { - t.Fatal("Unable to create directories for FS backend. 
", err) - } - fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...)) - if err != nil { - t.Fatal("Unable to initialize FS backend.", err) - } - - globalServerConfig.Notify.SetAMQPByID("1", amqpNotify{Enable: true}) - if err := initEventNotifier(fs); err == nil { - t.Fatal("AMQP config didn't fail.") - } -} - -// InitEventNotifierWithElasticSearch - test InitEventNotifier when ElasticSearch is not ready -func TestInitEventNotifierWithElasticSearch(t *testing.T) { - // initialize the server and obtain the credentials and root. - // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatalf("Init Test config failed") - } - // remove the root directory after the test ends. - defer os.RemoveAll(rootPath) - - disks, err := getRandomDisks(1) - defer os.RemoveAll(disks[0]) - if err != nil { - t.Fatal("Unable to create directories for FS backend. ", err) - } - fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...)) - if err != nil { - t.Fatal("Unable to initialize FS backend.", err) - } - - globalServerConfig.Notify.SetElasticSearchByID("1", elasticSearchNotify{Enable: true}) - if err := initEventNotifier(fs); err == nil { - t.Fatal("ElasticSearch config didn't fail.") - } -} - -// InitEventNotifierWithRedis - test InitEventNotifier when Redis is not ready -func TestInitEventNotifierWithRedis(t *testing.T) { - // initialize the server and obtain the credentials and root. - // credentials are necessary to sign the HTTP request. - rootPath, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatalf("Init Test config failed") - } - // remove the root directory after the test ends. - defer os.RemoveAll(rootPath) - - disks, err := getRandomDisks(1) - defer os.RemoveAll(disks[0]) - if err != nil { - t.Fatal("Unable to create directories for FS backend. 
", err) - } - fs, _, err := initObjectLayer(mustGetNewEndpointList(disks...)) - if err != nil { - t.Fatal("Unable to initialize FS backend.", err) - } - - globalServerConfig.Notify.SetRedisByID("1", redisNotify{Enable: true}) - if err := initEventNotifier(fs); err == nil { - t.Fatal("Redis config didn't fail.") - } -} - -type TestPeerRPCServerData struct { - serverType string - testServer TestServer -} - -func (s *TestPeerRPCServerData) Setup(t *testing.T) { - s.testServer = StartTestPeersRPCServer(t, s.serverType) - - // setup port and minio addr - host, port := mustSplitHostPort(s.testServer.Server.Listener.Addr().String()) - globalMinioHost = host - globalMinioPort = port - globalMinioAddr = getEndpointsLocalAddr(s.testServer.endpoints) - - // initialize the peer client(s) - initGlobalS3Peers(s.testServer.Disks) -} - -func (s *TestPeerRPCServerData) TearDown() { - s.testServer.Stop() - _ = os.RemoveAll(s.testServer.Root) - for _, d := range s.testServer.Disks { - _ = os.RemoveAll(d.Path) - } -} - -func TestSetNGetBucketNotification(t *testing.T) { - s := TestPeerRPCServerData{serverType: "XL"} - - // setup and teardown - s.Setup(t) - defer s.TearDown() - - bucketName := getRandomBucketName() - - obj := s.testServer.Obj - if err := initEventNotifier(obj); err != nil { - t.Fatal("Unexpected error:", err) - } - - globalEventNotifier.SetBucketNotificationConfig(bucketName, ¬ificationConfig{}) - nConfig := globalEventNotifier.GetBucketNotificationConfig(bucketName) - if nConfig == nil { - t.Errorf("Notification expected to be set, but notification not set.") - } - - if !reflect.DeepEqual(nConfig, ¬ificationConfig{}) { - t.Errorf("Mismatching notification configs.") - } -} - -func TestInitEventNotifier(t *testing.T) { - currentIsDistXL := globalIsDistXL - defer func() { - globalIsDistXL = currentIsDistXL - }() - - s := TestPeerRPCServerData{serverType: "XL"} - - // setup and teardown - s.Setup(t) - defer s.TearDown() - - // test if empty object layer arg. 
returns expected error. - if err := initEventNotifier(nil); err == nil || err != errInvalidArgument { - t.Fatalf("initEventNotifier returned unexpected error value - %v", err) - } - - obj := s.testServer.Obj - bucketName := getRandomBucketName() - // declare sample configs - filterRules := []filterRule{ - { - Name: "prefix", - Value: "minio", - }, - { - Name: "suffix", - Value: "*.jpg", - }, - } - sampleSvcCfg := ServiceConfig{ - []string{"s3:ObjectRemoved:*", "s3:ObjectCreated:*"}, - filterStruct{ - keyFilter{filterRules}, - }, - "1", - } - sampleNotifCfg := notificationConfig{ - QueueConfigs: []queueConfig{ - { - ServiceConfig: sampleSvcCfg, - QueueARN: "testqARN", - }, - }, - } - sampleListenCfg := []listenerConfig{ - { - TopicConfig: topicConfig{ServiceConfig: sampleSvcCfg, - TopicARN: "testlARN"}, - TargetServer: globalMinioAddr, - }, - } - - // create bucket - if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil { - t.Fatal("Unexpected error:", err) - } - - // bucket is created, now writing should not give errors. - if err := persistNotificationConfig(bucketName, &sampleNotifCfg, obj); err != nil { - t.Fatal("Unexpected error:", err) - } - - if err := persistListenerConfig(bucketName, sampleListenCfg, obj); err != nil { - t.Fatal("Unexpected error:", err) - } - - // needed to load listener config from disk for testing (in - // single peer mode, the listener config is ignored, but here - // we want to test the loading from disk too.) 
- globalIsDistXL = true - - // test event notifier init - if err := initEventNotifier(obj); err != nil { - t.Fatal("Unexpected error:", err) - } - - // fetch bucket configs and verify - ncfg := globalEventNotifier.GetBucketNotificationConfig(bucketName) - if ncfg == nil { - t.Error("Bucket notification was not present for ", bucketName) - } - if len(ncfg.QueueConfigs) != 1 || ncfg.QueueConfigs[0].QueueARN != "testqARN" { - t.Error("Unexpected bucket notification found - ", *ncfg) - } - if globalEventNotifier.GetExternalTarget("testqARN") != nil { - t.Error("A logger was not expected to be found as it was not enabled in the config.") - } - - lcfg := globalEventNotifier.GetBucketListenerConfig(bucketName) - if lcfg == nil { - t.Error("Bucket listener was not present for ", bucketName) - } - if len(lcfg) != 1 || lcfg[0].TargetServer != globalMinioAddr || lcfg[0].TopicConfig.TopicARN != "testlARN" { - t.Error("Unexpected listener config found - ", lcfg[0]) - } - if globalEventNotifier.GetInternalTarget("testlARN") == nil { - t.Error("A listen logger was not found.") - } -} - -func TestListenBucketNotification(t *testing.T) { - currentIsDistXL := globalIsDistXL - defer func() { - globalIsDistXL = currentIsDistXL - }() - - s := TestPeerRPCServerData{serverType: "XL"} - // setup and teardown - s.Setup(t) - defer s.TearDown() - - // test initialisation - obj := s.testServer.Obj - - bucketName := "bucket" - objectName := "object" - - // Create the bucket to listen on - if err := obj.MakeBucketWithLocation(bucketName, ""); err != nil { - t.Fatal("Unexpected error:", err) - } - - listenARN := fmt.Sprintf("%s:%s:1:%s-%s", - minioTopic, - globalServerConfig.GetRegion(), - snsTypeMinio, - s.testServer.Server.Listener.Addr(), - ) - lcfg := listenerConfig{ - TopicConfig: topicConfig{ - ServiceConfig{ - []string{"s3:ObjectRemoved:*", "s3:ObjectCreated:*"}, - filterStruct{}, - "0", - }, - listenARN, - }, - TargetServer: globalMinioAddr, - } - - // write listener config to storage 
layer - lcfgs := []listenerConfig{lcfg} - if err := persistListenerConfig(bucketName, lcfgs, obj); err != nil { - t.Fatalf("Test Setup error: %v", err) - } - - // needed to load listener config from disk for testing (in - // single peer mode, the listener config is ingored, but here - // we want to test the loading from disk too.) - globalIsDistXL = true - - // Init event notifier - if err := initEventNotifier(obj); err != nil { - t.Fatal("Unexpected error:", err) - } - - // Check if the config is loaded - listenerCfg := globalEventNotifier.GetBucketListenerConfig(bucketName) - if listenerCfg == nil { - t.Fatal("Cannot load bucket listener config") - } - if len(listenerCfg) != 1 { - t.Fatal("Listener config is not correctly loaded. Exactly one listener config is expected") - } - - // Check if topic ARN is correct - if listenerCfg[0].TopicConfig.TopicARN != listenARN { - t.Fatal("Configured topic ARN is incorrect.") - } - - // Create a new notification event channel. - nListenCh := newListenChan() - // Add events channel for listener. - if err := globalEventNotifier.AddListenerChan(listenARN, nListenCh); err != nil { - t.Fatalf("Test Setup error: %v", err) - } - // Remove listen channel after the writer has closed or the - // client disconnected. 
- defer globalEventNotifier.RemoveListenerChan(listenARN) - - // Fire an event notification - go eventNotify(eventData{ - Type: ObjectRemovedDelete, - Bucket: bucketName, - ObjInfo: ObjectInfo{ - Bucket: bucketName, - Name: objectName, - }, - ReqParams: map[string]string{ - "sourceIPAddress": "localhost:1337", - }, - }) - - // Wait for the event notification here, if nothing is received within 30 seconds, - // test error will be fired - select { - case n := <-nListenCh.dataCh: - // Check that received event - if len(n) == 0 { - t.Fatal("Unexpected error occurred") - } - if n[0].S3.Object.Key != objectName { - t.Fatalf("Received wrong object name in notification, expected %s, received %s", n[0].S3.Object.Key, objectName) - } - case <-time.After(3 * time.Second): - } - -} - -func TestAddRemoveBucketListenerConfig(t *testing.T) { - s := TestPeerRPCServerData{serverType: "XL"} - - // setup and teardown - s.Setup(t) - defer s.TearDown() - - // test code - obj := s.testServer.Obj - if err := initEventNotifier(obj); err != nil { - t.Fatalf("Failed to initialize event notifier: %v", err) - } - - // Make a bucket to store topicConfigs. - randBucket := getRandomBucketName() - if err := obj.MakeBucketWithLocation(randBucket, ""); err != nil { - t.Fatalf("Failed to make bucket %s", randBucket) - } - - // Add a topicConfig to an empty notificationConfig. 
- accountID := fmt.Sprintf("%d", UTCNow().UnixNano()) - accountARN := fmt.Sprintf( - "arn:minio:sqs:%s:%s:listen-%s", - globalServerConfig.GetRegion(), - accountID, - globalMinioAddr, - ) - - // Make topic configuration - filterRules := []filterRule{ - { - Name: "prefix", - Value: "minio", - }, - { - Name: "suffix", - Value: "*.jpg", - }, - } - sampleTopicCfg := topicConfig{ - TopicARN: accountARN, - ServiceConfig: ServiceConfig{ - []string{"s3:ObjectRemoved:*", "s3:ObjectCreated:*"}, - filterStruct{ - keyFilter{filterRules}, - }, - "sns-" + accountID, - }, - } - sampleListenerCfg := &listenerConfig{ - TopicConfig: sampleTopicCfg, - TargetServer: globalMinioAddr, - } - testCases := []struct { - lCfg *listenerConfig - expectedErr error - }{ - {sampleListenerCfg, nil}, - {nil, errInvalidArgument}, - } - - for i, test := range testCases { - err := AddBucketListenerConfig(randBucket, test.lCfg, obj) - if err != test.expectedErr { - t.Errorf( - "Test %d: Failed with error %v, expected to fail with %v", - i+1, err, test.expectedErr, - ) - } - } - - // test remove listener actually removes a listener - RemoveBucketListenerConfig(randBucket, sampleListenerCfg, obj) - // since it does not return errors we fetch the config and - // check - lcSlice := globalEventNotifier.GetBucketListenerConfig(randBucket) - if len(lcSlice) != 0 { - t.Errorf("Remove Listener Config Test: did not remove listener config - %v", - lcSlice) - } -} diff --git a/cmd/fs-v1.go b/cmd/fs-v1.go index 0eb13a274..2248fcd40 100644 --- a/cmd/fs-v1.go +++ b/cmd/fs-v1.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -169,8 +169,8 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) { return nil, fmt.Errorf("Unable to load all bucket policies. 
%s", err) } - // Initialize a new event notifier. - if err = initEventNotifier(fs); err != nil { + // Initialize notification system. + if err = globalNotificationSys.Init(fs); err != nil { return nil, fmt.Errorf("Unable to initialize event notification. %s", err) } diff --git a/cmd/gateway-main.go b/cmd/gateway-main.go index cd6d69222..ce9174737 100644 --- a/cmd/gateway-main.go +++ b/cmd/gateway-main.go @@ -162,6 +162,10 @@ func StartGateway(ctx *cli.Context, gw Gateway) { initNSLock(false) // Enable local namespace lock. + // Initialize notification system. + globalNotificationSys, err = NewNotificationSys(globalServerConfig, EndpointList{}) + fatalIf(err, "Unable to initialize notification system.") + newObject, err := gw.NewGatewayLayer(globalServerConfig.GetCredential()) fatalIf(err, "Unable to initialize gateway layer") diff --git a/cmd/globals.go b/cmd/globals.go index 6bd0bde88..23dfd2f07 100644 --- a/cmd/globals.go +++ b/cmd/globals.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -114,8 +114,7 @@ var ( // Holds the host that was passed using --address globalMinioHost = "" - // Peer communication struct - globalS3Peers = s3Peers{} + globalNotificationSys *NotificationSys // CA root certificates, a nil value means system certs pool will be used globalRootCAs *x509.CertPool @@ -160,9 +159,6 @@ var ( globalOperationTimeout = newDynamicTimeout(10*time.Minute /*30*/, 600*time.Second) // default timeout for general ops globalHealingTimeout = newDynamicTimeout(30*time.Minute /*1*/, 30*time.Minute) // timeout for healing related ops - // Keep connection active for clients actively using ListenBucketNotification. - globalSNSConnAlive = 5 * time.Second // Send a whitespace every 5 seconds. 
- // Storage classes // Set to indicate if storage class is set up globalIsStorageClass bool diff --git a/cmd/notification.go b/cmd/notification.go new file mode 100644 index 000000000..b64ba9343 --- /dev/null +++ b/cmd/notification.go @@ -0,0 +1,634 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "net/url" + "path" + "sync" + + xerrors "github.com/minio/minio/pkg/errors" + "github.com/minio/minio/pkg/event" + "github.com/minio/minio/pkg/hash" + xnet "github.com/minio/minio/pkg/net" +) + +// NotificationSys - notification system. +type NotificationSys struct { + sync.RWMutex + targetList *event.TargetList + bucketRulesMap map[string]event.RulesMap + bucketRemoteTargetRulesMap map[string]map[event.TargetID]event.RulesMap + peerRPCClientMap map[xnet.Host]*PeerRPCClient +} + +// GetARNList - returns available ARNs. +func (sys *NotificationSys) GetARNList() []string { + arns := []string{} + region := globalServerConfig.GetRegion() + for _, targetID := range sys.targetList.List() { + arns = append(arns, targetID.ToARN(region).String()) + } + + return arns +} + +// GetPeerRPCClient - returns PeerRPCClient of addr. +func (sys *NotificationSys) GetPeerRPCClient(addr xnet.Host) *PeerRPCClient { + return sys.peerRPCClientMap[addr] +} + +// DeleteBucket - calls DeleteBucket RPC call on all peers. 
+func (sys *NotificationSys) DeleteBucket(bucketName string) map[xnet.Host]error { + errors := make(map[xnet.Host]error) + var wg sync.WaitGroup + for addr, client := range sys.peerRPCClientMap { + wg.Add(1) + go func(addr xnet.Host, client *PeerRPCClient) { + defer wg.Done() + if err := client.DeleteBucket(bucketName); err != nil { + errors[addr] = err + } + }(addr, client) + } + wg.Wait() + + return errors +} + +// UpdateBucketPolicy - calls UpdateBucketPolicy RPC call on all peers. +func (sys *NotificationSys) UpdateBucketPolicy(bucketName string) map[xnet.Host]error { + errors := make(map[xnet.Host]error) + var wg sync.WaitGroup + for addr, client := range sys.peerRPCClientMap { + wg.Add(1) + go func(addr xnet.Host, client *PeerRPCClient) { + defer wg.Done() + if err := client.UpdateBucketPolicy(bucketName); err != nil { + errors[addr] = err + } + }(addr, client) + } + wg.Wait() + + return errors +} + +// PutBucketNotification - calls PutBucketNotification RPC call on all peers. +func (sys *NotificationSys) PutBucketNotification(bucketName string, rulesMap event.RulesMap) map[xnet.Host]error { + errors := make(map[xnet.Host]error) + var wg sync.WaitGroup + for addr, client := range sys.peerRPCClientMap { + wg.Add(1) + go func(addr xnet.Host, client *PeerRPCClient, rulesMap event.RulesMap) { + defer wg.Done() + if err := client.PutBucketNotification(bucketName, rulesMap); err != nil { + errors[addr] = err + } + }(addr, client, rulesMap.Clone()) + } + wg.Wait() + + return errors +} + +// ListenBucketNotification - calls ListenBucketNotification RPC call on all peers. 
+func (sys *NotificationSys) ListenBucketNotification(bucketName string, eventNames []event.Name, pattern string, targetID event.TargetID, localPeer xnet.Host) map[xnet.Host]error { + errors := make(map[xnet.Host]error) + var wg sync.WaitGroup + for addr, client := range sys.peerRPCClientMap { + wg.Add(1) + go func(addr xnet.Host, client *PeerRPCClient) { + defer wg.Done() + if err := client.ListenBucketNotification(bucketName, eventNames, pattern, targetID, localPeer); err != nil { + errors[addr] = err + } + }(addr, client) + } + wg.Wait() + + return errors +} + +// AddRemoteTarget - adds event rules map, HTTP/PeerRPC client target to bucket name. +func (sys *NotificationSys) AddRemoteTarget(bucketName string, target event.Target, rulesMap event.RulesMap) error { + if err := sys.targetList.Add(target); err != nil { + return err + } + + sys.Lock() + targetMap := sys.bucketRemoteTargetRulesMap[bucketName] + if targetMap == nil { + targetMap = make(map[event.TargetID]event.RulesMap) + } + targetMap[target.ID()] = rulesMap.Clone() + sys.bucketRemoteTargetRulesMap[bucketName] = targetMap + sys.Unlock() + + sys.AddRulesMap(bucketName, rulesMap) + return nil +} + +// RemoteTargetExist - checks whether given target ID is a HTTP/PeerRPC client target or not. +func (sys *NotificationSys) RemoteTargetExist(bucketName string, targetID event.TargetID) bool { + sys.Lock() + defer sys.Unlock() + + targetMap, ok := sys.bucketRemoteTargetRulesMap[bucketName] + if ok { + _, ok = targetMap[targetID] + } + + return ok +} + +// initListeners - initializes PeerRPC clients available in listener.json. +func (sys *NotificationSys) initListeners(objAPI ObjectLayer, bucketName string) error { + // listener.json is available/applicable only in DistXL mode. + if !globalIsDistXL { + return nil + } + + // Construct path to listener.json for the given bucket. 
+ configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig) + transactionConfigFile := configFile + ".transaction" + + // As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket + // and configFile, take a transaction lock to avoid data race between readConfig() + // and saveConfig(). + objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile) + if err := objLock.GetLock(globalOperationTimeout); err != nil { + return err + } + defer objLock.Unlock() + + reader, err := readConfig(objAPI, configFile) + if err != nil && !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) { + return err + } + + listenerList := []ListenBucketNotificationArgs{} + if reader != nil { + if err = json.NewDecoder(reader).Decode(&listenerList); err != nil { + errorIf(err, "Unable to parse listener.json.") + return xerrors.Trace(err) + } + } + + if len(listenerList) == 0 { + // Nothing to initialize for empty listener list. + return nil + } + + activeListenerList := []ListenBucketNotificationArgs{} + for _, args := range listenerList { + var found bool + if found, err = isLocalHost(args.Addr.Name); err != nil { + errorIf(err, "unable to check address %v is local host", args.Addr) + return err + } + if found { + // As this function is called at startup, skip HTTP listener to this host. + continue + } + + rpcClient := sys.GetPeerRPCClient(args.Addr) + if rpcClient == nil { + return fmt.Errorf("unable to find PeerRPCClient by address %v in listener.json for bucket %v", args.Addr, bucketName) + } + + var exist bool + if exist, err = rpcClient.RemoteTargetExist(bucketName, args.TargetID); err != nil { + return err + } + if !exist { + // Skip previously connected HTTP listener which is not found in remote peer. 
+ continue + } + + target := NewPeerRPCClientTarget(bucketName, args.TargetID, rpcClient) + rulesMap := event.NewRulesMap(args.EventNames, args.Pattern, target.ID()) + if err = sys.AddRemoteTarget(bucketName, target, rulesMap); err != nil { + return err + } + activeListenerList = append(activeListenerList, args) + } + + data, err := json.Marshal(activeListenerList) + if err != nil { + return err + } + + return saveConfig(objAPI, configFile, data) +} + +// Init - initializes notification system from notification.xml and listener.json of all buckets. +func (sys *NotificationSys) Init(objAPI ObjectLayer) error { + if objAPI == nil { + return errInvalidArgument + } + + buckets, err := objAPI.ListBuckets() + if err != nil { + return err + } + + for _, bucket := range buckets { + config, err := readNotificationConfig(objAPI, bucket.Name) + if err != nil { + if !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) { + errorIf(err, "Unable to load notification configuration of bucket %v", bucket.Name) + return err + } + } else { + sys.AddRulesMap(bucket.Name, config.ToRulesMap()) + } + + if err = sys.initListeners(objAPI, bucket.Name); err != nil { + errorIf(err, "Unable to initialize HTTP listener for bucket %v", bucket.Name) + return err + } + } + + return nil +} + +// AddRulesMap - adds rules map for bucket name. +func (sys *NotificationSys) AddRulesMap(bucketName string, rulesMap event.RulesMap) { + sys.Lock() + defer sys.Unlock() + + rulesMap = rulesMap.Clone() + + for _, targetRulesMap := range sys.bucketRemoteTargetRulesMap[bucketName] { + rulesMap.Add(targetRulesMap) + } + + rulesMap.Add(sys.bucketRulesMap[bucketName]) + sys.bucketRulesMap[bucketName] = rulesMap +} + +// RemoveRulesMap - removes rules map for bucket name. 
+func (sys *NotificationSys) RemoveRulesMap(bucketName string, rulesMap event.RulesMap) { + sys.Lock() + defer sys.Unlock() + + sys.bucketRulesMap[bucketName].Remove(rulesMap) + if len(sys.bucketRulesMap[bucketName]) == 0 { + delete(sys.bucketRulesMap, bucketName) + } +} + +// RemoveNotification - removes all notification configuration for bucket name. +func (sys *NotificationSys) RemoveNotification(bucketName string) { + sys.Lock() + defer sys.Unlock() + + delete(sys.bucketRulesMap, bucketName) + + for targetID := range sys.bucketRemoteTargetRulesMap[bucketName] { + sys.targetList.Remove(targetID) + delete(sys.bucketRemoteTargetRulesMap[bucketName], targetID) + } + + delete(sys.bucketRemoteTargetRulesMap, bucketName) +} + +// RemoveAllRemoteTargets - closes and removes all HTTP/PeerRPC client targets. +func (sys *NotificationSys) RemoveAllRemoteTargets() { + for _, targetMap := range sys.bucketRemoteTargetRulesMap { + for targetID := range targetMap { + sys.targetList.Remove(targetID) + } + } +} + +// RemoveRemoteTarget - closes and removes target by target ID. +func (sys *NotificationSys) RemoveRemoteTarget(bucketName string, targetID event.TargetID) { + for id, err := range sys.targetList.Remove(targetID) { + errorIf(err, "unable to close target ID %v", id) + } + + sys.Lock() + defer sys.Unlock() + + if _, ok := sys.bucketRemoteTargetRulesMap[bucketName]; ok { + delete(sys.bucketRemoteTargetRulesMap[bucketName], targetID) + if len(sys.bucketRemoteTargetRulesMap[bucketName]) == 0 { + delete(sys.bucketRemoteTargetRulesMap, bucketName) + } + } +} + +func (sys *NotificationSys) send(bucketName string, eventData event.Event, targetIDs ...event.TargetID) map[event.TargetID]error { + errMap := sys.targetList.Send(eventData, targetIDs...) + for targetID := range errMap { + if sys.RemoteTargetExist(bucketName, targetID) { + sys.RemoveRemoteTarget(bucketName, targetID) + } + } + + return errMap +} + +// Send - sends event data to all matching targets. 
+func (sys *NotificationSys) Send(args eventArgs) map[event.TargetID]error { + sys.RLock() + targetIDSet := sys.bucketRulesMap[args.BucketName].Match(args.EventName, args.Object.Name) + sys.RUnlock() + if len(targetIDSet) == 0 { + return nil + } + + targetIDs := targetIDSet.ToSlice() + return sys.send(args.BucketName, args.ToEvent(), targetIDs...) +} + +// NewNotificationSys - creates new notification system object. +func NewNotificationSys(config *serverConfig, endpoints EndpointList) (*NotificationSys, error) { + targetList, err := getNotificationTargets(config) + if err != nil { + return nil, err + } + + peerRPCClientMap := makeRemoteRPCClients(endpoints) + + // bucketRulesMap/bucketRemoteTargetRulesMap are initialized by NotificationSys.Init() + return &NotificationSys{ + targetList: targetList, + bucketRulesMap: make(map[string]event.RulesMap), + bucketRemoteTargetRulesMap: make(map[string]map[event.TargetID]event.RulesMap), + peerRPCClientMap: peerRPCClientMap, + }, nil +} + +type eventArgs struct { + EventName event.Name + BucketName string + Object ObjectInfo + ReqParams map[string]string + Host string + Port string + UserAgent string +} + +// ToEvent - converts to notification event. +func (args eventArgs) ToEvent() event.Event { + getOriginEndpoint := func() string { + host := globalMinioHost + if host == "" { + // FIXME: Send FQDN or hostname of this machine than sending IP address. 
+ host = localIP4.ToSlice()[0] + } + + return fmt.Sprintf("%s://%s:%s", getURLScheme(globalIsSSL), host, globalMinioPort) + } + + creds := globalServerConfig.GetCredential() + eventTime := UTCNow() + uniqueID := fmt.Sprintf("%X", eventTime.UnixNano()) + + newEvent := event.Event{ + EventVersion: "2.0", + EventSource: "minio:s3", + AwsRegion: globalServerConfig.GetRegion(), + EventTime: eventTime.Format(event.AMZTimeFormat), + EventName: args.EventName, + UserIdentity: event.Identity{creds.AccessKey}, + RequestParameters: args.ReqParams, + ResponseElements: map[string]string{ + "x-amz-request-id": uniqueID, + "x-minio-origin-endpoint": getOriginEndpoint(), // Minio specific custom elements. + }, + S3: event.Metadata{ + SchemaVersion: "1.0", + ConfigurationID: "Config", + Bucket: event.Bucket{ + Name: args.BucketName, + OwnerIdentity: event.Identity{creds.AccessKey}, + ARN: bucketARNPrefix + args.BucketName, + }, + Object: event.Object{ + Key: url.QueryEscape(args.Object.Name), + VersionID: "1", + Sequencer: uniqueID, + }, + }, + Source: event.Source{ + Host: args.Host, + Port: args.Port, + UserAgent: args.UserAgent, + }, + } + + if args.EventName != event.ObjectRemovedDelete { + newEvent.S3.Object.ETag = args.Object.ETag + newEvent.S3.Object.Size = args.Object.Size + newEvent.S3.Object.ContentType = args.Object.ContentType + newEvent.S3.Object.UserMetadata = args.Object.UserDefined + } + + return newEvent +} + +func sendEvent(args eventArgs) { + // globalNotificationSys is not initialized in gateway mode. 
+ if globalNotificationSys == nil { + return + } + + for targetID, err := range globalNotificationSys.Send(args) { + errorIf(err, "unable to send event %v of bucket: %v, object: %v to target %v", + args.EventName, args.BucketName, args.Object.Name, targetID) + } +} + +func saveConfig(objAPI ObjectLayer, configFile string, data []byte) error { + hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data)) + if err != nil { + return err + } + + _, err = objAPI.PutObject(minioMetaBucket, configFile, hashReader, nil) + return err +} + +func readConfig(objAPI ObjectLayer, configFile string) (*bytes.Buffer, error) { + var buffer bytes.Buffer + // Read entire content by setting size to -1 + err := objAPI.GetObject(minioMetaBucket, configFile, 0, -1, &buffer, "") + if err != nil { + // Ignore if err is ObjectNotFound or IncompleteBody when bucket is not configured with notification + if isErrObjectNotFound(err) || isErrIncompleteBody(err) { + return nil, xerrors.Trace(errNoSuchNotifications) + } + errorIf(err, "Unable to read file %v", configFile) + return nil, err + } + + // Return NoSuchNotifications on empty content. + if buffer.Len() == 0 { + return nil, xerrors.Trace(errNoSuchNotifications) + } + + return &buffer, nil +} + +func readNotificationConfig(objAPI ObjectLayer, bucketName string) (*event.Config, error) { + // Construct path to notification.xml for the given bucket. + configFile := path.Join(bucketConfigPrefix, bucketName, bucketNotificationConfig) + + // Get read lock. 
+ objLock := globalNSMutex.NewNSLock(minioMetaBucket, configFile) + if err := objLock.GetRLock(globalOperationTimeout); err != nil { + return nil, err + } + defer objLock.RUnlock() + + reader, err := readConfig(objAPI, configFile) + if err != nil { + return nil, err + } + + return event.ParseConfig(reader, globalServerConfig.GetRegion(), globalNotificationSys.targetList) +} + +func saveNotificationConfig(objAPI ObjectLayer, bucketName string, config *event.Config) error { + data, err := xml.Marshal(config) + if err != nil { + return err + } + + configFile := path.Join(bucketConfigPrefix, bucketName, bucketNotificationConfig) + + // Get write lock. + objLock := globalNSMutex.NewNSLock(minioMetaBucket, configFile) + if err := objLock.GetLock(globalOperationTimeout); err != nil { + return err + } + defer objLock.Unlock() + + return saveConfig(objAPI, configFile, data) +} + +// SaveListener - saves HTTP client currently listening for events to listener.json. +func SaveListener(objAPI ObjectLayer, bucketName string, eventNames []event.Name, pattern string, targetID event.TargetID, addr xnet.Host) error { + // listener.json is available/applicable only in DistXL mode. + if !globalIsDistXL { + return nil + } + + // Construct path to listener.json for the given bucket. + configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig) + transactionConfigFile := configFile + ".transaction" + + // As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket + // and configFile, take a transaction lock to avoid data race between readConfig() + // and saveConfig(). 
+ objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile) + if err := objLock.GetLock(globalOperationTimeout); err != nil { + return err + } + defer objLock.Unlock() + + reader, err := readConfig(objAPI, configFile) + if err != nil && !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) { + return err + } + + listenerList := []ListenBucketNotificationArgs{} + if reader != nil { + if err = json.NewDecoder(reader).Decode(&listenerList); err != nil { + errorIf(err, "Unable to parse listener.json.") + return xerrors.Trace(err) + } + } + + listenerList = append(listenerList, ListenBucketNotificationArgs{ + EventNames: eventNames, + Pattern: pattern, + TargetID: targetID, + Addr: addr, + }) + + data, err := json.Marshal(listenerList) + if err != nil { + return err + } + + return saveConfig(objAPI, configFile, data) +} + +// RemoveListener - removes HTTP client currently listening for events from listener.json. +func RemoveListener(objAPI ObjectLayer, bucketName string, targetID event.TargetID, addr xnet.Host) error { + // listener.json is available/applicable only in DistXL mode. + if !globalIsDistXL { + return nil + } + + // Construct path to listener.json for the given bucket. + configFile := path.Join(bucketConfigPrefix, bucketName, bucketListenerConfig) + transactionConfigFile := configFile + ".transaction" + + // As object layer's GetObject() and PutObject() take respective lock on minioMetaBucket + // and configFile, take a transaction lock to avoid data race between readConfig() + // and saveConfig(). 
+ objLock := globalNSMutex.NewNSLock(minioMetaBucket, transactionConfigFile) + if err := objLock.GetLock(globalOperationTimeout); err != nil { + return err + } + defer objLock.Unlock() + + reader, err := readConfig(objAPI, configFile) + if err != nil && !xerrors.IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) { + return err + } + + listenerList := []ListenBucketNotificationArgs{} + if reader != nil { + if err = json.NewDecoder(reader).Decode(&listenerList); err != nil { + errorIf(err, "Unable to parse listener.json.") + return xerrors.Trace(err) + } + } + + if len(listenerList) == 0 { + // Nothing to remove. + return nil + } + + activeListenerList := []ListenBucketNotificationArgs{} + for _, args := range listenerList { + if args.TargetID == targetID && args.Addr.Equal(addr) { + // Skip if matches + continue + } + + activeListenerList = append(activeListenerList, args) + } + + data, err := json.Marshal(activeListenerList) + if err != nil { + return err + } + + return saveConfig(objAPI, configFile, data) +} diff --git a/cmd/notifier-config.go b/cmd/notifier-config.go deleted file mode 100644 index d87c72ec8..000000000 --- a/cmd/notifier-config.go +++ /dev/null @@ -1,347 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "fmt" -) - -// Notifier represents collection of supported notification queues. 
-type notifier struct { - AMQP amqpConfigs `json:"amqp"` - NATS natsConfigs `json:"nats"` - ElasticSearch elasticSearchConfigs `json:"elasticsearch"` - Redis redisConfigs `json:"redis"` - PostgreSQL postgreSQLConfigs `json:"postgresql"` - Kafka kafkaConfigs `json:"kafka"` - Webhook webhookConfigs `json:"webhook"` - MySQL mySQLConfigs `json:"mysql"` - MQTT mqttConfigs `json:"mqtt"` - // Add new notification queues. IMPORTANT: When new queues are - // added, update `serverConfig.ConfigDiff()` to reflect the - // change. -} - -type amqpConfigs map[string]amqpNotify - -func (a amqpConfigs) Clone() amqpConfigs { - a2 := make(amqpConfigs, len(a)) - for k, v := range a { - a2[k] = v - } - return a2 -} - -func (a amqpConfigs) Validate() error { - for k, v := range a { - if err := v.Validate(); err != nil { - return fmt.Errorf("AMQP [%s] configuration invalid: %s", k, err.Error()) - } - } - return nil -} - -type mqttConfigs map[string]mqttNotify - -func (a mqttConfigs) Clone() mqttConfigs { - a2 := make(mqttConfigs, len(a)) - for k, v := range a { - a2[k] = v - } - return a2 -} - -func (a mqttConfigs) Validate() error { - for k, v := range a { - if err := v.Validate(); err != nil { - return fmt.Errorf("MQTT [%s] configuration invalid: %s", k, err.Error()) - } - } - return nil -} - -type natsConfigs map[string]natsNotify - -func (a natsConfigs) Clone() natsConfigs { - a2 := make(natsConfigs, len(a)) - for k, v := range a { - a2[k] = v - } - return a2 -} - -func (a natsConfigs) Validate() error { - for k, v := range a { - if err := v.Validate(); err != nil { - return fmt.Errorf("NATS [%s] configuration invalid: %s", k, err.Error()) - } - } - return nil -} - -type elasticSearchConfigs map[string]elasticSearchNotify - -func (a elasticSearchConfigs) Clone() elasticSearchConfigs { - a2 := make(elasticSearchConfigs, len(a)) - for k, v := range a { - a2[k] = v - } - return a2 -} - -func (a elasticSearchConfigs) Validate() error { - for k, v := range a { - if err := v.Validate(); 
err != nil { - return fmt.Errorf("ElasticSearch [%s] configuration invalid: %s", k, err.Error()) - } - } - return nil -} - -type redisConfigs map[string]redisNotify - -func (a redisConfigs) Clone() redisConfigs { - a2 := make(redisConfigs, len(a)) - for k, v := range a { - a2[k] = v - } - return a2 -} - -func (a redisConfigs) Validate() error { - for k, v := range a { - if err := v.Validate(); err != nil { - return fmt.Errorf("Redis [%s] configuration invalid: %s", k, err.Error()) - } - } - return nil -} - -type postgreSQLConfigs map[string]postgreSQLNotify - -func (a postgreSQLConfigs) Clone() postgreSQLConfigs { - a2 := make(postgreSQLConfigs, len(a)) - for k, v := range a { - a2[k] = v - } - return a2 -} - -func (a postgreSQLConfigs) Validate() error { - for k, v := range a { - if err := v.Validate(); err != nil { - return fmt.Errorf("PostgreSQL [%s] configuration invalid: %s", k, err.Error()) - } - } - return nil -} - -type kafkaConfigs map[string]kafkaNotify - -func (a kafkaConfigs) Clone() kafkaConfigs { - a2 := make(kafkaConfigs, len(a)) - for k, v := range a { - a2[k] = v - } - return a2 -} - -func (a kafkaConfigs) Validate() error { - for k, v := range a { - if err := v.Validate(); err != nil { - return fmt.Errorf("Kafka [%s] configuration invalid: %s", k, err.Error()) - } - } - return nil -} - -type webhookConfigs map[string]webhookNotify - -func (a webhookConfigs) Clone() webhookConfigs { - a2 := make(webhookConfigs, len(a)) - for k, v := range a { - a2[k] = v - } - return a2 -} - -func (a webhookConfigs) Validate() error { - for k, v := range a { - if err := v.Validate(); err != nil { - return fmt.Errorf("Webhook [%s] configuration invalid: %s", k, err.Error()) - } - } - return nil -} - -type mySQLConfigs map[string]mySQLNotify - -func (a mySQLConfigs) Clone() mySQLConfigs { - a2 := make(mySQLConfigs, len(a)) - for k, v := range a { - a2[k] = v - } - return a2 -} - -func (a mySQLConfigs) Validate() error { - for k, v := range a { - if err := 
v.Validate(); err != nil { - return fmt.Errorf("MySQL [%s] configuration invalid: %s", k, err.Error()) - } - } - return nil -} - -func (n *notifier) Validate() error { - if n == nil { - return nil - } - if err := n.AMQP.Validate(); err != nil { - return err - } - if err := n.NATS.Validate(); err != nil { - return err - } - if err := n.ElasticSearch.Validate(); err != nil { - return err - } - if err := n.Redis.Validate(); err != nil { - return err - } - if err := n.PostgreSQL.Validate(); err != nil { - return err - } - if err := n.Kafka.Validate(); err != nil { - return err - } - if err := n.Webhook.Validate(); err != nil { - return err - } - if err := n.MySQL.Validate(); err != nil { - return err - } - return n.MQTT.Validate() -} - -func (n *notifier) SetAMQPByID(accountID string, amqpn amqpNotify) { - n.AMQP[accountID] = amqpn -} - -func (n *notifier) GetAMQP() map[string]amqpNotify { - return n.AMQP.Clone() -} - -func (n *notifier) GetAMQPByID(accountID string) amqpNotify { - return n.AMQP[accountID] -} - -func (n *notifier) SetMQTTByID(accountID string, mqttn mqttNotify) { - n.MQTT[accountID] = mqttn -} - -func (n *notifier) GetMQTT() map[string]mqttNotify { - return n.MQTT.Clone() -} - -func (n *notifier) GetMQTTByID(accountID string) mqttNotify { - return n.MQTT[accountID] -} - -func (n *notifier) SetNATSByID(accountID string, natsn natsNotify) { - n.NATS[accountID] = natsn -} - -func (n *notifier) GetNATS() map[string]natsNotify { - return n.NATS.Clone() -} - -func (n *notifier) GetNATSByID(accountID string) natsNotify { - return n.NATS[accountID] -} - -func (n *notifier) SetElasticSearchByID(accountID string, es elasticSearchNotify) { - n.ElasticSearch[accountID] = es -} - -func (n *notifier) GetElasticSearchByID(accountID string) elasticSearchNotify { - return n.ElasticSearch[accountID] -} - -func (n *notifier) GetElasticSearch() map[string]elasticSearchNotify { - return n.ElasticSearch.Clone() -} - -func (n *notifier) SetRedisByID(accountID string, r 
redisNotify) { - n.Redis[accountID] = r -} - -func (n *notifier) GetRedis() map[string]redisNotify { - return n.Redis.Clone() -} - -func (n *notifier) GetRedisByID(accountID string) redisNotify { - return n.Redis[accountID] -} - -func (n *notifier) GetWebhook() map[string]webhookNotify { - return n.Webhook.Clone() -} - -func (n *notifier) GetWebhookByID(accountID string) webhookNotify { - return n.Webhook[accountID] -} - -func (n *notifier) SetWebhookByID(accountID string, pgn webhookNotify) { - n.Webhook[accountID] = pgn -} - -func (n *notifier) SetPostgreSQLByID(accountID string, pgn postgreSQLNotify) { - n.PostgreSQL[accountID] = pgn -} - -func (n *notifier) GetPostgreSQL() map[string]postgreSQLNotify { - return n.PostgreSQL.Clone() -} - -func (n *notifier) GetPostgreSQLByID(accountID string) postgreSQLNotify { - return n.PostgreSQL[accountID] -} - -func (n *notifier) SetMySQLByID(accountID string, pgn mySQLNotify) { - n.MySQL[accountID] = pgn -} - -func (n *notifier) GetMySQL() map[string]mySQLNotify { - return n.MySQL.Clone() -} - -func (n *notifier) GetMySQLByID(accountID string) mySQLNotify { - return n.MySQL[accountID] -} - -func (n *notifier) SetKafkaByID(accountID string, kn kafkaNotify) { - n.Kafka[accountID] = kn -} - -func (n *notifier) GetKafka() map[string]kafkaNotify { - return n.Kafka.Clone() -} - -func (n *notifier) GetKafkaByID(accountID string) kafkaNotify { - return n.Kafka[accountID] -} diff --git a/cmd/notifier-config_test.go b/cmd/notifier-config_test.go deleted file mode 100644 index a416825a9..000000000 --- a/cmd/notifier-config_test.go +++ /dev/null @@ -1,17 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd diff --git a/cmd/notifiers.go b/cmd/notifiers.go deleted file mode 100644 index a9c669bac..000000000 --- a/cmd/notifiers.go +++ /dev/null @@ -1,258 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "errors" - "fmt" - - "github.com/minio/minio/pkg/wildcard" -) - -// SQS type. -const ( - // Minio sqs ARN prefix. - minioSqs = "arn:minio:sqs:" - - // Static string indicating queue type 'amqp'. - queueTypeAMQP = "amqp" - // Static string indicating queue type 'mqtt'. - queueTypeMQTT = "mqtt" - // Static string indicating queue type 'nats'. - queueTypeNATS = "nats" - // Static string indicating queue type 'elasticsearch'. - queueTypeElastic = "elasticsearch" - // Static string indicating queue type 'redis'. - queueTypeRedis = "redis" - // Static string indicating queue type 'postgresql'. - queueTypePostgreSQL = "postgresql" - // Static string indicating queue type 'mysql'. 
- queueTypeMySQL = "mysql" - // Static string indicating queue type 'kafka'. - queueTypeKafka = "kafka" - // Static string for Webhooks - queueTypeWebhook = "webhook" - - // Notifier format value constants - formatNamespace = "namespace" - formatAccess = "access" -) - -// Topic type. -const ( - // Minio topic ARN prefix. - minioTopic = "arn:minio:sns:" - - // Static string indicating sns type 'listen'. - snsTypeMinio = "listen" -) - -var errNotifyNotEnabled = errors.New("requested notifier not enabled") - -// Returns true if queueArn is for an AMQP queue. -func isAMQPQueue(sqsArn arnSQS) bool { - if sqsArn.Type != queueTypeAMQP { - return false - } - amqpL := globalServerConfig.Notify.GetAMQPByID(sqsArn.AccountID) - if !amqpL.Enable { - return false - } - // Connect to amqp server to validate. - amqpC, err := dialAMQP(amqpL) - if err != nil { - errorIf(err, "Unable to connect to amqp service. %#v", amqpL) - return false - } - defer amqpC.conn.Close() - return true -} - -// Returns true if mqttARN is for an MQTT queue. -func isMQTTQueue(sqsArn arnSQS) bool { - if sqsArn.Type != queueTypeMQTT { - return false - } - mqttL := globalServerConfig.Notify.GetMQTTByID(sqsArn.AccountID) - if !mqttL.Enable { - return false - } - // Connect to mqtt server to validate. - mqttC, err := dialMQTT(mqttL) - if err != nil { - errorIf(err, "Unable to connect to mqtt service. %#v", mqttL) - return false - } - defer mqttC.Client.Disconnect(250) - return true -} - -// Returns true if natsArn is for an NATS queue. -func isNATSQueue(sqsArn arnSQS) bool { - if sqsArn.Type != queueTypeNATS { - return false - } - natsL := globalServerConfig.Notify.GetNATSByID(sqsArn.AccountID) - if !natsL.Enable { - return false - } - // Connect to nats server to validate. - natsC, err := dialNATS(natsL, true) - if err != nil { - errorIf(err, "Unable to connect to nats service. 
%#v", natsL) - return false - } - closeNATS(natsC) - return true -} - -// Returns true if queueArn is for an Webhook queue -func isWebhookQueue(sqsArn arnSQS) bool { - if sqsArn.Type != queueTypeWebhook { - return false - } - rNotify := globalServerConfig.Notify.GetWebhookByID(sqsArn.AccountID) - return rNotify.Enable -} - -// Returns true if queueArn is for an Redis queue. -func isRedisQueue(sqsArn arnSQS) bool { - if sqsArn.Type != queueTypeRedis { - return false - } - rNotify := globalServerConfig.Notify.GetRedisByID(sqsArn.AccountID) - if !rNotify.Enable { - return false - } - // Connect to redis server to validate. - rPool, err := dialRedis(rNotify) - if err != nil { - errorIf(err, "Unable to connect to redis service. %#v", rNotify) - return false - } - defer rPool.Close() - return true -} - -// Returns true if queueArn is for an ElasticSearch queue. -func isElasticQueue(sqsArn arnSQS) bool { - if sqsArn.Type != queueTypeElastic { - return false - } - esNotify := globalServerConfig.Notify.GetElasticSearchByID(sqsArn.AccountID) - if !esNotify.Enable { - return false - } - elasticC, err := dialElastic(esNotify) - if err != nil { - errorIf(err, "Unable to connect to elasticsearch service %#v", esNotify) - return false - } - defer elasticC.Stop() - return true -} - -// Returns true if queueArn is for PostgreSQL. -func isPostgreSQLQueue(sqsArn arnSQS) bool { - if sqsArn.Type != queueTypePostgreSQL { - return false - } - pgNotify := globalServerConfig.Notify.GetPostgreSQLByID(sqsArn.AccountID) - if !pgNotify.Enable { - return false - } - pgC, err := dialPostgreSQL(pgNotify) - if err != nil { - errorIf(err, "Unable to connect to PostgreSQL server %#v", pgNotify) - return false - } - defer pgC.Close() - return true -} - -// Returns true if queueArn is for MySQL. 
-func isMySQLQueue(sqsArn arnSQS) bool { - if sqsArn.Type != queueTypeMySQL { - return false - } - msqlNotify := globalServerConfig.Notify.GetMySQLByID(sqsArn.AccountID) - if !msqlNotify.Enable { - return false - } - myC, err := dialMySQL(msqlNotify) - if err != nil { - errorIf(err, "Unable to connect to MySQL server %#v", msqlNotify) - return false - } - defer myC.Close() - return true -} - -// Returns true if queueArn is for Kafka. -func isKafkaQueue(sqsArn arnSQS) bool { - if sqsArn.Type != queueTypeKafka { - return false - } - kafkaNotifyCfg := globalServerConfig.Notify.GetKafkaByID(sqsArn.AccountID) - if !kafkaNotifyCfg.Enable { - return false - } - kafkaC, err := dialKafka(kafkaNotifyCfg) - if err != nil { - errorIf(err, "Unable to dial Kafka server %#v", kafkaNotifyCfg) - return false - } - defer kafkaC.Close() - return true -} - -// Match function matches wild cards in 'pattern' for events. -func eventMatch(eventType string, events []string) (ok bool) { - for _, event := range events { - ok = wildcard.MatchSimple(event, eventType) - if ok { - break - } - } - return ok -} - -// Filter rule match, matches an object against the filter rules. -func filterRuleMatch(object string, frs []filterRule) bool { - var prefixMatch, suffixMatch = true, true - for _, fr := range frs { - if isValidFilterNamePrefix(fr.Name) { - prefixMatch = hasPrefix(object, fr.Value) - } else if isValidFilterNameSuffix(fr.Name) { - suffixMatch = hasSuffix(object, fr.Value) - } - } - return prefixMatch && suffixMatch -} - -// A type to represent dynamic error generation functions for -// notifications. -type notificationErrorFactoryFunc func(string, ...interface{}) error - -// A function to build dynamic error generation functions for -// notifications by setting an error prefix string. -func newNotificationErrorFactory(prefix string) notificationErrorFactoryFunc { - return func(msg string, a ...interface{}) error { - s := fmt.Sprintf(msg, a...) 
- return fmt.Errorf("%s: %s", prefix, s) - } -} diff --git a/cmd/notifiers_test.go b/cmd/notifiers_test.go deleted file mode 100644 index 2869472ad..000000000 --- a/cmd/notifiers_test.go +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import "testing" - -// Tests for event filter rules. -func TestFilterMatch(t *testing.T) { - testCases := []struct { - objectName string - rules []filterRule - expectedRuleMatch bool - }{ - // Prefix matches for a parent. - { - objectName: "test/test1/object.txt", - rules: []filterRule{ - { - Name: "prefix", - Value: "test", - }, - }, - expectedRuleMatch: true, - }, - // Prefix matches for the object. - { - objectName: "test/test1/object.txt", - rules: []filterRule{ - { - Name: "prefix", - Value: "test/test1/object", - }, - }, - expectedRuleMatch: true, - }, - // Prefix doesn't match. - { - objectName: "test/test1/object.txt", - rules: []filterRule{ - { - Name: "prefix", - Value: "test/test1/object/", - }, - }, - expectedRuleMatch: false, - }, - // Suffix matches. - { - objectName: "test/test1/object.txt", - rules: []filterRule{ - { - Name: "suffix", - Value: ".txt", - }, - }, - expectedRuleMatch: true, - }, - // Suffix doesn't match but prefix matches. 
- { - objectName: "test/test1/object.txt", - rules: []filterRule{ - { - Name: "suffix", - Value: ".jpg", - }, - { - Name: "prefix", - Value: "test/test1", - }, - }, - expectedRuleMatch: false, - }, - // Prefix doesn't match but suffix matches. - { - objectName: "test/test2/object.jpg", - rules: []filterRule{ - { - Name: "suffix", - Value: ".jpg", - }, - { - Name: "prefix", - Value: "test/test1", - }, - }, - expectedRuleMatch: false, - }, - // Suffix and prefix doesn't match. - { - objectName: "test/test2/object.jpg", - rules: []filterRule{ - { - Name: "suffix", - Value: ".txt", - }, - { - Name: "prefix", - Value: "test/test1", - }, - }, - expectedRuleMatch: false, - }, - } - - // .. Validate all cases. - for i, testCase := range testCases { - ruleMatch := filterRuleMatch(testCase.objectName, testCase.rules) - if ruleMatch != testCase.expectedRuleMatch { - t.Errorf("Test %d: Expected %t, got %t", i+1, testCase.expectedRuleMatch, ruleMatch) - } - } -} - -// Tests all event match. -func TestEventMatch(t *testing.T) { - testCases := []struct { - eventName EventName - events []string - match bool - }{ - // Valid object created PUT event. - { - eventName: ObjectCreatedPut, - events: []string{ - "s3:ObjectCreated:Put", - }, - match: true, - }, - // Valid object removed DELETE event. - { - eventName: ObjectRemovedDelete, - events: []string{ - "s3:ObjectRemoved:Delete", - }, - match: true, - }, - // Invalid events fails to match with empty events. - { - eventName: ObjectRemovedDelete, - events: []string{""}, - match: false, - }, - // Invalid events fails to match with valid events. - { - eventName: ObjectCreatedCompleteMultipartUpload, - events: []string{ - "s3:ObjectRemoved:*", - }, - match: false, - }, - // Valid events wild card match. - { - eventName: ObjectCreatedPut, - events: []string{ - "s3:ObjectCreated:*", - }, - match: true, - }, - // Valid events wild card match. 
- { - eventName: ObjectCreatedPost, - events: []string{ - "s3:ObjectCreated:*", - }, - match: true, - }, - // Valid events wild card match. - { - eventName: ObjectCreatedCopy, - events: []string{ - "s3:ObjectCreated:*", - }, - match: true, - }, - // Valid events wild card match. - { - eventName: ObjectCreatedCompleteMultipartUpload, - events: []string{ - "s3:ObjectCreated:*", - }, - match: true, - }, - // Valid events wild card match. - { - eventName: ObjectCreatedPut, - events: []string{ - "s3:ObjectCreated:*", - "s3:ObjectRemoved:*", - }, - match: true, - }, - } - - for i, testCase := range testCases { - ok := eventMatch(testCase.eventName.String(), testCase.events) - if testCase.match != ok { - t.Errorf("Test %d: Expected \"%t\", got \"%t\"", i+1, testCase.match, ok) - } - } -} diff --git a/cmd/notify-amqp.go b/cmd/notify-amqp.go deleted file mode 100644 index 2b89409ab..000000000 --- a/cmd/notify-amqp.go +++ /dev/null @@ -1,192 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "io/ioutil" - "net" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/streadway/amqp" -) - -// amqpNotify - represents logrus compatible AMQP hook. -// All fields represent AMQP configuration details. 
-type amqpNotify struct { - Enable bool `json:"enable"` - URL string `json:"url"` - Exchange string `json:"exchange"` - RoutingKey string `json:"routingKey"` - ExchangeType string `json:"exchangeType"` - DeliveryMode uint8 `json:"deliveryMode"` - Mandatory bool `json:"mandatory"` - Immediate bool `json:"immediate"` - Durable bool `json:"durable"` - Internal bool `json:"internal"` - NoWait bool `json:"noWait"` - AutoDeleted bool `json:"autoDeleted"` -} - -func (a *amqpNotify) Validate() error { - if !a.Enable { - return nil - } - if _, err := checkURL(a.URL); err != nil { - return err - } - return nil -} - -// amqpConn implements a reconnecting amqp conn extending *amqp.Connection, -// also provides additional protection for such a mutation. -type amqpConn struct { - sync.Mutex - conn *amqp.Connection - params amqpNotify -} - -// dialAMQP - dials and returns an amqpConnection instance, -// for sending notifications. Returns error if amqp logger -// is not enabled. -func dialAMQP(amqpL amqpNotify) (*amqpConn, error) { - if !amqpL.Enable { - return nil, errNotifyNotEnabled - } - conn, err := amqp.Dial(amqpL.URL) - if err != nil { - return nil, err - } - return &amqpConn{ - conn: conn, - params: amqpL, - }, nil -} - -func newAMQPNotify(accountID string) (*logrus.Logger, error) { - amqpL := globalServerConfig.Notify.GetAMQPByID(accountID) - - // Connect to amqp server. - amqpC, err := dialAMQP(amqpL) - if err != nil { - return nil, err - } - - amqpLog := logrus.New() - - // Disable writing to console. - amqpLog.Out = ioutil.Discard - - // Add a amqp hook. - amqpLog.Hooks.Add(amqpC) - - // Set default JSON formatter. - amqpLog.Formatter = new(logrus.JSONFormatter) - - // Successfully enabled all AMQPs. - return amqpLog, nil -} - -// Returns true if the error represents a closed -// network error. -func isAMQPClosedNetworkErr(err error) bool { - // Any other error other than connection closed, return. 
- if neterr, ok := err.(*net.OpError); ok && - neterr.Err.Error() == "use of closed network connection" { - return true - } else if err == amqp.ErrClosed { - return true - } - return false -} - -// Channel is a wrapper implementation of amqp.Connection.Channel() -// which implements transparent reconnection. -func (q *amqpConn) Channel() (*amqp.Channel, error) { - q.Lock() - ch, err := q.conn.Channel() - q.Unlock() - if err != nil { - if !isAMQPClosedNetworkErr(err) { - return nil, err - } - // Attempt to connect again. - var conn *amqp.Connection - conn, err = amqp.Dial(q.params.URL) - if err != nil { - return nil, err - } - ch, err = conn.Channel() - if err != nil { - return nil, err - } - q.Lock() - q.conn = conn - q.Unlock() - } - return ch, nil -} - -// Fire is called when an event should be sent to the message broker. -func (q *amqpConn) Fire(entry *logrus.Entry) error { - ch, err := q.Channel() - if err != nil { - return err - } - defer ch.Close() - - err = ch.ExchangeDeclare( - q.params.Exchange, - q.params.ExchangeType, - q.params.Durable, - q.params.AutoDeleted, - q.params.Internal, - q.params.NoWait, - nil, - ) - if err != nil { - return err - } - - body, err := entry.String() - if err != nil { - return err - } - - err = ch.Publish( - q.params.Exchange, - q.params.RoutingKey, - q.params.Mandatory, - q.params.Immediate, - amqp.Publishing{ - ContentType: "application/json", - DeliveryMode: q.params.DeliveryMode, - Body: []byte(body), - }) - if err != nil { - return err - } - - return nil -} - -// Levels is available logging levels. -func (q *amqpConn) Levels() []logrus.Level { - return []logrus.Level{ - logrus.InfoLevel, - } -} diff --git a/cmd/notify-amqp_test.go b/cmd/notify-amqp_test.go deleted file mode 100644 index d056220a3..000000000 --- a/cmd/notify-amqp_test.go +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2017 Minio, Inc. 
- * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "errors" - "net" - "testing" - - "github.com/streadway/amqp" -) - -// Tests for is closed network error. -func TestIsClosedNetworkErr(t *testing.T) { - testCases := []struct { - err error - success bool - }{ - { - err: amqp.ErrClosed, - success: true, - }, - { - err: &net.OpError{Err: errors.New("use of closed network connection")}, - success: true, - }, - { - err: nil, - success: false, - }, - { - err: errors.New("testing error"), - success: false, - }, - } - - for i, testCase := range testCases { - ok := isAMQPClosedNetworkErr(testCase.err) - if ok != testCase.success { - t.Errorf("Test %d: Expected %t, got %t", i+1, testCase.success, ok) - } - } -} diff --git a/cmd/notify-elasticsearch.go b/cmd/notify-elasticsearch.go deleted file mode 100644 index 82b48844c..000000000 --- a/cmd/notify-elasticsearch.go +++ /dev/null @@ -1,183 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "context" - "fmt" - "io/ioutil" - "time" - - "github.com/Sirupsen/logrus" - "gopkg.in/olivere/elastic.v5" -) - -var ( - esErrFunc = newNotificationErrorFactory("Elasticsearch") - - errESFormat = esErrFunc(`"format" value is invalid - it must be one of "%s" or "%s".`, formatNamespace, formatAccess) - errESIndex = esErrFunc("Index name was not specified in the configuration.") -) - -// elasticQueue is a elasticsearch event notification queue. -type elasticSearchNotify struct { - Enable bool `json:"enable"` - Format string `json:"format"` - URL string `json:"url"` - Index string `json:"index"` -} - -func (e *elasticSearchNotify) Validate() error { - if !e.Enable { - return nil - } - if e.Format != formatNamespace && e.Format != formatAccess { - return errESFormat - } - if _, err := checkURL(e.URL); err != nil { - return err - } - if e.Index == "" { - return errESIndex - } - return nil -} - -type elasticClient struct { - *elastic.Client - params elasticSearchNotify -} - -// Connects to elastic search instance at URL. -func dialElastic(esNotify elasticSearchNotify) (*elastic.Client, error) { - if !esNotify.Enable { - return nil, errNotifyNotEnabled - } - return elastic.NewClient( - elastic.SetURL(esNotify.URL), - elastic.SetSniff(false), - elastic.SetMaxRetries(10), - ) -} - -func newElasticNotify(accountID string) (*logrus.Logger, error) { - esNotify := globalServerConfig.Notify.GetElasticSearchByID(accountID) - - // Dial to elastic search. - client, err := dialElastic(esNotify) - if err != nil { - return nil, esErrFunc("Error dialing the server: %v", err) - } - - // Use the IndexExists service to check if a specified index exists. - exists, err := client.IndexExists(esNotify.Index). 
- Do(context.Background()) - if err != nil { - return nil, esErrFunc("Error checking if index exists: %v", err) - } - // Index does not exist, attempt to create it. - if !exists { - var createIndex *elastic.IndicesCreateResult - createIndex, err = client.CreateIndex(esNotify.Index). - Do(context.Background()) - if err != nil { - return nil, esErrFunc("Error creating index `%s`: %v", - esNotify.Index, err) - } - if !createIndex.Acknowledged { - return nil, esErrFunc("Index not created") - } - } - - elasticCl := elasticClient{ - Client: client, - params: esNotify, - } - - elasticSearchLog := logrus.New() - - // Disable writing to console. - elasticSearchLog.Out = ioutil.Discard - - // Add a elasticSearch hook. - elasticSearchLog.Hooks.Add(elasticCl) - - // Set default JSON formatter. - elasticSearchLog.Formatter = new(logrus.JSONFormatter) - - // Success, elastic search successfully initialized. - return elasticSearchLog, nil -} - -// Fire is required to implement logrus hook -func (q elasticClient) Fire(entry *logrus.Entry) (err error) { - // Reflect on eventType and Key on their native type. - entryStr, ok := entry.Data["EventType"].(string) - if !ok { - return nil - } - keyStr, ok := entry.Data["Key"].(string) - if !ok { - return nil - } - - switch q.params.Format { - case formatNamespace: - // If event matches as delete, we purge the previous index. - if eventMatch(entryStr, []string{"s3:ObjectRemoved:*"}) { - _, err = q.Client.Delete().Index(q.params.Index). - Type("event").Id(keyStr).Do(context.Background()) - break - } // else we update elastic index or create a new one. - _, err = q.Client.Index().Index(q.params.Index). - Type("event"). - BodyJson(map[string]interface{}{ - "Records": entry.Data["Records"], - }).Id(keyStr).Do(context.Background()) - case formatAccess: - // eventTime is taken from the first entry in the - // records. 
- events, ok := entry.Data["Records"].([]NotificationEvent) - if !ok { - return esErrFunc("Unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"]) - } - var eventTime time.Time - eventTime, err = time.Parse(timeFormatAMZ, events[0].EventTime) - if err != nil { - return esErrFunc("Unable to parse event time \"%s\": %v", - events[0].EventTime, err) - } - // Extract event time in milliseconds for Elasticsearch. - eventTimeStr := fmt.Sprintf("%d", eventTime.UnixNano()/1000000) - _, err = q.Client.Index().Index(q.params.Index).Type("event"). - Timestamp(eventTimeStr). - BodyJson(map[string]interface{}{ - "Records": entry.Data["Records"], - }).Do(context.Background()) - } - if err != nil { - return esErrFunc("Error inserting/deleting entry: %v", err) - } - return nil -} - -// Required for logrus hook implementation -func (q elasticClient) Levels() []logrus.Level { - return []logrus.Level{ - logrus.InfoLevel, - } -} diff --git a/cmd/notify-kafka.go b/cmd/notify-kafka.go deleted file mode 100644 index 028cff4e7..000000000 --- a/cmd/notify-kafka.go +++ /dev/null @@ -1,150 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package cmd - -import ( - "io/ioutil" - "net" - - "github.com/Sirupsen/logrus" - - sarama "gopkg.in/Shopify/sarama.v1" -) - -var ( - kkErrFunc = newNotificationErrorFactory("Kafka") -) - -// kafkaNotify holds the configuration of the Kafka server/cluster to -// send notifications to. -type kafkaNotify struct { - // Flag to enable/disable this notification from configuration - // file. - Enable bool `json:"enable"` - - // List of Kafka brokers in `addr:host` format. - Brokers []string `json:"brokers"` - - // Topic to which event notifications should be sent. - Topic string `json:"topic"` -} - -func (k *kafkaNotify) Validate() error { - if !k.Enable { - return nil - } - if len(k.Brokers) == 0 { - return kkErrFunc("No broker(s) specified.") - } - // Validate all specified brokers. - for _, brokerAddr := range k.Brokers { - if _, _, err := net.SplitHostPort(brokerAddr); err != nil { - return err - } - } - return nil -} - -// kafkaConn contains the active connection to the Kafka cluster and -// the topic to send event notifications to. -type kafkaConn struct { - producer sarama.SyncProducer - topic string -} - -func dialKafka(kn kafkaNotify) (kc kafkaConn, e error) { - if !kn.Enable { - return kc, errNotifyNotEnabled - } - - if kn.Topic == "" { - return kc, kkErrFunc( - "Topic was not specified in configuration") - } - - config := sarama.NewConfig() - // Wait for all in-sync replicas to ack the message - config.Producer.RequiredAcks = sarama.WaitForAll - // Retry up to 10 times to produce the message - config.Producer.Retry.Max = 10 - config.Producer.Return.Successes = true - - p, err := sarama.NewSyncProducer(kn.Brokers, config) - if err != nil { - return kc, kkErrFunc("Failed to start producer: %v", err) - } - - return kafkaConn{p, kn.Topic}, nil -} - -func newKafkaNotify(accountID string) (*logrus.Logger, error) { - kafkaNotifyCfg := globalServerConfig.Notify.GetKafkaByID(accountID) - - // Try connecting to the configured Kafka broker(s). 
- kc, err := dialKafka(kafkaNotifyCfg) - if err != nil { - return nil, err - } - - // Configure kafkaConn object as a Hook in logrus. - kafkaLog := logrus.New() - kafkaLog.Out = ioutil.Discard - kafkaLog.Formatter = new(logrus.JSONFormatter) - kafkaLog.Hooks.Add(kc) - - return kafkaLog, nil -} - -func (kC kafkaConn) Close() { - _ = kC.producer.Close() -} - -// Fire - to implement logrus.Hook interface -func (kC kafkaConn) Fire(entry *logrus.Entry) error { - body, err := entry.Reader() - if err != nil { - return err - } - - // Extract the key of the event as a string - keyStr, ok := entry.Data["Key"].(string) - if !ok { - return kkErrFunc("Unable to convert event key %v to string.", - entry.Data["Key"]) - } - - // Construct message to send to Kafka - msg := sarama.ProducerMessage{ - Topic: kC.topic, - Key: sarama.StringEncoder(keyStr), - Value: sarama.ByteEncoder(body.Bytes()), - } - - // Attempt sending the message to Kafka - _, _, err = kC.producer.SendMessage(&msg) - if err != nil { - return kkErrFunc("Error sending event to Kafka - %v", err) - } - return nil -} - -// Levels - to implement logrus.Hook interface -func (kC kafkaConn) Levels() []logrus.Level { - return []logrus.Level{ - logrus.InfoLevel, - } -} diff --git a/cmd/notify-listener.go b/cmd/notify-listener.go deleted file mode 100644 index 816690a2e..000000000 --- a/cmd/notify-listener.go +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "fmt" - "io/ioutil" - - "github.com/Sirupsen/logrus" -) - -type listenerConn struct { - TargetAddr string - ListenerARN string - BMSClient BucketMetaState -} - -type listenerLogger struct { - log *logrus.Logger - lconn listenerConn -} - -func newListenerLogger(listenerArn, targetAddr string) (*listenerLogger, error) { - bmsClient := globalS3Peers.GetPeerClient(targetAddr) - if bmsClient == nil { - return nil, fmt.Errorf( - "Peer %s was not initialized, unexpected error", - targetAddr, - ) - } - lc := listenerConn{ - TargetAddr: targetAddr, - ListenerARN: listenerArn, - BMSClient: bmsClient, - } - - lcLog := logrus.New() - - lcLog.Out = ioutil.Discard - - lcLog.Formatter = new(logrus.JSONFormatter) - - lcLog.Hooks.Add(lc) - - return &listenerLogger{lcLog, lc}, nil -} - -// send event to target server via rpc client calls. -func (lc listenerConn) Fire(entry *logrus.Entry) error { - notificationEvent, ok := entry.Data["Records"].([]NotificationEvent) - if !ok { - // If the record is not of the expected type, silently - // discard. - return nil - } - - // Send Event RPC call and return error - arg := EventArgs{Event: notificationEvent, Arn: lc.ListenerARN} - return lc.BMSClient.SendEvent(&arg) -} - -func (lc listenerConn) Levels() []logrus.Level { - return []logrus.Level{ - logrus.InfoLevel, - } -} diff --git a/cmd/notify-mqtt.go b/cmd/notify-mqtt.go deleted file mode 100644 index fa925e9d1..000000000 --- a/cmd/notify-mqtt.go +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "crypto/tls" - "io/ioutil" - "time" - - "github.com/Sirupsen/logrus" - MQTT "github.com/eclipse/paho.mqtt.golang" -) - -type mqttNotify struct { - Enable bool `json:"enable"` - Broker string `json:"broker"` - Topic string `json:"topic"` - QoS int `json:"qos"` - ClientID string `json:"clientId"` - User string `json:"username"` - Password string `json:"password"` -} - -func (m *mqttNotify) Validate() error { - if !m.Enable { - return nil - } - if _, err := checkURL(m.Broker); err != nil { - return err - } - return nil -} - -type mqttConn struct { - params mqttNotify - Client MQTT.Client -} - -func dialMQTT(mqttL mqttNotify) (mc mqttConn, e error) { - if !mqttL.Enable { - return mc, errNotifyNotEnabled - } - connOpts := &MQTT.ClientOptions{ - ClientID: mqttL.ClientID, - CleanSession: true, - Username: mqttL.User, - Password: mqttL.Password, - MaxReconnectInterval: 1 * time.Second, - KeepAlive: 30 * time.Second, - TLSConfig: tls.Config{RootCAs: globalRootCAs}, - } - connOpts.AddBroker(mqttL.Broker) - client := MQTT.NewClient(connOpts) - if token := client.Connect(); token.Wait() && token.Error() != nil { - return mc, token.Error() - } - return mqttConn{Client: client, params: mqttL}, nil -} - -func newMQTTNotify(accountID string) (*logrus.Logger, error) { - mqttL := globalServerConfig.Notify.GetMQTTByID(accountID) - - //connect to MQTT Server - mqttC, err := dialMQTT(mqttL) - if err != nil { - return nil, err - } - - mqttLog := logrus.New() - - // Disable writing to console. 
- mqttLog.Out = ioutil.Discard - - // Add a mqtt hook. - mqttLog.Hooks.Add(mqttC) - - // Set default JSON formatter - mqttLog.Formatter = new(logrus.JSONFormatter) - - // successfully enabled all MQTTs - return mqttLog, nil -} - -// Fire if called when an event should be sent to the message broker. -func (q mqttConn) Fire(entry *logrus.Entry) error { - body, err := entry.String() - if err != nil { - return err - } - - if !q.Client.IsConnected() { - if token := q.Client.Connect(); token.Wait() && token.Error() != nil { - return token.Error() - } - } - token := q.Client.Publish(q.params.Topic, byte(q.params.QoS), false, body) - if token.Wait() && token.Error() != nil { - return token.Error() - } - - return nil -} - -// Levels is available logging levels. -func (q mqttConn) Levels() []logrus.Level { - return []logrus.Level{ - logrus.InfoLevel, - } -} diff --git a/cmd/notify-mysql.go b/cmd/notify-mysql.go deleted file mode 100644 index 7ce9f2b33..000000000 --- a/cmd/notify-mysql.go +++ /dev/null @@ -1,337 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// MySQL Notifier implementation. Two formats, "namespace" and -// "access" are supported. -// -// * Namespace format -// -// On each create or update object event in Minio Object storage -// server, a row is created or updated in the table in MySQL. On each -// object removal, the corresponding row is deleted from the table. 
-// -// A table with a specific structure (column names, column types, and -// primary key/uniqueness constraint) is used. The user may set the -// table name in the configuration. A sample SQL command that creates -// a command with the required structure is: -// -// CREATE TABLE myminio ( -// key_name VARCHAR(2048), -// value JSONB, -// PRIMARY KEY (key_name), -// ); -// -// MySQL's "INSERT ... ON DUPLICATE ..." feature (UPSERT) is used -// here. The implementation has been tested with MySQL Ver 14.14 -// Distrib 5.7.17. -// -// * Access format -// -// On each event, a row is appended to the configured table. There is -// no deletion or modification of existing rows. -// -// A different table schema is used for this format. A sample SQL -// commant that creates a table with the required structure is: -// -// CREATE TABLE myminio ( -// event_time TIMESTAMP WITH TIME ZONE NOT NULL, -// event_data JSONB -// ); - -package cmd - -import ( - "database/sql" - "encoding/json" - "fmt" - "io/ioutil" - "time" - - "github.com/Sirupsen/logrus" - "github.com/go-sql-driver/mysql" -) - -const ( - // Queries for format=namespace mode. - upsertRowForNSMySQL = `INSERT INTO %s (key_name, value) -VALUES (?, ?) -ON DUPLICATE KEY UPDATE value=VALUES(value); -` - deleteRowForNSMySQL = ` DELETE FROM %s -WHERE key_name = ?;` - createTableForNSMySQL = `CREATE TABLE %s ( - key_name VARCHAR(2048), - value JSON, - PRIMARY KEY (key_name) -);` - - // Queries for format=access mode. - insertRowForAccessMySQL = `INSERT INTO %s (event_time, event_data) -VALUES (?, ?);` - createTableForAccessMySQL = `CREATE TABLE %s ( - event_time DATETIME NOT NULL, - event_data JSON -);` - - // Query to check if a table already exists. 
- tableExistsMySQL = `SELECT 1 FROM %s;` -) - -var ( - mysqlErrFunc = newNotificationErrorFactory("MySQL") - - errMysqlFormat = mysqlErrFunc(`"format" value is invalid - it must be one of "%s" or "%s".`, formatNamespace, formatAccess) - errMysqlTable = mysqlErrFunc("Table was not specified in the configuration.") -) - -type mySQLNotify struct { - Enable bool `json:"enable"` - - Format string `json:"format"` - - // pass data-source-name connection string in config - // directly. This string is formatted according to - // https://github.com/go-sql-driver/mysql#dsn-data-source-name - DsnString string `json:"dsnString"` - // specifying a table name is required. - Table string `json:"table"` - - // uses the values below if no connection string is specified - // - however the connection string method offers more - // flexibility. - Host string `json:"host"` - Port string `json:"port"` - User string `json:"user"` - Password string `json:"password"` - Database string `json:"database"` -} - -func (m *mySQLNotify) Validate() error { - if !m.Enable { - return nil - } - if m.Format != formatNamespace && m.Format != formatAccess { - return errMysqlFormat - } - if m.DsnString == "" { - if _, err := checkURL(m.Host); err != nil { - return err - } - } - if m.Table == "" { - return errMysqlTable - } - return nil -} - -type mySQLConn struct { - dsnStr string - table string - format string - preparedStmts map[string]*sql.Stmt - *sql.DB -} - -func dialMySQL(msql mySQLNotify) (mc mySQLConn, e error) { - if !msql.Enable { - return mc, errNotifyNotEnabled - } - - dsnStr := msql.DsnString - // check if connection string is specified - if dsnStr == "" { - // build from other parameters - config := mysql.Config{ - User: msql.User, - Passwd: msql.Password, - Net: "tcp", - Addr: msql.Host + ":" + msql.Port, - DBName: msql.Database, - } - dsnStr = config.FormatDSN() - } - - db, err := sql.Open("mysql", dsnStr) - if err != nil { - return mc, mysqlErrFunc( - "Connection opening failure 
(dsnStr=%s): %v", - dsnStr, err) - } - - // ping to check that server is actually reachable. - err = db.Ping() - if err != nil { - return mc, mysqlErrFunc( - "Ping to server failed with: %v", err) - } - - // check that table exists - if not, create it. - _, err = db.Exec(fmt.Sprintf(tableExistsMySQL, msql.Table)) - if err != nil { - createStmt := createTableForNSMySQL - if msql.Format == formatAccess { - createStmt = createTableForAccessMySQL - } - - // most likely, table does not exist. try to create it: - _, errCreate := db.Exec(fmt.Sprintf(createStmt, msql.Table)) - if errCreate != nil { - // failed to create the table. error out. - return mc, mysqlErrFunc( - "'Select' failed with %v, then 'Create Table' failed with %v", - err, errCreate, - ) - } - } - - // create prepared statements - stmts := make(map[string]*sql.Stmt) - switch msql.Format { - case formatNamespace: - // insert or update statement - stmts["upsertRow"], err = db.Prepare(fmt.Sprintf(upsertRowForNSMySQL, - msql.Table)) - if err != nil { - return mc, mysqlErrFunc("create UPSERT prepared statement failed with: %v", err) - } - // delete statement - stmts["deleteRow"], err = db.Prepare(fmt.Sprintf(deleteRowForNSMySQL, - msql.Table)) - if err != nil { - return mc, mysqlErrFunc("create DELETE prepared statement failed with: %v", err) - } - case formatAccess: - // insert statement - stmts["insertRow"], err = db.Prepare(fmt.Sprintf(insertRowForAccessMySQL, - msql.Table)) - if err != nil { - return mc, mysqlErrFunc( - "create INSERT prepared statement failed with: %v", err) - } - - } - return mySQLConn{dsnStr, msql.Table, msql.Format, stmts, db}, nil -} - -func newMySQLNotify(accountID string) (*logrus.Logger, error) { - mysqlNotify := globalServerConfig.Notify.GetMySQLByID(accountID) - - // Dial mysql - myC, err := dialMySQL(mysqlNotify) - if err != nil { - return nil, err - } - - mySQLLog := logrus.New() - - mySQLLog.Out = ioutil.Discard - - mySQLLog.Formatter = new(logrus.JSONFormatter) - - 
mySQLLog.Hooks.Add(myC) - - return mySQLLog, nil -} - -func (myC mySQLConn) Close() { - // first close all prepared statements - for _, v := range myC.preparedStmts { - _ = v.Close() - } - // close db connection - _ = myC.DB.Close() -} - -func (myC mySQLConn) Fire(entry *logrus.Entry) error { - // get event type by trying to convert to string - entryEventType, ok := entry.Data["EventType"].(string) - if !ok { - // ignore event if converting EventType to string - // fails. - return nil - } - - jsonEncoder := func(d interface{}) ([]byte, error) { - value, err := json.Marshal(map[string]interface{}{ - "Records": d, - }) - if err != nil { - return nil, mysqlErrFunc( - "Unable to encode event %v to JSON: %v", d, err) - } - return value, nil - } - - switch myC.format { - case formatNamespace: - // Check for event delete - if eventMatch(entryEventType, []string{"s3:ObjectRemoved:*"}) { - // delete row from the table - _, err := myC.preparedStmts["deleteRow"].Exec(entry.Data["Key"]) - if err != nil { - return mysqlErrFunc( - "Error deleting event with key = %v - got mysql error - %v", - entry.Data["Key"], err, - ) - } - } else { - value, err := jsonEncoder(entry.Data["Records"]) - if err != nil { - return err - } - - // upsert row into the table - _, err = myC.preparedStmts["upsertRow"].Exec(entry.Data["Key"], value) - if err != nil { - return mysqlErrFunc( - "Unable to upsert event with Key=%v and Value=%v - got mysql error - %v", - entry.Data["Key"], entry.Data["Records"], err, - ) - } - } - case formatAccess: - // eventTime is taken from the first entry in the - // records. 
- events, ok := entry.Data["Records"].([]NotificationEvent) - if !ok { - return mysqlErrFunc("unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"]) - } - eventTime, err := time.Parse(timeFormatAMZ, events[0].EventTime) - if err != nil { - return mysqlErrFunc("unable to parse event time \"%s\": %v", - events[0].EventTime, err) - } - - value, err := jsonEncodeEventData(entry.Data["Records"]) - if err != nil { - return err - } - - _, err = myC.preparedStmts["insertRow"].Exec(eventTime, value) - if err != nil { - return mysqlErrFunc("Unable to insert event with value=%v: %v", - value, err) - } - } - - return nil -} - -func (myC mySQLConn) Levels() []logrus.Level { - return []logrus.Level{ - logrus.InfoLevel, - } -} diff --git a/cmd/notify-nats.go b/cmd/notify-nats.go deleted file mode 100644 index 715bf808e..000000000 --- a/cmd/notify-nats.go +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package cmd - -import ( - "io/ioutil" - "net" - - "github.com/Sirupsen/logrus" - "github.com/nats-io/go-nats-streaming" - "github.com/nats-io/nats" -) - -// natsNotifyStreaming contains specific options related to connection -// to a NATS streaming server -type natsNotifyStreaming struct { - Enable bool `json:"enable"` - ClusterID string `json:"clusterID"` - ClientID string `json:"clientID"` - Async bool `json:"async"` - MaxPubAcksInflight int `json:"maxPubAcksInflight"` -} - -// natsNotify - represents logrus compatible NATS hook. -// All fields represent NATS configuration details. -type natsNotify struct { - Enable bool `json:"enable"` - Address string `json:"address"` - Subject string `json:"subject"` - Username string `json:"username"` - Password string `json:"password"` - Token string `json:"token"` - Secure bool `json:"secure"` - PingInterval int64 `json:"pingInterval"` - Streaming natsNotifyStreaming `json:"streaming"` -} - -func (n *natsNotify) Validate() error { - if !n.Enable { - return nil - } - if _, _, err := net.SplitHostPort(n.Address); err != nil { - return err - } - return nil -} - -// natsIOConn abstracts connection to any type of NATS server -type natsIOConn struct { - params natsNotify - natsConn *nats.Conn - stanConn stan.Conn -} - -// dialNATS - dials and returns an natsIOConn instance, -// for sending notifications. Returns error if nats logger -// is not enabled. 
-func dialNATS(natsL natsNotify, testDial bool) (nioc natsIOConn, e error) { - if !natsL.Enable { - return nioc, errNotifyNotEnabled - } - - // Construct natsIOConn which holds all NATS connection information - conn := natsIOConn{params: natsL} - - if natsL.Streaming.Enable { - // Construct scheme to differentiate between clear and TLS connections - scheme := "nats" - if natsL.Secure { - scheme = "tls" - } - // Construct address URL - addressURL := scheme + "://" + natsL.Username + ":" + natsL.Password + "@" + natsL.Address - // Fetch the user-supplied client ID and provide a random one if not provided - clientID := natsL.Streaming.ClientID - if clientID == "" { - clientID = mustGetUUID() - } - // Add test suffix to clientID to avoid clientID already registered error - if testDial { - clientID += "-test" - } - connOpts := []stan.Option{ - stan.NatsURL(addressURL), - } - // Setup MaxPubAcksInflight parameter - if natsL.Streaming.MaxPubAcksInflight > 0 { - connOpts = append(connOpts, - stan.MaxPubAcksInflight(natsL.Streaming.MaxPubAcksInflight)) - } - // Do the real connection to the NATS server - sc, err := stan.Connect(natsL.Streaming.ClusterID, clientID, connOpts...) 
- if err != nil { - return nioc, err - } - // Save the created connection - conn.stanConn = sc - } else { - // Configure and connect to NATS server - natsC := nats.DefaultOptions - natsC.Url = "nats://" + natsL.Address - natsC.User = natsL.Username - natsC.Password = natsL.Password - natsC.Token = natsL.Token - natsC.Secure = natsL.Secure - // Do the real connection - nc, err := natsC.Connect() - if err != nil { - return nioc, err - } - // Save the created connection - conn.natsConn = nc - } - return conn, nil -} - -// closeNATS - close the underlying NATS connection -func closeNATS(conn natsIOConn) { - if conn.params.Streaming.Enable { - conn.stanConn.Close() - } else { - conn.natsConn.Close() - } -} - -func newNATSNotify(accountID string) (*logrus.Logger, error) { - natsL := globalServerConfig.Notify.GetNATSByID(accountID) - - // Connect to nats server. - natsC, err := dialNATS(natsL, false) - if err != nil { - return nil, err - } - - natsLog := logrus.New() - - // Disable writing to console. - natsLog.Out = ioutil.Discard - - // Add a nats hook. - natsLog.Hooks.Add(natsC) - - // Set default JSON formatter. - natsLog.Formatter = new(logrus.JSONFormatter) - - // Successfully enabled all NATSs. - return natsLog, nil -} - -// Fire is called when an event should be sent to the message broker -func (n natsIOConn) Fire(entry *logrus.Entry) error { - body, err := entry.Reader() - if err != nil { - return err - } - if n.params.Streaming.Enable { - // Streaming flag is enabled, publish the log synchronously or asynchronously - // depending on the user supplied parameter - if n.params.Streaming.Async { - _, err = n.stanConn.PublishAsync(n.params.Subject, body.Bytes(), nil) - } else { - err = n.stanConn.Publish(n.params.Subject, body.Bytes()) - } - if err != nil { - return err - } - } else { - // Publish the log - err = n.natsConn.Publish(n.params.Subject, body.Bytes()) - if err != nil { - return err - } - } - return nil -} - -// Levels is available logging levels. 
-func (n natsIOConn) Levels() []logrus.Level { - return []logrus.Level{ - logrus.InfoLevel, - } -} diff --git a/cmd/notify-postgresql.go b/cmd/notify-postgresql.go deleted file mode 100644 index 2f814ef33..000000000 --- a/cmd/notify-postgresql.go +++ /dev/null @@ -1,353 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -// PostgreSQL Notifier implementation. Two formats, "namespace" and -// "access" are supported. -// -// * Namespace format -// -// On each create or update object event in Minio Object storage -// server, a row is created or updated in the table in Postgres. On -// each object removal, the corresponding row is deleted from the -// table. -// -// A table with a specific structure (column names, column types, and -// primary key/uniqueness constraint) is used. The user may set the -// table name in the configuration. A sample SQL command that creates -// a table with the required structure is: -// -// CREATE TABLE myminio ( -// key VARCHAR PRIMARY KEY, -// value JSONB -// ); -// -// PostgreSQL's "INSERT ... ON CONFLICT ... DO UPDATE ..." feature -// (UPSERT) is used here, so the minimum version of PostgreSQL -// required is 9.5. -// -// * Access format -// -// On each event, a row is appended to the configured table. There is -// no deletion or modification of existing rows. -// -// A different table schema is used for this format. 
A sample SQL -// commant that creates a table with the required structure is: -// -// CREATE TABLE myminio ( -// event_time TIMESTAMP WITH TIME ZONE NOT NULL, -// event_data JSONB -// ); - -package cmd - -import ( - "database/sql" - "encoding/json" - "fmt" - "io/ioutil" - "strings" - "time" - - "github.com/Sirupsen/logrus" - - // Register postgres driver - _ "github.com/lib/pq" -) - -const ( - // Queries for format=namespace mode. Here the `key` column is - // the bucket and object of the event. When objects are - // deleted, the corresponding row is deleted in the - // table. When objects are created or over-written, rows are - // inserted or updated respectively in the table. - upsertRowForNS = `INSERT INTO %s (key, value) -VALUES ($1, $2) -ON CONFLICT (key) -DO UPDATE SET value = EXCLUDED.value;` - deleteRowForNS = ` DELETE FROM %s -WHERE key = $1;` - createTableForNS = `CREATE TABLE %s ( - key VARCHAR PRIMARY KEY, - value JSONB -);` - - // Queries for format=access mode. Here the `event_time` - // column of the table, stores the time at which the event - // occurred in the Minio server. - insertRowForAccess = `INSERT INTO %s (event_time, event_data) -VALUES ($1, $2);` - createTableForAccess = `CREATE TABLE %s ( - event_time TIMESTAMP WITH TIME ZONE NOT NULL, - event_data JSONB -);` - - // Query to check if a table already exists. - tableExists = `SELECT 1 FROM %s;` -) - -var ( - pgErrFunc = newNotificationErrorFactory("PostgreSQL") - - errPGFormatError = pgErrFunc(`"format" value is invalid - it must be one of "%s" or "%s".`, formatNamespace, formatAccess) - errPGTableError = pgErrFunc("Table was not specified in the configuration.") -) - -type postgreSQLNotify struct { - Enable bool `json:"enable"` - - Format string `json:"format"` - - // Pass connection string in config directly. 
This string is - // formatted according to - // https://godoc.org/github.com/lib/pq#hdr-Connection_String_Parameters - ConnectionString string `json:"connectionString"` - // specifying a table name is required. - Table string `json:"table"` - - // The values below, if non-empty are appended to - // ConnectionString above. Default values are shown in - // comments below (implicitly used by the library). - Host string `json:"host"` // default: localhost - Port string `json:"port"` // default: 5432 - User string `json:"user"` // default: user running minio - Password string `json:"password"` // default: no password - Database string `json:"database"` // default: same as user -} - -func (p *postgreSQLNotify) Validate() error { - if !p.Enable { - return nil - } - if p.Format != formatNamespace && p.Format != formatAccess { - return errPGFormatError - } - if p.ConnectionString == "" { - if _, err := checkURL(p.Host); err != nil { - return err - } - } - if p.Table == "" { - return errPGTableError - } - return nil -} - -type pgConn struct { - connStr string - table string - format string - preparedStmts map[string]*sql.Stmt - *sql.DB -} - -func dialPostgreSQL(pgN postgreSQLNotify) (pc pgConn, e error) { - if !pgN.Enable { - return pc, errNotifyNotEnabled - } - - // collect connection params - params := []string{pgN.ConnectionString} - if pgN.Host != "" { - params = append(params, "host="+pgN.Host) - } - if pgN.Port != "" { - params = append(params, "port="+pgN.Port) - } - if pgN.User != "" { - params = append(params, "user="+pgN.User) - } - if pgN.Password != "" { - params = append(params, "password="+pgN.Password) - } - if pgN.Database != "" { - params = append(params, "dbname="+pgN.Database) - } - connStr := strings.Join(params, " ") - - db, err := sql.Open("postgres", connStr) - if err != nil { - return pc, pgErrFunc( - "Connection opening failure (connectionString=%s): %v", - connStr, err) - } - - // ping to check that server is actually reachable. 
- err = db.Ping() - if err != nil { - return pc, pgErrFunc("Ping to server failed with: %v", - err) - } - - // check that table exists - if not, create it. - _, err = db.Exec(fmt.Sprintf(tableExists, pgN.Table)) - if err != nil { - createStmt := createTableForNS - if pgN.Format == formatAccess { - createStmt = createTableForAccess - } - - // most likely, table does not exist. try to create it: - _, errCreate := db.Exec(fmt.Sprintf(createStmt, pgN.Table)) - if errCreate != nil { - // failed to create the table. error out. - return pc, pgErrFunc( - "'Select' failed with %v, then 'Create Table' failed with %v", - err, errCreate, - ) - } - } - - // create prepared statements - stmts := make(map[string]*sql.Stmt) - switch pgN.Format { - case formatNamespace: - // insert or update statement - stmts["upsertRow"], err = db.Prepare(fmt.Sprintf(upsertRowForNS, - pgN.Table)) - if err != nil { - return pc, pgErrFunc( - "create UPSERT prepared statement failed with: %v", err) - } - // delete statement - stmts["deleteRow"], err = db.Prepare(fmt.Sprintf(deleteRowForNS, - pgN.Table)) - if err != nil { - return pc, pgErrFunc( - "create DELETE prepared statement failed with: %v", err) - } - case formatAccess: - // insert statement - stmts["insertRow"], err = db.Prepare(fmt.Sprintf(insertRowForAccess, - pgN.Table)) - if err != nil { - return pc, pgErrFunc( - "create INSERT prepared statement failed with: %v", err) - } - } - - return pgConn{connStr, pgN.Table, pgN.Format, stmts, db}, nil -} - -func newPostgreSQLNotify(accountID string) (*logrus.Logger, error) { - pgNotify := globalServerConfig.Notify.GetPostgreSQLByID(accountID) - - // Dial postgres - pgC, err := dialPostgreSQL(pgNotify) - if err != nil { - return nil, err - } - - pgLog := logrus.New() - - pgLog.Out = ioutil.Discard - - pgLog.Formatter = new(logrus.JSONFormatter) - - pgLog.Hooks.Add(pgC) - - return pgLog, nil -} - -func (pgC pgConn) Close() { - // first close all prepared statements - for _, v := range 
pgC.preparedStmts { - _ = v.Close() - } - // close db connection - _ = pgC.DB.Close() -} - -func jsonEncodeEventData(d interface{}) ([]byte, error) { - // json encode the value for the row - value, err := json.Marshal(map[string]interface{}{ - "Records": d, - }) - if err != nil { - return nil, pgErrFunc( - "Unable to encode event %v to JSON: %v", d, err) - } - return value, nil -} - -func (pgC pgConn) Fire(entry *logrus.Entry) error { - // get event type by trying to convert to string - entryEventType, ok := entry.Data["EventType"].(string) - if !ok { - // ignore event if converting EventType to string - // fails. - return nil - } - - switch pgC.format { - case formatNamespace: - // Check for event delete - if eventMatch(entryEventType, []string{"s3:ObjectRemoved:*"}) { - // delete row from the table - _, err := pgC.preparedStmts["deleteRow"].Exec(entry.Data["Key"]) - if err != nil { - return pgErrFunc( - "Error deleting event with key=%v: %v", - entry.Data["Key"], err, - ) - } - } else { - value, err := jsonEncodeEventData(entry.Data["Records"]) - if err != nil { - return err - } - - // upsert row into the table - _, err = pgC.preparedStmts["upsertRow"].Exec(entry.Data["Key"], value) - if err != nil { - return pgErrFunc( - "Unable to upsert event with key=%v and value=%v: %v", - entry.Data["Key"], entry.Data["Records"], err, - ) - } - } - case formatAccess: - // eventTime is taken from the first entry in the - // records. 
- events, ok := entry.Data["Records"].([]NotificationEvent) - if !ok { - return pgErrFunc("unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"]) - } - eventTime, err := time.Parse(timeFormatAMZ, events[0].EventTime) - if err != nil { - return pgErrFunc("unable to parse event time \"%s\": %v", - events[0].EventTime, err) - } - - value, err := jsonEncodeEventData(entry.Data["Records"]) - if err != nil { - return err - } - - _, err = pgC.preparedStmts["insertRow"].Exec(eventTime, value) - if err != nil { - return pgErrFunc("Unable to insert event with value=%v: %v", - value, err) - } - } - - return nil -} - -func (pgC pgConn) Levels() []logrus.Level { - return []logrus.Level{ - logrus.InfoLevel, - } -} diff --git a/cmd/notify-redis.go b/cmd/notify-redis.go deleted file mode 100644 index afb1ea3d3..000000000 --- a/cmd/notify-redis.go +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package cmd - -import ( - "encoding/json" - "io/ioutil" - "net" - "time" - - "github.com/Sirupsen/logrus" - "github.com/garyburd/redigo/redis" -) - -var ( - redisErrFunc = newNotificationErrorFactory("Redis") - - errRedisFormat = redisErrFunc(`"format" value is invalid - it must be one of "access" or "namespace".`) - errRedisKeyError = redisErrFunc("Key was not specified in the configuration.") -) - -// redisNotify to send logs to Redis server -type redisNotify struct { - Enable bool `json:"enable"` - Format string `json:"format"` - Addr string `json:"address"` - Password string `json:"password"` - Key string `json:"key"` -} - -func (r *redisNotify) Validate() error { - if !r.Enable { - return nil - } - if r.Format != formatNamespace && r.Format != formatAccess { - return errRedisFormat - } - if _, _, err := net.SplitHostPort(r.Addr); err != nil { - return err - } - if r.Key == "" { - return errRedisKeyError - } - return nil -} - -type redisConn struct { - *redis.Pool - params redisNotify -} - -// Dial a new connection to redis instance at addr, optionally with a -// password if any. -func dialRedis(rNotify redisNotify) (*redis.Pool, error) { - // Return error if redis not enabled. - if !rNotify.Enable { - return nil, errNotifyNotEnabled - } - - addr := rNotify.Addr - password := rNotify.Password - rPool := &redis.Pool{ - MaxIdle: 3, - IdleTimeout: 240 * time.Second, // Time 2minutes. - Dial: func() (redis.Conn, error) { - c, err := redis.Dial("tcp", addr) - if err != nil { - return nil, err - } - if password != "" { - if _, derr := c.Do("AUTH", password); derr != nil { - c.Close() - return nil, derr - } - } - return c, err - }, - TestOnBorrow: func(c redis.Conn, t time.Time) error { - _, err := c.Do("PING") - return err - }, - } - - // Test if connection with REDIS can be established. - rConn := rPool.Get() - defer rConn.Close() - - // Check connection. 
- _, err := rConn.Do("PING") - if err != nil { - return nil, redisErrFunc("Error connecting to server: %v", err) - } - - // Test that Key is of desired type - reply, err := redis.String(rConn.Do("TYPE", rNotify.Key)) - if err != nil { - return nil, redisErrFunc("Error getting type of Key=%s: %v", - rNotify.Key, err) - } - if reply != "none" { - expectedType := "hash" - if rNotify.Format == formatAccess { - expectedType = "list" - } - if reply != expectedType { - return nil, redisErrFunc( - "Key=%s has type %s, but we expect it to be a %s", - rNotify.Key, reply, expectedType) - } - } - - // Return pool. - return rPool, nil -} - -func newRedisNotify(accountID string) (*logrus.Logger, error) { - rNotify := globalServerConfig.Notify.GetRedisByID(accountID) - - // Dial redis. - rPool, err := dialRedis(rNotify) - if err != nil { - return nil, redisErrFunc("Error dialing server: %v", err) - } - - rrConn := redisConn{ - Pool: rPool, - params: rNotify, - } - - redisLog := logrus.New() - - redisLog.Out = ioutil.Discard - - // Set default JSON formatter. - redisLog.Formatter = new(logrus.JSONFormatter) - - redisLog.Hooks.Add(rrConn) - - // Success, redis enabled. - return redisLog, nil -} - -// Fire is called when an event should be sent to the message broker. -func (r redisConn) Fire(entry *logrus.Entry) error { - rConn := r.Pool.Get() - defer rConn.Close() - - // Fetch event type upon reflecting on its original type. - entryStr, ok := entry.Data["EventType"].(string) - if !ok { - return nil - } - - switch r.params.Format { - case formatNamespace: - // Match the event if its a delete request, attempt to delete the key - if eventMatch(entryStr, []string{"s3:ObjectRemoved:*"}) { - _, err := rConn.Do("HDEL", r.params.Key, entry.Data["Key"]) - if err != nil { - return redisErrFunc("Error deleting entry: %v", - err) - } - return nil - } // else save this as new entry or update any existing ones. 
- - value, err := json.Marshal(map[string]interface{}{ - "Records": entry.Data["Records"], - }) - if err != nil { - return redisErrFunc( - "Unable to encode event %v to JSON: %v", - entry.Data["Records"], err) - } - _, err = rConn.Do("HSET", r.params.Key, entry.Data["Key"], - value) - if err != nil { - return redisErrFunc("Error updating hash entry: %v", - err) - } - case formatAccess: - // eventTime is taken from the first entry in the - // records. - events, ok := entry.Data["Records"].([]NotificationEvent) - if !ok { - return redisErrFunc("unable to extract event time due to conversion error of entry.Data[\"Records\"]=%v", entry.Data["Records"]) - } - eventTime := events[0].EventTime - - listEntry := []interface{}{eventTime, entry.Data["Records"]} - jsonValue, err := json.Marshal(listEntry) - if err != nil { - return redisErrFunc("JSON encoding error: %v", err) - } - _, err = rConn.Do("RPUSH", r.params.Key, jsonValue) - if err != nil { - return redisErrFunc("Error appending to Redis list: %v", - err) - } - } - return nil -} - -// Required for logrus hook implementation -func (r redisConn) Levels() []logrus.Level { - return []logrus.Level{ - logrus.InfoLevel, - } -} diff --git a/cmd/notify-webhook.go b/cmd/notify-webhook.go deleted file mode 100644 index 097171da9..000000000 --- a/cmd/notify-webhook.go +++ /dev/null @@ -1,212 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package cmd - -import ( - "bytes" - "crypto/tls" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "strings" - "time" - - "github.com/Sirupsen/logrus" -) - -type webhookNotify struct { - Enable bool `json:"enable"` - Endpoint string `json:"endpoint"` -} - -func (w *webhookNotify) Validate() error { - if !w.Enable { - return nil - } - if _, err := checkURL(w.Endpoint); err != nil { - return err - } - return nil -} - -type httpConn struct { - *http.Client - Endpoint string -} - -// isNetErrorIgnored - is network error ignored. -func isNetErrorIgnored(err error) bool { - if err == nil { - return false - } - if strings.Contains(err.Error(), "Client.Timeout exceeded while awaiting headers") { - return true - } - switch err.(type) { - case net.Error: - switch e := err.(type) { - case *net.DNSError, *net.OpError, net.UnknownNetworkError: - return true - case *url.Error: - // Fixes https://github.com/minio/minio/issues/4050 - switch e.Err.(type) { - case *net.DNSError, *net.OpError, net.UnknownNetworkError: - return true - } - // For a URL error, where it replies back "connection closed" - // retry again. - if strings.Contains(err.Error(), "Connection closed by foreign host") { - return true - } - default: - if strings.Contains(err.Error(), "net/http: TLS handshake timeout") { - // If error is - tlsHandshakeTimeoutError, retry. - return true - } else if strings.Contains(err.Error(), "i/o timeout") { - // If error is - tcp timeoutError, retry. - return true - } else if strings.Contains(err.Error(), "connection timed out") { - // If err is a net.Dial timeout, retry. - return true - } - } - } - return false -} - -// Lookup endpoint address by successfully POSTting -// empty body. -func lookupEndpoint(urlStr string) error { - req, err := http.NewRequest("POST", urlStr, bytes.NewReader([]byte(""))) - if err != nil { - return err - } - - client := &http.Client{ - Timeout: 1 * time.Second, - Transport: &http.Transport{ - // Need to close connection after usage. 
- DisableKeepAlives: true, - }, - } - - // Set content-length to zero as there is no payload. - req.ContentLength = 0 - - // Set proper server user-agent. - req.Header.Set("User-Agent", globalServerUserAgent) - - resp, err := client.Do(req) - if err != nil { - if isNetErrorIgnored(err) { - errorIf(err, "Unable to lookup webhook endpoint %s", urlStr) - return nil - } - return err - } - defer resp.Body.Close() - // HTTP status OK/NoContent. - if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNoContent { - return fmt.Errorf("Unable to lookup webhook endpoint %s response(%s)", urlStr, resp.Status) - } - return nil -} - -// Initializes new webhook logrus notifier. -func newWebhookNotify(accountID string) (*logrus.Logger, error) { - rNotify := globalServerConfig.Notify.GetWebhookByID(accountID) - if rNotify.Endpoint == "" { - return nil, errInvalidArgument - } - - if err := lookupEndpoint(rNotify.Endpoint); err != nil { - return nil, err - } - - conn := httpConn{ - // Configure aggressive timeouts for client posts. - Client: &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{RootCAs: globalRootCAs}, - DialContext: (&net.Dialer{ - Timeout: 5 * time.Second, - KeepAlive: 5 * time.Second, - }).DialContext, - TLSHandshakeTimeout: 3 * time.Second, - ResponseHeaderTimeout: 3 * time.Second, - ExpectContinueTimeout: 2 * time.Second, - }, - }, - Endpoint: rNotify.Endpoint, - } - - notifyLog := logrus.New() - notifyLog.Out = ioutil.Discard - - // Set default JSON formatter. - notifyLog.Formatter = new(logrus.JSONFormatter) - - notifyLog.Hooks.Add(conn) - - // Success - return notifyLog, nil -} - -// Fire is called when an event should be sent to the message broker. -func (n httpConn) Fire(entry *logrus.Entry) error { - body, err := entry.Reader() - if err != nil { - return err - } - - req, err := http.NewRequest("POST", n.Endpoint, body) - if err != nil { - return err - } - - // Set content-type. 
- req.Header.Set("Content-Type", "application/json") - - // Set proper server user-agent. - req.Header.Set("User-Agent", globalServerUserAgent) - - // Initiate the http request. - resp, err := n.Do(req) - if err != nil { - return err - } - - // Make sure to close the response body so the connection can be re-used. - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK && - resp.StatusCode != http.StatusAccepted && - resp.StatusCode != http.StatusContinue { - return fmt.Errorf("Unable to send event %s", resp.Status) - } - - return nil -} - -// Levels are Required for logrus hook implementation -func (httpConn) Levels() []logrus.Level { - return []logrus.Level{ - logrus.InfoLevel, - } -} diff --git a/cmd/notify-webhook_test.go b/cmd/notify-webhook_test.go deleted file mode 100644 index 27ea65e6b..000000000 --- a/cmd/notify-webhook_test.go +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "errors" - "fmt" - "io" - "net/http" - "net/http/httptest" - "os" - "path" - "testing" - - "github.com/Sirupsen/logrus" -) - -// Custom post handler to handle POST requests. 
-type postHandler struct{} - -func (p postHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - http.Error(w, fmt.Sprintf("Unexpected method %s", r.Method), http.StatusBadRequest) - return - } - io.Copy(w, r.Body) -} - -type errorHandler struct{} - -func (e errorHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - http.Error(w, fmt.Sprintf("Unexpected method %s", r.Method), http.StatusBadRequest) -} - -// Tests web hook initialization. -func TestNewWebHookNotify(t *testing.T) { - root, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) - - server := httptest.NewServer(postHandler{}) - defer server.Close() - - _, err = newWebhookNotify("1") - if err == nil { - t.Fatal("Unexpected should fail") - } - - globalServerConfig.Notify.SetWebhookByID("10", webhookNotify{Enable: true, Endpoint: server.URL}) - _, err = newWebhookNotify("10") - if err != nil { - t.Fatal("Unexpected should not fail with lookupEndpoint", err) - } - - globalServerConfig.Notify.SetWebhookByID("15", webhookNotify{Enable: true, Endpoint: "http://%"}) - _, err = newWebhookNotify("15") - if err == nil { - t.Fatal("Unexpected should fail with invalid URL escape") - } - - globalServerConfig.Notify.SetWebhookByID("20", webhookNotify{Enable: true, Endpoint: server.URL}) - webhook, err := newWebhookNotify("20") - if err != nil { - t.Fatal("Unexpected shouldn't fail", err) - } - - webhook.WithFields(logrus.Fields{ - "Key": path.Join("bucket", "object"), - "EventType": "s3:ObjectCreated:Put", - }).Info() -} - -// Add tests for lookup endpoint. -func TestLookupEndpoint(t *testing.T) { - server := httptest.NewServer(errorHandler{}) - defer server.Close() - - testCases := []struct { - endpoint string - err error - }{ - // Ignore endpoints which don't exist. 
- { - endpoint: "http://unknown", - err: nil, - }, - { - endpoint: "%%%", - err: errors.New("parse %%%: invalid URL escape \"%%%\""), - }, - { - endpoint: server.URL, - err: fmt.Errorf("Unable to lookup webhook endpoint %s response(400 Bad Request)", server.URL), - }, - } - for _, test := range testCases { - if err := lookupEndpoint(test.endpoint); err != nil { - if err.Error() != test.err.Error() { - t.Errorf("Expected %s, got %s", test.err, err) - } - } - } -} diff --git a/cmd/object-api-common.go b/cmd/object-api-common.go index bfdf42cdd..f120b4e7d 100644 --- a/cmd/object-api-common.go +++ b/cmd/object-api-common.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -89,19 +89,11 @@ func deleteBucketMetadata(bucket string, objAPI ObjectLayer) { // Delete bucket access policy, if present - ignore any errors. _ = removeBucketPolicy(bucket, objAPI) - // Notify all peers (including self) to update in-memory state - S3PeersUpdateBucketPolicy(bucket) - // Delete notification config, if present - ignore any errors. - _ = removeNotificationConfig(bucket, objAPI) + _ = removeNotificationConfig(objAPI, bucket) - // Notify all peers (including self) to update in-memory state - S3PeersUpdateBucketNotification(bucket, nil) // Delete listener config, if present - ignore any errors. - _ = removeListenerConfig(bucket, objAPI) - - // Notify all peers (including self) to update in-memory state - S3PeersUpdateBucketListener(bucket, []listenerConfig{}) + _ = removeListenerConfig(objAPI, bucket) } // House keeping code for FS/XL and distributed Minio setup. 
@@ -194,3 +186,23 @@ func cleanupDir(storage StorageAPI, volume, dirPath string) error { err := delFunc(retainSlash(pathJoin(dirPath))) return err } + +// Removes notification.xml for a given bucket, only used during DeleteBucket. +func removeNotificationConfig(objAPI ObjectLayer, bucket string) error { + // Verify bucket is valid. + if !IsValidBucketName(bucket) { + return BucketNameInvalid{Bucket: bucket} + } + + ncPath := path.Join(bucketConfigPrefix, bucket, bucketNotificationConfig) + + return objAPI.DeleteObject(minioMetaBucket, ncPath) +} + +// Remove listener configuration from storage layer. Used when a bucket is deleted. +func removeListenerConfig(objAPI ObjectLayer, bucket string) error { + // make the path + lcPath := path.Join(bucketConfigPrefix, bucket, bucketListenerConfig) + + return objAPI.DeleteObject(minioMetaBucket, lcPath) +} diff --git a/cmd/object-handlers-common.go b/cmd/object-handlers-common.go index 1d0123d79..4c49e6549 100644 --- a/cmd/object-handlers-common.go +++ b/cmd/object-handlers-common.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -21,6 +21,8 @@ import ( "net/http" "strings" "time" + + "github.com/minio/minio/pkg/event" ) // Validates the preconditions for CopyObjectPart, returns true if CopyObjectPart @@ -240,10 +242,10 @@ func deleteObject(obj ObjectLayer, bucket, object string, r *http.Request) (err host, port, _ := net.SplitHostPort(r.RemoteAddr) // Notify object deleted event. 
- eventNotify(eventData{ - Type: ObjectRemovedDelete, - Bucket: bucket, - ObjInfo: ObjectInfo{ + sendEvent(eventArgs{ + EventName: event.ObjectRemovedDelete, + BucketName: bucket, + Object: ObjectInfo{ Name: object, }, ReqParams: extractReqParams(r), diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 052a2f8a8..2368a827a 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -32,6 +32,7 @@ import ( mux "github.com/gorilla/mux" "github.com/minio/minio/pkg/errors" + "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/handlers" "github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/ioutil" @@ -191,14 +192,14 @@ func (api objectAPIHandlers) GetObjectHandler(w http.ResponseWriter, r *http.Req } // Notify object accessed via a GET request. - eventNotify(eventData{ - Type: ObjectAccessedGet, - Bucket: bucket, - ObjInfo: objInfo, - ReqParams: extractReqParams(r), - UserAgent: r.UserAgent(), - Host: host, - Port: port, + sendEvent(eventArgs{ + EventName: event.ObjectAccessedGet, + BucketName: bucket, + Object: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: host, + Port: port, }) } @@ -267,14 +268,14 @@ func (api objectAPIHandlers) HeadObjectHandler(w http.ResponseWriter, r *http.Re } // Notify object accessed via a HEAD request. - eventNotify(eventData{ - Type: ObjectAccessedHead, - Bucket: bucket, - ObjInfo: objInfo, - ReqParams: extractReqParams(r), - UserAgent: r.UserAgent(), - Host: host, - Port: port, + sendEvent(eventArgs{ + EventName: event.ObjectAccessedHead, + BucketName: bucket, + Object: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: host, + Port: port, }) } @@ -523,14 +524,14 @@ func (api objectAPIHandlers) CopyObjectHandler(w http.ResponseWriter, r *http.Re } // Notify object created event. 
- eventNotify(eventData{ - Type: ObjectCreatedCopy, - Bucket: dstBucket, - ObjInfo: objInfo, - ReqParams: extractReqParams(r), - UserAgent: r.UserAgent(), - Host: host, - Port: port, + sendEvent(eventArgs{ + EventName: event.ObjectCreatedCopy, + BucketName: dstBucket, + Object: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: host, + Port: port, }) } @@ -706,14 +707,14 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req } // Notify object created event. - eventNotify(eventData{ - Type: ObjectCreatedPut, - Bucket: bucket, - ObjInfo: objInfo, - ReqParams: extractReqParams(r), - UserAgent: r.UserAgent(), - Host: host, - Port: port, + sendEvent(eventArgs{ + EventName: event.ObjectCreatedPut, + BucketName: bucket, + Object: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: host, + Port: port, }) } @@ -1303,14 +1304,14 @@ func (api objectAPIHandlers) CompleteMultipartUploadHandler(w http.ResponseWrite } // Notify object created event. - eventNotify(eventData{ - Type: ObjectCreatedCompleteMultipartUpload, - Bucket: bucket, - ObjInfo: objInfo, - ReqParams: extractReqParams(r), - UserAgent: r.UserAgent(), - Host: host, - Port: port, + sendEvent(eventArgs{ + EventName: event.ObjectCreatedCompleteMultipartUpload, + BucketName: bucket, + Object: objInfo, + ReqParams: extractReqParams(r), + UserAgent: r.UserAgent(), + Host: host, + Port: port, }) } diff --git a/cmd/object-handlers_test.go b/cmd/object-handlers_test.go index 98546c9ba..48ab33fee 100644 --- a/cmd/object-handlers_test.go +++ b/cmd/object-handlers_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -478,11 +478,8 @@ func testAPIPutObjectStreamSigV4Handler(obj ObjectLayer, instanceType, bucketNam bytesData := bytes.Repeat([]byte{'a'}, bytesDataLen) oneKData := bytes.Repeat([]byte("a"), 1*humanize.KiByte) - err := initEventNotifier(obj) - if err != nil { - t.Fatalf("[%s] - Failed to initialize event notifiers %v", instanceType, err) + var err error - } type streamFault int const ( None streamFault = iota @@ -787,12 +784,7 @@ func TestAPIPutObjectHandler(t *testing.T) { func testAPIPutObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, credentials auth.Credentials, t *testing.T) { - // register event notifier. - err := initEventNotifier(obj) - - if err != nil { - t.Fatal("Notifier initialization failed.") - } + var err error objectName := "test-object" // byte data for PutObject. bytesData := generateBytesData(6 * humanize.KiByte) @@ -1041,11 +1033,7 @@ func testAPICopyObjectPartHandlerSanity(obj ObjectLayer, instanceType, bucketNam credentials auth.Credentials, t *testing.T) { objectName := "test-object" - // register event notifier. - err := initEventNotifier(obj) - if err != nil { - t.Fatalf("Initializing event notifiers failed") - } + var err error // set of byte data for PutObject. // object has to be created before running tests for Copy Object. @@ -1156,11 +1144,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri credentials auth.Credentials, t *testing.T) { objectName := "test-object" - // register event notifier. - err := initEventNotifier(obj) - if err != nil { - t.Fatalf("Initializing event notifiers failed") - } + var err error // set of byte data for PutObject. // object has to be created before running tests for Copy Object. @@ -1488,11 +1472,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, objectName := "test-object" // object used for anonymous HTTP request test. anonObject := "anon-object" - // register event notifier. 
- err := initEventNotifier(obj) - if err != nil { - t.Fatalf("Initializing event notifiers failed") - } + var err error // set of byte data for PutObject. // object has to be created before running tests for Copy Object. @@ -2110,12 +2090,6 @@ func testAPICompleteMultipartHandler(obj ObjectLayer, instanceType, bucketName s credentials auth.Credentials, t *testing.T) { var err error - // register event notifier. - err = initEventNotifier(obj) - - if err != nil { - t.Fatal("Notifier initialization failed.") - } // object used for the test. objectName := "test-object-new-multipart" @@ -2465,12 +2439,6 @@ func testAPIAbortMultipartHandler(obj ObjectLayer, instanceType, bucketName stri credentials auth.Credentials, t *testing.T) { var err error - // register event notifier. - err = initEventNotifier(obj) - - if err != nil { - t.Fatal("Notifier initialization failed.") - } // object used for the test. objectName := "test-object-new-multipart" @@ -2633,12 +2601,7 @@ func TestAPIDeleteObjectHandler(t *testing.T) { func testAPIDeleteObjectHandler(obj ObjectLayer, instanceType, bucketName string, apiRouter http.Handler, credentials auth.Credentials, t *testing.T) { - // register event notifier. - err := initEventNotifier(obj) - - if err != nil { - t.Fatal("Notifier initialization failed.") - } + var err error objectName := "test-object" // Object used for anonymous API request test. diff --git a/cmd/peer-rpc.go b/cmd/peer-rpc.go new file mode 100644 index 000000000..0b6186a83 --- /dev/null +++ b/cmd/peer-rpc.go @@ -0,0 +1,311 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "fmt" + "path" + + "github.com/gorilla/mux" + xerrors "github.com/minio/minio/pkg/errors" + "github.com/minio/minio/pkg/event" + xnet "github.com/minio/minio/pkg/net" +) + +const s3Path = "/s3/remote" + +// PeerRPCReceiver - Peer RPC receiver for peer RPC server. +type PeerRPCReceiver struct { + AuthRPCServer +} + +// DeleteBucketArgs - delete bucket RPC arguments. +type DeleteBucketArgs struct { + AuthRPCArgs + BucketName string +} + +// DeleteBucket - handles delete bucket RPC call which removes all values of given bucket in global NotificationSys object. +func (receiver *PeerRPCReceiver) DeleteBucket(args *DeleteBucketArgs, reply *AuthRPCArgs) error { + globalNotificationSys.RemoveNotification(args.BucketName) + return nil +} + +// UpdateBucketPolicyArgs - update bucket policy RPC arguments. +type UpdateBucketPolicyArgs struct { + AuthRPCArgs + BucketName string +} + +// UpdateBucketPolicy - handles update bucket policy RPC call which sets bucket policies to given bucket in global BucketPolicies object. +func (receiver *PeerRPCReceiver) UpdateBucketPolicy(args *UpdateBucketPolicyArgs, reply *AuthRPCArgs) error { + objectAPI := newObjectLayerFn() + if objectAPI == nil { + // If the object layer is just coming up then it will load the policy from the disk. + return nil + } + return objectAPI.RefreshBucketPolicy(args.BucketName) +} + +// PutBucketNotificationArgs - put bucket notification RPC arguments. 
+type PutBucketNotificationArgs struct { + AuthRPCArgs + BucketName string + RulesMap event.RulesMap +} + +// PutBucketNotification - handles put bucket notification RPC call which adds rules to given bucket to global NotificationSys object. +func (receiver *PeerRPCReceiver) PutBucketNotification(args *PutBucketNotificationArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err + } + + globalNotificationSys.AddRulesMap(args.BucketName, args.RulesMap) + return nil +} + +// ListenBucketNotificationArgs - listen bucket notification RPC arguments. +type ListenBucketNotificationArgs struct { + AuthRPCArgs `json:"-"` + BucketName string `json:"-"` + EventNames []event.Name `json:"eventNames"` + Pattern string `json:"pattern"` + TargetID event.TargetID `json:"targetId"` + Addr xnet.Host `json:"addr"` +} + +// ListenBucketNotification - handles listen bucket notification RPC call. It creates PeerRPCClient target which pushes requested events to target in remote peer. +func (receiver *PeerRPCReceiver) ListenBucketNotification(args *ListenBucketNotificationArgs, reply *AuthRPCReply) error { + if err := args.IsAuthenticated(); err != nil { + return err + } + + rpcClient := globalNotificationSys.GetPeerRPCClient(args.Addr) + if rpcClient == nil { + return fmt.Errorf("unable to find PeerRPCClient for provided address %v. This happens only if remote and this minio run with different set of endpoints", args.Addr) + } + + target := NewPeerRPCClientTarget(args.BucketName, args.TargetID, rpcClient) + rulesMap := event.NewRulesMap(args.EventNames, args.Pattern, target.ID()) + if err := globalNotificationSys.AddRemoteTarget(args.BucketName, target, rulesMap); err != nil { + errorIf(err, "Unable to add PeerRPCClientTarget %v to globalNotificationSys.targetList.", target) + return err + } + return nil +} + +// RemoteTargetExistArgs - remote target ID exist RPC arguments. 
+type RemoteTargetExistArgs struct {
+	AuthRPCArgs
+	BucketName string
+	TargetID   event.TargetID
+}
+
+// RemoteTargetExistReply - remote target ID exist RPC reply.
+type RemoteTargetExistReply struct {
+	AuthRPCReply
+	Exist bool
+}
+
+// RemoteTargetExist - handles target ID exist RPC call which checks whether given target ID is an HTTP client target or not.
+func (receiver *PeerRPCReceiver) RemoteTargetExist(args *RemoteTargetExistArgs, reply *RemoteTargetExistReply) error {
+	reply.Exist = globalNotificationSys.RemoteTargetExist(args.BucketName, args.TargetID)
+	return nil
+}
+
+// SendEventArgs - send event RPC arguments.
+type SendEventArgs struct {
+	AuthRPCArgs
+	Event      event.Event
+	TargetID   event.TargetID
+	BucketName string
+}
+
+// SendEventReply - send event RPC reply.
+type SendEventReply struct {
+	AuthRPCReply
+	Error error
+}
+
+// SendEvent - handles send event RPC call which sends given event to target by given target ID.
+func (receiver *PeerRPCReceiver) SendEvent(args *SendEventArgs, reply *SendEventReply) error {
+	if err := args.IsAuthenticated(); err != nil {
+		return err
+	}
+
+	var err error
+	if errMap := globalNotificationSys.send(args.BucketName, args.Event, args.TargetID); len(errMap) != 0 {
+		var found bool
+		if err, found = errMap[args.TargetID]; !found {
+			// errMap must be zero or one element map because we sent to only one target ID.
+			panic(fmt.Errorf("error for target %v not found in error map %+v", args.TargetID, errMap))
+		}
+	}
+
+	if err != nil {
+		errorIf(err, "unable to send event %v to target %v", args.Event, args.TargetID)
+	}
+
+	reply.Error = err
+	return nil
+}
+
+// registerS3PeerRPCRouter - creates and registers Peer RPC server and its router. 
+func registerS3PeerRPCRouter(router *mux.Router) error {
+	peerRPCServer := newRPCServer()
+	if err := peerRPCServer.RegisterName("Peer", &PeerRPCReceiver{}); err != nil {
+		return xerrors.Trace(err)
+	}
+
+	subrouter := router.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter()
+	subrouter.Path(s3Path).Handler(peerRPCServer)
+	return nil
+}
+
+// PeerRPCClient - peer RPC client talks to peer RPC server.
+type PeerRPCClient struct {
+	*AuthRPCClient
+}
+
+// DeleteBucket - calls delete bucket RPC.
+func (rpcClient *PeerRPCClient) DeleteBucket(bucketName string) error {
+	args := DeleteBucketArgs{BucketName: bucketName}
+	reply := AuthRPCReply{}
+	return rpcClient.Call("Peer.DeleteBucket", &args, &reply)
+}
+
+// UpdateBucketPolicy - calls update bucket policy RPC.
+func (rpcClient *PeerRPCClient) UpdateBucketPolicy(bucketName string) error {
+	args := UpdateBucketPolicyArgs{
+		BucketName: bucketName,
+	}
+	reply := AuthRPCReply{}
+	return rpcClient.Call("Peer.UpdateBucketPolicy", &args, &reply)
+}
+
+// PutBucketNotification - calls put bucket notification RPC.
+func (rpcClient *PeerRPCClient) PutBucketNotification(bucketName string, rulesMap event.RulesMap) error {
+	args := PutBucketNotificationArgs{
+		BucketName: bucketName,
+		RulesMap:   rulesMap,
+	}
+	reply := AuthRPCReply{}
+	return rpcClient.Call("Peer.PutBucketNotification", &args, &reply)
+}
+
+// ListenBucketNotification - calls listen bucket notification RPC.
+func (rpcClient *PeerRPCClient) ListenBucketNotification(bucketName string, eventNames []event.Name,
+	pattern string, targetID event.TargetID, addr xnet.Host) error {
+	args := ListenBucketNotificationArgs{
+		BucketName: bucketName,
+		EventNames: eventNames,
+		Pattern:    pattern,
+		TargetID:   targetID,
+		Addr:       addr,
+	}
+	reply := AuthRPCReply{}
+	return rpcClient.Call("Peer.ListenBucketNotification", &args, &reply)
+}
+
+// RemoteTargetExist - calls remote target ID exist RPC. 
+func (rpcClient *PeerRPCClient) RemoteTargetExist(bucketName string, targetID event.TargetID) (bool, error) { + args := RemoteTargetExistArgs{ + BucketName: bucketName, + TargetID: targetID, + } + + reply := RemoteTargetExistReply{} + if err := rpcClient.Call("Peer.RemoteTargetExist", &args, &reply); err != nil { + return false, err + } + + return reply.Exist, nil +} + +// SendEvent - calls send event RPC. +func (rpcClient *PeerRPCClient) SendEvent(bucketName string, targetID, remoteTargetID event.TargetID, eventData event.Event) error { + args := SendEventArgs{ + BucketName: bucketName, + TargetID: remoteTargetID, + Event: eventData, + } + reply := SendEventReply{} + if err := rpcClient.Call("Peer.SendEvent", &args, &reply); err != nil { + return err + } + + if reply.Error != nil { + errorIf(reply.Error, "unable to send event %v to rpc target %v of bucket %v", args, targetID, bucketName) + globalNotificationSys.RemoveRemoteTarget(bucketName, targetID) + } + + return reply.Error +} + +// makeRemoteRPCClients - creates Peer RPCClients for given endpoint list. +func makeRemoteRPCClients(endpoints EndpointList) map[xnet.Host]*PeerRPCClient { + peerRPCClientMap := make(map[xnet.Host]*PeerRPCClient) + + cred := globalServerConfig.GetCredential() + serviceEndpoint := path.Join(minioReservedBucketPath, s3Path) + for _, hostStr := range GetRemotePeers(endpoints) { + host := xnet.MustParseHost(hostStr) + peerRPCClientMap[*host] = &PeerRPCClient{newAuthRPCClient(authConfig{ + accessKey: cred.AccessKey, + secretKey: cred.SecretKey, + serverAddr: hostStr, + serviceEndpoint: serviceEndpoint, + secureConn: globalIsSSL, + serviceName: "Peer", + })} + } + + return peerRPCClientMap +} + +// PeerRPCClientTarget - RPCClient is an event.Target which sends event to target of remote peer. +type PeerRPCClientTarget struct { + id event.TargetID + remoteTargetID event.TargetID + rpcClient *PeerRPCClient + bucketName string +} + +// ID - returns target ID. 
+func (target *PeerRPCClientTarget) ID() event.TargetID { + return target.id +} + +// Send - sends event to remote peer by making RPC call. +func (target *PeerRPCClientTarget) Send(eventData event.Event) error { + return target.rpcClient.SendEvent(target.bucketName, target.id, target.remoteTargetID, eventData) +} + +// Close - does nothing and available for interface compatibility. +func (target *PeerRPCClientTarget) Close() error { + return nil +} + +// NewPeerRPCClientTarget - creates RPCClient target with given target ID available in remote peer. +func NewPeerRPCClientTarget(bucketName string, targetID event.TargetID, rpcClient *PeerRPCClient) *PeerRPCClientTarget { + return &PeerRPCClientTarget{ + id: event.TargetID{targetID.ID, targetID.Name + "+" + mustGetUUID()}, + remoteTargetID: targetID, + bucketName: bucketName, + rpcClient: rpcClient, + } +} diff --git a/cmd/post-policy_test.go b/cmd/post-policy_test.go index 2475a2392..decf0b992 100644 --- a/cmd/post-policy_test.go +++ b/cmd/post-policy_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -123,12 +123,6 @@ func testPostPolicyBucketHandler(obj ObjectLayer, instanceType string, t TestErr } defer os.RemoveAll(root) - // Register event notifier. - err = initEventNotifier(obj) - if err != nil { - t.Fatalf("Initializing event notifiers failed") - } - // get random bucket name. bucketName := getRandomBucketName() @@ -431,12 +425,6 @@ func testPostPolicyBucketHandlerRedirect(obj ObjectLayer, instanceType string, t } defer os.RemoveAll(root) - // Register event notifier. - err = initEventNotifier(obj) - if err != nil { - t.Fatalf("Initializing event notifiers failed") - } - // get random bucket name. 
bucketName := getRandomBucketName() diff --git a/cmd/s3-peer-client.go b/cmd/s3-peer-client.go deleted file mode 100644 index af96d174d..000000000 --- a/cmd/s3-peer-client.go +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "fmt" - "path" - "sync" - - "github.com/minio/minio-go/pkg/set" -) - -// s3Peer structs contains the address of a peer in the cluster, and -// its BucketMetaState interface objects. -type s3Peer struct { - // address in `host:port` format - addr string - // BucketMetaState client interface - bmsClient BucketMetaState -} - -// type representing all peers in the cluster -type s3Peers []s3Peer - -// makeS3Peers makes an s3Peers struct value from the given urls -// slice. The urls slice is assumed to be non-empty and free of nil -// values. 
-func makeS3Peers(endpoints EndpointList) (s3PeerList s3Peers) { - localAddr := GetLocalPeer(endpoints) - s3PeerList = append(s3PeerList, s3Peer{ - localAddr, - &localBucketMetaState{ObjectAPI: newObjectLayerFn}, - }) - - hostSet := set.CreateStringSet(localAddr) - cred := globalServerConfig.GetCredential() - serviceEndpoint := path.Join(minioReservedBucketPath, s3Path) - for _, host := range GetRemotePeers(endpoints) { - if hostSet.Contains(host) { - continue - } - hostSet.Add(host) - s3PeerList = append(s3PeerList, s3Peer{ - addr: host, - bmsClient: &remoteBucketMetaState{ - newAuthRPCClient(authConfig{ - accessKey: cred.AccessKey, - secretKey: cred.SecretKey, - serverAddr: host, - serviceEndpoint: serviceEndpoint, - secureConn: globalIsSSL, - serviceName: "S3", - })}, - }) - } - return s3PeerList -} - -// initGlobalS3Peers - initialize globalS3Peers by passing in -// endpoints - intended to be called early in program start-up. -func initGlobalS3Peers(endpoints EndpointList) { - globalS3Peers = makeS3Peers(endpoints) -} - -// GetPeerClient - fetch BucketMetaState interface by peer address -func (s3p s3Peers) GetPeerClient(peer string) BucketMetaState { - for _, p := range s3p { - if p.addr == peer { - return p.bmsClient - } - } - return nil -} - -// SendUpdate sends bucket metadata updates to all given peer -// indices. The update calls are sent in parallel, and errors are -// returned per peer in an array. The returned error arrayslice is -// always as long as s3p.peers.addr. -// -// The input peerIndex slice can be nil if the update is to be sent to -// all peers. This is the common case. -// -// The updates are sent via a type implementing the BucketMetaState -// interface. This makes sure that the local node is directly updated, -// and remote nodes are updated via RPC calls. 
-func (s3p s3Peers) SendUpdate(peerIndex []int, args BucketUpdater) []error { - - // peer error array - errs := make([]error, len(s3p)) - - // Start a wait group and make RPC requests to peers. - var wg sync.WaitGroup - - // Function that sends update to peer at `index` - sendUpdateToPeer := func(index int) { - defer wg.Done() - errs[index] = args.BucketUpdate(s3p[index].bmsClient) - } - - // Special (but common) case of peerIndex == nil, implies send - // update to all peers. - if peerIndex == nil { - for idx := 0; idx < len(s3p); idx++ { - wg.Add(1) - go sendUpdateToPeer(idx) - } - } else { - // Send update only to given peer indices. - for _, idx := range peerIndex { - // check idx is in array bounds. - if !(idx >= 0 && idx < len(s3p)) { - errorIf( - fmt.Errorf("Bad peer index %d input to SendUpdate()", idx), - "peerIndex out of bounds", - ) - continue - } - wg.Add(1) - go sendUpdateToPeer(idx) - } - } - - // Wait for requests to complete and return - wg.Wait() - return errs -} - -// S3PeersUpdateBucketNotification - Sends Update Bucket notification -// request to all peers. Currently we log an error and continue. -func S3PeersUpdateBucketNotification(bucket string, ncfg *notificationConfig) { - setBNPArgs := &SetBucketNotificationPeerArgs{Bucket: bucket, NCfg: ncfg} - errs := globalS3Peers.SendUpdate(nil, setBNPArgs) - for idx, err := range errs { - errorIf( - err, - "Error sending update bucket notification to %s - %v", - globalS3Peers[idx].addr, err, - ) - } -} - -// S3PeersUpdateBucketListener - Sends Update Bucket listeners request -// to all peers. Currently we log an error and continue. 
-func S3PeersUpdateBucketListener(bucket string, lcfg []listenerConfig) { - setBLPArgs := &SetBucketListenerPeerArgs{Bucket: bucket, LCfg: lcfg} - errs := globalS3Peers.SendUpdate(nil, setBLPArgs) - for idx, err := range errs { - errorIf( - err, - "Error sending update bucket listener to %s - %v", - globalS3Peers[idx].addr, err, - ) - } -} - -// S3PeersUpdateBucketPolicy - Sends update bucket policy request to -// all peers. Currently we log an error and continue. -func S3PeersUpdateBucketPolicy(bucket string) { - setBPPArgs := &SetBucketPolicyPeerArgs{Bucket: bucket} - errs := globalS3Peers.SendUpdate(nil, setBPPArgs) - for idx, err := range errs { - errorIf( - err, - "Error sending update bucket policy to %s - %v", - globalS3Peers[idx].addr, err, - ) - } -} diff --git a/cmd/s3-peer-client_test.go b/cmd/s3-peer-client_test.go deleted file mode 100644 index 70e79ed37..000000000 --- a/cmd/s3-peer-client_test.go +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "os" - "reflect" - "testing" -) - -// Validates makeS3Peers, fetches all peers based on list of storage -// endpoints. 
-func TestMakeS3Peers(t *testing.T) { - // Initialize configuration - root, err := newTestConfig(globalMinioDefaultRegion) - if err != nil { - t.Fatalf("%s", err) - } - defer os.RemoveAll(root) - - // test cases - testCases := []struct { - gMinioAddr string - eps EndpointList - peers []string - }{ - {"127.0.0.1:9000", mustGetNewEndpointList("/mnt/disk1"), []string{"127.0.0.1:9000"}}, - {"example.org:9000", mustGetNewEndpointList("http://example.org:9000/d1", "http://example.com:9000/d1", "http://example.net:9000/d1", "http://example.edu:9000/d1"), []string{"example.org:9000", "example.com:9000", "example.edu:9000", "example.net:9000"}}, - } - - getPeersHelper := func(s3p s3Peers) []string { - r := []string{} - for _, p := range s3p { - r = append(r, p.addr) - } - return r - } - - // execute tests - for i, testCase := range testCases { - globalMinioAddr = testCase.gMinioAddr - s3peers := makeS3Peers(testCase.eps) - referencePeers := getPeersHelper(s3peers) - if !reflect.DeepEqual(testCase.peers, referencePeers) { - t.Errorf("Test %d: Expected %v, got %v", i+1, testCase.peers, referencePeers) - } - } -} diff --git a/cmd/s3-peer-router.go b/cmd/s3-peer-router.go deleted file mode 100644 index 20fb41cf3..000000000 --- a/cmd/s3-peer-router.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package cmd - -import ( - router "github.com/gorilla/mux" - "github.com/minio/minio/pkg/errors" -) - -const ( - s3Path = "/s3/remote" -) - -type s3PeerAPIHandlers struct { - AuthRPCServer - bms BucketMetaState -} - -func registerS3PeerRPCRouter(mux *router.Router) error { - s3PeerHandlers := &s3PeerAPIHandlers{ - AuthRPCServer{}, - &localBucketMetaState{ - ObjectAPI: newObjectLayerFn, - }, - } - - s3PeerRPCServer := newRPCServer() - err := s3PeerRPCServer.RegisterName("S3", s3PeerHandlers) - if err != nil { - return errors.Trace(err) - } - - s3PeerRouter := mux.NewRoute().PathPrefix(minioReservedBucketPath).Subrouter() - s3PeerRouter.Path(s3Path).Handler(s3PeerRPCServer) - return nil -} diff --git a/cmd/s3-peer-rpc-handlers.go b/cmd/s3-peer-rpc-handlers.go deleted file mode 100644 index 7500fa61f..000000000 --- a/cmd/s3-peer-rpc-handlers.go +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -// SetBucketNotificationPeerArgs - Arguments collection to SetBucketNotificationPeer RPC -// call -type SetBucketNotificationPeerArgs struct { - // For Auth - AuthRPCArgs - - Bucket string - - // Notification config for the given bucket. - NCfg *notificationConfig -} - -// BucketUpdate - implements bucket notification updates, -// the underlying operation is a network call updates all -// the peers participating in bucket notification. 
-func (s *SetBucketNotificationPeerArgs) BucketUpdate(client BucketMetaState) error { - return client.UpdateBucketNotification(s) -} - -func (s3 *s3PeerAPIHandlers) SetBucketNotificationPeer(args *SetBucketNotificationPeerArgs, reply *AuthRPCReply) error { - if err := args.IsAuthenticated(); err != nil { - return err - } - - return s3.bms.UpdateBucketNotification(args) -} - -// SetBucketListenerPeerArgs - Arguments collection to SetBucketListenerPeer RPC call -type SetBucketListenerPeerArgs struct { - // For Auth - AuthRPCArgs - - Bucket string - - // Listener config for a given bucket. - LCfg []listenerConfig -} - -// BucketUpdate - implements bucket listener updates, -// the underlying operation is a network call updates all -// the peers participating in listen bucket notification. -func (s *SetBucketListenerPeerArgs) BucketUpdate(client BucketMetaState) error { - return client.UpdateBucketListener(s) -} - -func (s3 *s3PeerAPIHandlers) SetBucketListenerPeer(args *SetBucketListenerPeerArgs, reply *AuthRPCReply) error { - if err := args.IsAuthenticated(); err != nil { - return err - } - - return s3.bms.UpdateBucketListener(args) -} - -// EventArgs - Arguments collection for Event RPC call -type EventArgs struct { - // For Auth - AuthRPCArgs - - // event being sent - Event []NotificationEvent - - // client that it is meant for - Arn string -} - -// submit an event to the receiving server. -func (s3 *s3PeerAPIHandlers) Event(args *EventArgs, reply *AuthRPCReply) error { - if err := args.IsAuthenticated(); err != nil { - return err - } - - return s3.bms.SendEvent(args) -} - -// SetBucketPolicyPeerArgs - Arguments collection for SetBucketPolicyPeer RPC call -type SetBucketPolicyPeerArgs struct { - // For Auth - AuthRPCArgs - - Bucket string -} - -// BucketUpdate - implements bucket policy updates, -// the underlying operation is a network call updates all -// the peers participating for new set/unset policies. 
-func (s *SetBucketPolicyPeerArgs) BucketUpdate(client BucketMetaState) error { - return client.UpdateBucketPolicy(s) -} - -// tell receiving server to update a bucket policy -func (s3 *s3PeerAPIHandlers) SetBucketPolicyPeer(args *SetBucketPolicyPeerArgs, reply *AuthRPCReply) error { - if err := args.IsAuthenticated(); err != nil { - return err - } - - return s3.bms.UpdateBucketPolicy(args) -} diff --git a/cmd/s3-peer-rpc-handlers_test.go b/cmd/s3-peer-rpc-handlers_test.go deleted file mode 100644 index 8d25c2ef7..000000000 --- a/cmd/s3-peer-rpc-handlers_test.go +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Minio Cloud Storage, (C) 2014-2016 Minio, Inc. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cmd - -import ( - "os" - "path" - "testing" -) - -type TestRPCS3PeerSuite struct { - testServer TestServer - testAuthConf authConfig - disks []string -} - -// Set up the suite and start the test server. 
-func (s *TestRPCS3PeerSuite) SetUpSuite(t *testing.T) { - s.testServer, s.disks = StartTestS3PeerRPCServer(t) - s.testAuthConf = authConfig{ - serverAddr: s.testServer.Server.Listener.Addr().String(), - accessKey: s.testServer.AccessKey, - secretKey: s.testServer.SecretKey, - serviceEndpoint: path.Join(minioReservedBucketPath, s3Path), - serviceName: "S3", - } -} - -func (s *TestRPCS3PeerSuite) TearDownSuite(t *testing.T) { - s.testServer.Stop() - removeRoots(s.disks) - os.RemoveAll(s.testServer.Root) -} - -func TestS3PeerRPC(t *testing.T) { - // setup - s := &TestRPCS3PeerSuite{} - s.SetUpSuite(t) - - // run test - s.testS3PeerRPC(t) - - // teardown - s.TearDownSuite(t) -} - -// Test S3 RPC handlers -func (s *TestRPCS3PeerSuite) testS3PeerRPC(t *testing.T) { - // Validate for invalid token. - args := AuthRPCArgs{} - rclient := newAuthRPCClient(s.testAuthConf) - defer rclient.Close() - - if err := rclient.Login(); err != nil { - t.Fatal(err) - } - - rclient.authToken = "garbage" - err := rclient.Call("S3.SetBucketNotificationPeer", &args, &AuthRPCReply{}) - if err != nil { - if err.Error() != errInvalidToken.Error() { - t.Fatal(err) - } - } - - // Check bucket notification call works. - BNPArgs := SetBucketNotificationPeerArgs{Bucket: "bucket", NCfg: ¬ificationConfig{}} - client := newAuthRPCClient(s.testAuthConf) - defer client.Close() - err = client.Call("S3.SetBucketNotificationPeer", &BNPArgs, &AuthRPCReply{}) - if err != nil { - t.Fatal(err) - } - - // Check bucket listener update call works. - BLPArgs := SetBucketListenerPeerArgs{Bucket: "bucket", LCfg: nil} - err = client.Call("S3.SetBucketListenerPeer", &BLPArgs, &AuthRPCReply{}) - if err != nil { - t.Fatal(err) - } - - BPPArgs := SetBucketPolicyPeerArgs{Bucket: "bucket"} - err = client.Call("S3.SetBucketPolicyPeer", &BPPArgs, &AuthRPCReply{}) - if err != nil { - t.Fatal(err) - } - - // Check event send event call works. 
- evArgs := EventArgs{Event: nil, Arn: "localhost:9000"} - err = client.Call("S3.Event", &evArgs, &AuthRPCReply{}) - if err != nil { - t.Fatal(err) - } -} diff --git a/cmd/server-main.go b/cmd/server-main.go index cb0f4a509..97b386f1f 100644 --- a/cmd/server-main.go +++ b/cmd/server-main.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -223,8 +223,9 @@ func serverMain(ctx *cli.Context) { handler, err = configureServerHandler(globalEndpoints) fatalIf(err, "Unable to configure one of server's RPC services.") - // Initialize S3 Peers inter-node communication only in distributed setup. - initGlobalS3Peers(globalEndpoints) + // Initialize notification system. + globalNotificationSys, err = NewNotificationSys(globalServerConfig, globalEndpoints) + fatalIf(err, "Unable to initialize notification system.") // Initialize Admin Peers inter-node communication only in distributed setup. initGlobalAdminPeers(globalEndpoints) diff --git a/cmd/server-startup-msg.go b/cmd/server-startup-msg.go index 2b4fac434..e0a1ea747 100644 --- a/cmd/server-startup-msg.go +++ b/cmd/server-startup-msg.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -125,19 +125,16 @@ func printServerCommonMsg(apiEndpoints []string) { // Prints bucket notification configurations. func printEventNotifiers() { - if globalEventNotifier == nil { - // In case initEventNotifier() was not done or failed. 
- return - } - // Get all configured external notification targets - externalTargets := globalEventNotifier.GetAllExternalTargets() - if len(externalTargets) == 0 { + arns := globalNotificationSys.GetARNList() + if len(arns) == 0 { return } + arnMsg := colorBlue("SQS ARNs: ") - for queueArn := range externalTargets { - arnMsg += colorBold(fmt.Sprintf(getFormatStr(len(queueArn), 1), queueArn)) + for _, arn := range arns { + arnMsg += colorBold(fmt.Sprintf(getFormatStr(len(arn), 1), arn)) } + log.Println(arnMsg) } diff --git a/cmd/server_test.go b/cmd/server_test.go index 191cf5432..a3d8b6a9e 100644 --- a/cmd/server_test.go +++ b/cmd/server_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -75,13 +75,10 @@ func verifyError(c *check, response *http.Response, code, description string, st func runAllTests(suite *TestSuiteCommon, c *check) { suite.SetUpSuite(c) - suite.TestBucketSQSNotificationWebHook(c) suite.TestObjectDir(c) - suite.TestBucketSQSNotificationAMQP(c) suite.TestBucketPolicy(c) suite.TestDeleteBucket(c) suite.TestDeleteBucketNotEmpty(c) - suite.TestListenBucketNotificationHandler(c) suite.TestDeleteMultipleObjects(c) suite.TestDeleteObject(c) suite.TestNonExistentBucket(c) diff --git a/cmd/signals.go b/cmd/signals.go index 9739fcc47..041493a09 100644 --- a/cmd/signals.go +++ b/cmd/signals.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -38,6 +38,10 @@ func handleSignals() { stopProcess := func() bool { var err, oerr error + if globalNotificationSys != nil { + globalNotificationSys.RemoveAllRemoteTargets() + } + err = globalHTTPServer.Shutdown() errorIf(err, "Unable to shutdown http server") diff --git a/cmd/test-utils_test.go b/cmd/test-utils_test.go index eab51ce5e..859624788 100644 --- a/cmd/test-utils_test.go +++ b/cmd/test-utils_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2015, 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2015, 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -351,7 +351,10 @@ func UnstartedTestServer(t TestErrHandler, instanceType string) TestServer { globalMinioHost = host globalMinioPort = port globalMinioAddr = getEndpointsLocalAddr(testServer.Disks) - initGlobalS3Peers(testServer.Disks) + globalNotificationSys, err = NewNotificationSys(globalServerConfig, testServer.Disks) + if err != nil { + t.Fatalf("Unable to initialize queue configuration") + } return testServer } @@ -511,11 +514,6 @@ func resetGlobalNSLock() { } } -// reset Global event notifier. -func resetGlobalEventnotify() { - globalEventNotifier = nil -} - func resetGlobalEndpoints() { globalEndpoints = EndpointList{} } @@ -558,8 +556,6 @@ func resetTestGlobals() { resetGlobalConfig() // Reset global NSLock. resetGlobalNSLock() - // Reset global event notifier. - resetGlobalEventnotify() // Reset global endpoints. resetGlobalEndpoints() // Reset global isXL flag. @@ -1637,18 +1633,6 @@ func getCompleteMultipartUploadURL(endPoint, bucketName, objectName, uploadID st return makeTestTargetURL(endPoint, bucketName, objectName, queryValue) } -// return URL for put bucket notification. -func getPutBucketNotificationURL(endPoint, bucketName string) string { - return getGetBucketNotificationURL(endPoint, bucketName) -} - -// return URL for get bucket notification. 
-func getGetBucketNotificationURL(endPoint, bucketName string) string { - queryValue := url.Values{} - queryValue.Set("notification", "") - return makeTestTargetURL(endPoint, bucketName, "", queryValue) -} - // return URL for listen bucket notification. func getListenBucketNotificationURL(endPoint, bucketName string, prefixes, suffixes, events []string) string { queryValue := url.Values{} @@ -1720,7 +1704,7 @@ func newTestObjectLayer(endpoints EndpointList) (newObject ObjectLayer, err erro } // Initialize a new event notifier. - if err = initEventNotifier(xl); err != nil { + if globalNotificationSys, err = NewNotificationSys(globalServerConfig, endpoints); err != nil { return nil, err } diff --git a/cmd/web-handlers.go b/cmd/web-handlers.go index 3e1773e80..a7bb72199 100644 --- a/cmd/web-handlers.go +++ b/cmd/web-handlers.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -36,6 +36,7 @@ import ( "github.com/minio/minio/browser" "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/errors" + "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/hash" ) @@ -568,11 +569,11 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) { } // Notify object created event. - eventNotify(eventData{ - Type: ObjectCreatedPut, - Bucket: bucket, - ObjInfo: objInfo, - ReqParams: extractReqParams(r), + sendEvent(eventArgs{ + EventName: event.ObjectCreatedPut, + BucketName: bucket, + Object: objInfo, + ReqParams: extractReqParams(r), }) } diff --git a/cmd/web-handlers_test.go b/cmd/web-handlers_test.go index 65e2e54e3..e8f3309d5 100644 --- a/cmd/web-handlers_test.go +++ b/cmd/web-handlers_test.go @@ -1,5 +1,5 @@ /* - * Minio Cloud Storage, (C) 2016, 2017 Minio, Inc. + * Minio Cloud Storage, (C) 2016, 2017, 2018 Minio, Inc. 
* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -41,6 +41,21 @@ import ( "github.com/minio/minio/pkg/hash" ) +// Implement a dummy flush writer. +type flushWriter struct { + io.Writer +} + +// Flush writer is a dummy writer compatible with http.Flusher and http.ResponseWriter. +func (f *flushWriter) Flush() {} +func (f *flushWriter) Write(b []byte) (n int, err error) { return f.Writer.Write(b) } +func (f *flushWriter) Header() http.Header { return http.Header{} } +func (f *flushWriter) WriteHeader(code int) {} + +func newFlushWriter(writer io.Writer) http.ResponseWriter { + return &flushWriter{writer} +} + // Tests private function writeWebErrorResponse. func TestWriteWebErrorResponse(t *testing.T) { var buffer bytes.Buffer diff --git a/cmd/xl-sets.go b/cmd/xl-sets.go index 9e8056197..ef2cf2e5f 100644 --- a/cmd/xl-sets.go +++ b/cmd/xl-sets.go @@ -228,9 +228,9 @@ func newXLSets(endpoints EndpointList, format *formatXLV2, setCount int, drivesP return nil, err } - // Initialize a new event notifier. - if err := initEventNotifier(s); err != nil { - return nil, err + // Initialize notification system. + if err = globalNotificationSys.Init(s); err != nil { + return nil, fmt.Errorf("Unable to initialize event notification. %s", err) } // Start the disk monitoring and connect routine. diff --git a/pkg/event/arn.go b/pkg/event/arn.go new file mode 100644 index 000000000..44ecee54e --- /dev/null +++ b/pkg/event/arn.go @@ -0,0 +1,83 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package event + +import ( + "encoding/xml" + "strings" +) + +// ARN - SQS resource name representation. +type ARN struct { + TargetID + region string +} + +// String - returns string representation. +func (arn ARN) String() string { + if arn.TargetID.ID == "" && arn.TargetID.Name == "" && arn.region == "" { + return "" + } + + return "arn:minio:sqs:" + arn.region + ":" + arn.TargetID.String() +} + +// MarshalXML - encodes to XML data. +func (arn ARN) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(arn.String(), start) +} + +// UnmarshalXML - decodes XML data. +func (arn *ARN) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var s string + if err := d.DecodeElement(&s, &start); err != nil { + return err + } + + parsedARN, err := parseARN(s) + if err != nil { + return err + } + + *arn = *parsedARN + return nil +} + +// parseARN - parses string to ARN. 
+func parseARN(s string) (*ARN, error) { + // ARN must be in the format of arn:minio:sqs::: + if !strings.HasPrefix(s, "arn:minio:sqs:") { + return nil, &ErrInvalidARN{s} + } + + tokens := strings.Split(s, ":") + if len(tokens) != 6 { + return nil, &ErrInvalidARN{s} + } + + if tokens[4] == "" || tokens[5] == "" { + return nil, &ErrInvalidARN{s} + } + + return &ARN{ + region: tokens[3], + TargetID: TargetID{ + ID: tokens[4], + Name: tokens[5], + }, + }, nil +} diff --git a/pkg/event/arn_test.go b/pkg/event/arn_test.go new file mode 100644 index 000000000..dc1e0c7b6 --- /dev/null +++ b/pkg/event/arn_test.go @@ -0,0 +1,129 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package event + +import ( + "encoding/xml" + "reflect" + "testing" +) + +func TestARNString(t *testing.T) { + testCases := []struct { + arn ARN + expectedResult string + }{ + {ARN{}, ""}, + {ARN{TargetID{"1", "webhook"}, ""}, "arn:minio:sqs::1:webhook"}, + {ARN{TargetID{"1", "webhook"}, "us-east-1"}, "arn:minio:sqs:us-east-1:1:webhook"}, + } + + for i, testCase := range testCases { + result := testCase.arn.String() + + if result != testCase.expectedResult { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestARNMarshalXML(t *testing.T) { + testCases := []struct { + arn ARN + expectedData []byte + expectErr bool + }{ + {ARN{}, []byte(""), false}, + {ARN{TargetID{"1", "webhook"}, ""}, []byte("arn:minio:sqs::1:webhook"), false}, + {ARN{TargetID{"1", "webhook"}, "us-east-1"}, []byte("arn:minio:sqs:us-east-1:1:webhook"), false}, + } + + for i, testCase := range testCases { + data, err := xml.Marshal(testCase.arn) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(data, testCase.expectedData) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data)) + } + } + } +} + +func TestARNUnmarshalXML(t *testing.T) { + testCases := []struct { + data []byte + expectedARN *ARN + expectErr bool + }{ + {[]byte(""), nil, true}, + {[]byte("arn:minio:sqs:::"), nil, true}, + {[]byte("arn:minio:sqs::1:webhook"), &ARN{TargetID{"1", "webhook"}, ""}, false}, + {[]byte("arn:minio:sqs:us-east-1:1:webhook"), &ARN{TargetID{"1", "webhook"}, "us-east-1"}, false}, + } + + for i, testCase := range testCases { + arn := &ARN{} + err := xml.Unmarshal(testCase.data, &arn) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + 
+ if !testCase.expectErr { + if *arn != *testCase.expectedARN { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedARN, arn) + } + } + } +} + +func TestParseARN(t *testing.T) { + testCases := []struct { + s string + expectedARN *ARN + expectErr bool + }{ + {"", nil, true}, + {"arn:minio:sqs:::", nil, true}, + {"arn:minio:sqs::1:webhook:remote", nil, true}, + {"arn:aws:sqs::1:webhook", nil, true}, + {"arn:minio:sns::1:webhook", nil, true}, + {"arn:minio:sqs::1:webhook", &ARN{TargetID{"1", "webhook"}, ""}, false}, + {"arn:minio:sqs:us-east-1:1:webhook", &ARN{TargetID{"1", "webhook"}, "us-east-1"}, false}, + } + + for i, testCase := range testCases { + arn, err := parseARN(testCase.s) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if *arn != *testCase.expectedARN { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedARN, arn) + } + } + } +} diff --git a/pkg/event/config.go b/pkg/event/config.go new file mode 100644 index 000000000..b10a45d2b --- /dev/null +++ b/pkg/event/config.go @@ -0,0 +1,292 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package event + +import ( + "encoding/xml" + "errors" + "io" + "reflect" + "strings" + "unicode/utf8" + + "github.com/minio/minio-go/pkg/set" +) + +// ValidateFilterRuleValue - checks if given value is filter rule value or not. +func ValidateFilterRuleValue(value string) error { + for _, segment := range strings.Split(value, "/") { + if segment == "." || segment == ".." { + return &ErrInvalidFilterValue{value} + } + } + + if len(value) <= 1024 && utf8.ValidString(value) && !strings.Contains(value, `\`) { + return nil + } + + return &ErrInvalidFilterValue{value} +} + +// FilterRule - represents elements inside ... +type FilterRule struct { + Name string `xml:"Name"` + Value string `xml:"Value"` +} + +// UnmarshalXML - decodes XML data. +func (filter *FilterRule) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + // Make subtype to avoid recursive UnmarshalXML(). + type filterRule FilterRule + rule := filterRule{} + if err := d.DecodeElement(&rule, &start); err != nil { + return err + } + + if rule.Name != "prefix" && rule.Name != "suffix" { + return &ErrInvalidFilterName{rule.Name} + } + + if err := ValidateFilterRuleValue(filter.Value); err != nil { + return err + } + + *filter = FilterRule(rule) + + return nil +} + +// FilterRuleList - represents multiple ... +type FilterRuleList struct { + Rules []FilterRule `xml:"FilterRule,omitempty"` +} + +// UnmarshalXML - decodes XML data. +func (ruleList *FilterRuleList) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + // Make subtype to avoid recursive UnmarshalXML(). + type filterRuleList FilterRuleList + rules := filterRuleList{} + if err := d.DecodeElement(&rules, &start); err != nil { + return err + } + + // FilterRuleList must have only one prefix and/or suffix. 
+ nameSet := set.NewStringSet() + for _, rule := range rules.Rules { + if nameSet.Contains(rule.Name) { + if rule.Name == "prefix" { + return &ErrFilterNamePrefix{} + } + + return &ErrFilterNameSuffix{} + } + + nameSet.Add(rule.Name) + } + + *ruleList = FilterRuleList(rules) + return nil +} + +// Pattern - returns pattern using prefix and suffix values. +func (ruleList FilterRuleList) Pattern() string { + var prefix string + var suffix string + + for _, rule := range ruleList.Rules { + switch rule.Name { + case "prefix": + prefix = rule.Value + case "suffix": + suffix = rule.Value + } + } + + return NewPattern(prefix, suffix) +} + +// S3Key - represents elements inside ... +type S3Key struct { + RuleList FilterRuleList `xml:"S3Key,omitempty" json:"S3Key,omitempty"` +} + +// common - represents common elements inside , +// and +type common struct { + ID string `xml:"Id" json:"Id"` + Filter S3Key `xml:"Filter" json:"Filter"` + Events []Name `xml:"Event" json:"Event"` +} + +// Queue - represents elements inside +type Queue struct { + common + ARN ARN `xml:"Queue"` +} + +// UnmarshalXML - decodes XML data. +func (q *Queue) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + // Make subtype to avoid recursive UnmarshalXML(). + type queue Queue + parsedQueue := queue{} + if err := d.DecodeElement(&parsedQueue, &start); err != nil { + return err + } + + if len(parsedQueue.Events) == 0 { + return errors.New("missing event name(s)") + } + + eventStringSet := set.NewStringSet() + for _, eventName := range parsedQueue.Events { + if eventStringSet.Contains(eventName.String()) { + return &ErrDuplicateEventName{eventName} + } + + eventStringSet.Add(eventName.String()) + } + + *q = Queue(parsedQueue) + + return nil +} + +// Validate - checks whether queue has valid values or not. 
+func (q Queue) Validate(region string, targetList *TargetList) error { + if region != "" && q.ARN.region != region { + return &ErrUnknownRegion{q.ARN.region} + } + + if !targetList.Exists(q.ARN.TargetID) { + return &ErrARNNotFound{q.ARN} + } + + return nil +} + +// SetRegion - sets region value to queue's ARN. +func (q *Queue) SetRegion(region string) { + q.ARN.region = region +} + +// ToRulesMap - converts Queue to RulesMap +func (q Queue) ToRulesMap() RulesMap { + pattern := q.Filter.RuleList.Pattern() + return NewRulesMap(q.Events, pattern, q.ARN.TargetID) +} + +// Unused. Available for completion. +type lambda struct { + common + ARN string `xml:"CloudFunction"` +} + +// Unused. Available for completion. +type topic struct { + common + ARN string `xml:"Topic" json:"Topic"` +} + +// Config - notification configuration described in +// http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html +type Config struct { + XMLName xml.Name `xml:"NotificationConfiguration"` + QueueList []Queue `xml:"QueueConfiguration,omitempty"` + LambdaList []lambda `xml:"CloudFunctionConfiguration,omitempty"` + TopicList []topic `xml:"TopicConfiguration,omitempty"` +} + +// UnmarshalXML - decodes XML data. +func (conf *Config) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + // Make subtype to avoid recursive UnmarshalXML(). 
+ type config Config + parsedConfig := config{} + if err := d.DecodeElement(&parsedConfig, &start); err != nil { + return err + } + + if len(parsedConfig.QueueList) == 0 { + return errors.New("missing queue configuration(s)") + } + + for i, q1 := range parsedConfig.QueueList[:len(parsedConfig.QueueList)-1] { + for _, q2 := range parsedConfig.QueueList[i+1:] { + if reflect.DeepEqual(q1, q2) { + return &ErrDuplicateQueueConfiguration{q1} + } + } + } + + if len(parsedConfig.LambdaList) > 0 || len(parsedConfig.TopicList) > 0 { + return &ErrUnsupportedConfiguration{} + } + + *conf = Config(parsedConfig) + + return nil +} + +// Validate - checks whether config has valid values or not. +func (conf Config) Validate(region string, targetList *TargetList) error { + for _, queue := range conf.QueueList { + if err := queue.Validate(region, targetList); err != nil { + return err + } + + // TODO: Need to discuss/check why same ARN cannot be used in another queue configuration. + } + + return nil +} + +// SetRegion - sets region to all queue configuration. +func (conf *Config) SetRegion(region string) { + for i := range conf.QueueList { + conf.QueueList[i].SetRegion(region) + } +} + +// ToRulesMap - converts all queue configuration to RulesMap. +func (conf *Config) ToRulesMap() RulesMap { + rulesMap := make(RulesMap) + + for _, queue := range conf.QueueList { + rulesMap.Add(queue.ToRulesMap()) + } + + return rulesMap +} + +// ParseConfig - parses data in reader to notification configuration. 
+func ParseConfig(reader io.Reader, region string, targetList *TargetList) (*Config, error) { + var config Config + if err := xml.NewDecoder(reader).Decode(&config); err != nil { + return nil, err + } + + if len(config.QueueList) == 0 { + return nil, errors.New("missing queue configuration(s)") + } + + if err := config.Validate(region, targetList); err != nil { + return nil, err + } + + config.SetRegion(region) + + return &config, nil +} diff --git a/pkg/event/config_test.go b/pkg/event/config_test.go new file mode 100644 index 000000000..72b9e2436 --- /dev/null +++ b/pkg/event/config_test.go @@ -0,0 +1,961 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package event + +import ( + "encoding/xml" + "reflect" + "strings" + "testing" +) + +func TestValidateFilterRuleValue(t *testing.T) { + testCases := []struct { + value string + expectErr bool + }{ + {"foo/.", true}, + {"../foo", true}, + {`foo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/bazfoo/bar/baz`, true}, + {string([]byte{0xff, 0xfe, 0xfd}), true}, + {`foo\bar`, true}, + {"Hello/世界", false}, + } + + for i, testCase := range testCases { + err := ValidateFilterRuleValue(testCase.value) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + } +} + +func TestFilterRuleUnmarshalXML(t *testing.T) { + testCases := []struct { + data []byte + expectedResult *FilterRule + expectErr bool + }{ + {[]byte(``), nil, true}, + {[]byte(``), nil, true}, + {[]byte(``), nil, true}, + {[]byte(``), nil, true}, + {[]byte(`PrefixHello/世界`), nil, true}, 
+ {[]byte(`endsfoo/bar`), nil, true}, + {[]byte(`prefixHello/世界`), &FilterRule{"prefix", "Hello/世界"}, false}, + {[]byte(`suffixfoo/bar`), &FilterRule{"suffix", "foo/bar"}, false}, + } + + for i, testCase := range testCases { + result := &FilterRule{} + err := xml.Unmarshal(testCase.data, result) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestFilterRuleListUnmarshalXML(t *testing.T) { + testCases := []struct { + data []byte + expectedResult *FilterRuleList + expectErr bool + }{ + {[]byte(`suffixHello/世界suffixfoo/bar`), nil, true}, + {[]byte(`prefixHello/世界prefixfoo/bar`), nil, true}, + {[]byte(`prefixHello/世界`), &FilterRuleList{[]FilterRule{{"prefix", "Hello/世界"}}}, false}, + {[]byte(`suffixfoo/bar`), &FilterRuleList{[]FilterRule{{"suffix", "foo/bar"}}}, false}, + {[]byte(`prefixHello/世界suffixfoo/bar`), &FilterRuleList{[]FilterRule{{"prefix", "Hello/世界"}, {"suffix", "foo/bar"}}}, false}, + } + + for i, testCase := range testCases { + result := &FilterRuleList{} + err := xml.Unmarshal(testCase.data, result) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestFilterRuleListPattern(t *testing.T) { + testCases := []struct { + filterRuleList FilterRuleList + expectedResult string + }{ + {FilterRuleList{}, ""}, + {FilterRuleList{[]FilterRule{{"prefix", "Hello/世界"}}}, "Hello/世界*"}, + {FilterRuleList{[]FilterRule{{"suffix", "foo/bar"}}}, "*foo/bar"}, + 
{FilterRuleList{[]FilterRule{{"prefix", "Hello/世界"}, {"suffix", "foo/bar"}}}, "Hello/世界*foo/bar"}, + } + + for i, testCase := range testCases { + result := testCase.filterRuleList.Pattern() + + if result != testCase.expectedResult { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestQueueUnmarshalXML(t *testing.T) { + dataCase1 := []byte(` + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* +`) + + dataCase2 := []byte(` + + 1 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectCreated:Put +`) + + dataCase3 := []byte(` + + 1 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectCreated:Put + s3:ObjectCreated:Put +`) + + testCases := []struct { + data []byte + expectErr bool + }{ + {dataCase1, false}, + {dataCase2, false}, + {dataCase3, true}, + } + + for i, testCase := range testCases { + err := xml.Unmarshal(testCase.data, &Queue{}) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + } +} + +func TestQueueValidate(t *testing.T) { + var data []byte + data = []byte(` + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* +`) + queue1 := &Queue{} + if err := xml.Unmarshal(data, queue1); err != nil { + panic(err) + } + + data = []byte(` + + 1 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectCreated:Put +`) + queue2 := &Queue{} + if err := xml.Unmarshal(data, queue2); err != nil { + panic(err) + } + + data = []byte(` + + 1 + + arn:minio:sqs:eu-west-2:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* +`) + queue3 := &Queue{} + if err := xml.Unmarshal(data, queue3); err != nil { + panic(err) + } + + targetList1 := NewTargetList() + + 
targetList2 := NewTargetList() + if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil { + panic(err) + } + + testCases := []struct { + queue *Queue + region string + targetList *TargetList + expectErr bool + }{ + {queue1, "eu-west-1", nil, true}, + {queue2, "us-east-1", targetList1, true}, + {queue3, "", targetList2, false}, + {queue2, "us-east-1", targetList2, false}, + } + + for i, testCase := range testCases { + err := testCase.queue.Validate(testCase.region, testCase.targetList) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + } +} + +func TestQueueSetRegion(t *testing.T) { + var data []byte + data = []byte(` + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* +`) + queue1 := &Queue{} + if err := xml.Unmarshal(data, queue1); err != nil { + panic(err) + } + + data = []byte(` + + 1 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs::1:webhook + s3:ObjectCreated:Put +`) + queue2 := &Queue{} + if err := xml.Unmarshal(data, queue2); err != nil { + panic(err) + } + + testCases := []struct { + queue *Queue + region string + expectedResult ARN + }{ + {queue1, "eu-west-1", ARN{TargetID{"1", "webhook"}, "eu-west-1"}}, + {queue1, "", ARN{TargetID{"1", "webhook"}, ""}}, + {queue2, "us-east-1", ARN{TargetID{"1", "webhook"}, "us-east-1"}}, + {queue2, "", ARN{TargetID{"1", "webhook"}, ""}}, + } + + for i, testCase := range testCases { + testCase.queue.SetRegion(testCase.region) + result := testCase.queue.ARN + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestQueueToRulesMap(t *testing.T) { + var data []byte + data = []byte(` + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* +`) 
+ queueCase1 := &Queue{} + if err := xml.Unmarshal(data, queueCase1); err != nil { + panic(err) + } + + data = []byte(` + + 1 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectCreated:Put +`) + queueCase2 := &Queue{} + if err := xml.Unmarshal(data, queueCase2); err != nil { + panic(err) + } + + rulesMapCase1 := NewRulesMap([]Name{ObjectAccessedAll, ObjectCreatedAll, ObjectRemovedAll}, "*", TargetID{"1", "webhook"}) + rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedPut}, "images/*jpg", TargetID{"1", "webhook"}) + + testCases := []struct { + queue *Queue + expectedResult RulesMap + }{ + {queueCase1, rulesMapCase1}, + {queueCase2, rulesMapCase2}, + } + + for i, testCase := range testCases { + result := testCase.queue.ToRulesMap() + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestConfigUnmarshalXML(t *testing.T) { + dataCase1 := []byte(` + + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* + + +`) + + dataCase2 := []byte(` + + + 1 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectCreated:Put + + +`) + + dataCase3 := []byte(` + + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* + + + 2 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectCreated:Put + + +`) + + dataCase4 := []byte(` + + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* + + + 1 + + + + suffix + .jpg + + + + arn:aws:lambda:us-west-2:444455556666:cloud-function-A + s3:ObjectCreated:Put + + + arn:aws:sns:us-west-2:444455556666:sns-notification-one + s3:ObjectCreated:* + + +`) + testCases := []struct { + data []byte + expectErr bool + }{ + {dataCase1, false}, + {dataCase2, false}, + 
{dataCase3, false}, + {dataCase4, true}, + } + + for i, testCase := range testCases { + err := xml.Unmarshal(testCase.data, &Config{}) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + } +} + +func TestConfigValidate(t *testing.T) { + var data []byte + data = []byte(` + + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* + + +`) + config1 := &Config{} + if err := xml.Unmarshal(data, config1); err != nil { + panic(err) + } + + data = []byte(` + + + 1 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectCreated:Put + + +`) + config2 := &Config{} + if err := xml.Unmarshal(data, config2); err != nil { + panic(err) + } + + data = []byte(` + + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* + + + 2 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectCreated:Put + + +`) + config3 := &Config{} + if err := xml.Unmarshal(data, config3); err != nil { + panic(err) + } + + targetList1 := NewTargetList() + + targetList2 := NewTargetList() + if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil { + panic(err) + } + + testCases := []struct { + config *Config + region string + targetList *TargetList + expectErr bool + }{ + {config1, "eu-west-1", nil, true}, + {config2, "us-east-1", targetList1, true}, + {config3, "", targetList2, false}, + {config2, "us-east-1", targetList2, false}, + } + + for i, testCase := range testCases { + err := testCase.config.Validate(testCase.region, testCase.targetList) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + } +} + +func TestConfigSetRegion(t *testing.T) { + var data []byte + data = 
[]byte(` + + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* + + +`) + config1 := &Config{} + if err := xml.Unmarshal(data, config1); err != nil { + panic(err) + } + + data = []byte(` + + + 1 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs::1:webhook + s3:ObjectCreated:Put + + +`) + config2 := &Config{} + if err := xml.Unmarshal(data, config2); err != nil { + panic(err) + } + + data = []byte(` + + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* + + + 2 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs:us-east-1:2:amqp + s3:ObjectCreated:Put + + +`) + config3 := &Config{} + if err := xml.Unmarshal(data, config3); err != nil { + panic(err) + } + + testCases := []struct { + config *Config + region string + expectedResult []ARN + }{ + {config1, "eu-west-1", []ARN{{TargetID{"1", "webhook"}, "eu-west-1"}}}, + {config1, "", []ARN{{TargetID{"1", "webhook"}, ""}}}, + {config2, "us-east-1", []ARN{{TargetID{"1", "webhook"}, "us-east-1"}}}, + {config2, "", []ARN{{TargetID{"1", "webhook"}, ""}}}, + {config3, "us-east-1", []ARN{{TargetID{"1", "webhook"}, "us-east-1"}, {TargetID{"2", "amqp"}, "us-east-1"}}}, + {config3, "", []ARN{{TargetID{"1", "webhook"}, ""}, {TargetID{"2", "amqp"}, ""}}}, + } + + for i, testCase := range testCases { + testCase.config.SetRegion(testCase.region) + result := []ARN{} + for _, queue := range testCase.config.QueueList { + result = append(result, queue.ARN) + } + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestConfigToRulesMap(t *testing.T) { + var data []byte + data = []byte(` + + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* + + +`) + config1 := &Config{} + if err := xml.Unmarshal(data, config1); err != nil { + panic(err) 
+ } + + data = []byte(` + + + 1 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs::1:webhook + s3:ObjectCreated:Put + + +`) + config2 := &Config{} + if err := xml.Unmarshal(data, config2); err != nil { + panic(err) + } + + data = []byte(` + + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* + + + 2 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs:us-east-1:2:amqp + s3:ObjectCreated:Put + + +`) + config3 := &Config{} + if err := xml.Unmarshal(data, config3); err != nil { + panic(err) + } + + rulesMapCase1 := NewRulesMap([]Name{ObjectAccessedAll, ObjectCreatedAll, ObjectRemovedAll}, "*", TargetID{"1", "webhook"}) + + rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedPut}, "images/*jpg", TargetID{"1", "webhook"}) + + rulesMapCase3 := NewRulesMap([]Name{ObjectAccessedAll, ObjectCreatedAll, ObjectRemovedAll}, "*", TargetID{"1", "webhook"}) + rulesMapCase3.add([]Name{ObjectCreatedPut}, "images/*jpg", TargetID{"2", "amqp"}) + + testCases := []struct { + config *Config + expectedResult RulesMap + }{ + {config1, rulesMapCase1}, + {config2, rulesMapCase2}, + {config3, rulesMapCase3}, + } + + for i, testCase := range testCases { + result := testCase.config.ToRulesMap() + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestParseConfig(t *testing.T) { + reader1 := strings.NewReader(` + + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* + + +`) + + reader2 := strings.NewReader(` + + + 1 + + + + prefix + images/ + + + suffix + jpg + + + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectCreated:Put + + +`) + + reader3 := strings.NewReader(` + + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* + + + 2 + + + + prefix + images/ + + + suffix + jpg + + + + 
arn:minio:sqs:us-east-1:1:webhook + s3:ObjectCreated:Put + + +`) + + reader4 := strings.NewReader(` + + + 1 + + arn:minio:sqs:us-east-1:1:webhook + s3:ObjectAccessed:* + s3:ObjectCreated:* + s3:ObjectRemoved:* + + + 1 + + + + suffix + .jpg + + + + arn:aws:lambda:us-west-2:444455556666:cloud-function-A + s3:ObjectCreated:Put + + + arn:aws:sns:us-west-2:444455556666:sns-notification-one + s3:ObjectCreated:* + + +`) + + targetList1 := NewTargetList() + + targetList2 := NewTargetList() + if err := targetList2.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil { + panic(err) + } + + testCases := []struct { + reader *strings.Reader + region string + targetList *TargetList + expectErr bool + }{ + {reader1, "eu-west-1", nil, true}, + {reader2, "us-east-1", targetList1, true}, + {reader4, "us-east-1", targetList1, true}, + {reader3, "", targetList2, false}, + {reader2, "us-east-1", targetList2, false}, + } + + for i, testCase := range testCases { + if _, err := testCase.reader.Seek(0, 0); err != nil { + panic(err) + } + _, err := ParseConfig(testCase.reader, testCase.region, testCase.targetList) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + } +} diff --git a/pkg/event/errors.go b/pkg/event/errors.go new file mode 100644 index 000000000..0c584a0c9 --- /dev/null +++ b/pkg/event/errors.go @@ -0,0 +1,152 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package event + +import ( + "encoding/xml" + "fmt" +) + +// IsEventError - checks whether given error is event error or not. +func IsEventError(err error) bool { + switch err.(type) { + case ErrInvalidFilterName, *ErrInvalidFilterName: + return true + case ErrFilterNamePrefix, *ErrFilterNamePrefix: + return true + case ErrFilterNameSuffix, *ErrFilterNameSuffix: + return true + case ErrInvalidFilterValue, *ErrInvalidFilterValue: + return true + case ErrDuplicateEventName, *ErrDuplicateEventName: + return true + case ErrUnsupportedConfiguration, *ErrUnsupportedConfiguration: + return true + case ErrDuplicateQueueConfiguration, *ErrDuplicateQueueConfiguration: + return true + case ErrUnknownRegion, *ErrUnknownRegion: + return true + case ErrARNNotFound, *ErrARNNotFound: + return true + case ErrInvalidARN, *ErrInvalidARN: + return true + case ErrInvalidEventName, *ErrInvalidEventName: + return true + } + + return false +} + +// ErrInvalidFilterName - invalid filter name error. +type ErrInvalidFilterName struct { + FilterName string +} + +func (err ErrInvalidFilterName) Error() string { + return fmt.Sprintf("invalid filter name '%v'", err.FilterName) +} + +// ErrFilterNamePrefix - more than one prefix usage error. +type ErrFilterNamePrefix struct{} + +func (err ErrFilterNamePrefix) Error() string { + return fmt.Sprintf("more than one prefix in filter rule") +} + +// ErrFilterNameSuffix - more than one suffix usage error. +type ErrFilterNameSuffix struct{} + +func (err ErrFilterNameSuffix) Error() string { + return fmt.Sprintf("more than one suffix in filter rule") +} + +// ErrInvalidFilterValue - invalid filter value error. 
+type ErrInvalidFilterValue struct { + FilterValue string +} + +func (err ErrInvalidFilterValue) Error() string { + return fmt.Sprintf("invalid filter value '%v'", err.FilterValue) +} + +// ErrDuplicateEventName - duplicate event name error. +type ErrDuplicateEventName struct { + EventName Name +} + +func (err ErrDuplicateEventName) Error() string { + return fmt.Sprintf("duplicate event name '%v' found", err.EventName) +} + +// ErrUnsupportedConfiguration - unsupported configuration error. +type ErrUnsupportedConfiguration struct{} + +func (err ErrUnsupportedConfiguration) Error() string { + return "topic or cloud function configuration is not supported" +} + +// ErrDuplicateQueueConfiguration - duplicate queue configuration error. +type ErrDuplicateQueueConfiguration struct { + Queue Queue +} + +func (err ErrDuplicateQueueConfiguration) Error() string { + var message string + if data, xerr := xml.Marshal(err.Queue); xerr != nil { + message = fmt.Sprintf("%+v", err.Queue) + } else { + message = string(data) + } + + return fmt.Sprintf("duplicate queue configuration %v", message) +} + +// ErrUnknownRegion - unknown region error. +type ErrUnknownRegion struct { + Region string +} + +func (err ErrUnknownRegion) Error() string { + return fmt.Sprintf("unknown region '%v'", err.Region) +} + +// ErrARNNotFound - ARN not found error. +type ErrARNNotFound struct { + ARN ARN +} + +func (err ErrARNNotFound) Error() string { + return fmt.Sprintf("ARN '%v' not found", err.ARN) +} + +// ErrInvalidARN - invalid ARN error. +type ErrInvalidARN struct { + ARN string +} + +func (err ErrInvalidARN) Error() string { + return fmt.Sprintf("invalid ARN '%v'", err.ARN) +} + +// ErrInvalidEventName - invalid event name error. 
+type ErrInvalidEventName struct { + Name string +} + +func (err ErrInvalidEventName) Error() string { + return fmt.Sprintf("invalid event name '%v'", err.Name) +} diff --git a/pkg/event/event.go b/pkg/event/event.go new file mode 100644 index 000000000..288b82153 --- /dev/null +++ b/pkg/event/event.go @@ -0,0 +1,88 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package event + +const ( + // NamespaceFormat - namespace log format used in some event targets. + NamespaceFormat = "namespace" + + // AccessFormat - access log format used in some event targets. + AccessFormat = "access" + + // AMZTimeFormat - event time format. + AMZTimeFormat = "2006-01-02T15:04:05Z" +) + +// Identity represents access key who caused the event. +type Identity struct { + PrincipalID string `json:"principalId"` +} + +// Bucket represents bucket metadata of the event. +type Bucket struct { + Name string `json:"name"` + OwnerIdentity Identity `json:"ownerIdentity"` + ARN string `json:"arn"` +} + +// Object represents object metadata of the event. +type Object struct { + Key string `json:"key"` + Size int64 `json:"size,omitempty"` + ETag string `json:"eTag,omitempty"` + ContentType string `json:"contentType,omitempty"` + UserMetadata map[string]string `json:"userMetadata,omitempty"` + VersionID string `json:"versionId,omitempty"` + Sequencer string `json:"sequencer"` +} + +// Metadata represents event metadata. 
+type Metadata struct { + SchemaVersion string `json:"s3SchemaVersion"` + ConfigurationID string `json:"configurationId"` + Bucket Bucket `json:"bucket"` + Object Object `json:"object"` +} + +// Source represents client information who triggered the event. +type Source struct { + Host string `json:"host"` + Port string `json:"port"` + UserAgent string `json:"userAgent"` +} + +// Event represents event notification information defined in +// http://docs.aws.amazon.com/AmazonS3/latest/dev/notification-content-structure.html. +type Event struct { + EventVersion string `json:"eventVersion"` + EventSource string `json:"eventSource"` + AwsRegion string `json:"awsRegion"` + EventTime string `json:"eventTime"` + EventName Name `json:"eventName"` + UserIdentity Identity `json:"userIdentity"` + RequestParameters map[string]string `json:"requestParameters"` + ResponseElements map[string]string `json:"responseElements"` + S3 Metadata `json:"s3"` + Source Source `json:"source"` +} + +// Log represents event information for some event targets. +type Log struct { + EventName Name + Key string + Records []Event +} diff --git a/pkg/event/name.go b/pkg/event/name.go new file mode 100644 index 000000000..24d78176e --- /dev/null +++ b/pkg/event/name.go @@ -0,0 +1,152 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package event + +import ( + "encoding/json" + "encoding/xml" +) + +// Name - event type enum. 
+// Refer http://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html#notification-how-to-event-types-and-destinations +type Name int + +// Values of Name +const ( + ObjectAccessedAll Name = 1 + iota + ObjectAccessedGet + ObjectAccessedHead + ObjectCreatedAll + ObjectCreatedCompleteMultipartUpload + ObjectCreatedCopy + ObjectCreatedPost + ObjectCreatedPut + ObjectRemovedAll + ObjectRemovedDelete +) + +// Expand - returns expanded values of abbreviated event type. +func (name Name) Expand() []Name { + switch name { + case ObjectAccessedAll: + return []Name{ObjectAccessedGet, ObjectAccessedHead} + case ObjectCreatedAll: + return []Name{ObjectCreatedCompleteMultipartUpload, ObjectCreatedCopy, ObjectCreatedPost, ObjectCreatedPut} + case ObjectRemovedAll: + return []Name{ObjectRemovedDelete} + default: + return []Name{name} + } +} + +// String - returns string representation of event type. +func (name Name) String() string { + switch name { + case ObjectAccessedAll: + return "s3:ObjectAccessed:*" + case ObjectAccessedGet: + return "s3:ObjectAccessed:Get" + case ObjectAccessedHead: + return "s3:ObjectAccessed:Head" + case ObjectCreatedAll: + return "s3:ObjectCreated:*" + case ObjectCreatedCompleteMultipartUpload: + return "s3:ObjectCreated:CompleteMultipartUpload" + case ObjectCreatedCopy: + return "s3:ObjectCreated:Copy" + case ObjectCreatedPost: + return "s3:ObjectCreated:Post" + case ObjectCreatedPut: + return "s3:ObjectCreated:Put" + case ObjectRemovedAll: + return "s3:ObjectRemoved:*" + case ObjectRemovedDelete: + return "s3:ObjectRemoved:Delete" + } + + return "" +} + +// MarshalXML - encodes to XML data. +func (name Name) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + return e.EncodeElement(name.String(), start) +} + +// UnmarshalXML - decodes XML data. 
+func (name *Name) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + var s string + if err := d.DecodeElement(&s, &start); err != nil { + return err + } + + eventName, err := ParseName(s) + if err != nil { + return err + } + + *name = eventName + return nil +} + +// MarshalJSON - encodes to JSON data. +func (name Name) MarshalJSON() ([]byte, error) { + return json.Marshal(name.String()) +} + +// UnmarshalJSON - decodes JSON data. +func (name *Name) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + eventName, err := ParseName(s) + if err != nil { + return err + } + + *name = eventName + return nil +} + +// ParseName - parses string to Name. +func ParseName(s string) (Name, error) { + switch s { + case "s3:ObjectAccessed:*": + return ObjectAccessedAll, nil + case "s3:ObjectAccessed:Get": + return ObjectAccessedGet, nil + case "s3:ObjectAccessed:Head": + return ObjectAccessedHead, nil + case "s3:ObjectCreated:*": + return ObjectCreatedAll, nil + case "s3:ObjectCreated:CompleteMultipartUpload": + return ObjectCreatedCompleteMultipartUpload, nil + case "s3:ObjectCreated:Copy": + return ObjectCreatedCopy, nil + case "s3:ObjectCreated:Post": + return ObjectCreatedPost, nil + case "s3:ObjectCreated:Put": + return ObjectCreatedPut, nil + case "s3:ObjectRemoved:*": + return ObjectRemovedAll, nil + case "s3:ObjectRemoved:Delete": + return ObjectRemovedDelete, nil + default: + return 0, &ErrInvalidEventName{s} + } +} diff --git a/pkg/event/name_test.go b/pkg/event/name_test.go new file mode 100644 index 000000000..ad8d30681 --- /dev/null +++ b/pkg/event/name_test.go @@ -0,0 +1,220 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package event + +import ( + "encoding/json" + "encoding/xml" + "reflect" + "testing" +) + +func TestNameExpand(t *testing.T) { + testCases := []struct { + name Name + expectedResult []Name + }{ + {ObjectAccessedAll, []Name{ObjectAccessedGet, ObjectAccessedHead}}, + {ObjectCreatedAll, []Name{ObjectCreatedCompleteMultipartUpload, ObjectCreatedCopy, ObjectCreatedPost, ObjectCreatedPut}}, + {ObjectRemovedAll, []Name{ObjectRemovedDelete}}, + {ObjectAccessedHead, []Name{ObjectAccessedHead}}, + } + + for i, testCase := range testCases { + result := testCase.name.Expand() + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestNameString(t *testing.T) { + var blankName Name + + testCases := []struct { + name Name + expectedResult string + }{ + {ObjectAccessedAll, "s3:ObjectAccessed:*"}, + {ObjectAccessedGet, "s3:ObjectAccessed:Get"}, + {ObjectAccessedHead, "s3:ObjectAccessed:Head"}, + {ObjectCreatedAll, "s3:ObjectCreated:*"}, + {ObjectCreatedCompleteMultipartUpload, "s3:ObjectCreated:CompleteMultipartUpload"}, + {ObjectCreatedCopy, "s3:ObjectCreated:Copy"}, + {ObjectCreatedPost, "s3:ObjectCreated:Post"}, + {ObjectCreatedPut, "s3:ObjectCreated:Put"}, + {ObjectRemovedAll, "s3:ObjectRemoved:*"}, + {ObjectRemovedDelete, "s3:ObjectRemoved:Delete"}, + {blankName, ""}, + } + + for i, testCase := range testCases { + result := testCase.name.String() + + if result != testCase.expectedResult { + t.Fatalf("test %v: result: expected: %v, got: 
%v", i+1, testCase.expectedResult, result) + } + } +} + +func TestNameMarshalXML(t *testing.T) { + var blankName Name + + testCases := []struct { + name Name + expectedData []byte + expectErr bool + }{ + {ObjectAccessedAll, []byte("s3:ObjectAccessed:*"), false}, + {ObjectRemovedDelete, []byte("s3:ObjectRemoved:Delete"), false}, + {blankName, []byte(""), false}, + } + + for i, testCase := range testCases { + data, err := xml.Marshal(testCase.name) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(data, testCase.expectedData) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data)) + } + } + } +} + +func TestNameUnmarshalXML(t *testing.T) { + var blankName Name + + testCases := []struct { + data []byte + expectedName Name + expectErr bool + }{ + {[]byte("s3:ObjectAccessed:*"), ObjectAccessedAll, false}, + {[]byte("s3:ObjectRemoved:Delete"), ObjectRemovedDelete, false}, + {[]byte(""), blankName, true}, + } + + for i, testCase := range testCases { + var name Name + err := xml.Unmarshal(testCase.data, &name) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(name, testCase.expectedName) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedName, name) + } + } + } +} + +func TestNameMarshalJSON(t *testing.T) { + var blankName Name + + testCases := []struct { + name Name + expectedData []byte + expectErr bool + }{ + {ObjectAccessedAll, []byte(`"s3:ObjectAccessed:*"`), false}, + {ObjectRemovedDelete, []byte(`"s3:ObjectRemoved:Delete"`), false}, + {blankName, []byte(`""`), false}, + } + + for i, testCase := range testCases { + data, err := json.Marshal(testCase.name) + expectErr := (err 
!= nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(data, testCase.expectedData) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data)) + } + } + } +} + +func TestNameUnmarshalJSON(t *testing.T) { + var blankName Name + + testCases := []struct { + data []byte + expectedName Name + expectErr bool + }{ + {[]byte(`"s3:ObjectAccessed:*"`), ObjectAccessedAll, false}, + {[]byte(`"s3:ObjectRemoved:Delete"`), ObjectRemovedDelete, false}, + {[]byte(`""`), blankName, true}, + } + + for i, testCase := range testCases { + var name Name + err := json.Unmarshal(testCase.data, &name) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(name, testCase.expectedName) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedName, name) + } + } + } +} + +func TestParseName(t *testing.T) { + var blankName Name + + testCases := []struct { + s string + expectedName Name + expectErr bool + }{ + {"s3:ObjectAccessed:*", ObjectAccessedAll, false}, + {"s3:ObjectRemoved:Delete", ObjectRemovedDelete, false}, + {"", blankName, true}, + } + + for i, testCase := range testCases { + name, err := ParseName(testCase.s) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(name, testCase.expectedName) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedName, name) + } + } + } +} diff --git a/pkg/event/rules.go b/pkg/event/rules.go new file mode 100644 index 000000000..bbec94124 --- /dev/null +++ b/pkg/event/rules.go @@ -0,0 +1,102 @@ +/* + * Minio Cloud 
Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package event + +import ( + "strings" + + "github.com/minio/minio/pkg/wildcard" +) + +// NewPattern - create new pattern for prefix/suffix. +func NewPattern(prefix, suffix string) (pattern string) { + if prefix != "" { + if !strings.HasSuffix(prefix, "*") { + prefix += "*" + } + + pattern = prefix + } + + if suffix != "" { + if !strings.HasPrefix(suffix, "*") { + suffix = "*" + suffix + } + + pattern += suffix + } + + pattern = strings.Replace(pattern, "**", "*", -1) + + return pattern +} + +// Rules - event rules +type Rules map[string]TargetIDSet + +// Add - adds pattern and target ID. +func (rules Rules) Add(pattern string, targetID TargetID) { + rules[pattern] = NewTargetIDSet(targetID).Union(rules[pattern]) +} + +// Match - returns TargetIDSet matching object name in rules. +func (rules Rules) Match(objectName string) TargetIDSet { + targetIDs := NewTargetIDSet() + + for pattern, targetIDSet := range rules { + if wildcard.MatchSimple(pattern, objectName) { + targetIDs = targetIDs.Union(targetIDSet) + } + } + + return targetIDs +} + +// Clone - returns copy of this rules. +func (rules Rules) Clone() Rules { + rulesCopy := make(Rules) + + for pattern, targetIDSet := range rules { + rulesCopy[pattern] = targetIDSet.Clone() + } + + return rulesCopy +} + +// Union - returns union with given rules as new rules. 
+func (rules Rules) Union(rules2 Rules) Rules { + nrules := rules.Clone() + + for pattern, targetIDSet := range rules2 { + nrules[pattern] = nrules[pattern].Union(targetIDSet) + } + + return nrules +} + +// Difference - returns diffrence with given rules as new rules. +func (rules Rules) Difference(rules2 Rules) Rules { + nrules := make(Rules) + + for pattern, targetIDSet := range rules { + if nv := targetIDSet.Difference(rules2[pattern]); len(nv) > 0 { + nrules[pattern] = nv + } + } + + return nrules +} diff --git a/pkg/event/rules_test.go b/pkg/event/rules_test.go new file mode 100644 index 000000000..dea4ecba3 --- /dev/null +++ b/pkg/event/rules_test.go @@ -0,0 +1,275 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package event + +import ( + "reflect" + "testing" +) + +func TestNewPattern(t *testing.T) { + testCases := []struct { + prefix string + suffix string + expectedResult string + }{ + {"", "", ""}, + {"*", "", "*"}, + {"", "*", "*"}, + {"images/", "", "images/*"}, + {"images/*", "", "images/*"}, + {"", "jpg", "*jpg"}, + {"", "*jpg", "*jpg"}, + {"images/", "jpg", "images/*jpg"}, + {"images/*", "jpg", "images/*jpg"}, + {"images/", "*jpg", "images/*jpg"}, + {"images/*", "*jpg", "images/*jpg"}, + {"201*/images/", "jpg", "201*/images/*jpg"}, + } + + for i, testCase := range testCases { + result := NewPattern(testCase.prefix, testCase.suffix) + + if result != testCase.expectedResult { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestRulesAdd(t *testing.T) { + rulesCase1 := make(Rules) + + rulesCase2 := make(Rules) + rulesCase2.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) + + rulesCase3 := make(Rules) + rulesCase3.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) + + rulesCase4 := make(Rules) + rulesCase4.Add(NewPattern("", "*.jpg"), TargetID{"1", "webhook"}) + + rulesCase5 := make(Rules) + + rulesCase6 := make(Rules) + rulesCase6.Add(NewPattern("", "*.jpg"), TargetID{"1", "webhook"}) + + rulesCase7 := make(Rules) + rulesCase7.Add(NewPattern("", "*.jpg"), TargetID{"1", "webhook"}) + + rulesCase8 := make(Rules) + rulesCase8.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) + + testCases := []struct { + rules Rules + pattern string + targetID TargetID + expectedResult int + }{ + {rulesCase1, NewPattern("*", ""), TargetID{"1", "webhook"}, 1}, + {rulesCase2, NewPattern("*", ""), TargetID{"2", "amqp"}, 2}, + {rulesCase3, NewPattern("2010*", ""), TargetID{"1", "webhook"}, 1}, + {rulesCase4, NewPattern("*", ""), TargetID{"1", "webhook"}, 2}, + {rulesCase5, NewPattern("", "*.jpg"), TargetID{"1", "webhook"}, 1}, + {rulesCase6, NewPattern("", "*"), TargetID{"2", "amqp"}, 2}, + {rulesCase7, 
NewPattern("", "*.jpg"), TargetID{"1", "webhook"}, 1}, + {rulesCase8, NewPattern("", "*.jpg"), TargetID{"1", "webhook"}, 2}, + } + + for i, testCase := range testCases { + testCase.rules.Add(testCase.pattern, testCase.targetID) + result := len(testCase.rules) + + if result != testCase.expectedResult { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestRulesMatch(t *testing.T) { + rulesCase1 := make(Rules) + + rulesCase2 := make(Rules) + rulesCase2.Add(NewPattern("*", "*"), TargetID{"1", "webhook"}) + + rulesCase3 := make(Rules) + rulesCase3.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) + rulesCase3.Add(NewPattern("", "*.png"), TargetID{"2", "amqp"}) + + rulesCase4 := make(Rules) + rulesCase4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) + + testCases := []struct { + rules Rules + objectName string + expectedResult TargetIDSet + }{ + {rulesCase1, "photos.jpg", NewTargetIDSet()}, + {rulesCase2, "photos.jpg", NewTargetIDSet(TargetID{"1", "webhook"})}, + {rulesCase3, "2010/photos.jpg", NewTargetIDSet(TargetID{"1", "webhook"})}, + {rulesCase4, "2000/photos.jpg", NewTargetIDSet()}, + } + + for i, testCase := range testCases { + result := testCase.rules.Match(testCase.objectName) + + if !reflect.DeepEqual(testCase.expectedResult, result) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestRulesClone(t *testing.T) { + rulesCase1 := make(Rules) + + rulesCase2 := make(Rules) + rulesCase2.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) + + rulesCase3 := make(Rules) + rulesCase3.Add(NewPattern("", "*.jpg"), TargetID{"1", "webhook"}) + + testCases := []struct { + rules Rules + prefix string + targetID TargetID + }{ + {rulesCase1, "2010*", TargetID{"1", "webhook"}}, + {rulesCase2, "2000*", TargetID{"2", "amqp"}}, + {rulesCase3, "2010*", TargetID{"1", "webhook"}}, + } + + for i, testCase := range testCases { + result := 
testCase.rules.Clone() + + if !reflect.DeepEqual(result, testCase.rules) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.rules, result) + } + + result.Add(NewPattern(testCase.prefix, ""), testCase.targetID) + if reflect.DeepEqual(result, testCase.rules) { + t.Fatalf("test %v: result: expected: not equal, got: equal", i+1) + } + } +} + +func TestRulesUnion(t *testing.T) { + rulesCase1 := make(Rules) + rules2Case1 := make(Rules) + expectedResultCase1 := make(Rules) + + rulesCase2 := make(Rules) + rules2Case2 := make(Rules) + rules2Case2.Add(NewPattern("*", ""), TargetID{"1", "webhook"}) + expectedResultCase2 := make(Rules) + expectedResultCase2.Add(NewPattern("*", ""), TargetID{"1", "webhook"}) + + rulesCase3 := make(Rules) + rulesCase3.Add(NewPattern("", "*"), TargetID{"1", "webhook"}) + rules2Case3 := make(Rules) + expectedResultCase3 := make(Rules) + expectedResultCase3.Add(NewPattern("", "*"), TargetID{"1", "webhook"}) + + rulesCase4 := make(Rules) + rulesCase4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) + rules2Case4 := make(Rules) + rules2Case4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) + expectedResultCase4 := make(Rules) + expectedResultCase4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) + + rulesCase5 := make(Rules) + rulesCase5.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) + rulesCase5.Add(NewPattern("", "*.png"), TargetID{"2", "amqp"}) + rules2Case5 := make(Rules) + rules2Case5.Add(NewPattern("*", ""), TargetID{"1", "webhook"}) + expectedResultCase5 := make(Rules) + expectedResultCase5.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) + expectedResultCase5.Add(NewPattern("", "*.png"), TargetID{"2", "amqp"}) + expectedResultCase5.Add(NewPattern("*", ""), TargetID{"1", "webhook"}) + + testCases := []struct { + rules Rules + rules2 Rules + expectedResult Rules + }{ + {rulesCase1, rules2Case1, expectedResultCase1}, + {rulesCase2, rules2Case2, expectedResultCase2}, + {rulesCase3, rules2Case3, 
expectedResultCase3}, + {rulesCase4, rules2Case4, expectedResultCase4}, + {rulesCase5, rules2Case5, expectedResultCase5}, + } + + for i, testCase := range testCases { + result := testCase.rules.Union(testCase.rules2) + + if !reflect.DeepEqual(testCase.expectedResult, result) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestRulesDifference(t *testing.T) { + rulesCase1 := make(Rules) + rules2Case1 := make(Rules) + expectedResultCase1 := make(Rules) + + rulesCase2 := make(Rules) + rules2Case2 := make(Rules) + rules2Case2.Add(NewPattern("*", "*"), TargetID{"1", "webhook"}) + expectedResultCase2 := make(Rules) + + rulesCase3 := make(Rules) + rulesCase3.Add(NewPattern("*", "*"), TargetID{"1", "webhook"}) + rules2Case3 := make(Rules) + expectedResultCase3 := make(Rules) + expectedResultCase3.Add(NewPattern("*", "*"), TargetID{"1", "webhook"}) + + rulesCase4 := make(Rules) + rulesCase4.Add(NewPattern("*", "*"), TargetID{"1", "webhook"}) + rules2Case4 := make(Rules) + rules2Case4.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) + rules2Case4.Add(NewPattern("", "*.png"), TargetID{"2", "amqp"}) + expectedResultCase4 := make(Rules) + expectedResultCase4.Add(NewPattern("*", "*"), TargetID{"1", "webhook"}) + + rulesCase5 := make(Rules) + rulesCase5.Add(NewPattern("*", ""), TargetID{"1", "webhook"}) + rulesCase5.Add(NewPattern("", "*"), TargetID{"2", "amqp"}) + rules2Case5 := make(Rules) + rules2Case5.Add(NewPattern("2010*", ""), TargetID{"1", "webhook"}) + rules2Case5.Add(NewPattern("", "*"), TargetID{"2", "amqp"}) + expectedResultCase5 := make(Rules) + expectedResultCase5.Add(NewPattern("*", ""), TargetID{"1", "webhook"}) + + testCases := []struct { + rules Rules + rules2 Rules + expectedResult Rules + }{ + {rulesCase1, rules2Case1, expectedResultCase1}, + {rulesCase2, rules2Case2, expectedResultCase2}, + {rulesCase3, rules2Case3, expectedResultCase3}, + {rulesCase4, rules2Case4, expectedResultCase4}, + 
{rulesCase5, rules2Case5, expectedResultCase5}, + } + + for i, testCase := range testCases { + result := testCase.rules.Difference(testCase.rules2) + + if !reflect.DeepEqual(testCase.expectedResult, result) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} diff --git a/pkg/event/rulesmap.go b/pkg/event/rulesmap.go new file mode 100644 index 000000000..84b2acd5c --- /dev/null +++ b/pkg/event/rulesmap.go @@ -0,0 +1,78 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package event + +// RulesMap - map of rules for every event name. +type RulesMap map[Name]Rules + +// add - adds event names, prefixes, suffixes and target ID to rules map. +func (rulesMap RulesMap) add(eventNames []Name, pattern string, targetID TargetID) { + rules := make(Rules) + rules.Add(pattern, targetID) + + for _, eventName := range eventNames { + for _, name := range eventName.Expand() { + rulesMap[name] = rulesMap[name].Union(rules) + } + } +} + +// Clone - returns copy of this rules map. +func (rulesMap RulesMap) Clone() RulesMap { + rulesMapCopy := make(RulesMap) + + for eventName, rules := range rulesMap { + rulesMapCopy[eventName] = rules.Clone() + } + + return rulesMapCopy +} + +// Add - adds given rules map. 
+func (rulesMap RulesMap) Add(rulesMap2 RulesMap) { + for eventName, rules := range rulesMap2 { + rulesMap[eventName] = rules.Union(rulesMap[eventName]) + } +} + +// Remove - removes given rules map. +func (rulesMap RulesMap) Remove(rulesMap2 RulesMap) { + for eventName, rules := range rulesMap { + if nr := rules.Difference(rulesMap2[eventName]); len(nr) != 0 { + rulesMap[eventName] = nr + } else { + delete(rulesMap, eventName) + } + } +} + +// Match - returns TargetIDSet matching object name and event name in rules map. +func (rulesMap RulesMap) Match(eventName Name, objectName string) TargetIDSet { + return rulesMap[eventName].Match(objectName) +} + +// NewRulesMap - creates new rules map with given values. +func NewRulesMap(eventNames []Name, pattern string, targetID TargetID) RulesMap { + // If pattern is empty, add '*' wildcard to match all. + if pattern == "" { + pattern = "*" + } + + rulesMap := make(RulesMap) + rulesMap.add(eventNames, pattern, targetID) + return rulesMap +} diff --git a/pkg/event/rulesmap_test.go b/pkg/event/rulesmap_test.go new file mode 100644 index 000000000..e4d02bf3c --- /dev/null +++ b/pkg/event/rulesmap_test.go @@ -0,0 +1,182 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package event + +import ( + "reflect" + "testing" +) + +func TestRulesMapClone(t *testing.T) { + rulesMapCase1 := make(RulesMap) + rulesMapToAddCase1 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) + + rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) + rulesMapToAddCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) + + rulesMapCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) + rulesMapToAddCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) + + testCases := []struct { + rulesMap RulesMap + rulesMapToAdd RulesMap + }{ + {rulesMapCase1, rulesMapToAddCase1}, + {rulesMapCase2, rulesMapToAddCase2}, + {rulesMapCase3, rulesMapToAddCase3}, + } + + for i, testCase := range testCases { + result := testCase.rulesMap.Clone() + + if !reflect.DeepEqual(result, testCase.rulesMap) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.rulesMap, result) + } + + result.Add(testCase.rulesMapToAdd) + if reflect.DeepEqual(result, testCase.rulesMap) { + t.Fatalf("test %v: result: expected: not equal, got: equal", i+1) + } + } +} + +func TestRulesMapAdd(t *testing.T) { + rulesMapCase1 := make(RulesMap) + rulesMapToAddCase1 := make(RulesMap) + expectedResultCase1 := make(RulesMap) + + rulesMapCase2 := make(RulesMap) + rulesMapToAddCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) + expectedResultCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) + + rulesMapCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) + rulesMapToAddCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) + expectedResultCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) + expectedResultCase3.add([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) + + testCases := []struct { + rulesMap 
RulesMap + rulesMapToAdd RulesMap + expectedResult RulesMap + }{ + {rulesMapCase1, rulesMapToAddCase1, expectedResultCase1}, + {rulesMapCase2, rulesMapToAddCase2, expectedResultCase2}, + {rulesMapCase3, rulesMapToAddCase3, expectedResultCase3}, + } + + for i, testCase := range testCases { + testCase.rulesMap.Add(testCase.rulesMapToAdd) + + if !reflect.DeepEqual(testCase.rulesMap, testCase.expectedResult) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, testCase.rulesMap) + } + } +} + +func TestRulesMapRemove(t *testing.T) { + rulesMapCase1 := make(RulesMap) + rulesMapToAddCase1 := make(RulesMap) + expectedResultCase1 := make(RulesMap) + + rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) + rulesMapToAddCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) + expectedResultCase2 := make(RulesMap) + + rulesMapCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) + rulesMapCase3.add([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) + rulesMapToAddCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) + expectedResultCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", "webhook"}) + + testCases := []struct { + rulesMap RulesMap + rulesMapToAdd RulesMap + expectedResult RulesMap + }{ + {rulesMapCase1, rulesMapToAddCase1, expectedResultCase1}, + {rulesMapCase2, rulesMapToAddCase2, expectedResultCase2}, + {rulesMapCase3, rulesMapToAddCase3, expectedResultCase3}, + } + + for i, testCase := range testCases { + testCase.rulesMap.Remove(testCase.rulesMapToAdd) + + if !reflect.DeepEqual(testCase.rulesMap, testCase.expectedResult) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, testCase.rulesMap) + } + } +} + +func TestRulesMapMatch(t *testing.T) { + rulesMapCase1 := make(RulesMap) + + rulesMapCase2 := NewRulesMap([]Name{ObjectCreatedAll}, "*", TargetID{"1", 
"webhook"}) + + rulesMapCase3 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) + + rulesMapCase4 := NewRulesMap([]Name{ObjectCreatedAll}, "2010*.jpg", TargetID{"1", "webhook"}) + rulesMapCase4.add([]Name{ObjectCreatedAll}, "*", TargetID{"2", "amqp"}) + + testCases := []struct { + rulesMap RulesMap + eventName Name + objectName string + expectedResult TargetIDSet + }{ + {rulesMapCase1, ObjectCreatedPut, "2010/photo.jpg", NewTargetIDSet()}, + {rulesMapCase2, ObjectCreatedPut, "2010/photo.jpg", NewTargetIDSet(TargetID{"1", "webhook"})}, + {rulesMapCase3, ObjectCreatedPut, "2000/photo.png", NewTargetIDSet()}, + {rulesMapCase4, ObjectCreatedPut, "2000/photo.png", NewTargetIDSet(TargetID{"2", "amqp"})}, + } + + for i, testCase := range testCases { + result := testCase.rulesMap.Match(testCase.eventName, testCase.objectName) + + if !reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestNewRulesMap(t *testing.T) { + rulesMapCase1 := make(RulesMap) + rulesMapCase1.add([]Name{ObjectAccessedGet, ObjectAccessedHead}, "*", TargetID{"1", "webhook"}) + + rulesMapCase2 := make(RulesMap) + rulesMapCase2.add([]Name{ObjectAccessedGet, ObjectAccessedHead, ObjectCreatedPut}, "*", TargetID{"1", "webhook"}) + + rulesMapCase3 := make(RulesMap) + rulesMapCase3.add([]Name{ObjectRemovedDelete}, "2010*.jpg", TargetID{"1", "webhook"}) + + testCases := []struct { + eventNames []Name + pattern string + targetID TargetID + expectedResult RulesMap + }{ + {[]Name{ObjectAccessedAll}, "", TargetID{"1", "webhook"}, rulesMapCase1}, + {[]Name{ObjectAccessedAll, ObjectCreatedPut}, "", TargetID{"1", "webhook"}, rulesMapCase2}, + {[]Name{ObjectRemovedDelete}, "2010*.jpg", TargetID{"1", "webhook"}, rulesMapCase3}, + } + + for i, testCase := range testCases { + result := NewRulesMap(testCase.eventNames, testCase.pattern, testCase.targetID) + + if 
!reflect.DeepEqual(result, testCase.expectedResult) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} diff --git a/pkg/event/target/amqp.go b/pkg/event/target/amqp.go new file mode 100644 index 000000000..aaea20f4c --- /dev/null +++ b/pkg/event/target/amqp.go @@ -0,0 +1,150 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package target + +import ( + "encoding/json" + "net" + "net/url" + "sync" + + "github.com/minio/minio/pkg/event" + xnet "github.com/minio/minio/pkg/net" + "github.com/streadway/amqp" +) + +// AMQPArgs - AMQP target arguments. +type AMQPArgs struct { + Enable bool `json:"enable"` + URL xnet.URL `json:"url"` + Exchange string `json:"exchange"` + RoutingKey string `json:"routingKey"` + ExchangeType string `json:"exchangeType"` + DeliveryMode uint8 `json:"deliveryMode"` + Mandatory bool `json:"mandatory"` + Immediate bool `json:"immediate"` + Durable bool `json:"durable"` + Internal bool `json:"internal"` + NoWait bool `json:"noWait"` + AutoDeleted bool `json:"autoDeleted"` +} + +// AMQPTarget - AMQP target +type AMQPTarget struct { + id event.TargetID + args AMQPArgs + conn *amqp.Connection + connMutex sync.Mutex +} + +// ID - returns TargetID. 
+func (target *AMQPTarget) ID() event.TargetID { + return target.id +} + +func (target *AMQPTarget) channel() (*amqp.Channel, error) { + isAMQPClosedErr := func(err error) bool { + if err == amqp.ErrClosed { + return true + } + + if nerr, ok := err.(*net.OpError); ok { + return (nerr.Err.Error() == "use of closed network connection") + } + + return false + } + + target.connMutex.Lock() + defer target.connMutex.Unlock() + + ch, err := target.conn.Channel() + if err == nil { + return ch, nil + } + + if !isAMQPClosedErr(err) { + return nil, err + } + + var conn *amqp.Connection + if conn, err = amqp.Dial(target.args.URL.String()); err != nil { + return nil, err + } + + if ch, err = conn.Channel(); err != nil { + return nil, err + } + + target.conn = conn + + return ch, nil +} + +// Send - sends event to AMQP. +func (target *AMQPTarget) Send(eventData event.Event) error { + ch, err := target.channel() + if err != nil { + return err + } + defer func() { + // FIXME: log returned error. ignore time being. + _ = ch.Close() + }() + + objectName, err := url.QueryUnescape(eventData.S3.Object.Key) + if err != nil { + return err + } + key := eventData.S3.Bucket.Name + "/" + objectName + + data, err := json.Marshal(event.Log{eventData.EventName, key, []event.Event{eventData}}) + if err != nil { + return err + } + + if err = ch.ExchangeDeclare(target.args.Exchange, target.args.ExchangeType, target.args.Durable, + target.args.AutoDeleted, target.args.Internal, target.args.NoWait, nil); err != nil { + return err + } + + return ch.Publish(target.args.Exchange, target.args.RoutingKey, target.args.Mandatory, + target.args.Immediate, amqp.Publishing{ + ContentType: "application/json", + DeliveryMode: target.args.DeliveryMode, + Body: data, + }) +} + +// Close - does nothing and available for interface compatibility. +func (target *AMQPTarget) Close() error { + return nil +} + +// NewAMQPTarget - creates new AMQP target. 
+func NewAMQPTarget(id string, args AMQPArgs) (*AMQPTarget, error) { + conn, err := amqp.Dial(args.URL.String()) + if err != nil { + return nil, err + } + + return &AMQPTarget{ + id: event.TargetID{id, "amqp"}, + args: args, + conn: conn, + }, nil +} diff --git a/pkg/event/target/elasticsearch.go b/pkg/event/target/elasticsearch.go new file mode 100644 index 000000000..3c1b5c1c1 --- /dev/null +++ b/pkg/event/target/elasticsearch.go @@ -0,0 +1,132 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package target + +import ( + "context" + "fmt" + "net/url" + "time" + + "github.com/minio/minio/pkg/event" + xnet "github.com/minio/minio/pkg/net" + + "gopkg.in/olivere/elastic.v5" +) + +// ElasticsearchArgs - Elasticsearch target arguments. +type ElasticsearchArgs struct { + Enable bool `json:"enable"` + Format string `json:"format"` + URL xnet.URL `json:"url"` + Index string `json:"index"` +} + +// ElasticsearchTarget - Elasticsearch target. +type ElasticsearchTarget struct { + id event.TargetID + args ElasticsearchArgs + client *elastic.Client +} + +// ID - returns target ID. +func (target *ElasticsearchTarget) ID() event.TargetID { + return target.id +} + +// Send - sends event to Elasticsearch. 
+func (target *ElasticsearchTarget) Send(eventData event.Event) (err error) { + var key string + + remove := func() error { + _, err := target.client.Delete().Index(target.args.Index).Type("event").Id(key).Do(context.Background()) + return err + } + + update := func() error { + _, err := target.client.Index().Index(target.args.Index).Type("event").BodyJson(map[string]interface{}{"Records": []event.Event{eventData}}).Id(key).Do(context.Background()) + return err + } + + add := func() error { + eventTime, err := time.Parse(event.AMZTimeFormat, eventData.EventTime) + if err != nil { + return err + } + + eventTimeMS := fmt.Sprintf("%d", eventTime.UnixNano()/1000000) + _, err = target.client.Index().Index(target.args.Index).Type("event").Timestamp(eventTimeMS).BodyJson(map[string]interface{}{"Records": []event.Event{eventData}}).Do(context.Background()) + return err + } + + if target.args.Format == event.NamespaceFormat { + objectName, err := url.QueryUnescape(eventData.S3.Object.Key) + if err != nil { + return err + } + + key = eventData.S3.Bucket.Name + "/" + objectName + if eventData.EventName == event.ObjectRemovedDelete { + err = remove() + } else { + err = update() + } + + return err + } + + if target.args.Format == event.AccessFormat { + return add() + } + + return nil +} + +// Close - does nothing and available for interface compatibility. +func (target *ElasticsearchTarget) Close() error { + return nil +} + +// NewElasticsearchTarget - creates new Elasticsearch target. 
+func NewElasticsearchTarget(id string, args ElasticsearchArgs) (*ElasticsearchTarget, error) { + client, err := elastic.NewClient(elastic.SetURL(args.URL.String()), elastic.SetSniff(false), elastic.SetMaxRetries(10)) + if err != nil { + return nil, err + } + + exists, err := client.IndexExists(args.Index).Do(context.Background()) + if err != nil { + return nil, err + } + + if !exists { + var createIndex *elastic.IndicesCreateResult + if createIndex, err = client.CreateIndex(args.Index).Do(context.Background()); err != nil { + return nil, err + } + + if !createIndex.Acknowledged { + return nil, fmt.Errorf("index %v not created", args.Index) + } + } + + return &ElasticsearchTarget{ + id: event.TargetID{id, "elasticsearch"}, + args: args, + client: client, + }, nil +} diff --git a/pkg/event/target/httpclient.go b/pkg/event/target/httpclient.go new file mode 100644 index 000000000..efa1adebd --- /dev/null +++ b/pkg/event/target/httpclient.go @@ -0,0 +1,141 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package target + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + "sync/atomic" + "time" + + "github.com/minio/minio/pkg/event" + xnet "github.com/minio/minio/pkg/net" + "github.com/skyrings/skyring-common/tools/uuid" +) + +// HTTPClientTarget - HTTP client target. 
+type HTTPClientTarget struct { + id event.TargetID + w http.ResponseWriter + eventCh chan []byte + DoneCh chan struct{} + stopCh chan struct{} + isStopped uint32 + isRunning uint32 +} + +// ID - returns target ID. +func (target HTTPClientTarget) ID() event.TargetID { + return target.id +} + +func (target *HTTPClientTarget) start() { + go func() { + defer func() { + atomic.AddUint32(&target.isRunning, 1) + + // Close DoneCh to indicate we are done. + close(target.DoneCh) + }() + + write := func(event []byte) error { + if _, err := target.w.Write(event); err != nil { + return err + } + + target.w.(http.Flusher).Flush() + return nil + } + + for { + keepAliveTicker := time.NewTicker(500 * time.Millisecond) + select { + case <-target.stopCh: + // We are asked to stop. + return + case event, ok := <-target.eventCh: + if !ok { + // Got read error. Exit the goroutine. + return + } + if err := write(event); err != nil { + // Got write error to the client. Exit the goroutine. + return + } + case <-keepAliveTicker.C: + if err := write([]byte(" ")); err != nil { + // Got write error to the client. Exit the goroutine. + return + } + } + } + }() +} + +// Send - sends event to HTTP client. +func (target *HTTPClientTarget) Send(eventData event.Event) error { + if atomic.LoadUint32(&target.isRunning) != 0 { + return errors.New("closed http connection") + } + + data, err := json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}) + if err != nil { + return err + } + data = append(data, byte('\n')) + + select { + case target.eventCh <- data: + return nil + case <-target.DoneCh: + return errors.New("error in sending event") + } +} + +// Close - closes underneath goroutine. +func (target *HTTPClientTarget) Close() error { + atomic.AddUint32(&target.isStopped, 1) + if atomic.LoadUint32(&target.isStopped) == 1 { + close(target.stopCh) + } + + return nil +} + +func mustGetNewUUID() string { + uuid, err := uuid.New() + if err != nil { + panic(fmt.Sprintf("%s. 
Unable to generate random UUID", err)) + } + + return uuid.String() +} + +// NewHTTPClientTarget - creates new HTTP client target. +func NewHTTPClientTarget(host xnet.Host, w http.ResponseWriter) *HTTPClientTarget { + c := &HTTPClientTarget{ + id: event.TargetID{"httpclient" + "+" + mustGetNewUUID() + "+" + host.Name, host.Port.String()}, + w: w, + eventCh: make(chan []byte), + DoneCh: make(chan struct{}), + stopCh: make(chan struct{}), + } + c.start() + return c +} diff --git a/pkg/event/target/kafka.go b/pkg/event/target/kafka.go new file mode 100644 index 000000000..1e0989882 --- /dev/null +++ b/pkg/event/target/kafka.go @@ -0,0 +1,97 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package target + +import ( + "encoding/json" + "net/url" + + "github.com/minio/minio/pkg/event" + xnet "github.com/minio/minio/pkg/net" + + sarama "gopkg.in/Shopify/sarama.v1" +) + +// KafkaArgs - Kafka target arguments. +type KafkaArgs struct { + Enable bool `json:"enable"` + Brokers []xnet.Host `json:"brokers"` + Topic string `json:"topic"` +} + +// KafkaTarget - Kafka target. +type KafkaTarget struct { + id event.TargetID + args KafkaArgs + producer sarama.SyncProducer +} + +// ID - returns target ID. +func (target *KafkaTarget) ID() event.TargetID { + return target.id +} + +// Send - sends event to Kafka. 
+func (target *KafkaTarget) Send(eventData event.Event) error { + objectName, err := url.QueryUnescape(eventData.S3.Object.Key) + if err != nil { + return err + } + key := eventData.S3.Bucket.Name + "/" + objectName + + data, err := json.Marshal(event.Log{eventData.EventName, key, []event.Event{eventData}}) + if err != nil { + return err + } + + msg := sarama.ProducerMessage{ + Topic: target.args.Topic, + Key: sarama.StringEncoder(key), + Value: sarama.ByteEncoder(data), + } + _, _, err = target.producer.SendMessage(&msg) + + return err +} + +// Close - closes underneath kafka connection. +func (target *KafkaTarget) Close() error { + return target.producer.Close() +} + +// NewKafkaTarget - creates new Kafka target. +func NewKafkaTarget(id string, args KafkaArgs) (*KafkaTarget, error) { + config := sarama.NewConfig() + config.Producer.RequiredAcks = sarama.WaitForAll + config.Producer.Retry.Max = 10 + config.Producer.Return.Successes = true + + brokers := []string{} + for _, broker := range args.Brokers { + brokers = append(brokers, broker.String()) + } + producer, err := sarama.NewSyncProducer(brokers, config) + if err != nil { + return nil, err + } + + return &KafkaTarget{ + id: event.TargetID{id, "kafka"}, + args: args, + producer: producer, + }, nil +} diff --git a/pkg/event/target/mqtt.go b/pkg/event/target/mqtt.go new file mode 100644 index 000000000..8acf1c0c5 --- /dev/null +++ b/pkg/event/target/mqtt.go @@ -0,0 +1,117 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package target + +import ( + "crypto/tls" + "crypto/x509" + "encoding/json" + "net/url" + "time" + + "github.com/eclipse/paho.mqtt.golang" + "github.com/minio/minio/pkg/event" + xnet "github.com/minio/minio/pkg/net" +) + +// MQTTArgs - MQTT target arguments. +type MQTTArgs struct { + Enable bool `json:"enable"` + Broker xnet.URL `json:"broker"` + Topic string `json:"topic"` + QoS byte `json:"qos"` + ClientID string `json:"clientId"` + User string `json:"username"` + Password string `json:"password"` + MaxReconnectInterval time.Duration `json:"reconnectInterval"` + KeepAlive time.Duration `json:"keepAliveInterval"` + RootCAs *x509.CertPool `json:"-"` +} + +// MQTTTarget - MQTT target. +type MQTTTarget struct { + id event.TargetID + args MQTTArgs + client mqtt.Client +} + +// ID - returns target ID. +func (target *MQTTTarget) ID() event.TargetID { + return target.id +} + +// Send - sends event to MQTT. +func (target *MQTTTarget) Send(eventData event.Event) error { + if !target.client.IsConnected() { + token := target.client.Connect() + if token.Wait() { + if err := token.Error(); err != nil { + return err + } + } + } + + objectName, err := url.QueryUnescape(eventData.S3.Object.Key) + if err != nil { + return err + } + key := eventData.S3.Bucket.Name + "/" + objectName + + data, err := json.Marshal(event.Log{eventData.EventName, key, []event.Event{eventData}}) + if err != nil { + return err + } + + token := target.client.Publish(target.args.Topic, target.args.QoS, false, string(data)) + + if token.Wait() { + return token.Error() + } + + return nil +} + +// Close - does nothing and available for interface compatibility. +func (target *MQTTTarget) Close() error { + return nil +} + +// NewMQTTTarget - creates new MQTT target. 
+func NewMQTTTarget(id string, args MQTTArgs) (*MQTTTarget, error) { + options := &mqtt.ClientOptions{ + ClientID: args.ClientID, + CleanSession: true, + Username: args.User, + Password: args.Password, + MaxReconnectInterval: args.MaxReconnectInterval, + KeepAlive: args.KeepAlive, + TLSConfig: tls.Config{RootCAs: args.RootCAs}, + } + options.AddBroker(args.Broker.String()) + + client := mqtt.NewClient(options) + token := client.Connect() + if token.Wait() && token.Error() != nil { + return nil, token.Error() + } + + return &MQTTTarget{ + id: event.TargetID{id, "mqtt"}, + args: args, + client: client, + }, nil +} diff --git a/pkg/event/target/mysql.go b/pkg/event/target/mysql.go new file mode 100644 index 000000000..296aa8b73 --- /dev/null +++ b/pkg/event/target/mysql.go @@ -0,0 +1,226 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// MySQL Notifier implementation. Two formats, "namespace" and +// "access" are supported. +// +// * Namespace format +// +// On each create or update object event in Minio Object storage +// server, a row is created or updated in the table in MySQL. On each +// object removal, the corresponding row is deleted from the table. +// +// A table with a specific structure (column names, column types, and +// primary key/uniqueness constraint) is used. The user may set the +// table name in the configuration. 
A sample SQL command that creates +// a table with the required structure is: +// +// CREATE TABLE myminio ( +// key_name VARCHAR(2048), +// value JSON, +// PRIMARY KEY (key_name) +// ); +// +// MySQL's "INSERT ... ON DUPLICATE ..." feature (UPSERT) is used +// here. The implementation has been tested with MySQL Ver 14.14 +// Distrib 5.7.17. +// +// * Access format +// +// On each event, a row is appended to the configured table. There is +// no deletion or modification of existing rows. +// +// A different table schema is used for this format. A sample SQL +// command that creates a table with the required structure is: +// +// CREATE TABLE myminio ( +// event_time DATETIME NOT NULL, +// event_data JSON +// ); + +package target + +import ( + "database/sql" + "encoding/json" + "fmt" + "net/url" + "time" + + "github.com/go-sql-driver/mysql" + "github.com/minio/minio/pkg/event" + xnet "github.com/minio/minio/pkg/net" +) + +const ( + mysqlTableExists = `SELECT 1 FROM %s;` + mysqlCreateNamespaceTable = `CREATE TABLE %s (key_name VARCHAR(2048), value JSON, PRIMARY KEY (key_name));` + mysqlCreateAccessTable = `CREATE TABLE %s (event_time DATETIME NOT NULL, event_data JSON);` + + mysqlUpdateRow = `INSERT INTO %s (key_name, value) VALUES (?, ?) ON DUPLICATE KEY UPDATE value=VALUES(value);` + mysqlDeleteRow = `DELETE FROM %s WHERE key_name = ?;` + mysqlInsertRow = `INSERT INTO %s (event_time, event_data) VALUES (?, ?);` +) + +// MySQLArgs - MySQL target arguments. +type MySQLArgs struct { + Enable bool `json:"enable"` + Format string `json:"format"` + DSN string `json:"dsnString"` + Table string `json:"table"` + Host xnet.URL `json:"host"` + Port string `json:"port"` + User string `json:"user"` + Password string `json:"password"` + Database string `json:"database"` +} + +// MySQLTarget - MySQL target.
+type MySQLTarget struct { + id event.TargetID + args MySQLArgs + updateStmt *sql.Stmt + deleteStmt *sql.Stmt + insertStmt *sql.Stmt + db *sql.DB +} + +// ID - returns target ID. +func (target *MySQLTarget) ID() event.TargetID { + return target.id +} + +// Send - sends event to MySQL. +func (target *MySQLTarget) Send(eventData event.Event) error { + if target.args.Format == event.NamespaceFormat { + objectName, err := url.QueryUnescape(eventData.S3.Object.Key) + if err != nil { + return err + } + key := eventData.S3.Bucket.Name + "/" + objectName + + if eventData.EventName == event.ObjectRemovedDelete { + _, err = target.deleteStmt.Exec(key) + } else { + var data []byte + if data, err = json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}); err != nil { + return err + } + + _, err = target.updateStmt.Exec(key, data) + } + return err + } + + if target.args.Format == event.AccessFormat { + eventTime, err := time.Parse(event.AMZTimeFormat, eventData.EventTime) + if err != nil { + return err + } + + data, err := json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}) + if err != nil { + return err + } + + _, err = target.insertStmt.Exec(eventTime, data) + return err + } + + return nil +} + +// Close - closes underneath connections to MySQL database. +func (target *MySQLTarget) Close() error { + if target.updateStmt != nil { + // FIXME: log returned error. ignore time being. + _ = target.updateStmt.Close() + } + + if target.deleteStmt != nil { + // FIXME: log returned error. ignore time being. + _ = target.deleteStmt.Close() + } + + if target.insertStmt != nil { + // FIXME: log returned error. ignore time being. + _ = target.insertStmt.Close() + } + + return target.db.Close() +} + +// NewMySQLTarget - creates new MySQL target. 
+func NewMySQLTarget(id string, args MySQLArgs) (*MySQLTarget, error) { + if args.DSN == "" { + config := mysql.Config{ + User: args.User, + Passwd: args.Password, + Net: "tcp", + Addr: args.Host.String() + ":" + args.Port, + DBName: args.Database, + } + + args.DSN = config.FormatDSN() + } + + db, err := sql.Open("mysql", args.DSN) + if err != nil { + return nil, err + } + + if err = db.Ping(); err != nil { + return nil, err + } + + if _, err = db.Exec(fmt.Sprintf(mysqlTableExists, args.Table)); err != nil { + createStmt := mysqlCreateNamespaceTable + if args.Format == event.AccessFormat { + createStmt = mysqlCreateAccessTable + } + + if _, err = db.Exec(fmt.Sprintf(createStmt, args.Table)); err != nil { + return nil, err + } + } + + var updateStmt, deleteStmt, insertStmt *sql.Stmt + switch args.Format { + case event.NamespaceFormat: + // insert or update statement + if updateStmt, err = db.Prepare(fmt.Sprintf(mysqlUpdateRow, args.Table)); err != nil { + return nil, err + } + // delete statement + if deleteStmt, err = db.Prepare(fmt.Sprintf(mysqlDeleteRow, args.Table)); err != nil { + return nil, err + } + case event.AccessFormat: + // insert statement + if insertStmt, err = db.Prepare(fmt.Sprintf(mysqlInsertRow, args.Table)); err != nil { + return nil, err + } + } + + return &MySQLTarget{ + id: event.TargetID{id, "mysql"}, + args: args, + updateStmt: updateStmt, + deleteStmt: deleteStmt, + insertStmt: insertStmt, + db: db, + }, nil +} diff --git a/pkg/event/target/nats.go b/pkg/event/target/nats.go new file mode 100644 index 000000000..c6f641ed8 --- /dev/null +++ b/pkg/event/target/nats.go @@ -0,0 +1,143 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package target + +import ( + "encoding/json" + "net/url" + + "github.com/minio/minio/pkg/event" + xnet "github.com/minio/minio/pkg/net" + "github.com/nats-io/go-nats-streaming" + "github.com/nats-io/nats" +) + +// NATSArgs - NATS target arguments. +type NATSArgs struct { + Enable bool `json:"enable"` + Address xnet.Host `json:"address"` + Subject string `json:"subject"` + Username string `json:"username"` + Password string `json:"password"` + Token string `json:"token"` + Secure bool `json:"secure"` + PingInterval int64 `json:"pingInterval"` + Streaming struct { + Enable bool `json:"enable"` + ClusterID string `json:"clusterID"` + ClientID string `json:"clientID"` + Async bool `json:"async"` + MaxPubAcksInflight int `json:"maxPubAcksInflight"` + } `json:"streaming"` +} + +// NATSTarget - NATS target. +type NATSTarget struct { + id event.TargetID + args NATSArgs + natsConn *nats.Conn + stanConn stan.Conn +} + +// ID - returns target ID. +func (target *NATSTarget) ID() event.TargetID { + return target.id +} + +// Send - sends event to NATS. 
+func (target *NATSTarget) Send(eventData event.Event) (err error) { + objectName, err := url.QueryUnescape(eventData.S3.Object.Key) + if err != nil { + return err + } + key := eventData.S3.Bucket.Name + "/" + objectName + + data, err := json.Marshal(event.Log{eventData.EventName, key, []event.Event{eventData}}) + if err != nil { + return err + } + + if target.stanConn != nil { + if target.args.Streaming.Async { + _, err = target.stanConn.PublishAsync(target.args.Subject, data, nil) + } else { + err = target.stanConn.Publish(target.args.Subject, data) + } + } else { + err = target.natsConn.Publish(target.args.Subject, data) + } + + return err +} + +// Close - closes underneath connections to NATS server. +func (target *NATSTarget) Close() (err error) { + if target.stanConn != nil { + err = target.stanConn.Close() + } + + if target.natsConn != nil { + target.natsConn.Close() + } + + return err +} + +// NewNATSTarget - creates new NATS target. +func NewNATSTarget(id string, args NATSArgs) (*NATSTarget, error) { + var natsConn *nats.Conn + var stanConn stan.Conn + var err error + + if args.Streaming.Enable { + scheme := "nats" + if args.Secure { + scheme = "tls" + } + addressURL := scheme + "://" + args.Username + ":" + args.Password + "@" + args.Address.String() + + clientID := args.Streaming.ClientID + if clientID == "" { + clientID = mustGetNewUUID() + } + + connOpts := []stan.Option{stan.NatsURL(addressURL)} + if args.Streaming.MaxPubAcksInflight > 0 { + connOpts = append(connOpts, stan.MaxPubAcksInflight(args.Streaming.MaxPubAcksInflight)) + } + + stanConn, err = stan.Connect(args.Streaming.ClusterID, clientID, connOpts...) 
+ } else { + options := nats.DefaultOptions + options.Url = "nats://" + args.Address.String() + options.User = args.Username + options.Password = args.Password + options.Token = args.Token + options.Secure = args.Secure + natsConn, err = options.Connect() + } + if err != nil { + return nil, err + } + + return &NATSTarget{ + id: event.TargetID{id, "nats"}, + args: args, + stanConn: stanConn, + natsConn: natsConn, + }, nil +} diff --git a/pkg/event/target/postgresql.go b/pkg/event/target/postgresql.go new file mode 100644 index 000000000..efb7b8474 --- /dev/null +++ b/pkg/event/target/postgresql.go @@ -0,0 +1,233 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +// PostgreSQL Notifier implementation. Two formats, "namespace" and +// "access" are supported. +// +// * Namespace format +// +// On each create or update object event in Minio Object storage +// server, a row is created or updated in the table in Postgres. On +// each object removal, the corresponding row is deleted from the +// table. +// +// A table with a specific structure (column names, column types, and +// primary key/uniqueness constraint) is used. The user may set the +// table name in the configuration. A sample SQL command that creates +// a table with the required structure is: +// +// CREATE TABLE myminio ( +// key VARCHAR PRIMARY KEY, +// value JSONB +// ); +// +// PostgreSQL's "INSERT ... ON CONFLICT ... DO UPDATE ..." 
feature +// (UPSERT) is used here, so the minimum version of PostgreSQL +// required is 9.5. +// +// * Access format +// +// On each event, a row is appended to the configured table. There is +// no deletion or modification of existing rows. +// +// A different table schema is used for this format. A sample SQL +// command that creates a table with the required structure is: +// +// CREATE TABLE myminio ( +// event_time TIMESTAMP WITH TIME ZONE NOT NULL, +// event_data JSONB +// ); + +package target + +import ( + "database/sql" + "encoding/json" + "fmt" + "net/url" + "strings" + "time" + + _ "github.com/lib/pq" // Register postgres driver + "github.com/minio/minio/pkg/event" + xnet "github.com/minio/minio/pkg/net" +) + +const ( + psqlTableExists = `SELECT 1 FROM %s;` + psqlCreateNamespaceTable = `CREATE TABLE %s (key VARCHAR PRIMARY KEY, value JSONB);` + psqlCreateAccessTable = `CREATE TABLE %s (event_time TIMESTAMP WITH TIME ZONE NOT NULL, event_data JSONB);` + + psqlUpdateRow = `INSERT INTO %s (key, value) VALUES ($1, $2) ON CONFLICT (key) DO UPDATE SET value = EXCLUDED.value;` + psqlDeleteRow = `DELETE FROM %s WHERE key = $1;` + psqlInsertRow = `INSERT INTO %s (event_time, event_data) VALUES ($1, $2);` +) + +// PostgreSQLArgs - PostgreSQL target arguments. +type PostgreSQLArgs struct { + Enable bool `json:"enable"` + Format string `json:"format"` + ConnectionString string `json:"connectionString"` + Table string `json:"table"` + Host xnet.URL `json:"host"` // default: localhost + Port string `json:"port"` // default: 5432 + User string `json:"user"` // default: user running minio + Password string `json:"password"` // default: no password + Database string `json:"database"` // default: same as user +} + +// PostgreSQLTarget - PostgreSQL target. +type PostgreSQLTarget struct { + id event.TargetID + args PostgreSQLArgs + updateStmt *sql.Stmt + deleteStmt *sql.Stmt + insertStmt *sql.Stmt + db *sql.DB +} + +// ID - returns target ID.
+func (target *PostgreSQLTarget) ID() event.TargetID { + return target.id +} + +// Send - sends event to PostgreSQL. +func (target *PostgreSQLTarget) Send(eventData event.Event) error { + if target.args.Format == event.NamespaceFormat { + objectName, err := url.QueryUnescape(eventData.S3.Object.Key) + if err != nil { + return err + } + key := eventData.S3.Bucket.Name + "/" + objectName + + if eventData.EventName == event.ObjectRemovedDelete { + _, err = target.deleteStmt.Exec(key) + } else { + var data []byte + if data, err = json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}); err != nil { + return err + } + + _, err = target.updateStmt.Exec(key, data) + } + return err + } + + if target.args.Format == event.AccessFormat { + eventTime, err := time.Parse(event.AMZTimeFormat, eventData.EventTime) + if err != nil { + return err + } + + data, err := json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}) + if err != nil { + return err + } + + _, err = target.insertStmt.Exec(eventTime, data) + return err + } + + return nil +} + +// Close - closes underneath connections to PostgreSQL database. +func (target *PostgreSQLTarget) Close() error { + if target.updateStmt != nil { + // FIXME: log returned error. ignore time being. + _ = target.updateStmt.Close() + } + + if target.deleteStmt != nil { + // FIXME: log returned error. ignore time being. + _ = target.deleteStmt.Close() + } + + if target.insertStmt != nil { + // FIXME: log returned error. ignore time being. + _ = target.insertStmt.Close() + } + + return target.db.Close() +} + +// NewPostgreSQLTarget - creates new PostgreSQL target. 
+func NewPostgreSQLTarget(id string, args PostgreSQLArgs) (*PostgreSQLTarget, error) { + params := []string{args.ConnectionString} + if !args.Host.IsEmpty() { + params = append(params, "host="+args.Host.String()) + } + if args.Port != "" { + params = append(params, "port="+args.Port) + } + if args.User != "" { + params = append(params, "user="+args.User) + } + if args.Password != "" { + params = append(params, "password="+args.Password) + } + if args.Database != "" { + params = append(params, "dbname="+args.Database) + } + connStr := strings.Join(params, " ") + + db, err := sql.Open("postgres", connStr) + if err != nil { + return nil, err + } + + if err = db.Ping(); err != nil { + return nil, err + } + + if _, err = db.Exec(fmt.Sprintf(psqlTableExists, args.Table)); err != nil { + createStmt := psqlCreateNamespaceTable + if args.Format == event.AccessFormat { + createStmt = psqlCreateAccessTable + } + + if _, err = db.Exec(fmt.Sprintf(createStmt, args.Table)); err != nil { + return nil, err + } + } + + var updateStmt, deleteStmt, insertStmt *sql.Stmt + switch args.Format { + case event.NamespaceFormat: + // insert or update statement + if updateStmt, err = db.Prepare(fmt.Sprintf(psqlUpdateRow, args.Table)); err != nil { + return nil, err + } + // delete statement + if deleteStmt, err = db.Prepare(fmt.Sprintf(psqlDeleteRow, args.Table)); err != nil { + return nil, err + } + case event.AccessFormat: + // insert statement + if insertStmt, err = db.Prepare(fmt.Sprintf(psqlInsertRow, args.Table)); err != nil { + return nil, err + } + } + + return &PostgreSQLTarget{ + id: event.TargetID{id, "postgresql"}, + args: args, + updateStmt: updateStmt, + deleteStmt: deleteStmt, + insertStmt: insertStmt, + db: db, + }, nil +} diff --git a/pkg/event/target/redis.go b/pkg/event/target/redis.go new file mode 100644 index 000000000..76c8bbeb3 --- /dev/null +++ b/pkg/event/target/redis.go @@ -0,0 +1,156 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package target + +import ( + "encoding/json" + "fmt" + "net/url" + "time" + + "github.com/garyburd/redigo/redis" + "github.com/minio/minio/pkg/event" + xnet "github.com/minio/minio/pkg/net" +) + +// RedisArgs - Redis target arguments. +type RedisArgs struct { + Enable bool `json:"enable"` + Format string `json:"format"` + Addr xnet.Host `json:"address"` + Password string `json:"password"` + Key string `json:"key"` +} + +// RedisTarget - Redis target. +type RedisTarget struct { + id event.TargetID + args RedisArgs + pool *redis.Pool +} + +// ID - returns target ID. +func (target *RedisTarget) ID() event.TargetID { + return target.id +} + +// Send - sends event to Redis. +func (target *RedisTarget) Send(eventData event.Event) error { + conn := target.pool.Get() + defer func() { + // FIXME: log returned error. ignore time being. 
+ _ = conn.Close() + }() + + if target.args.Format == event.NamespaceFormat { + objectName, err := url.QueryUnescape(eventData.S3.Object.Key) + if err != nil { + return err + } + key := eventData.S3.Bucket.Name + "/" + objectName + + if eventData.EventName == event.ObjectRemovedDelete { + _, err = conn.Do("HDEL", target.args.Key, key) + } else { + var data []byte + if data, err = json.Marshal(struct{ Records []event.Event }{[]event.Event{eventData}}); err != nil { + return err + } + + _, err = conn.Do("HSET", target.args.Key, key, data) + } + return err + } + + if target.args.Format == event.AccessFormat { + data, err := json.Marshal([]interface{}{eventData.EventTime, []event.Event{eventData}}) + if err != nil { + return err + } + _, err = conn.Do("RPUSH", target.args.Key, data) + return err + } + + return nil +} + +// Close - does nothing and available for interface compatibility. +func (target *RedisTarget) Close() error { + return nil +} + +// NewRedisTarget - creates new Redis target. +func NewRedisTarget(id string, args RedisArgs) (*RedisTarget, error) { + pool := &redis.Pool{ + MaxIdle: 3, + IdleTimeout: 2 * 60 * time.Second, + Dial: func() (redis.Conn, error) { + conn, err := redis.Dial("tcp", args.Addr.String()) + if err != nil { + return nil, err + } + + if args.Password == "" { + return conn, nil + } + + if _, err = conn.Do("AUTH", args.Password); err != nil { + // FIXME: log returned error. ignore time being. + _ = conn.Close() + return nil, err + } + + return conn, nil + }, + TestOnBorrow: func(c redis.Conn, t time.Time) error { + _, err := c.Do("PING") + return err + }, + } + + conn := pool.Get() + defer func() { + // FIXME: log returned error. ignore time being. 
+ _ = conn.Close() + }() + + if _, err := conn.Do("PING"); err != nil { + return nil, err + } + + typeAvailable, err := redis.String(conn.Do("TYPE", args.Key)) + if err != nil { + return nil, err + } + + if typeAvailable != "none" { + expectedType := "hash" + if args.Format == event.AccessFormat { + expectedType = "list" + } + + if typeAvailable != expectedType { + return nil, fmt.Errorf("expected type %v does not match with available type %v", expectedType, typeAvailable) + } + } + + return &RedisTarget{ + id: event.TargetID{id, "redis"}, + args: args, + pool: pool, + }, nil +} diff --git a/pkg/event/target/webhook.go b/pkg/event/target/webhook.go new file mode 100644 index 000000000..1a9897a96 --- /dev/null +++ b/pkg/event/target/webhook.go @@ -0,0 +1,113 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package target + +import ( + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/json" + "fmt" + "net" + "net/http" + "net/url" + "time" + + "github.com/minio/minio/pkg/event" + xnet "github.com/minio/minio/pkg/net" +) + +// WebhookArgs - Webhook target arguments. +type WebhookArgs struct { + Enable bool `json:"enable"` + Endpoint xnet.URL `json:"endpoint"` + RootCAs *x509.CertPool `json:"-"` +} + +// WebhookTarget - Webhook target. +type WebhookTarget struct { + id event.TargetID + args WebhookArgs + httpClient *http.Client +} + +// ID - returns target ID. 
+func (target WebhookTarget) ID() event.TargetID { + return target.id +} + +// Send - sends event to Webhook. +func (target *WebhookTarget) Send(eventData event.Event) error { + objectName, err := url.QueryUnescape(eventData.S3.Object.Key) + if err != nil { + return err + } + key := eventData.S3.Bucket.Name + "/" + objectName + + data, err := json.Marshal(event.Log{eventData.EventName, key, []event.Event{eventData}}) + if err != nil { + return err + } + + req, err := http.NewRequest("POST", target.args.Endpoint.String(), bytes.NewReader(data)) + if err != nil { + return err + } + + // req.Header.Set("User-Agent", globalServerUserAgent) + req.Header.Set("Content-Type", "application/json") + + resp, err := target.httpClient.Do(req) + if err != nil { + return err + } + + // FIXME: log returned error. ignore time being. + _ = resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK, http.StatusAccepted, http.StatusContinue: + return nil + default: + return fmt.Errorf("sending event failed with %v", resp.Status) + } +} + +// Close - does nothing and available for interface compatibility. +func (target *WebhookTarget) Close() error { + return nil +} + +// NewWebhookTarget - creates new Webhook target. +func NewWebhookTarget(id string, args WebhookArgs) *WebhookTarget { + return &WebhookTarget{ + id: event.TargetID{id, "webhook"}, + args: args, + httpClient: &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{RootCAs: args.RootCAs}, + DialContext: (&net.Dialer{ + Timeout: 5 * time.Second, + KeepAlive: 5 * time.Second, + }).DialContext, + TLSHandshakeTimeout: 3 * time.Second, + ResponseHeaderTimeout: 3 * time.Second, + ExpectContinueTimeout: 2 * time.Second, + }, + }, + } +} diff --git a/pkg/event/targetid.go b/pkg/event/targetid.go new file mode 100644 index 000000000..a90e541d8 --- /dev/null +++ b/pkg/event/targetid.go @@ -0,0 +1,73 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package event + +import ( + "encoding/json" + "fmt" + "strings" +) + +// TargetID - holds identification and name strings of notification target. +type TargetID struct { + ID string + Name string +} + +// String - returns string representation. +func (tid TargetID) String() string { + return tid.ID + ":" + tid.Name +} + +// ToARN - converts to ARN. +func (tid TargetID) ToARN(region string) ARN { + return ARN{TargetID: tid, region: region} +} + +// MarshalJSON - encodes to JSON data. +func (tid TargetID) MarshalJSON() ([]byte, error) { + return json.Marshal(tid.String()) +} + +// UnmarshalJSON - decodes JSON data. +func (tid *TargetID) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + + targetID, err := parseTargetID(s) + if err != nil { + return err + } + + *tid = *targetID + return nil +} + +// parseTargetID - parses string to TargetID. +func parseTargetID(s string) (*TargetID, error) { + tokens := strings.Split(s, ":") + if len(tokens) != 2 { + return nil, fmt.Errorf("invalid TargetID format '%v'", s) + } + + return &TargetID{ + ID: tokens[0], + Name: tokens[1], + }, nil +} diff --git a/pkg/event/targetid_test.go b/pkg/event/targetid_test.go new file mode 100644 index 000000000..9f8283105 --- /dev/null +++ b/pkg/event/targetid_test.go @@ -0,0 +1,117 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package event + +import ( + "reflect" + "testing" +) + +func TestTargetDString(t *testing.T) { + testCases := []struct { + tid TargetID + expectedResult string + }{ + {TargetID{}, ":"}, + {TargetID{"1", "webhook"}, "1:webhook"}, + {TargetID{"httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531", "localhost:55638"}, "httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531:localhost:55638"}, + } + + for i, testCase := range testCases { + result := testCase.tid.String() + + if result != testCase.expectedResult { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestTargetDToARN(t *testing.T) { + tid := TargetID{"1", "webhook"} + testCases := []struct { + tid TargetID + region string + expectedARN ARN + }{ + {tid, "", ARN{TargetID: tid, region: ""}}, + {tid, "us-east-1", ARN{TargetID: tid, region: "us-east-1"}}, + } + + for i, testCase := range testCases { + arn := testCase.tid.ToARN(testCase.region) + + if arn != testCase.expectedARN { + t.Fatalf("test %v: ARN: expected: %v, got: %v", i+1, testCase.expectedARN, arn) + } + } +} + +func TestTargetDMarshalJSON(t *testing.T) { + testCases := []struct { + tid TargetID + expectedData []byte + expectErr bool + }{ + {TargetID{}, []byte(`":"`), false}, + {TargetID{"1", "webhook"}, []byte(`"1:webhook"`), false}, + {TargetID{"httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531", "localhost:55638"}, 
[]byte(`"httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531:localhost:55638"`), false}, + } + + for i, testCase := range testCases { + data, err := testCase.tid.MarshalJSON() + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(data, testCase.expectedData) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data)) + } + } + } +} + +func TestTargetDUnmarshalJSON(t *testing.T) { + testCases := []struct { + data []byte + expectedTargetID *TargetID + expectErr bool + }{ + {[]byte(`""`), nil, true}, + {[]byte(`"httpclient+2e33cdee-fbec-4bdd-917e-7d8e3c5a2531:localhost:55638"`), nil, true}, + {[]byte(`":"`), &TargetID{}, false}, + {[]byte(`"1:webhook"`), &TargetID{"1", "webhook"}, false}, + } + + for i, testCase := range testCases { + targetID := &TargetID{} + err := targetID.UnmarshalJSON(testCase.data) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if *targetID != *testCase.expectedTargetID { + t.Fatalf("test %v: TargetID: expected: %v, got: %v", i+1, testCase.expectedTargetID, targetID) + } + } + } +} diff --git a/pkg/event/targetidset.go b/pkg/event/targetidset.go new file mode 100644 index 000000000..2ceeb3a45 --- /dev/null +++ b/pkg/event/targetidset.go @@ -0,0 +1,82 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package event
+
+import "fmt"
+
+// TargetIDSet - Set representation of TargetIDs.
+type TargetIDSet map[TargetID]struct{}
+
+// ToSlice - returns TargetID slice from TargetIDSet.
+func (set TargetIDSet) ToSlice() []TargetID {
+	keys := make([]TargetID, 0, len(set))
+	for k := range set {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// String - returns string representation.
+func (set TargetIDSet) String() string {
+	return fmt.Sprintf("%v", set.ToSlice())
+}
+
+// Clone - returns copy of this set.
+func (set TargetIDSet) Clone() TargetIDSet {
+	setCopy := NewTargetIDSet()
+	for k, v := range set {
+		setCopy[k] = v
+	}
+	return setCopy
+}
+
+// add - adds TargetID to the set.
+func (set TargetIDSet) add(targetID TargetID) {
+	set[targetID] = struct{}{}
+}
+
+// Union - returns union with given set as new set.
+func (set TargetIDSet) Union(sset TargetIDSet) TargetIDSet {
+	nset := set.Clone()
+
+	for k := range sset {
+		nset.add(k)
+	}
+
+	return nset
+}
+
+// Difference - returns difference with given set as new set.
+func (set TargetIDSet) Difference(sset TargetIDSet) TargetIDSet {
+	nset := NewTargetIDSet()
+	for k := range set {
+		if _, ok := sset[k]; !ok {
+			nset.add(k)
+		}
+	}
+
+	return nset
+}
+
+// NewTargetIDSet - creates new TargetID set with given TargetIDs.
+func NewTargetIDSet(targetIDs ...TargetID) TargetIDSet { + set := make(TargetIDSet) + for _, targetID := range targetIDs { + set.add(targetID) + } + return set +} diff --git a/pkg/event/targetidset_test.go b/pkg/event/targetidset_test.go new file mode 100644 index 000000000..fbdb9af09 --- /dev/null +++ b/pkg/event/targetidset_test.go @@ -0,0 +1,159 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package event + +import ( + "reflect" + "testing" +) + +func TestTargetIDSetToSlice(t *testing.T) { + testCases := []struct { + set TargetIDSet + expectedResult []TargetID + }{ + {NewTargetIDSet(), []TargetID{}}, + {NewTargetIDSet(TargetID{"1", "webhook"}), []TargetID{{"1", "webhook"}}}, + {NewTargetIDSet(TargetID{"1", "webhook"}, TargetID{"2", "amqp"}), []TargetID{{"1", "webhook"}, {"2", "amqp"}}}, + } + + for i, testCase := range testCases { + result := testCase.set.ToSlice() + + if len(result) != len(testCase.expectedResult) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + + for _, targetID1 := range result { + var found bool + for _, targetID2 := range testCase.expectedResult { + if reflect.DeepEqual(targetID1, targetID2) { + found = true + break + } + } + if !found { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestTargetIDSetString(t *testing.T) { + testCases := []struct { + set 
TargetIDSet + expectedResult string + }{ + {NewTargetIDSet(), "[]"}, + {NewTargetIDSet(TargetID{"1", "webhook"}), "[1:webhook]"}, + } + + for i, testCase := range testCases { + result := testCase.set.String() + + if result != testCase.expectedResult { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestTargetIDSetClone(t *testing.T) { + testCases := []struct { + set TargetIDSet + targetIDToAdd TargetID + }{ + {NewTargetIDSet(), TargetID{"1", "webhook"}}, + {NewTargetIDSet(TargetID{"1", "webhook"}), TargetID{"2", "webhook"}}, + {NewTargetIDSet(TargetID{"1", "webhook"}, TargetID{"2", "amqp"}), TargetID{"2", "webhook"}}, + } + + for i, testCase := range testCases { + result := testCase.set.Clone() + + if !reflect.DeepEqual(result, testCase.set) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.set, result) + } + + result.add(testCase.targetIDToAdd) + if reflect.DeepEqual(result, testCase.set) { + t.Fatalf("test %v: result: expected: not equal, got: equal", i+1) + } + } +} + +func TestTargetIDSetUnion(t *testing.T) { + testCases := []struct { + set TargetIDSet + setToAdd TargetIDSet + expectedResult TargetIDSet + }{ + {NewTargetIDSet(), NewTargetIDSet(), NewTargetIDSet()}, + {NewTargetIDSet(), NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"1", "webhook"})}, + {NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(), NewTargetIDSet(TargetID{"1", "webhook"})}, + {NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"2", "amqp"}), NewTargetIDSet(TargetID{"1", "webhook"}, TargetID{"2", "amqp"})}, + {NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"1", "webhook"})}, + } + + for i, testCase := range testCases { + result := testCase.set.Union(testCase.setToAdd) + + if !reflect.DeepEqual(testCase.expectedResult, result) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, 
result) + } + } +} + +func TestTargetIDSetDifference(t *testing.T) { + testCases := []struct { + set TargetIDSet + setToRemove TargetIDSet + expectedResult TargetIDSet + }{ + {NewTargetIDSet(), NewTargetIDSet(), NewTargetIDSet()}, + {NewTargetIDSet(), NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet()}, + {NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(), NewTargetIDSet(TargetID{"1", "webhook"})}, + {NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"2", "amqp"}), NewTargetIDSet(TargetID{"1", "webhook"})}, + {NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet(TargetID{"1", "webhook"}), NewTargetIDSet()}, + } + + for i, testCase := range testCases { + result := testCase.set.Difference(testCase.setToRemove) + + if !reflect.DeepEqual(testCase.expectedResult, result) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestNewTargetIDSet(t *testing.T) { + testCases := []struct { + targetIDs []TargetID + expectedResult TargetIDSet + }{ + {[]TargetID{}, NewTargetIDSet()}, + {[]TargetID{{"1", "webhook"}}, NewTargetIDSet(TargetID{"1", "webhook"})}, + {[]TargetID{{"1", "webhook"}, {"2", "amqp"}}, NewTargetIDSet(TargetID{"1", "webhook"}, TargetID{"2", "amqp"})}, + } + + for i, testCase := range testCases { + result := NewTargetIDSet(testCase.targetIDs...) + + if !reflect.DeepEqual(testCase.expectedResult, result) { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} diff --git a/pkg/event/targetlist.go b/pkg/event/targetlist.go new file mode 100644 index 000000000..35f5a4cb2 --- /dev/null +++ b/pkg/event/targetlist.go @@ -0,0 +1,127 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package event + +import ( + "fmt" + "sync" +) + +// Target - event target interface +type Target interface { + ID() TargetID + Send(Event) error + Close() error +} + +// TargetList - holds list of targets indexed by target ID. +type TargetList struct { + sync.RWMutex + targets map[TargetID]Target +} + +// Add - adds unique target to target list. +func (list *TargetList) Add(target Target) error { + list.Lock() + defer list.Unlock() + + if _, ok := list.targets[target.ID()]; ok { + return fmt.Errorf("target %v already exists", target.ID()) + } + + list.targets[target.ID()] = target + return nil +} + +// Exists - checks whether target by target ID exists or not. +func (list *TargetList) Exists(id TargetID) bool { + list.RLock() + defer list.RUnlock() + + _, found := list.targets[id] + return found +} + +// Remove - closes and removes targets by given target IDs. +func (list *TargetList) Remove(ids ...TargetID) map[TargetID]error { + list.Lock() + defer list.Unlock() + + errors := make(map[TargetID]error) + + var wg sync.WaitGroup + for _, id := range ids { + if target, ok := list.targets[id]; ok { + wg.Add(1) + go func(id TargetID, target Target) { + defer wg.Done() + if err := target.Close(); err != nil { + errors[id] = err + } + }(id, target) + } + } + wg.Wait() + + for _, id := range ids { + delete(list.targets, id) + } + + return errors +} + +// List - returns available target IDs. 
+func (list *TargetList) List() []TargetID { + list.RLock() + defer list.RUnlock() + + keys := []TargetID{} + for k := range list.targets { + keys = append(keys, k) + } + + return keys +} + +// Send - sends events to targets identified by target IDs. +func (list *TargetList) Send(event Event, targetIDs ...TargetID) map[TargetID]error { + list.Lock() + defer list.Unlock() + + errors := make(map[TargetID]error) + + var wg sync.WaitGroup + for _, id := range targetIDs { + if target, ok := list.targets[id]; ok { + wg.Add(1) + go func(id TargetID, target Target) { + defer wg.Done() + if err := target.Send(event); err != nil { + errors[id] = err + } + }(id, target) + } + } + wg.Wait() + + return errors +} + +// NewTargetList - creates TargetList. +func NewTargetList() *TargetList { + return &TargetList{targets: make(map[TargetID]Target)} +} diff --git a/pkg/event/targetlist_test.go b/pkg/event/targetlist_test.go new file mode 100644 index 000000000..56dc0d6dd --- /dev/null +++ b/pkg/event/targetlist_test.go @@ -0,0 +1,272 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package event + +import ( + "crypto/rand" + "errors" + "reflect" + "testing" + "time" +) + +type ExampleTarget struct { + id TargetID + sendErr bool + closeErr bool +} + +func (target ExampleTarget) ID() TargetID { + return target.id +} + +func (target ExampleTarget) Send(eventData Event) error { + b := make([]byte, 1) + if _, err := rand.Read(b); err != nil { + panic(err) + } + + time.Sleep(time.Duration(b[0]) * time.Millisecond) + + if target.sendErr { + return errors.New("send error") + } + + return nil +} + +func (target ExampleTarget) Close() error { + if target.closeErr { + return errors.New("close error") + } + + return nil +} + +func TestTargetListAdd(t *testing.T) { + targetListCase1 := NewTargetList() + + targetListCase2 := NewTargetList() + if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil { + panic(err) + } + + targetListCase3 := NewTargetList() + if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil { + panic(err) + } + + testCases := []struct { + targetList *TargetList + target Target + expectedResult []TargetID + expectErr bool + }{ + {targetListCase1, &ExampleTarget{TargetID{"1", "webhook"}, false, false}, []TargetID{{"1", "webhook"}}, false}, + {targetListCase2, &ExampleTarget{TargetID{"1", "webhook"}, false, false}, []TargetID{{"2", "testcase"}, {"1", "webhook"}}, false}, + {targetListCase3, &ExampleTarget{TargetID{"3", "testcase"}, false, false}, nil, true}, + } + + for i, testCase := range testCases { + err := testCase.targetList.Add(testCase.target) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + result := testCase.targetList.List() + + if len(result) != len(testCase.expectedResult) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + + for _, targetID1 := 
range result { + var found bool + for _, targetID2 := range testCase.expectedResult { + if reflect.DeepEqual(targetID1, targetID2) { + found = true + break + } + } + if !found { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } + } + } +} + +func TestTargetListExists(t *testing.T) { + targetListCase1 := NewTargetList() + + targetListCase2 := NewTargetList() + if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil { + panic(err) + } + + targetListCase3 := NewTargetList() + if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil { + panic(err) + } + + testCases := []struct { + targetList *TargetList + targetID TargetID + expectedResult bool + }{ + {targetListCase1, TargetID{"1", "webhook"}, false}, + {targetListCase2, TargetID{"1", "webhook"}, false}, + {targetListCase3, TargetID{"3", "testcase"}, true}, + } + + for i, testCase := range testCases { + result := testCase.targetList.Exists(testCase.targetID) + + if result != testCase.expectedResult { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestTargetListRemove(t *testing.T) { + targetListCase1 := NewTargetList() + + targetListCase2 := NewTargetList() + if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil { + panic(err) + } + + targetListCase3 := NewTargetList() + if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, true}); err != nil { + panic(err) + } + + testCases := []struct { + targetList *TargetList + targetID TargetID + expectErr bool + }{ + {targetListCase1, TargetID{"1", "webhook"}, false}, + {targetListCase2, TargetID{"1", "webhook"}, false}, + {targetListCase3, TargetID{"3", "testcase"}, true}, + } + + for i, testCase := range testCases { + errors := testCase.targetList.Remove(testCase.targetID) + err := errors[testCase.targetID] + expectErr 
:= (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + } +} + +func TestTargetListList(t *testing.T) { + targetListCase1 := NewTargetList() + + targetListCase2 := NewTargetList() + if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil { + panic(err) + } + + targetListCase3 := NewTargetList() + if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil { + panic(err) + } + if err := targetListCase3.Add(&ExampleTarget{TargetID{"1", "webhook"}, false, false}); err != nil { + panic(err) + } + + testCases := []struct { + targetList *TargetList + expectedResult []TargetID + }{ + {targetListCase1, []TargetID{}}, + {targetListCase2, []TargetID{{"2", "testcase"}}}, + {targetListCase3, []TargetID{{"3", "testcase"}, {"1", "webhook"}}}, + } + + for i, testCase := range testCases { + result := testCase.targetList.List() + + if len(result) != len(testCase.expectedResult) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + + for _, targetID1 := range result { + var found bool + for _, targetID2 := range testCase.expectedResult { + if reflect.DeepEqual(targetID1, targetID2) { + found = true + break + } + } + if !found { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } + } +} + +func TestTargetListSend(t *testing.T) { + targetListCase1 := NewTargetList() + + targetListCase2 := NewTargetList() + if err := targetListCase2.Add(&ExampleTarget{TargetID{"2", "testcase"}, false, false}); err != nil { + panic(err) + } + + targetListCase3 := NewTargetList() + if err := targetListCase3.Add(&ExampleTarget{TargetID{"3", "testcase"}, false, false}); err != nil { + panic(err) + } + + targetListCase4 := NewTargetList() + if err := targetListCase4.Add(&ExampleTarget{TargetID{"4", "testcase"}, true, false}); err != nil { + 
panic(err)
+	}
+
+	testCases := []struct {
+		targetList *TargetList
+		targetID   TargetID
+		expectErr  bool
+	}{
+		{targetListCase1, TargetID{"1", "webhook"}, false},
+		{targetListCase2, TargetID{"1", "non-existent"}, false},
+		{targetListCase3, TargetID{"3", "testcase"}, false},
+		{targetListCase4, TargetID{"4", "testcase"}, true},
+	}
+
+	for i, testCase := range testCases {
+		errors := testCase.targetList.Send(Event{}, testCase.targetID)
+		err := errors[testCase.targetID]
+		expectErr := (err != nil)
+
+		if expectErr != testCase.expectErr {
+			t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr)
+		}
+	}
+}
+
+func TestNewTargetList(t *testing.T) {
+	if result := NewTargetList(); result == nil {
+		t.Fatalf("test: result: expected: <non-nil>, got: <nil>")
+	}
+}
diff --git a/pkg/net/host.go b/pkg/net/host.go
new file mode 100644
index 000000000..318c6c499
--- /dev/null
+++ b/pkg/net/host.go
@@ -0,0 +1,150 @@
+/*
+ * Minio Cloud Storage, (C) 2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package net
+
+import (
+	"encoding/json"
+	"errors"
+	"net"
+	"regexp"
+	"strings"
+)
+
+var hostLabelRegexp = regexp.MustCompile("^[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?$")
+
+// Host - holds network host IP/name and its port.
+type Host struct { + Name string + Port Port + IsPortSet bool +} + +// IsEmpty - returns whether Host is empty or not +func (host Host) IsEmpty() bool { + return host.Name == "" +} + +// String - returns string representation of Host. +func (host Host) String() string { + if !host.IsPortSet { + return host.Name + } + + return host.Name + ":" + host.Port.String() +} + +// Equal - checks whether given host is equal or not. +func (host Host) Equal(compHost Host) bool { + return host.String() == compHost.String() +} + +// MarshalJSON - converts Host into JSON data +func (host Host) MarshalJSON() ([]byte, error) { + return json.Marshal(host.String()) +} + +// UnmarshalJSON - parses data into Host. +func (host *Host) UnmarshalJSON(data []byte) (err error) { + var s string + if err = json.Unmarshal(data, &s); err != nil { + return err + } + + // Allow empty string + if s == "" { + *host = Host{} + return nil + } + + var h *Host + if h, err = ParseHost(s); err != nil { + return err + } + + *host = *h + return nil +} + +// ParseHost - parses string into Host +func ParseHost(s string) (*Host, error) { + isValidHost := func(host string) bool { + if host == "" { + return false + } + + if ip := net.ParseIP(host); ip != nil { + return true + } + + // host is not a valid IPv4 or IPv6 address + // host may be a hostname + // refer https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names + // why checks are done like below + if len(host) < 1 || len(host) > 253 { + return false + } + + for _, label := range strings.Split(host, ".") { + if len(label) < 1 || len(label) > 63 { + return false + } + + if !hostLabelRegexp.MatchString(label) { + return false + } + } + + return true + } + + var port Port + var isPortSet bool + host, portStr, err := net.SplitHostPort(s) + if err != nil { + if !strings.Contains(err.Error(), "missing port in address") { + return nil, err + } + + host = s + portStr = "" + } else { + if port, err = ParsePort(portStr); err != nil { + return nil, 
err + } + + isPortSet = true + } + + if !isValidHost(host) { + return nil, errors.New("invalid hostname") + } + + return &Host{ + Name: host, + Port: port, + IsPortSet: isPortSet, + }, nil +} + +// MustParseHost - parses given string to Host, else panics. +func MustParseHost(s string) *Host { + host, err := ParseHost(s) + if err != nil { + panic(err) + } + return host +} diff --git a/pkg/net/host_test.go b/pkg/net/host_test.go new file mode 100644 index 000000000..21b94c345 --- /dev/null +++ b/pkg/net/host_test.go @@ -0,0 +1,236 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package net + +import ( + "reflect" + "testing" +) + +func TestHostIsEmpty(t *testing.T) { + testCases := []struct { + host Host + expectedResult bool + }{ + {Host{"", 0, false}, true}, + {Host{"", 0, true}, true}, + {Host{"play", 9000, false}, false}, + {Host{"play", 9000, true}, false}, + } + + for i, testCase := range testCases { + result := testCase.host.IsEmpty() + + if result != testCase.expectedResult { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestHostString(t *testing.T) { + testCases := []struct { + host Host + expectedStr string + }{ + {Host{"", 0, false}, ""}, + {Host{"", 0, true}, ":0"}, + {Host{"play", 9000, false}, "play"}, + {Host{"play", 9000, true}, "play:9000"}, + } + + for i, testCase := range testCases { + str := testCase.host.String() + + if str != testCase.expectedStr { + t.Fatalf("test %v: string: expected: %v, got: %v", i+1, testCase.expectedStr, str) + } + } +} + +func TestHostEqual(t *testing.T) { + testCases := []struct { + host Host + compHost Host + expectedResult bool + }{ + {Host{"", 0, false}, Host{"", 0, true}, false}, + {Host{"play", 9000, true}, Host{"play", 9000, false}, false}, + {Host{"", 0, true}, Host{"", 0, true}, true}, + {Host{"play", 9000, false}, Host{"play", 9000, false}, true}, + {Host{"play", 9000, true}, Host{"play", 9000, true}, true}, + } + + for i, testCase := range testCases { + result := testCase.host.Equal(testCase.compHost) + + if result != testCase.expectedResult { + t.Fatalf("test %v: string: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestHostMarshalJSON(t *testing.T) { + testCases := []struct { + host Host + expectedData []byte + expectErr bool + }{ + {Host{}, []byte(`""`), false}, + {Host{"play", 0, false}, []byte(`"play"`), false}, + {Host{"play", 0, true}, []byte(`"play:0"`), false}, + {Host{"play", 9000, true}, []byte(`"play:9000"`), false}, + {Host{"play.minio.io", 0, false}, 
[]byte(`"play.minio.io"`), false}, + {Host{"play.minio.io", 9000, true}, []byte(`"play.minio.io:9000"`), false}, + {Host{"147.75.201.93", 0, false}, []byte(`"147.75.201.93"`), false}, + {Host{"147.75.201.93", 9000, true}, []byte(`"147.75.201.93:9000"`), false}, + {Host{"play12", 0, false}, []byte(`"play12"`), false}, + {Host{"12play", 0, false}, []byte(`"12play"`), false}, + {Host{"play-minio-io", 0, false}, []byte(`"play-minio-io"`), false}, + {Host{"play--minio.io", 0, false}, []byte(`"play--minio.io"`), false}, + } + + for i, testCase := range testCases { + data, err := testCase.host.MarshalJSON() + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(data, testCase.expectedData) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data)) + } + } + } +} + +func TestHostUnmarshalJSON(t *testing.T) { + testCases := []struct { + data []byte + expectedHost *Host + expectErr bool + }{ + {[]byte(`""`), &Host{}, false}, + {[]byte(`"play"`), &Host{"play", 0, false}, false}, + {[]byte(`"play:0"`), &Host{"play", 0, true}, false}, + {[]byte(`"play:9000"`), &Host{"play", 9000, true}, false}, + {[]byte(`"play.minio.io"`), &Host{"play.minio.io", 0, false}, false}, + {[]byte(`"play.minio.io:9000"`), &Host{"play.minio.io", 9000, true}, false}, + {[]byte(`"147.75.201.93"`), &Host{"147.75.201.93", 0, false}, false}, + {[]byte(`"147.75.201.93:9000"`), &Host{"147.75.201.93", 9000, true}, false}, + {[]byte(`"play12"`), &Host{"play12", 0, false}, false}, + {[]byte(`"12play"`), &Host{"12play", 0, false}, false}, + {[]byte(`"play-minio-io"`), &Host{"play-minio-io", 0, false}, false}, + {[]byte(`"play--minio.io"`), &Host{"play--minio.io", 0, false}, false}, + {[]byte(`":9000"`), nil, true}, + {[]byte(`"play:"`), nil, true}, + {[]byte(`"play::"`), nil, true}, + {[]byte(`"play:90000"`), 
nil, true}, + {[]byte(`"play:-10"`), nil, true}, + {[]byte(`"play-"`), nil, true}, + {[]byte(`"play.minio..io"`), nil, true}, + {[]byte(`":"`), nil, true}, + } + + for i, testCase := range testCases { + var host Host + err := host.UnmarshalJSON(testCase.data) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(&host, testCase.expectedHost) { + t.Fatalf("test %v: host: expected: %#v, got: %#v", i+1, testCase.expectedHost, host) + } + } + } +} + +func TestParseHost(t *testing.T) { + testCases := []struct { + s string + expectedHost *Host + expectErr bool + }{ + {"play", &Host{"play", 0, false}, false}, + {"play:0", &Host{"play", 0, true}, false}, + {"play:9000", &Host{"play", 9000, true}, false}, + {"play.minio.io", &Host{"play.minio.io", 0, false}, false}, + {"play.minio.io:9000", &Host{"play.minio.io", 9000, true}, false}, + {"147.75.201.93", &Host{"147.75.201.93", 0, false}, false}, + {"147.75.201.93:9000", &Host{"147.75.201.93", 9000, true}, false}, + {"play12", &Host{"play12", 0, false}, false}, + {"12play", &Host{"12play", 0, false}, false}, + {"play-minio-io", &Host{"play-minio-io", 0, false}, false}, + {"play--minio.io", &Host{"play--minio.io", 0, false}, false}, + {":9000", nil, true}, + {"play:", nil, true}, + {"play::", nil, true}, + {"play:90000", nil, true}, + {"play:-10", nil, true}, + {"play-", nil, true}, + {"play.minio..io", nil, true}, + {":", nil, true}, + {"", nil, true}, + } + + for i, testCase := range testCases { + host, err := ParseHost(testCase.s) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(host, testCase.expectedHost) { + t.Fatalf("test %v: host: expected: %#v, got: %#v", i+1, testCase.expectedHost, host) + } + } + 
} +} + +func TestMustParseHost(t *testing.T) { + testCases := []struct { + s string + expectedHost *Host + }{ + {"play", &Host{"play", 0, false}}, + {"play:0", &Host{"play", 0, true}}, + {"play:9000", &Host{"play", 9000, true}}, + {"play.minio.io", &Host{"play.minio.io", 0, false}}, + {"play.minio.io:9000", &Host{"play.minio.io", 9000, true}}, + {"147.75.201.93", &Host{"147.75.201.93", 0, false}}, + {"147.75.201.93:9000", &Host{"147.75.201.93", 9000, true}}, + {"play12", &Host{"play12", 0, false}}, + {"12play", &Host{"12play", 0, false}}, + {"play-minio-io", &Host{"play-minio-io", 0, false}}, + {"play--minio.io", &Host{"play--minio.io", 0, false}}, + } + + for i, testCase := range testCases { + host := MustParseHost(testCase.s) + + if !reflect.DeepEqual(host, testCase.expectedHost) { + t.Fatalf("test %v: host: expected: %#v, got: %#v", i+1, testCase.expectedHost, host) + } + } +} diff --git a/pkg/net/port.go b/pkg/net/port.go new file mode 100644 index 000000000..08cfe7308 --- /dev/null +++ b/pkg/net/port.go @@ -0,0 +1,54 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package net + +import ( + "errors" + "strconv" +) + +// Port - network port +type Port uint16 + +// String - returns string representation of port. 
+func (p Port) String() string { + return strconv.Itoa(int(p)) +} + +// ParsePort - parses string into Port +func ParsePort(s string) (p Port, err error) { + var i int + if i, err = strconv.Atoi(s); err != nil { + return p, errors.New("invalid port number") + } + + if i < 0 || i > 65535 { + return p, errors.New("port must be between 0 to 65535") + } + + return Port(i), nil +} + +// MustParsePort - parses string into Port, else panics +func MustParsePort(s string) Port { + p, err := ParsePort(s) + if err != nil { + panic(err) + } + + return p +} diff --git a/pkg/net/port_test.go b/pkg/net/port_test.go new file mode 100644 index 000000000..c13c32b80 --- /dev/null +++ b/pkg/net/port_test.go @@ -0,0 +1,92 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package net + +import ( + "testing" +) + +func TestPortString(t *testing.T) { + testCases := []struct { + port Port + expectedStr string + }{ + {Port(0), "0"}, + {Port(9000), "9000"}, + {Port(65535), "65535"}, + {Port(1024), "1024"}, + } + + for i, testCase := range testCases { + str := testCase.port.String() + + if str != testCase.expectedStr { + t.Fatalf("test %v: error: port: %v, got: %v", i+1, testCase.expectedStr, str) + } + } +} + +func TestParsePort(t *testing.T) { + testCases := []struct { + s string + expectedPort Port + expectErr bool + }{ + {"0", Port(0), false}, + {"9000", Port(9000), false}, + {"65535", Port(65535), false}, + {"90000", Port(0), true}, + {"-10", Port(0), true}, + {"", Port(0), true}, + {"http", Port(0), true}, + {" 1024", Port(0), true}, + } + + for i, testCase := range testCases { + port, err := ParsePort(testCase.s) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if port != testCase.expectedPort { + t.Fatalf("test %v: error: port: %v, got: %v", i+1, testCase.expectedPort, port) + } + } + } +} + +func TestMustParsePort(t *testing.T) { + testCases := []struct { + s string + expectedPort Port + }{ + {"0", Port(0)}, + {"9000", Port(9000)}, + {"65535", Port(65535)}, + } + + for i, testCase := range testCases { + port := MustParsePort(testCase.s) + + if port != testCase.expectedPort { + t.Fatalf("test %v: error: port: %v, got: %v", i+1, testCase.expectedPort, port) + } + } +} diff --git a/pkg/net/url.go b/pkg/net/url.go new file mode 100644 index 000000000..b82db494b --- /dev/null +++ b/pkg/net/url.go @@ -0,0 +1,103 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package net + +import ( + "encoding/json" + "errors" + "net/url" + "path" +) + +// URL - improved JSON friendly url.URL. +type URL url.URL + +// IsEmpty - checks URL is empty or not. +func (u URL) IsEmpty() bool { + return u.String() == "" +} + +// String - returns string representation of URL. +func (u URL) String() string { + // if port number 80 and 443, remove for http and https scheme respectively + if u.Host != "" { + host := MustParseHost(u.Host) + switch { + case u.Scheme == "http" && host.Port == 80: + fallthrough + case u.Scheme == "https" && host.Port == 443: + u.Host = host.Name + } + } + + uu := url.URL(u) + return uu.String() +} + +// MarshalJSON - converts to JSON string data. +func (u URL) MarshalJSON() ([]byte, error) { + return json.Marshal(u.String()) +} + +// UnmarshalJSON - parses given data into URL. +func (u *URL) UnmarshalJSON(data []byte) (err error) { + var s string + if err = json.Unmarshal(data, &s); err != nil { + return err + } + + // Allow empty string + if s == "" { + *u = URL{} + return nil + } + + var ru *URL + if ru, err = ParseURL(s); err != nil { + return err + } + + *u = *ru + return nil +} + +// ParseURL - parses string into URL. +func ParseURL(s string) (u *URL, err error) { + var uu *url.URL + if uu, err = url.Parse(s); err != nil { + return nil, err + } + + if uu.Host == "" { + if uu.Scheme != "" { + return nil, errors.New("scheme appears with empty host") + } + } else if _, err = ParseHost(uu.Host); err != nil { + return nil, err + } + + // Clean path in the URL. 
+ // Note: path.Clean() is used on purpose because in MS Windows filepath.Clean() converts + // `/` into `\` ie `/foo` becomes `\foo` + if uu.Path != "" { + uu.Path = path.Clean(uu.Path) + } + + v := URL(*uu) + u = &v + return u, nil +} diff --git a/pkg/net/url_test.go b/pkg/net/url_test.go new file mode 100644 index 000000000..9241935f4 --- /dev/null +++ b/pkg/net/url_test.go @@ -0,0 +1,167 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package net + +import ( + "reflect" + "testing" +) + +func TestURLIsEmpty(t *testing.T) { + testCases := []struct { + url URL + expectedResult bool + }{ + {URL{}, true}, + {URL{Scheme: "http", Host: "play"}, false}, + {URL{Path: "path/to/play"}, false}, + } + + for i, testCase := range testCases { + result := testCase.url.IsEmpty() + + if result != testCase.expectedResult { + t.Fatalf("test %v: result: expected: %v, got: %v", i+1, testCase.expectedResult, result) + } + } +} + +func TestURLString(t *testing.T) { + testCases := []struct { + url URL + expectedStr string + }{ + {URL{}, ""}, + {URL{Scheme: "http", Host: "play"}, "http://play"}, + {URL{Scheme: "https", Host: "play:443"}, "https://play"}, + {URL{Scheme: "https", Host: "play.minio.io:80"}, "https://play.minio.io:80"}, + {URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, "https://147.75.201.93:9000/"}, + {URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, "https://s3.amazonaws.com/?location"}, + {URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject"}, "http://myminio:10000/mybucket/myobject"}, + {URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, "ftp://myftp.server:10000/myuser"}, + {URL{Path: "path/to/play"}, "path/to/play"}, + } + + for i, testCase := range testCases { + str := testCase.url.String() + + if str != testCase.expectedStr { + t.Fatalf("test %v: string: expected: %v, got: %v", i+1, testCase.expectedStr, str) + } + } +} + +func TestURLMarshalJSON(t *testing.T) { + testCases := []struct { + url URL + expectedData []byte + expectErr bool + }{ + {URL{}, []byte(`""`), false}, + {URL{Scheme: "http", Host: "play"}, []byte(`"http://play"`), false}, + {URL{Scheme: "https", Host: "play.minio.io:0"}, []byte(`"https://play.minio.io:0"`), false}, + {URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, []byte(`"https://147.75.201.93:9000/"`), false}, + {URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: 
"location"}, []byte(`"https://s3.amazonaws.com/?location"`), false}, + {URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject"}, []byte(`"http://myminio:10000/mybucket/myobject"`), false}, + {URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, []byte(`"ftp://myftp.server:10000/myuser"`), false}, + } + + for i, testCase := range testCases { + data, err := testCase.url.MarshalJSON() + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(data, testCase.expectedData) { + t.Fatalf("test %v: data: expected: %v, got: %v", i+1, string(testCase.expectedData), string(data)) + } + } + } +} + +func TestURLUnmarshalJSON(t *testing.T) { + testCases := []struct { + data []byte + expectedURL *URL + expectErr bool + }{ + {[]byte(`""`), &URL{}, false}, + {[]byte(`"http://play"`), &URL{Scheme: "http", Host: "play"}, false}, + {[]byte(`"https://play.minio.io:0"`), &URL{Scheme: "https", Host: "play.minio.io:0"}, false}, + {[]byte(`"https://147.75.201.93:9000/"`), &URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, false}, + {[]byte(`"https://s3.amazonaws.com/?location"`), &URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, false}, + {[]byte(`"http://myminio:10000/mybucket//myobject/"`), &URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject"}, false}, + {[]byte(`"ftp://myftp.server:10000/myuser"`), &URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, false}, + {[]byte(`"myserver:1000"`), nil, true}, + {[]byte(`"http://:1000/mybucket"`), nil, true}, + {[]byte(`"https://147.75.201.93:90000/"`), nil, true}, + {[]byte(`"http:/play"`), nil, true}, + } + + for i, testCase := range testCases { + var url URL + err := url.UnmarshalJSON(testCase.data) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: 
error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(&url, testCase.expectedURL) { + t.Fatalf("test %v: host: expected: %#v, got: %#v", i+1, testCase.expectedURL, url) + } + } + } +} + +func TestParseURL(t *testing.T) { + testCases := []struct { + s string + expectedURL *URL + expectErr bool + }{ + {"http://play", &URL{Scheme: "http", Host: "play"}, false}, + {"https://play.minio.io:0", &URL{Scheme: "https", Host: "play.minio.io:0"}, false}, + {"https://147.75.201.93:9000/", &URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, false}, + {"https://s3.amazonaws.com/?location", &URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, false}, + {"http://myminio:10000/mybucket//myobject/", &URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject"}, false}, + {"ftp://myftp.server:10000/myuser", &URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, false}, + {"myserver:1000", nil, true}, + {"http://:1000/mybucket", nil, true}, + {"https://147.75.201.93:90000/", nil, true}, + {"http:/play", nil, true}, + } + + for i, testCase := range testCases { + url, err := ParseURL(testCase.s) + expectErr := (err != nil) + + if expectErr != testCase.expectErr { + t.Fatalf("test %v: error: expected: %v, got: %v", i+1, testCase.expectErr, expectErr) + } + + if !testCase.expectErr { + if !reflect.DeepEqual(url, testCase.expectedURL) { + t.Fatalf("test %v: host: expected: %#v, got: %#v", i+1, testCase.expectedURL, url) + } + } + } +} diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index f2c2bc211..000000000 --- a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,66 +0,0 @@ -# 0.10.0 - -* feature: Add a test hook (#180) -* feature: `ParseLevel` is now case-insensitive (#326) -* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) -* 
performance: avoid re-allocations on `WithFields` (#335) - -# 0.9.0 - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository -* logrus/core: run tests with `-race` -* logrus/core: detect TTY based on `stderr` -* logrus/core: support `WithError` on logger -* logrus/core: Solaris support - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/github.com/Sirupsen/logrus/LICENSE b/vendor/github.com/Sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42f..000000000 --- a/vendor/github.com/Sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index ee8bd12be..000000000 --- a/vendor/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,386 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. 
The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" 
err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stderr instead of stdout, could also be a file. - log.SetOutput(os.Stderr) - - // Only log the warning severity or above. - log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "github.com/Sirupsen/logrus" -) - -// Create a new instance 
of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging though logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus using any of the `printf`-family functions should be -seen as a hint you should add a field, however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. 
- log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). - -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | -| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. 
| -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [Influxus] (http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB] (http://influxdata.com/) | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | -| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | -| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | -| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | -| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | -| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| -| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| -| 
[Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. - -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields` some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `AddFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. 
- log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY set the - `DisableColors` field to `true` -* `logrus.JSONFormatter`. Logs fields as JSON. - -Third party logging formatters: - -* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. -* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` on information about those fields or read the - // source of the official loggers. - serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. 
That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - -#### Tools - -| Tool | Description | -| ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| - -#### Testing - -Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: - -* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook -* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): - -```go -logger, hook := NewNullLogger() -logger.Error("Hello error") - -assert.Equal(1, len(hook.Entries)) -assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) -assert.Equal("Hello error", hook.LastEntry().Message) - -hook.Reset() -assert.Nil(hook.LastEntry()) -``` diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go deleted file mode 100644 index dddd5f877..000000000 --- a/vendor/github.com/Sirupsen/logrus/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Package logrus is a structured logger for Go, completely API compatible with the standard library logger. 
- - -The simplest way to use Logrus is simply the package-level exported logger: - - package main - - import ( - log "github.com/Sirupsen/logrus" - ) - - func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "number": 1, - "size": 10, - }).Info("A walrus appears") - } - -Output: - time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 - -For a full guide visit https://github.com/Sirupsen/logrus -*/ -package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index 89e966e7b..000000000 --- a/vendor/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,264 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "io" - "os" - "time" -) - -// Defines the key when adding errors using WithError. -var ErrorKey = "error" - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. - Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - -// Returns the string representation from the reader and ultimately the -// formatter. 
-func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() - if err != nil { - return "", err - } - - return reader.String(), err -} - -// Add an error as single field (using the key defined in ErrorKey) to the Entry. -func (entry *Entry) WithError(err error) *Entry { - return entry.WithField(ErrorKey, err) -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := make(Fields, len(entry.Data)+len(fields)) - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) log(level Level, msg string) { - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } - - reader, err := entry.Reader() - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() - } - - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. 
- if level <= PanicLevel { - panic(&entry) - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) -} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) 
-} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) 
- return msg[:len(msg)-1] -} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go deleted file mode 100644 index 9a0120ac1..000000000 --- a/vendor/github.com/Sirupsen/logrus/exported.go +++ /dev/null @@ -1,193 +0,0 @@ -package logrus - -import ( - "io" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() - std.Level = level -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.Level -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) -} - -// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. -func WithError(err error) *Entry { - return std.WithField(ErrorKey, err) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. 
-// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) -} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) 
-} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index b5fbe934d..000000000 --- a/vendor/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,45 +0,0 @@ -package logrus - -import "time" - -const DefaultTimestampFormat = time.RFC3339 - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. 
It exposes all the fields, including the default ones: -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error .. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to not silently overwrite `time`, `msg` and `level` fields when -// dumping it. If this code wasn't there doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// Would just silently drop the user provided level. Instead with this code -// it'll logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields) { - if t, ok := data["time"]; ok { - data["fields.time"] = t - } - - if m, ok := data["msg"]; ok { - data["fields.msg"] = m - } - - if l, ok := data["level"]; ok { - data["fields.level"] = l - } -} diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go deleted file mode 100644 index 3f151cdc3..000000000 --- a/vendor/github.com/Sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that this is not -// fired in a goroutine or a channel with workers, you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. 
-type LevelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks LevelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. -func (hooks LevelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index 2ad6dc5cf..000000000 --- a/vendor/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,41 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. 
- TimestampFormat string -} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(timestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go deleted file mode 100644 index 2fdb23176..000000000 --- a/vendor/github.com/Sirupsen/logrus/logger.go +++ /dev/null @@ -1,212 +0,0 @@ -package logrus - -import ( - "io" - "os" - "sync" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stderr`. You can also set this to - // something more adventorous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance. These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks LevelHooks - // All log entries pass through the formatter before logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter` for which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but to a file it wouldn't. 
You can easily implement your - // own that implements the `Formatter` interface, see the `README` or included - // formatters for examples. - Formatter Formatter - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in - Level Level - // Used to sync writing to the log. - mu sync.Mutex -} - -// Creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(LevelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. -func New() *Logger { - return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, - } -} - -// Adds a field to the log entry, note that you it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// If you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - return NewEntry(logger).WithField(key, value) -} - -// Adds a struct of fields to the log entry. All it does is call `WithField` for -// each `Field`. -func (logger *Logger) WithFields(fields Fields) *Entry { - return NewEntry(logger).WithFields(fields) -} - -// Add an error as single field to the log entry. All it does is call -// `WithError` for the given `error`. -func (logger *Logger) WithError(err error) *Entry { - return NewEntry(logger).WithError(err) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugf(format, args...) 
- } -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infof(format, args...) - } -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - NewEntry(logger).Printf(format, args...) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorf(format, args...) - } -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalf(format, args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicf(format, args...) - } -} - -func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debug(args...) - } -} - -func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Info(args...) - } -} - -func (logger *Logger) Print(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Error(args...) - } -} - -func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatal(args...) 
- } - os.Exit(1) -} - -func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panic(args...) - } -} - -func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugln(args...) - } -} - -func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infoln(args...) - } -} - -func (logger *Logger) Println(args ...interface{}) { - NewEntry(logger).Println(args...) -} - -func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorln(args...) - } -} - -func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalln(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicln(args...) - } -} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go deleted file mode 100644 index e59669111..000000000 --- a/vendor/github.com/Sirupsen/logrus/logrus.go +++ /dev/null @@ -1,143 +0,0 @@ -package logrus - -import ( - "fmt" - "log" - "strings" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint8 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". 
-func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" - } - - return "unknown" -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch strings.ToLower(lvl) { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// A constant exposing all logging levels -var AllLevels = []Level{ - PanicLevel, - FatalLevel, - ErrorLevel, - WarnLevel, - InfoLevel, - DebugLevel, -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. -const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. 
- DebugLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var ( - _ StdLogger = &log.Logger{} - _ StdLogger = &Entry{} - _ StdLogger = &Logger{} -) - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. -type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} - -// The FieldLogger interface generalizes the Entry and Logger types -type FieldLogger interface { - WithField(key string, value interface{}) *Entry - WithFields(fields Fields) *Entry - WithError(err error) *Entry - - Debugf(format string, args ...interface{}) - Infof(format string, args ...interface{}) - Printf(format string, args ...interface{}) - Warnf(format string, args ...interface{}) - Warningf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) - Panicf(format string, args ...interface{}) - - Debug(args ...interface{}) - Info(args ...interface{}) - Print(args ...interface{}) - Warn(args ...interface{}) - Warning(args ...interface{}) - Error(args ...interface{}) - Fatal(args ...interface{}) - Panic(args ...interface{}) - - Debugln(args ...interface{}) - Infoln(args ...interface{}) - Println(args ...interface{}) - Warnln(args ...interface{}) - Warningln(args ...interface{}) - Errorln(args ...interface{}) - Fatalln(args ...interface{}) - Panicln(args ...interface{}) -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go deleted file mode 100644 index 71f8d67a5..000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build darwin 
freebsd openbsd netbsd dragonfly - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index a2c0b40db..000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index b343b3a37..000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd netbsd dragonfly - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if stderr's file descriptor is a terminal. 
-func IsTerminal() bool { - fd := syscall.Stderr - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go deleted file mode 100644 index 3e70bf7bf..000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build solaris - -package logrus - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal() bool { - _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) - return err == nil -} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 0146845d1..000000000 --- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if stderr's file descriptor is a terminal. 
-func IsTerminal() bool { - fd := syscall.Stderr - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 6afd0e031..000000000 --- a/vendor/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,161 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "runtime" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 - gray = 37 -) - -var ( - baseTimestamp time.Time - isTerminal bool -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Disable timestamp logging. useful when output is redirected to logging - // system that already adds timestamps. - DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. 
- DisableSorting bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var keys []string = make([]string, 0, len(entry.Data)) - for k := range entry.Data { - keys = append(keys, k) - } - - if !f.DisableSorting { - sort.Strings(keys) - } - - b := &bytes.Buffer{} - - prefixFieldClashes(entry.Data) - - isColorTerminal := isTerminal && (runtime.GOOS != "windows") - isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - if isColored { - f.printColored(b, entry, keys, timestampFormat) - } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - if entry.Message != "" { - f.appendKeyValue(b, "msg", entry.Message) - } - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) - } - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) - } -} - -func needsQuoting(text string) bool { - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { - return true - } - } - return 
false -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - - b.WriteString(key) - b.WriteByte('=') - - switch value := value.(type) { - case string: - if !needsQuoting(value) { - b.WriteString(value) - } else { - fmt.Fprintf(b, "%q", value) - } - case error: - errmsg := value.Error() - if !needsQuoting(errmsg) { - b.WriteString(errmsg) - } else { - fmt.Fprintf(b, "%q", value) - } - default: - fmt.Fprint(b, value) - } - - b.WriteByte(' ') -} diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go deleted file mode 100644 index f74d2aa5f..000000000 --- a/vendor/github.com/Sirupsen/logrus/writer.go +++ /dev/null @@ -1,53 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() *io.PipeWriter { - return logger.WriterLevel(InfoLevel) -} - -func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { - reader, writer := io.Pipe() - - var printFunc func(args ...interface{}) - switch level { - case DebugLevel: - printFunc = logger.Debug - case InfoLevel: - printFunc = logger.Info - case WarnLevel: - printFunc = logger.Warn - case ErrorLevel: - printFunc = logger.Error - case FatalLevel: - printFunc = logger.Fatal - case PanicLevel: - printFunc = logger.Panic - default: - printFunc = logger.Print - } - - go logger.writerScanner(reader, printFunc) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { - scanner := bufio.NewScanner(reader) - for scanner.Scan() { - printFunc(scanner.Text()) - } - if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/vendor.json b/vendor/vendor.json index 9c384e9c3..367d40e00 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -80,11 
+80,6 @@ "revision": "f6be1abbb5abd0517522f850dd785990d373da7e", "revisionTime": "2017-09-13T22:19:17Z" }, - { - "path": "github.com/Sirupsen/logrus", - "revision": "32055c351ea8b00b96d70f28db48d9840feaf0ec", - "revisionTime": "2016-07-12T20:17:32-04:00" - }, { "checksumSHA1": "tX0Bq1gzqskL98nnB1X2rDqxH18=", "path": "github.com/aliyun/aliyun-oss-go-sdk/oss",