Updated vendoring of AWS SDK.

Vendored github.com/aws/aws-sdk-go/service/applicationautoscaling.
Russell Jones authored 2020-10-30 21:42:30 +00:00; committed by Russell Jones
parent c623aa4dc5
commit e94e4b5147
56 changed files with 12914 additions and 1079 deletions

go.mod

@ -13,7 +13,7 @@ require (
github.com/alecthomas/colour v0.1.0 // indirect
github.com/alecthomas/repr v0.0.0-20200325044227-4184120f674c // indirect
github.com/armon/go-radix v1.0.0
github.com/aws/aws-sdk-go v1.32.7
github.com/aws/aws-sdk-go v1.35.19
github.com/beevik/etree v1.1.0
github.com/boombuler/barcode v0.0.0-20161226211916-fe0f26ff6d26 // indirect
github.com/cjbassi/drawille-go v0.1.0 // indirect
@ -26,6 +26,7 @@ require (
github.com/fsouza/fake-gcs-server v1.11.6
github.com/ghodss/yaml v1.0.0
github.com/gizak/termui v0.0.0-20190224181052-63c2a0d70943
github.com/go-sql-driver/mysql v1.5.0 // indirect
github.com/gogo/protobuf v1.3.1
github.com/gokyle/hotp v0.0.0-20160218004637-c180d57d286b
github.com/golang/protobuf v1.4.2

go.sum

@ -90,6 +90,10 @@ github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgI
github.com/aws/aws-sdk-go v1.17.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.32.7 h1:H4VgdCSF1cHw0VD8zGc98T1bGdACoLkh/vK2L6wgOUU=
github.com/aws/aws-sdk-go v1.32.7/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.35.18 h1:Gka1bopihF2e9XFhuVZPrgafmOFpCsRtAPMYLp/0AfA=
github.com/aws/aws-sdk-go v1.35.18/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
github.com/aws/aws-sdk-go v1.35.19 h1:vdIqQnOIqTNtvnOdt9r3Bf/FiCJ7KV/7O2BIj4TPx2w=
github.com/aws/aws-sdk-go v1.35.19/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
github.com/beevik/etree v1.1.0 h1:T0xke/WvNtMoCqgzPhkX2r4rjY3GDZFi+FjpRZY2Jbs=
github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -351,6 +355,9 @@ github.com/jirfag/go-printf-func-name v0.0.0-20191110105641-45db9963cdd3/go.mod
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2yCeUc=
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/johannesboyne/gofakes3 v0.0.0-20191228161223-9aee1c78a252 h1:ZABLXRnnFNP5nkVzVBx2kQ/4GvSLUqcD2YUc+9Uc2Mo=


@ -43,7 +43,7 @@ type Config struct {
// An optional endpoint URL (hostname only or fully qualified URI)
// that overrides the default generated endpoint for a client. Set this
// to `nil` to use the default generated endpoint.
// to `nil` or the value to `""` to use the default generated endpoint.
//
// Note: You must still provide a `Region` value when specifying an
// endpoint for a client.
@ -138,7 +138,7 @@ type Config struct {
// `ExpectContinueTimeout` for information on adjusting the continue wait
// timeout. https://golang.org/pkg/net/http/#Transport
//
// You should use this flag to disble 100-Continue if you experience issues
// You should use this flag to disable 100-Continue if you experience issues
// with proxies or third party S3 compatible services.
S3Disable100Continue *bool
@ -183,7 +183,7 @@ type Config struct {
//
// Example:
// sess := session.Must(session.NewSession(aws.NewConfig()
// .WithEC2MetadataDiableTimeoutOverride(true)))
// .WithEC2MetadataDisableTimeoutOverride(true)))
//
// svc := s3.New(sess)
//
@ -194,7 +194,7 @@ type Config struct {
// both IPv4 and IPv6 addressing.
//
// Setting this for a service which does not support dual stack will fail
// to make requets. It is not recommended to set this value on the session
// to make requests. It is not recommended to set this value on the session
// as it will apply to all service clients created with the session. Even
// services which don't support dual stack endpoints.
//
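
A minimal sketch of how a couple of the Config fields touched above are typically used; the localhost:9000 endpoint is only a stand-in for an S3-compatible service and is not part of this change:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // Leaving Endpoint unset (nil or "") keeps the SDK's generated endpoint.
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))

        // Overriding the endpoint, e.g. for a third-party S3-compatible service.
        // A Region must still be provided when an explicit endpoint is set.
        svc := s3.New(sess, aws.NewConfig().
            WithEndpoint("http://localhost:9000").
            WithS3ForcePathStyle(true))
        fmt.Println(svc.ClientInfo.Endpoint) // http://localhost:9000
    }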


@ -225,6 +225,8 @@ var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointH
if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" {
r.Error = aws.ErrMissingRegion
} else if r.ClientInfo.Endpoint == "" {
// Was any endpoint provided by the user, or one was derived by the
// SDK's endpoint resolver?
r.Error = aws.ErrMissingEndpoint
}
}}


@ -50,7 +50,7 @@ package credentials
import (
"fmt"
"sync/atomic"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/awserr"
@ -173,7 +173,9 @@ type Expiry struct {
// the expiration time given to ensure no requests are made with expired
// tokens.
func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) {
e.expiration = expiration
// Passed in expirations should have the monotonic clock values stripped.
// This ensures time comparisons will be based on wall-time.
e.expiration = expiration.Round(0)
if window > 0 {
e.expiration = e.expiration.Add(-window)
}
@ -205,9 +207,10 @@ func (e *Expiry) ExpiresAt() time.Time {
// first instance of the credentials Value. All calls to Get() after that
// will return the cached credentials Value until IsExpired() returns true.
type Credentials struct {
creds atomic.Value
sf singleflight.Group
sf singleflight.Group
m sync.RWMutex
creds Value
provider Provider
}
@ -216,7 +219,6 @@ func NewCredentials(provider Provider) *Credentials {
c := &Credentials{
provider: provider,
}
c.creds.Store(Value{})
return c
}
@ -233,8 +235,17 @@ func NewCredentials(provider Provider) *Credentials {
//
// Passed in Context is equivalent to aws.Context, and context.Context.
func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
if curCreds := c.creds.Load(); !c.isExpired(curCreds) {
return curCreds.(Value), nil
// Check if credentials are cached, and not expired.
select {
case curCreds, ok := <-c.asyncIsExpired():
// ok will only be true, of the credentials were not expired. ok will
// be false and have no value if the credentials are expired.
if ok {
return curCreds, nil
}
case <-ctx.Done():
return Value{}, awserr.New("RequestCanceled",
"request context canceled", ctx.Err())
}
// Cannot pass context down to the actual retrieve, because the first
@ -252,18 +263,23 @@ func (c *Credentials) GetWithContext(ctx Context) (Value, error) {
}
}
func (c *Credentials) singleRetrieve(ctx Context) (creds interface{}, err error) {
if curCreds := c.creds.Load(); !c.isExpired(curCreds) {
return curCreds.(Value), nil
func (c *Credentials) singleRetrieve(ctx Context) (interface{}, error) {
c.m.Lock()
defer c.m.Unlock()
if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
return curCreds, nil
}
var creds Value
var err error
if p, ok := c.provider.(ProviderWithContext); ok {
creds, err = p.RetrieveWithContext(ctx)
} else {
creds, err = c.provider.Retrieve()
}
if err == nil {
c.creds.Store(creds)
c.creds = creds
}
return creds, err
@ -288,7 +304,10 @@ func (c *Credentials) Get() (Value, error) {
// This will override the Provider's expired state, and force Credentials
// to call the Provider's Retrieve().
func (c *Credentials) Expire() {
c.creds.Store(Value{})
c.m.Lock()
defer c.m.Unlock()
c.creds = Value{}
}
// IsExpired returns if the credentials are no longer valid, and need
@ -297,11 +316,32 @@ func (c *Credentials) Expire() {
// If the Credentials were forced to be expired with Expire() this will
// reflect that override.
func (c *Credentials) IsExpired() bool {
return c.isExpired(c.creds.Load())
c.m.RLock()
defer c.m.RUnlock()
return c.isExpiredLocked(c.creds)
}
// isExpired helper method wrapping the definition of expired credentials.
func (c *Credentials) isExpired(creds interface{}) bool {
// asyncIsExpired returns a channel of credentials Value. If the channel is
// closed the credentials are expired and credentials value are not empty.
func (c *Credentials) asyncIsExpired() <-chan Value {
ch := make(chan Value, 1)
go func() {
c.m.RLock()
defer c.m.RUnlock()
if curCreds := c.creds; !c.isExpiredLocked(curCreds) {
ch <- curCreds
}
close(ch)
}()
return ch
}
// isExpiredLocked helper method wrapping the definition of expired credentials.
func (c *Credentials) isExpiredLocked(creds interface{}) bool {
return creds == nil || creds.(Value) == Value{} || c.provider.IsExpired()
}
@ -309,13 +349,17 @@ func (c *Credentials) isExpired(creds interface{}) bool {
// the underlying Provider, if it supports that interface. Otherwise, it returns
// an error.
func (c *Credentials) ExpiresAt() (time.Time, error) {
c.m.RLock()
defer c.m.RUnlock()
expirer, ok := c.provider.(Expirer)
if !ok {
return time.Time{}, awserr.New("ProviderNotExpirer",
fmt.Sprintf("provider %s does not support ExpiresAt()", c.creds.Load().(Value).ProviderName),
fmt.Sprintf("provider %s does not support ExpiresAt()",
c.creds.ProviderName),
nil)
}
if c.creds.Load().(Value) == (Value{}) {
if c.creds == (Value{}) {
// set expiration time to the distant past
return time.Time{}, nil
}
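
With the change above, Credentials caches the Value returned by its Provider behind a sync.RWMutex and only calls Retrieve again once the cached value is expired or Expire() has been called. A short usage sketch, with static credentials standing in for a real provider:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/credentials"
    )

    func main() {
        // Static credentials never expire on their own; they only illustrate the API.
        creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

        v, err := creds.Get() // first call invokes the provider and caches the Value
        if err != nil {
            panic(err)
        }
        fmt.Println(v.ProviderName, creds.IsExpired()) // StaticProvider false

        creds.Expire()                 // drop the cached Value
        fmt.Println(creds.IsExpired()) // true until the next successful Get
    }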


@ -52,9 +52,21 @@ type WebIdentityRoleProvider struct {
credentials.Expiry
PolicyArns []*sts.PolicyDescriptorType
client stsiface.STSAPI
// Duration the STS credentials will be valid for. Truncated to seconds.
// If unset, the assumed role will use AssumeRoleWithWebIdentity's default
// expiry duration. See
// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#STS.AssumeRoleWithWebIdentity
// for more information.
Duration time.Duration
// The amount of time the credentials will be refreshed before they expire.
// This is useful refresh credentials before they expire to reduce risk of
// using credentials as they expire. If unset, will default to no expiry
// window.
ExpiryWindow time.Duration
client stsiface.STSAPI
tokenFetcher TokenFetcher
roleARN string
roleSessionName string
@ -107,11 +119,18 @@ func (p *WebIdentityRoleProvider) RetrieveWithContext(ctx credentials.Context) (
// uses unix time in nanoseconds to uniquely identify sessions.
sessionName = strconv.FormatInt(now().UnixNano(), 10)
}
var duration *int64
if p.Duration != 0 {
duration = aws.Int64(int64(p.Duration / time.Second))
}
req, resp := p.client.AssumeRoleWithWebIdentityRequest(&sts.AssumeRoleWithWebIdentityInput{
PolicyArns: p.PolicyArns,
RoleArn: &p.roleARN,
RoleSessionName: &sessionName,
WebIdentityToken: aws.String(string(b)),
DurationSeconds: duration,
})
req.SetContext(ctx)
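
A sketch of the new Duration and ExpiryWindow fields in use; the role ARN and token path are placeholder values, and the pre-existing NewWebIdentityRoleProvider constructor is assumed:

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws/credentials"
        "github.com/aws/aws-sdk-go/aws/credentials/stscreds"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/sts"
    )

    func main() {
        sess := session.Must(session.NewSession())

        // Placeholder role ARN and web identity token path.
        p := stscreds.NewWebIdentityRoleProvider(
            sts.New(sess),
            "arn:aws:iam::012345678901:role/example-role",
            "example-session",
            "/var/run/secrets/eks.amazonaws.com/serviceaccount/token",
        )
        p.Duration = 15 * time.Minute    // sent as DurationSeconds, truncated to seconds
        p.ExpiryWindow = 1 * time.Minute // refresh one minute before expiry

        creds := credentials.NewCredentials(p)
        _ = creds // use in aws.Config{Credentials: creds} when building clients
    }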


@ -20,7 +20,7 @@ func (c *EC2Metadata) getToken(ctx aws.Context, duration time.Duration) (tokenOu
op := &request.Operation{
Name: "GetToken",
HTTPMethod: "PUT",
HTTPPath: "/api/token",
HTTPPath: "/latest/api/token",
}
var output tokenOutput
@ -62,7 +62,7 @@ func (c *EC2Metadata) GetMetadataWithContext(ctx aws.Context, p string) (string,
op := &request.Operation{
Name: "GetMetadata",
HTTPMethod: "GET",
HTTPPath: sdkuri.PathJoin("/meta-data", p),
HTTPPath: sdkuri.PathJoin("/latest/meta-data", p),
}
output := &metadataOutput{}
@ -88,7 +88,7 @@ func (c *EC2Metadata) GetUserDataWithContext(ctx aws.Context) (string, error) {
op := &request.Operation{
Name: "GetUserData",
HTTPMethod: "GET",
HTTPPath: "/user-data",
HTTPPath: "/latest/user-data",
}
output := &metadataOutput{}
@ -113,7 +113,7 @@ func (c *EC2Metadata) GetDynamicDataWithContext(ctx aws.Context, p string) (stri
op := &request.Operation{
Name: "GetDynamicData",
HTTPMethod: "GET",
HTTPPath: sdkuri.PathJoin("/dynamic", p),
HTTPPath: sdkuri.PathJoin("/latest/dynamic", p),
}
output := &metadataOutput{}
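
The /latest prefix now lives in each operation's HTTPPath rather than in the client endpoint, so callers of the client are unchanged. A minimal usage sketch:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        sess := session.Must(session.NewSession())
        svc := ec2metadata.New(sess)

        if !svc.Available() {
            fmt.Println("EC2 IMDS not reachable")
            return
        }
        // Issues GET {endpoint}/latest/meta-data/instance-id.
        id, err := svc.GetMetadata("instance-id")
        if err != nil {
            panic(err)
        }
        fmt.Println(id)
    }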


@ -5,6 +5,10 @@
// variable "AWS_EC2_METADATA_DISABLED=true". This environment variable set to
// true instructs the SDK to disable the EC2 Metadata client. The client cannot
// be used while the environment variable is set to true, (case insensitive).
//
// The endpoint of the EC2 IMDS client can be configured via the environment
// variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
// Session. See aws/session#Options.EC2IMDSEndpoint for more details.
package ec2metadata
import (
@ -12,6 +16,7 @@ import (
"errors"
"io"
"net/http"
"net/url"
"os"
"strconv"
"strings"
@ -41,7 +46,7 @@ const (
enableTokenProviderHandlerName = "enableTokenProviderHandler"
// TTL constants
defaultTTL = 21600 * time.Second
defaultTTL = 21600 * time.Second
ttlExpirationWindow = 30 * time.Second
)
@ -69,6 +74,9 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata {
// a client when not using a session. Generally using just New with a session
// is preferred.
//
// Will remove the URL path from the endpoint provided to ensure the EC2 IMDS
// client is able to communicate with the EC2 IMDS API.
//
// If an unmodified HTTP client is provided from the stdlib default, or no client
// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened.
// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default.
@ -86,6 +94,15 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio
cfg.MaxRetries = aws.Int(2)
}
if u, err := url.Parse(endpoint); err == nil {
// Remove path from the endpoint since it will be added by requests.
// This is an artifact of the SDK adding `/latest` to the endpoint for
// EC2 IMDS, but this is now moved to the operation definition.
u.Path = ""
u.RawPath = ""
endpoint = u.String()
}
svc := &EC2Metadata{
Client: client.New(
cfg,


@ -87,6 +87,7 @@ func (t *tokenProvider) enableTokenProviderHandler(r *request.Request) {
// If the error code status is 401, we enable the token provider
if e, ok := r.Error.(awserr.RequestFailure); ok && e != nil &&
e.StatusCode() == http.StatusUnauthorized {
t.token.Store(ec2Token{})
atomic.StoreUint32(&t.disabled, 0)
}
}

File diff suppressed because it is too large.


@ -9,7 +9,8 @@ func isErrConnectionReset(err error) bool {
return false
}
if strings.Contains(err.Error(), "connection reset") ||
if strings.Contains(err.Error(), "use of closed network connection") ||
strings.Contains(err.Error(), "connection reset") ||
strings.Contains(err.Error(), "broken pipe") {
return true
}


@ -241,5 +241,22 @@ over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
Setting a custom HTTPClient in the aws.Config options will override this setting.
To use this option and custom HTTP client, the HTTP client needs to be provided
when creating the session. Not the service client.
The endpoint of the EC2 IMDS client can be configured via the environment
variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
Session. See Options.EC2IMDSEndpoint for more details.
AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
If using an URL with an IPv6 address literal, the IPv6 address
component must be enclosed in square brackets.
AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
The custom EC2 IMDS endpoint can also be specified via the Session options.
sess, err := session.NewSessionWithOptions(session.Options{
EC2IMDSEndpoint: "http://[::1]",
})
*/
package session


@ -148,6 +148,11 @@ type envConfig struct {
//
// AWS_S3_USE_ARN_REGION=true
S3UseARNRegion bool
// Specifies the alternative endpoint to use for EC2 IMDS.
//
// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
EC2IMDSEndpoint string
}
var (
@ -211,6 +216,9 @@ var (
s3UseARNRegionEnvKey = []string{
"AWS_S3_USE_ARN_REGION",
}
ec2IMDSEndpointEnvKey = []string{
"AWS_EC2_METADATA_SERVICE_ENDPOINT",
}
)
// loadEnvConfig retrieves the SDK's environment configuration.
@ -332,6 +340,8 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
}
}
setFromEnvVal(&cfg.EC2IMDSEndpoint, ec2IMDSEndpointEnvKey)
return cfg, nil
}
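
A sketch of how the new environment key flows into a Session; setting the variable in-process is only for illustration, and http://[::1] is the same example value used in the documentation above:

    package main

    import (
        "os"

        "github.com/aws/aws-sdk-go/aws/ec2metadata"
        "github.com/aws/aws-sdk-go/aws/session"
    )

    func main() {
        // Picked up by loadEnvConfig via ec2IMDSEndpointEnvKey when the Session is built.
        os.Setenv("AWS_EC2_METADATA_SERVICE_ENDPOINT", "http://[::1]")

        sess := session.Must(session.NewSession())

        // The EC2 IMDS client created from this Session now resolves to http://[::1].
        svc := ec2metadata.New(sess)
        _ = svc
    }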


@ -48,6 +48,8 @@ var ErrSharedConfigInvalidCredSource = awserr.New(ErrCodeSharedConfig, "credenti
type Session struct {
Config *aws.Config
Handlers request.Handlers
options Options
}
// New creates a new instance of the handlers merging in the provided configs
@ -99,7 +101,7 @@ func New(cfgs ...*aws.Config) *Session {
return s
}
s := deprecatedNewSession(cfgs...)
s := deprecatedNewSession(envCfg, cfgs...)
if envErr != nil {
msg := "failed to load env config"
s.logDeprecatedNewSessionError(msg, envErr, cfgs)
@ -243,6 +245,23 @@ type Options struct {
// function to initialize this value before changing the handlers to be
// used by the SDK.
Handlers request.Handlers
// Allows specifying a custom endpoint to be used by the EC2 IMDS client
// when making requests to the EC2 IMDS API. The must endpoint value must
// include protocol prefix.
//
// If unset, will the EC2 IMDS client will use its default endpoint.
//
// Can also be specified via the environment variable,
// AWS_EC2_METADATA_SERVICE_ENDPOINT.
//
// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://169.254.169.254
//
// If using an URL with an IPv6 address literal, the IPv6 address
// component must be enclosed in square brackets.
//
// AWS_EC2_METADATA_SERVICE_ENDPOINT=http://[::1]
EC2IMDSEndpoint string
}
// NewSessionWithOptions returns a new Session created from SDK defaults, config files,
@ -329,7 +348,25 @@ func Must(sess *Session, err error) *Session {
return sess
}
func deprecatedNewSession(cfgs ...*aws.Config) *Session {
// Wraps the endpoint resolver with a resolver that will return a custom
// endpoint for EC2 IMDS.
func wrapEC2IMDSEndpoint(resolver endpoints.Resolver, endpoint string) endpoints.Resolver {
return endpoints.ResolverFunc(
func(service, region string, opts ...func(*endpoints.Options)) (
endpoints.ResolvedEndpoint, error,
) {
if service == ec2MetadataServiceID {
return endpoints.ResolvedEndpoint{
URL: endpoint,
SigningName: ec2MetadataServiceID,
SigningRegion: region,
}, nil
}
return resolver.EndpointFor(service, region)
})
}
func deprecatedNewSession(envCfg envConfig, cfgs ...*aws.Config) *Session {
cfg := defaults.Config()
handlers := defaults.Handlers()
@ -341,6 +378,11 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
// endpoints for service client configurations.
cfg.EndpointResolver = endpoints.DefaultResolver()
}
if len(envCfg.EC2IMDSEndpoint) != 0 {
cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, envCfg.EC2IMDSEndpoint)
}
cfg.Credentials = defaults.CredChain(cfg, handlers)
// Reapply any passed in configs to override credentials if set
@ -349,6 +391,9 @@ func deprecatedNewSession(cfgs ...*aws.Config) *Session {
s := &Session{
Config: cfg,
Handlers: handlers,
options: Options{
EC2IMDSEndpoint: envCfg.EC2IMDSEndpoint,
},
}
initHandlers(s)
@ -418,6 +463,7 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
s := &Session{
Config: cfg,
Handlers: handlers,
options: opts,
}
initHandlers(s)
@ -570,6 +616,14 @@ func mergeConfigSrcs(cfg, userCfg *aws.Config,
endpoints.LegacyS3UsEast1Endpoint,
})
ec2IMDSEndpoint := sessOpts.EC2IMDSEndpoint
if len(ec2IMDSEndpoint) == 0 {
ec2IMDSEndpoint = envCfg.EC2IMDSEndpoint
}
if len(ec2IMDSEndpoint) != 0 {
cfg.EndpointResolver = wrapEC2IMDSEndpoint(cfg.EndpointResolver, ec2IMDSEndpoint)
}
// Configure credentials if not already set by the user when creating the
// Session.
if cfg.Credentials == credentials.AnonymousCredentials && userCfg.Credentials == nil {
@ -627,6 +681,7 @@ func (s *Session) Copy(cfgs ...*aws.Config) *Session {
newSession := &Session{
Config: s.Config.Copy(cfgs...),
Handlers: s.Handlers.Copy(),
options: s.options,
}
initHandlers(newSession)
@ -665,6 +720,8 @@ func (s *Session) ClientConfig(service string, cfgs ...*aws.Config) client.Confi
}
}
const ec2MetadataServiceID = "ec2metadata"
func (s *Session) resolveEndpoint(service, region string, cfg *aws.Config) (endpoints.ResolvedEndpoint, error) {
if ep := aws.StringValue(cfg.Endpoint); len(ep) != 0 {


@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.32.7"
const SDKVersion = "1.35.19"


@ -63,9 +63,10 @@ var parseTable = map[ASTKind]map[TokenType]int{
TokenNone: MarkCompleteState,
},
ASTKindEqualExpr: map[TokenType]int{
TokenLit: ValueState,
TokenWS: SkipTokenState,
TokenNL: SkipState,
TokenLit: ValueState,
TokenWS: SkipTokenState,
TokenNL: SkipState,
TokenNone: SkipState,
},
ASTKindStatement: map[TokenType]int{
TokenLit: SectionState,


@ -19,23 +19,28 @@ func (a AccessPointARN) GetARN() arn.ARN {
// ParseAccessPointResource attempts to parse the ARN's resource as an
// AccessPoint resource.
//
// Supported Access point resource format:
// - Access point format: arn:{partition}:s3:{region}:{accountId}:accesspoint/{accesspointName}
// - example: arn.aws.s3.us-west-2.012345678901:accesspoint/myaccesspoint
//
func ParseAccessPointResource(a arn.ARN, resParts []string) (AccessPointARN, error) {
if len(a.Region) == 0 {
return AccessPointARN{}, InvalidARNError{a, "region not set"}
return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "region not set"}
}
if len(a.AccountID) == 0 {
return AccessPointARN{}, InvalidARNError{a, "account-id not set"}
return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "account-id not set"}
}
if len(resParts) == 0 {
return AccessPointARN{}, InvalidARNError{a, "resource-id not set"}
return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
}
if len(resParts) > 1 {
return AccessPointARN{}, InvalidARNError{a, "sub resource not supported"}
return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
}
resID := resParts[0]
if len(strings.TrimSpace(resID)) == 0 {
return AccessPointARN{}, InvalidARNError{a, "resource-id not set"}
return AccessPointARN{}, InvalidARNError{ARN: a, Reason: "resource-id not set"}
}
return AccessPointARN{


@ -1,6 +1,7 @@
package arn
import (
"fmt"
"strings"
"github.com/aws/aws-sdk-go/aws/arn"
@ -25,13 +26,14 @@ func ParseResource(s string, resParser ResourceParser) (resARN Resource, err err
}
if len(a.Partition) == 0 {
return nil, InvalidARNError{a, "partition not set"}
return nil, InvalidARNError{ARN: a, Reason: "partition not set"}
}
if a.Service != "s3" {
return nil, InvalidARNError{a, "service is not S3"}
if a.Service != "s3" && a.Service != "s3-outposts" {
return nil, InvalidARNError{ARN: a, Reason: "service is not supported"}
}
if len(a.Resource) == 0 {
return nil, InvalidARNError{a, "resource not set"}
return nil, InvalidARNError{ARN: a, Reason: "resource not set"}
}
return resParser(a)
@ -66,6 +68,7 @@ type InvalidARNError struct {
Reason string
}
// Error returns a string denoting the occurred InvalidARNError
func (e InvalidARNError) Error() string {
return "invalid Amazon S3 ARN, " + e.Reason + ", " + e.ARN.String()
return fmt.Sprintf("invalid Amazon %s ARN, %s, %s", e.ARN.Service, e.Reason, e.ARN.String())
}


@ -0,0 +1,126 @@
package arn
import (
"strings"
"github.com/aws/aws-sdk-go/aws/arn"
)
// OutpostARN interface that should be satisfied by outpost ARNs
type OutpostARN interface {
Resource
GetOutpostID() string
}
// ParseOutpostARNResource will parse a provided ARNs resource using the appropriate ARN format
// and return a specific OutpostARN type
//
// Currently supported outpost ARN formats:
// * Outpost AccessPoint ARN format:
// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
//
// * Outpost Bucket ARN format:
// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/bucket/{bucketName}
// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/bucket/mybucket
//
// Other outpost ARN formats may be supported and added in the future.
//
func ParseOutpostARNResource(a arn.ARN, resParts []string) (OutpostARN, error) {
if len(a.Region) == 0 {
return nil, InvalidARNError{ARN: a, Reason: "region not set"}
}
if len(a.AccountID) == 0 {
return nil, InvalidARNError{ARN: a, Reason: "account-id not set"}
}
// verify if outpost id is present and valid
if len(resParts) == 0 || len(strings.TrimSpace(resParts[0])) == 0 {
return nil, InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
}
// verify possible resource type exists
if len(resParts) < 3 {
return nil, InvalidARNError{
ARN: a, Reason: "incomplete outpost resource type. Expected bucket or access-point resource to be present",
}
}
// Since we know this is a OutpostARN fetch outpostID
outpostID := strings.TrimSpace(resParts[0])
switch resParts[1] {
case "accesspoint":
accesspointARN, err := ParseAccessPointResource(a, resParts[2:])
if err != nil {
return OutpostAccessPointARN{}, err
}
return OutpostAccessPointARN{
AccessPointARN: accesspointARN,
OutpostID: outpostID,
}, nil
case "bucket":
bucketName, err := parseBucketResource(a, resParts[2:])
if err != nil {
return nil, err
}
return OutpostBucketARN{
ARN: a,
BucketName: bucketName,
OutpostID: outpostID,
}, nil
default:
return nil, InvalidARNError{ARN: a, Reason: "unknown resource set for outpost ARN"}
}
}
// OutpostAccessPointARN represents outpost access point ARN.
type OutpostAccessPointARN struct {
AccessPointARN
OutpostID string
}
// GetOutpostID returns the outpost id of outpost access point arn
func (o OutpostAccessPointARN) GetOutpostID() string {
return o.OutpostID
}
// OutpostBucketARN represents the outpost bucket ARN.
type OutpostBucketARN struct {
arn.ARN
BucketName string
OutpostID string
}
// GetOutpostID returns the outpost id of outpost bucket arn
func (o OutpostBucketARN) GetOutpostID() string {
return o.OutpostID
}
// GetARN retrives the base ARN from outpost bucket ARN resource
func (o OutpostBucketARN) GetARN() arn.ARN {
return o.ARN
}
// parseBucketResource attempts to parse the ARN's bucket resource and retrieve the
// bucket resource id.
//
// parseBucketResource only parses the bucket resource id.
//
func parseBucketResource(a arn.ARN, resParts []string) (bucketName string, err error) {
if len(resParts) == 0 {
return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
}
if len(resParts) > 1 {
return bucketName, InvalidARNError{ARN: a, Reason: "sub resource not supported"}
}
bucketName = strings.TrimSpace(resParts[0])
if len(bucketName) == 0 {
return bucketName, InvalidARNError{ARN: a, Reason: "bucket resource-id not set"}
}
return bucketName, err
}
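
For reference, the resParts slice these parsers receive corresponds to the delimited segments of arn.ARN.Resource. A sketch using the outpost access point example from the comments above (splitting on "/" is sufficient for this ARN form):

    package main

    import (
        "fmt"
        "strings"

        awsarn "github.com/aws/aws-sdk-go/aws/arn"
    )

    func main() {
        a, err := awsarn.Parse("arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint")
        if err != nil {
            panic(err)
        }

        segments := strings.Split(a.Resource, "/")
        // segments: [outpost op-1234567890123456 accesspoint myaccesspoint]
        // ParseOutpostARNResource above receives the parts after the leading
        // "outpost" token, i.e. segments[1:].
        fmt.Println(a.Service, segments[1:])
    }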


@ -0,0 +1,189 @@
package s3shared
import (
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/internal/s3shared/arn"
)
const (
invalidARNErrorErrCode = "InvalidARNError"
configurationErrorErrCode = "ConfigurationError"
)
// InvalidARNError denotes the error for Invalid ARN
type InvalidARNError struct {
message string
resource arn.Resource
origErr error
}
// Error returns the InvalidARNError
func (e InvalidARNError) Error() string {
var extra string
if e.resource != nil {
extra = "ARN: " + e.resource.String()
}
return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
}
// Code returns the invalid ARN error code
func (e InvalidARNError) Code() string {
return invalidARNErrorErrCode
}
// Message returns the message for Invalid ARN error
func (e InvalidARNError) Message() string {
return e.message
}
// OrigErr is the original error wrapped by Invalid ARN Error
func (e InvalidARNError) OrigErr() error {
return e.origErr
}
// NewInvalidARNError denotes invalid arn error
func NewInvalidARNError(resource arn.Resource, err error) InvalidARNError {
return InvalidARNError{
message: "invalid ARN",
origErr: err,
resource: resource,
}
}
// NewInvalidARNWithCustomEndpointError ARN not supported for custom clients endpoints
func NewInvalidARNWithCustomEndpointError(resource arn.Resource, err error) InvalidARNError {
return InvalidARNError{
message: "resource ARN not supported with custom client endpoints",
origErr: err,
resource: resource,
}
}
// NewInvalidARNWithUnsupportedPartitionError ARN not supported for the target partition
func NewInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) InvalidARNError {
return InvalidARNError{
message: "resource ARN not supported for the target ARN partition",
origErr: err,
resource: resource,
}
}
// NewInvalidARNWithFIPSError ARN not supported for FIPS region
func NewInvalidARNWithFIPSError(resource arn.Resource, err error) InvalidARNError {
return InvalidARNError{
message: "resource ARN not supported for FIPS region",
resource: resource,
origErr: err,
}
}
// ConfigurationError is used to denote a client configuration error
type ConfigurationError struct {
message string
resource arn.Resource
clientPartitionID string
clientRegion string
origErr error
}
// Error returns the Configuration error string
func (e ConfigurationError) Error() string {
extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
e.resource, e.clientPartitionID, e.clientRegion)
return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
}
// Code returns configuration error's error-code
func (e ConfigurationError) Code() string {
return configurationErrorErrCode
}
// Message returns the configuration error message
func (e ConfigurationError) Message() string {
return e.message
}
// OrigErr is the original error wrapped by Configuration Error
func (e ConfigurationError) OrigErr() error {
return e.origErr
}
// NewClientPartitionMismatchError stub
func NewClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
return ConfigurationError{
message: "client partition does not match provided ARN partition",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
// NewClientRegionMismatchError denotes cross region access error
func NewClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
return ConfigurationError{
message: "client region does not match provided ARN region",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
// NewFailedToResolveEndpointError denotes endpoint resolving error
func NewFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
return ConfigurationError{
message: "endpoint resolver failed to find an endpoint for the provided ARN region",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
// NewClientConfiguredForFIPSError denotes client config error for unsupported cross region FIPS access
func NewClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
return ConfigurationError{
message: "client configured for fips but cross-region resource ARN provided",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
// NewClientConfiguredForAccelerateError denotes client config error for unsupported S3 accelerate
func NewClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
return ConfigurationError{
message: "client configured for S3 Accelerate but is not supported with resource ARN",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
// NewClientConfiguredForCrossRegionFIPSError denotes client config error for unsupported cross region FIPS request
func NewClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
return ConfigurationError{
message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
// NewClientConfiguredForDualStackError denotes client config error for unsupported S3 Dual-stack
func NewClientConfiguredForDualStackError(resource arn.Resource, clientPartitionID, clientRegion string, err error) ConfigurationError {
return ConfigurationError{
message: "client configured for S3 Dual-stack but is not supported with resource ARN",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
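
Both error types above satisfy the awserr.Error interface, so callers can branch on Code(). A small sketch using the error code strings defined at the top of the file:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/awserr"
    )

    // isS3ARNProblem reports whether err is one of the ARN-related errors above.
    func isS3ARNProblem(err error) bool {
        if aerr, ok := err.(awserr.Error); ok {
            switch aerr.Code() {
            case "InvalidARNError", "ConfigurationError":
                return true
            }
        }
        return false
    }

    func main() {
        fmt.Println(isS3ARNProblem(nil)) // false
    }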


@ -0,0 +1,62 @@
package s3shared
import (
"strings"
"github.com/aws/aws-sdk-go/aws"
awsarn "github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/s3shared/arn"
)
// ResourceRequest represents the request and arn resource
type ResourceRequest struct {
Resource arn.Resource
Request *request.Request
}
// ARN returns the resource ARN
func (r ResourceRequest) ARN() awsarn.ARN {
return r.Resource.GetARN()
}
// AllowCrossRegion returns a bool value to denote if S3UseARNRegion flag is set
func (r ResourceRequest) AllowCrossRegion() bool {
return aws.BoolValue(r.Request.Config.S3UseARNRegion)
}
// UseFIPS returns true if request config region is FIPS
func (r ResourceRequest) UseFIPS() bool {
return IsFIPS(aws.StringValue(r.Request.Config.Region))
}
// ResourceConfiguredForFIPS returns true if resource ARNs region is FIPS
func (r ResourceRequest) ResourceConfiguredForFIPS() bool {
return IsFIPS(r.ARN().Region)
}
// IsCrossPartition returns true if client is configured for another partition, than
// the partition that resource ARN region resolves to.
func (r ResourceRequest) IsCrossPartition() bool {
return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
}
// IsCrossRegion returns true if ARN region is different than client configured region
func (r ResourceRequest) IsCrossRegion() bool {
return IsCrossRegion(r.Request, r.Resource.GetARN().Region)
}
// HasCustomEndpoint returns true if custom client endpoint is provided
func (r ResourceRequest) HasCustomEndpoint() bool {
return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
}
// IsFIPS returns true if region is a fips region
func IsFIPS(clientRegion string) bool {
return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips")
}
// IsCrossRegion returns true if request signing region is not same as configured region
func IsCrossRegion(req *request.Request, otherRegion string) bool {
return req.ClientInfo.SigningRegion != otherRegion
}


@ -69,10 +69,23 @@ func (r *EventReader) ReadEvent() (event interface{}, err error) {
case ErrorMessageType:
return nil, r.unmarshalErrorMessage(msg)
default:
return nil, fmt.Errorf("unknown eventstream message type, %v", typ)
return nil, &UnknownMessageTypeError{
Type: typ, Message: msg.Clone(),
}
}
}
// UnknownMessageTypeError provides an error when a message is received from
// the stream, but the reader is unable to determine what kind of message it is.
type UnknownMessageTypeError struct {
Type string
Message eventstream.Message
}
func (e *UnknownMessageTypeError) Error() string {
return "unknown eventstream message type, " + e.Type
}
func (r *EventReader) unmarshalEventMessage(
msg eventstream.Message,
) (event interface{}, err error) {


@ -52,6 +52,15 @@ func (hs *Headers) Del(name string) {
}
}
// Clone returns a deep copy of the headers
func (hs Headers) Clone() Headers {
o := make(Headers, 0, len(hs))
for _, h := range hs {
o.Set(h.Name, h.Value)
}
return o
}
func decodeHeaders(r io.Reader) (Headers, error) {
hs := Headers{}


@ -57,6 +57,20 @@ func (m *Message) rawMessage() (rawMessage, error) {
return raw, nil
}
// Clone returns a deep copy of the message.
func (m Message) Clone() Message {
var payload []byte
if m.Payload != nil {
payload = make([]byte, len(m.Payload))
copy(payload, m.Payload)
}
return Message{
Headers: m.Headers.Clone(),
Payload: payload,
}
}
type messagePrelude struct {
Length uint32
HeadersLen uint32


@ -6,6 +6,7 @@ import (
"encoding/json"
"fmt"
"io"
"math/big"
"reflect"
"strings"
"time"
@ -15,6 +16,8 @@ import (
"github.com/aws/aws-sdk-go/private/protocol"
)
var millisecondsFloat = new(big.Float).SetInt64(1e3)
// UnmarshalJSONError unmarshal's the reader's JSON document into the passed in
// type. The value to unmarshal the json document into must be a pointer to the
// type.
@ -39,7 +42,9 @@ func UnmarshalJSONError(v interface{}, stream io.Reader) error {
func UnmarshalJSON(v interface{}, stream io.Reader) error {
var out interface{}
err := json.NewDecoder(stream).Decode(&out)
decoder := json.NewDecoder(stream)
decoder.UseNumber()
err := decoder.Decode(&out)
if err == io.EOF {
return nil
} else if err != nil {
@ -54,7 +59,9 @@ func UnmarshalJSON(v interface{}, stream io.Reader) error {
func UnmarshalJSONCaseInsensitive(v interface{}, stream io.Reader) error {
var out interface{}
err := json.NewDecoder(stream).Decode(&out)
decoder := json.NewDecoder(stream)
decoder.UseNumber()
err := decoder.Decode(&out)
if err == io.EOF {
return nil
} else if err != nil {
@ -254,16 +261,31 @@ func (u unmarshaler) unmarshalScalar(value reflect.Value, data interface{}, tag
default:
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
}
case float64:
case json.Number:
switch value.Interface().(type) {
case *int64:
di := int64(d)
// Retain the old behavior where we would just truncate the float64
// calling d.Int64() here could cause an invalid syntax error due to the usage of strconv.ParseInt
f, err := d.Float64()
if err != nil {
return err
}
di := int64(f)
value.Set(reflect.ValueOf(&di))
case *float64:
value.Set(reflect.ValueOf(&d))
f, err := d.Float64()
if err != nil {
return err
}
value.Set(reflect.ValueOf(&f))
case *time.Time:
// Time unmarshaled from a float64 can only be epoch seconds
t := time.Unix(int64(d), 0).UTC()
float, ok := new(big.Float).SetString(d.String())
if !ok {
return fmt.Errorf("unsupported float time representation: %v", d.String())
}
float = float.Mul(float, millisecondsFloat)
ms, _ := float.Int64()
t := time.Unix(0, ms*1e6).UTC()
value.Set(reflect.ValueOf(&t))
default:
return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type())
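
A standalone sketch of the numeric handling introduced above: an epoch timestamp with fractional seconds arrives as a json.Number, is scaled to milliseconds with big.Float, and only then becomes a time.Time, so sub-second precision is no longer truncated away:

    package main

    import (
        "encoding/json"
        "fmt"
        "math/big"
        "time"
    )

    func main() {
        d := json.Number("1000000000.5") // epoch seconds with a fractional part

        f, _ := new(big.Float).SetString(d.String())
        f.Mul(f, big.NewFloat(1e3)) // seconds -> milliseconds
        ms, _ := f.Int64()

        t := time.Unix(0, ms*1e6).UTC() // milliseconds -> nanoseconds
        fmt.Println(t)                  // 2001-09-09 01:46:40.5 +0000 UTC
    }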


@ -27,8 +27,8 @@ const (
// RFC3339 a subset of the ISO8601 timestamp format. e.g 2014-04-29T18:30:38Z
ISO8601TimeFormat = "2006-01-02T15:04:05.999999999Z"
// This format is used for output time without seconds precision
ISO8601OutputTimeFormat = "2006-01-02T15:04:05Z"
// This format is used for output time with fractional second precision up to milliseconds
ISO8601OutputTimeFormat = "2006-01-02T15:04:05.999999999Z"
)
// IsKnownTimestampFormat returns if the timestamp format name
@ -48,7 +48,7 @@ func IsKnownTimestampFormat(name string) bool {
// FormatTime returns a string value of the time.
func FormatTime(name string, t time.Time) string {
t = t.UTC()
t = t.UTC().Truncate(time.Millisecond)
switch name {
case RFC822TimeFormatName:

File diff suppressed because it is too large.


@ -0,0 +1,78 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
// Package applicationautoscaling provides the client and types for making API
// requests to Application Auto Scaling.
//
// With Application Auto Scaling, you can configure automatic scaling for the
// following resources:
//
// * Amazon ECS services
//
// * Amazon EC2 Spot Fleet requests
//
// * Amazon EMR clusters
//
// * Amazon AppStream 2.0 fleets
//
// * Amazon DynamoDB tables and global secondary indexes throughput capacity
//
// * Amazon Aurora Replicas
//
// * Amazon SageMaker endpoint variants
//
// * Custom resources provided by your own applications or services
//
// * Amazon Comprehend document classification and entity recognizer endpoints
//
// * AWS Lambda function provisioned concurrency
//
// * Amazon Keyspaces (for Apache Cassandra) tables
//
// * Amazon Managed Streaming for Apache Kafka cluster storage
//
// API Summary
//
// The Application Auto Scaling service API includes three key sets of actions:
//
// * Register and manage scalable targets - Register AWS or custom resources
// as scalable targets (a resource that Application Auto Scaling can scale),
// set minimum and maximum capacity limits, and retrieve information on existing
// scalable targets.
//
// * Configure and manage automatic scaling - Define scaling policies to
// dynamically scale your resources in response to CloudWatch alarms, schedule
// one-time or recurring scaling actions, and retrieve your recent scaling
// activity history.
//
// * Suspend and resume scaling - Temporarily suspend and later resume automatic
// scaling by calling the RegisterScalableTarget (https://docs.aws.amazon.com/autoscaling/application/APIReference/API_RegisterScalableTarget.html)
// API action for any Application Auto Scaling scalable target. You can suspend
// and resume (individually or in combination) scale-out activities that
// are triggered by a scaling policy, scale-in activities that are triggered
// by a scaling policy, and scheduled scaling.
//
// To learn more about Application Auto Scaling, including information about
// granting IAM users required permissions for Application Auto Scaling actions,
// see the Application Auto Scaling User Guide (https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html).
//
// See https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06 for more information on this service.
//
// See applicationautoscaling package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/applicationautoscaling/
//
// Using the Client
//
// To contact Application Auto Scaling with the SDK use the New function to create
// a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
//
// See the SDK's documentation for more information on how to use the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/
//
// See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
//
// See the Application Auto Scaling client ApplicationAutoScaling for more
// information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/applicationautoscaling/#New
package applicationautoscaling
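
A minimal usage sketch for the newly vendored client; the dynamodb service namespace is just an example value:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/applicationautoscaling"
    )

    func main() {
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
        svc := applicationautoscaling.New(sess)

        out, err := svc.DescribeScalableTargets(&applicationautoscaling.DescribeScalableTargetsInput{
            ServiceNamespace: aws.String("dynamodb"),
        })
        if err != nil {
            panic(err)
        }
        for _, t := range out.ScalableTargets {
            fmt.Println(aws.StringValue(t.ResourceId), aws.StringValue(t.ScalableDimension))
        }
    }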


@ -0,0 +1,74 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package applicationautoscaling
import (
"github.com/aws/aws-sdk-go/private/protocol"
)
const (
// ErrCodeConcurrentUpdateException for service response error code
// "ConcurrentUpdateException".
//
// Concurrent updates caused an exception, for example, if you request an update
// to an Application Auto Scaling resource that already has a pending update.
ErrCodeConcurrentUpdateException = "ConcurrentUpdateException"
// ErrCodeFailedResourceAccessException for service response error code
// "FailedResourceAccessException".
//
// Failed access to resources caused an exception. This exception is thrown
// when Application Auto Scaling is unable to retrieve the alarms associated
// with a scaling policy due to a client error, for example, if the role ARN
// specified for a scalable target does not have permission to call the CloudWatch
// DescribeAlarms (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarms.html)
// on your behalf.
ErrCodeFailedResourceAccessException = "FailedResourceAccessException"
// ErrCodeInternalServiceException for service response error code
// "InternalServiceException".
//
// The service encountered an internal error.
ErrCodeInternalServiceException = "InternalServiceException"
// ErrCodeInvalidNextTokenException for service response error code
// "InvalidNextTokenException".
//
// The next token supplied was invalid.
ErrCodeInvalidNextTokenException = "InvalidNextTokenException"
// ErrCodeLimitExceededException for service response error code
// "LimitExceededException".
//
// A per-account resource limit is exceeded. For more information, see Application
// Auto Scaling Limits (https://docs.aws.amazon.com/ApplicationAutoScaling/latest/userguide/application-auto-scaling-limits.html).
ErrCodeLimitExceededException = "LimitExceededException"
// ErrCodeObjectNotFoundException for service response error code
// "ObjectNotFoundException".
//
// The specified object could not be found. For any operation that depends on
// the existence of a scalable target, this exception is thrown if the scalable
// target with the specified service namespace, resource ID, and scalable dimension
// does not exist. For any operation that deletes or deregisters a resource,
// this exception is thrown if the resource cannot be found.
ErrCodeObjectNotFoundException = "ObjectNotFoundException"
// ErrCodeValidationException for service response error code
// "ValidationException".
//
// An exception was thrown for a validation issue. Review the available parameters
// for the API request.
ErrCodeValidationException = "ValidationException"
)
var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{
"ConcurrentUpdateException": newErrorConcurrentUpdateException,
"FailedResourceAccessException": newErrorFailedResourceAccessException,
"InternalServiceException": newErrorInternalServiceException,
"InvalidNextTokenException": newErrorInvalidNextTokenException,
"LimitExceededException": newErrorLimitExceededException,
"ObjectNotFoundException": newErrorObjectNotFoundException,
"ValidationException": newErrorValidationException,
}


@ -0,0 +1,106 @@
// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT.
package applicationautoscaling
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/signer/v4"
"github.com/aws/aws-sdk-go/private/protocol"
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
)
// ApplicationAutoScaling provides the API operation methods for making requests to
// Application Auto Scaling. See this package's package overview docs
// for details on the service.
//
// ApplicationAutoScaling methods are safe to use concurrently. It is not safe to
// modify mutate any of the struct's properties though.
type ApplicationAutoScaling struct {
*client.Client
}
// Used for custom client initialization logic
var initClient func(*client.Client)
// Used for custom request initialization logic
var initRequest func(*request.Request)
// Service information constants
const (
ServiceName = "autoscaling" // Name of service.
EndpointsID = "application-autoscaling" // ID to lookup a service endpoint with.
ServiceID = "Application Auto Scaling" // ServiceID is a unique identifier of a specific service.
)
// New creates a new instance of the ApplicationAutoScaling client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// mySession := session.Must(session.NewSession())
//
// // Create a ApplicationAutoScaling client from just a session.
// svc := applicationautoscaling.New(mySession)
//
// // Create a ApplicationAutoScaling client with additional configuration
// svc := applicationautoscaling.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApplicationAutoScaling {
c := p.ClientConfig(EndpointsID, cfgs...)
if c.SigningNameDerived || len(c.SigningName) == 0 {
c.SigningName = "application-autoscaling"
}
return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName)
}
// newClient creates, initializes and returns a new service client instance.
func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ApplicationAutoScaling {
svc := &ApplicationAutoScaling{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: ServiceName,
ServiceID: ServiceID,
SigningName: signingName,
SigningRegion: signingRegion,
PartitionID: partitionID,
Endpoint: endpoint,
APIVersion: "2016-02-06",
JSONVersion: "1.1",
TargetPrefix: "AnyScaleFrontendService",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler)
svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler)
svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler)
svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler)
svc.Handlers.UnmarshalError.PushBackNamed(
protocol.NewUnmarshalErrorHandler(jsonrpc.NewUnmarshalTypedError(exceptionFromCode)).NamedHandler(),
)
// Run custom client initialization if present
if initClient != nil {
initClient(svc.Client)
}
return svc
}
// newRequest creates a new request for a ApplicationAutoScaling operation and runs any
// custom request initialization.
func (c *ApplicationAutoScaling) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
// Run custom request initialization if present
if initRequest != nil {
initRequest(req)
}
return req
}


@ -162,9 +162,9 @@ func (c *DynamoDB) BatchGetItemRequest(input *BatchGetItemInput) (req *request.R
// might not be specified correctly, or its status might not be ACTIVE.
//
// * RequestLimitExceeded
// Throughput exceeds the current throughput limit for your account. Please
// Throughput exceeds the current throughput quota for your account. Please
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
// a limit increase.
// a quota increase.
//
// * InternalServerError
// An error occurred on the server side.
@ -408,9 +408,9 @@ func (c *DynamoDB) BatchWriteItemRequest(input *BatchWriteItemInput) (req *reque
// that have one or more local secondary indexes.
//
// * RequestLimitExceeded
// Throughput exceeds the current throughput limit for your account. Please
// Throughput exceeds the current throughput quota for your account. Please
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
// a limit increase.
// a quota increase.
//
// * InternalServerError
// An error occurred on the server side.
@ -570,7 +570,7 @@ func (c *DynamoDB) CreateBackupRequest(input *CreateBackupInput) (req *request.R
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
//
// * InternalServerError
// An error occurred on the server side.
@ -731,7 +731,7 @@ func (c *DynamoDB) CreateGlobalTableRequest(input *CreateGlobalTableInput) (req
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
//
// * InternalServerError
// An error occurred on the server side.
@ -875,7 +875,7 @@ func (c *DynamoDB) CreateTableRequest(input *CreateTableInput) (req *request.Req
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
//
// * InternalServerError
// An error occurred on the server side.
@ -1002,7 +1002,7 @@ func (c *DynamoDB) DeleteBackupRequest(input *DeleteBackupInput) (req *request.R
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
//
// * InternalServerError
// An error occurred on the server side.
@ -1144,9 +1144,9 @@ func (c *DynamoDB) DeleteItemRequest(input *DeleteItemInput) (req *request.Reque
// Operation was rejected because there is an ongoing transaction for the item.
//
// * RequestLimitExceeded
// Throughput exceeds the current throughput limit for your account. Please
// Throughput exceeds the current throughput quota for your account. Please
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
// a limit increase.
// a quota increase.
//
// * InternalServerError
// An error occurred on the server side.
@ -1290,7 +1290,7 @@ func (c *DynamoDB) DeleteTableRequest(input *DeleteTableInput) (req *request.Req
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
//
// * InternalServerError
// An error occurred on the server side.
@ -2067,27 +2067,27 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque
// DescribeLimits API operation for Amazon DynamoDB.
//
// Returns the current provisioned-capacity limits for your AWS account in a
// Returns the current provisioned-capacity quotas for your AWS account in a
// Region, both for the Region as a whole and for any one DynamoDB table that
// you create there.
//
// When you establish an AWS account, the account has initial limits on the
// When you establish an AWS account, the account has initial quotas on the
// maximum read capacity units and write capacity units that you can provision
// across all of your DynamoDB tables in a given Region. Also, there are per-table
// limits that apply when you create a table there. For more information, see
// Limits (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// quotas that apply when you create a table there. For more information, see
// Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// page in the Amazon DynamoDB Developer Guide.
//
// Although you can increase these limits by filing a case at AWS Support Center
// Although you can increase these quotas by filing a case at AWS Support Center
// (https://console.aws.amazon.com/support/home#/), obtaining the increase is
// not instantaneous. The DescribeLimits action lets you write code to compare
// the capacity you are currently using to those limits imposed by your account
// so that you have enough time to apply for an increase before you hit a limit.
// the capacity you are currently using to those quotas imposed by your account
// so that you have enough time to apply for an increase before you hit a quota.
//
// For example, you could use one of the AWS SDKs to do the following:
//
// Call DescribeLimits for a particular Region to obtain your current account
// limits on provisioned capacity there.
// quotas on provisioned capacity there.
//
// Create a variable to hold the aggregate read capacity units provisioned for
// all your tables in that Region, and one to hold the aggregate write capacity
@ -2106,20 +2106,20 @@ func (c *DynamoDB) DescribeLimitsRequest(input *DescribeLimitsInput) (req *reque
// these GSIs and add their provisioned capacity values to your variables
// as well.
//
// Report the account limits for that Region returned by DescribeLimits, along
// Report the account quotas for that Region returned by DescribeLimits, along
// with the total current provisioned capacity levels you have calculated.
//
// This will let you see whether you are getting close to your account-level
// limits.
// quotas.
//
// The per-table limits apply only when you are creating a new table. They restrict
// The per-table quotas apply only when you are creating a new table. They restrict
// the sum of the provisioned capacity of the new table itself and all its global
// secondary indexes.
//
// For existing tables and their GSIs, DynamoDB doesn't let you increase provisioned
// capacity extremely rapidly. But the only upper limit that applies is that
// the aggregate provisioned capacity over all your tables and GSIs cannot exceed
// either of the per-account limits.
// capacity extremely rapidly, but the only quota that applies is that the aggregate
// provisioned capacity over all your tables and GSIs cannot exceed either of
// the per-account quotas.
//
// DescribeLimits should only be called periodically. You can expect throttling
// errors if you call it more than once in a minute.
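
A sketch of the first step described above: call DescribeLimits and read back the account-level and per-table quotas:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func main() {
        sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-west-2")))
        svc := dynamodb.New(sess)

        out, err := svc.DescribeLimits(&dynamodb.DescribeLimitsInput{})
        if err != nil {
            panic(err)
        }
        fmt.Println("account read capacity quota: ", aws.Int64Value(out.AccountMaxReadCapacityUnits))
        fmt.Println("account write capacity quota:", aws.Int64Value(out.AccountMaxWriteCapacityUnits))
        fmt.Println("table read capacity quota:   ", aws.Int64Value(out.TableMaxReadCapacityUnits))
        fmt.Println("table write capacity quota:  ", aws.Int64Value(out.TableMaxWriteCapacityUnits))
    }
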
@ -2568,9 +2568,9 @@ func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, ou
// might not be specified correctly, or its status might not be ACTIVE.
//
// * RequestLimitExceeded
// Throughput exceeds the current throughput limit for your account. Please
// Throughput exceeds the current throughput quota for your account. Please
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
// a limit increase.
// a quota increase.
//
// * InternalServerError
// An error occurred on the server side.
@ -2668,11 +2668,11 @@ func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Req
//
// List backups associated with an AWS account. To list backups for a given
// table, specify TableName. ListBackups returns a paginated list of results
// with at most 1 MB worth of items in a page. You can also specify a limit
// for the maximum number of entries to be returned in a page.
// with at most 1 MB worth of items in a page. You can also specify a maximum
// number of entries to be returned in a page.
//
// In the request, start time is inclusive, but end time is exclusive. Note
// that these limits are for the time at which the original backup was requested.
// that these boundaries are for the time at which the original backup was requested.
//
// You can call ListBackups a maximum of five times per second.
//
@ -3385,9 +3385,9 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou
// Operation was rejected because there is an ongoing transaction for the item.
//
// * RequestLimitExceeded
// Throughput exceeds the current throughput limit for your account. Please
// Throughput exceeds the current throughput quota for your account. Please
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
// a limit increase.
// a quota increase.
//
// * InternalServerError
// An error occurred on the server side.
@ -3559,9 +3559,9 @@ func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output
// might not be specified correctly, or its status might not be ACTIVE.
//
// * RequestLimitExceeded
// Throughput exceeds the current throughput limit for your account. Please
// Throughput exceeds the current throughput quota for your account. Please
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
// a limit increase.
// a quota increase.
//
// * InternalServerError
// An error occurred on the server side.
@ -3761,7 +3761,7 @@ func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupIn
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
//
// * InternalServerError
// An error occurred on the server side.
@ -3924,7 +3924,7 @@ func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointIn
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
//
// * InvalidRestoreTimeException
// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime
@ -4083,9 +4083,9 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *
// might not be specified correctly, or its status might not be ACTIVE.
//
// * RequestLimitExceeded
// Throughput exceeds the current throughput limit for your account. Please
// Throughput exceeds the current throughput quota for your account. Please
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
// a limit increase.
// a quota increase.
//
// * InternalServerError
// An error occurred on the server side.
@ -4262,7 +4262,7 @@ func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Req
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
//
// * ResourceNotFoundException
// The operation tried to access a nonexistent table or index. The resource
@ -4496,9 +4496,9 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r
// in the Amazon DynamoDB Developer Guide.
//
// * RequestLimitExceeded
// Throughput exceeds the current throughput limit for your account. Please
// Throughput exceeds the current throughput quota for your account. Please
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
// a limit increase.
// a quota increase.
//
// * InternalServerError
// An error occurred on the server side.
@ -4764,9 +4764,9 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re
// in the Amazon DynamoDB Developer Guide.
//
// * RequestLimitExceeded
// Throughput exceeds the current throughput limit for your account. Please
// Throughput exceeds the current throughput quota for your account. Please
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
// a limit increase.
// a quota increase.
//
// * InternalServerError
// An error occurred on the server side.
@ -4889,7 +4889,7 @@ func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
//
// * ResourceNotFoundException
// The operation tried to access a nonexistent table or index. The resource
@ -5365,7 +5365,7 @@ func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSett
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
//
// * ResourceInUseException
// The operation conflicts with the resource's availability. For example, you
@ -5506,9 +5506,9 @@ func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Reque
// Operation was rejected because there is an ongoing transaction for the item.
//
// * RequestLimitExceeded
// Throughput exceeds the current throughput limit for your account. Please
// Throughput exceeds the current throughput quota for your account. Please
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
// a limit increase.
// a quota increase.
//
// * InternalServerError
// An error occurred on the server side.
@ -5652,7 +5652,7 @@ func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Req
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
//
// * InternalServerError
// An error occurred on the server side.
@ -5757,7 +5757,7 @@ func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplic
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
//
// * InternalServerError
// An error occurred on the server side.
@ -5911,7 +5911,7 @@ func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *r
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
//
// * InternalServerError
// An error occurred on the server side.
@ -8212,8 +8212,8 @@ type CreateGlobalSecondaryIndexAction struct {
// Represents the provisioned throughput settings for the specified global secondary
// index.
//
// For current minimum and maximum provisioned throughput values, see Limits
// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// For current minimum and maximum provisioned throughput values, see Service,
// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// in the Amazon DynamoDB Developer Guide.
ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
}
@ -8615,8 +8615,8 @@ type CreateTableInput struct {
// If you set BillingMode as PROVISIONED, you must specify this property. If
// you set BillingMode as PAY_PER_REQUEST, you cannot specify this property.
//
// For current minimum and maximum provisioned throughput values, see Limits
// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// For current minimum and maximum provisioned throughput values, see Service,
// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// in the Amazon DynamoDB Developer Guide.
ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
@ -10820,8 +10820,8 @@ type GlobalSecondaryIndex struct {
// Represents the provisioned throughput settings for the specified global secondary
// index.
//
// For current minimum and maximum provisioned throughput values, see Limits
// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// For current minimum and maximum provisioned throughput values, see Service,
// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// in the Amazon DynamoDB Developer Guide.
ProvisionedThroughput *ProvisionedThroughput `type:"structure"`
}
@ -11029,8 +11029,8 @@ type GlobalSecondaryIndexDescription struct {
// Represents the provisioned throughput settings for the specified global secondary
// index.
//
// For current minimum and maximum provisioned throughput values, see Limits
// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// For current minimum and maximum provisioned throughput values, see Service,
// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// in the Amazon DynamoDB Developer Guide.
ProvisionedThroughput *ProvisionedThroughputDescription `type:"structure"`
}
@ -12118,7 +12118,7 @@ func (s *KeysAndAttributes) SetProjectionExpression(v string) *KeysAndAttributes
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
type LimitExceededException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@ -13094,8 +13094,8 @@ type Projection struct {
//
// * KEYS_ONLY - Only the index and primary keys are projected into the index.
//
// * INCLUDE - Only the specified table attributes are projected into the
// index. The list of projected attributes is in NonKeyAttributes.
// * INCLUDE - In addition to the attributes described in KEYS_ONLY, the
// secondary index will include other non-key attributes that you specify.
//
// * ALL - All of the table attributes are projected into the index.
ProjectionType *string `type:"string" enum:"ProjectionType"`
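//
// Example (sketch): a Projection that uses INCLUDE to carry one extra non-key
// attribute into a secondary index; the attribute name is an illustrative
// assumption and the aws helpers are used for pointer conversion.
//
//    proj := &dynamodb.Projection{
//        ProjectionType:   aws.String(dynamodb.ProjectionTypeInclude),
//        NonKeyAttributes: []*string{aws.String("Genre")},
//    }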
@ -13139,8 +13139,8 @@ func (s *Projection) SetProjectionType(v string) *Projection {
// Represents the provisioned throughput settings for a specified table or index.
// The settings can be modified using the UpdateTable operation.
//
// For current minimum and maximum provisioned throughput values, see Limits
// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// For current minimum and maximum provisioned throughput values, see Service,
// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// in the Amazon DynamoDB Developer Guide.
type ProvisionedThroughput struct {
_ struct{} `type:"structure"`
@ -13223,7 +13223,7 @@ type ProvisionedThroughputDescription struct {
// The number of provisioned throughput decreases for this table during this
// UTC calendar day. For current maximums on provisioned throughput decreases,
// see Limits (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// see Service, Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// in the Amazon DynamoDB Developer Guide.
NumberOfDecreasesToday *int64 `min:"1" type:"long"`
@ -14596,6 +14596,10 @@ type ReplicaDescription struct {
// The name of the Region.
RegionName *string `type:"string"`
// The time at which the replica was first detected as inaccessible. To determine
// the cause of inaccessibility, check the ReplicaStatus property.
ReplicaInaccessibleDateTime *time.Time `type:"timestamp"`
// The current state of the replica:
//
// * CREATING - The replica is being created.
@ -14605,6 +14609,12 @@ type ReplicaDescription struct {
// * DELETING - The replica is being deleted.
//
// * ACTIVE - The replica is ready for use.
//
// * REGION_DISABLED - The replica is inaccessible because the AWS Region
// has been disabled. If the AWS Region remains inaccessible for more than
// 20 hours, DynamoDB will remove this replica from the replication group.
// The replica will not be deleted and replication will stop from and to
// this region.
ReplicaStatus *string `type:"string" enum:"ReplicaStatus"`
// Detailed information about the replica status.
@ -14649,6 +14659,12 @@ func (s *ReplicaDescription) SetRegionName(v string) *ReplicaDescription {
return s
}
// SetReplicaInaccessibleDateTime sets the ReplicaInaccessibleDateTime field's value.
func (s *ReplicaDescription) SetReplicaInaccessibleDateTime(v time.Time) *ReplicaDescription {
s.ReplicaInaccessibleDateTime = &v
return s
}
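// Example (sketch): surfacing inaccessible replicas after DescribeTable,
// assuming svc is an existing DynamoDB client; the table name is an
// illustrative assumption and the aws and fmt helpers are used for printing.
//
//    out, err := svc.DescribeTable(&dynamodb.DescribeTableInput{TableName: aws.String("Music")})
//    if err != nil {
//        // handle error
//    }
//    for _, r := range out.Table.Replicas {
//        if aws.StringValue(r.ReplicaStatus) == dynamodb.ReplicaStatusRegionDisabled {
//            fmt.Println(aws.StringValue(r.RegionName), "inaccessible since",
//                aws.TimeValue(r.ReplicaInaccessibleDateTime))
//        }
//    }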
// SetReplicaStatus sets the ReplicaStatus field's value.
func (s *ReplicaDescription) SetReplicaStatus(v string) *ReplicaDescription {
s.ReplicaStatus = &v
@ -15416,9 +15432,9 @@ func (s *ReplicationGroupUpdate) SetUpdate(v *UpdateReplicationGroupMemberAction
return s
}
// Throughput exceeds the current throughput limit for your account. Please
// Throughput exceeds the current throughput quota for your account. Please
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
// a limit increase.
// a quota increase.
type RequestLimitExceeded struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@ -16977,14 +16993,14 @@ type TableDescription struct {
// and index key attributes, which are automatically projected. Each attribute
// specification is composed of: ProjectionType - One of the following: KEYS_ONLY
// - Only the index and primary keys are projected into the index. INCLUDE
// - Only the specified table attributes are projected into the index. The
// list of projected attributes is in NonKeyAttributes. ALL - All of the
// table attributes are projected into the index. NonKeyAttributes - A list
// of one or more non-key attribute names that are projected into the secondary
// index. The total count of attributes provided in NonKeyAttributes, summed
// across all of the secondary indexes, must not exceed 20. If you project
// the same attribute into two different indexes, this counts as two distinct
// attributes when determining the total.
// - In addition to the attributes described in KEYS_ONLY, the secondary
// index will include other non-key attributes that you specify. ALL - All
// of the table attributes are projected into the index. NonKeyAttributes
// - A list of one or more non-key attribute names that are projected into
// the secondary index. The total count of attributes provided in NonKeyAttributes,
// summed across all of the secondary indexes, must not exceed 20. If you
// project the same attribute into two different indexes, this counts as
// two distinct attributes when determining the total.
//
// * ProvisionedThroughput - The provisioned throughput settings for the
// global secondary index, consisting of read and write capacity units, along
@ -18645,8 +18661,8 @@ type UpdateGlobalSecondaryIndexAction struct {
// Represents the provisioned throughput settings for the specified global secondary
// index.
//
// For current minimum and maximum provisioned throughput values, see Limits
// (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// For current minimum and maximum provisioned throughput values, see Service,
// Account, and Table Quotas (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Limits.html)
// in the Amazon DynamoDB Developer Guide.
//
// ProvisionedThroughput is a required field
@ -19885,6 +19901,15 @@ const (
AttributeActionDelete = "DELETE"
)
// AttributeAction_Values returns all elements of the AttributeAction enum
func AttributeAction_Values() []string {
return []string{
AttributeActionAdd,
AttributeActionPut,
AttributeActionDelete,
}
}
const (
// BackupStatusCreating is a BackupStatus enum value
BackupStatusCreating = "CREATING"
@ -19896,6 +19921,15 @@ const (
BackupStatusAvailable = "AVAILABLE"
)
// BackupStatus_Values returns all elements of the BackupStatus enum
func BackupStatus_Values() []string {
return []string{
BackupStatusCreating,
BackupStatusDeleted,
BackupStatusAvailable,
}
}
const (
// BackupTypeUser is a BackupType enum value
BackupTypeUser = "USER"
@ -19907,6 +19941,15 @@ const (
BackupTypeAwsBackup = "AWS_BACKUP"
)
// BackupType_Values returns all elements of the BackupType enum
func BackupType_Values() []string {
return []string{
BackupTypeUser,
BackupTypeSystem,
BackupTypeAwsBackup,
}
}
const (
// BackupTypeFilterUser is a BackupTypeFilter enum value
BackupTypeFilterUser = "USER"
@ -19921,6 +19964,16 @@ const (
BackupTypeFilterAll = "ALL"
)
// BackupTypeFilter_Values returns all elements of the BackupTypeFilter enum
func BackupTypeFilter_Values() []string {
return []string{
BackupTypeFilterUser,
BackupTypeFilterSystem,
BackupTypeFilterAwsBackup,
BackupTypeFilterAll,
}
}
const (
// BillingModeProvisioned is a BillingMode enum value
BillingModeProvisioned = "PROVISIONED"
@ -19929,6 +19982,14 @@ const (
BillingModePayPerRequest = "PAY_PER_REQUEST"
)
// BillingMode_Values returns all elements of the BillingMode enum
func BillingMode_Values() []string {
return []string{
BillingModeProvisioned,
BillingModePayPerRequest,
}
}
const (
// ComparisonOperatorEq is a ComparisonOperator enum value
ComparisonOperatorEq = "EQ"
@ -19970,6 +20031,25 @@ const (
ComparisonOperatorBeginsWith = "BEGINS_WITH"
)
// ComparisonOperator_Values returns all elements of the ComparisonOperator enum
func ComparisonOperator_Values() []string {
return []string{
ComparisonOperatorEq,
ComparisonOperatorNe,
ComparisonOperatorIn,
ComparisonOperatorLe,
ComparisonOperatorLt,
ComparisonOperatorGe,
ComparisonOperatorGt,
ComparisonOperatorBetween,
ComparisonOperatorNotNull,
ComparisonOperatorNull,
ComparisonOperatorContains,
ComparisonOperatorNotContains,
ComparisonOperatorBeginsWith,
}
}
const (
// ConditionalOperatorAnd is a ConditionalOperator enum value
ConditionalOperatorAnd = "AND"
@ -19978,6 +20058,14 @@ const (
ConditionalOperatorOr = "OR"
)
// ConditionalOperator_Values returns all elements of the ConditionalOperator enum
func ConditionalOperator_Values() []string {
return []string{
ConditionalOperatorAnd,
ConditionalOperatorOr,
}
}
const (
// ContinuousBackupsStatusEnabled is a ContinuousBackupsStatus enum value
ContinuousBackupsStatusEnabled = "ENABLED"
@ -19986,6 +20074,14 @@ const (
ContinuousBackupsStatusDisabled = "DISABLED"
)
// ContinuousBackupsStatus_Values returns all elements of the ContinuousBackupsStatus enum
func ContinuousBackupsStatus_Values() []string {
return []string{
ContinuousBackupsStatusEnabled,
ContinuousBackupsStatusDisabled,
}
}
const (
// ContributorInsightsActionEnable is a ContributorInsightsAction enum value
ContributorInsightsActionEnable = "ENABLE"
@ -19994,6 +20090,14 @@ const (
ContributorInsightsActionDisable = "DISABLE"
)
// ContributorInsightsAction_Values returns all elements of the ContributorInsightsAction enum
func ContributorInsightsAction_Values() []string {
return []string{
ContributorInsightsActionEnable,
ContributorInsightsActionDisable,
}
}
const (
// ContributorInsightsStatusEnabling is a ContributorInsightsStatus enum value
ContributorInsightsStatusEnabling = "ENABLING"
@ -20011,6 +20115,17 @@ const (
ContributorInsightsStatusFailed = "FAILED"
)
// ContributorInsightsStatus_Values returns all elements of the ContributorInsightsStatus enum
func ContributorInsightsStatus_Values() []string {
return []string{
ContributorInsightsStatusEnabling,
ContributorInsightsStatusEnabled,
ContributorInsightsStatusDisabling,
ContributorInsightsStatusDisabled,
ContributorInsightsStatusFailed,
}
}
const (
// GlobalTableStatusCreating is a GlobalTableStatus enum value
GlobalTableStatusCreating = "CREATING"
@ -20025,6 +20140,16 @@ const (
GlobalTableStatusUpdating = "UPDATING"
)
// GlobalTableStatus_Values returns all elements of the GlobalTableStatus enum
func GlobalTableStatus_Values() []string {
return []string{
GlobalTableStatusCreating,
GlobalTableStatusActive,
GlobalTableStatusDeleting,
GlobalTableStatusUpdating,
}
}
const (
// IndexStatusCreating is a IndexStatus enum value
IndexStatusCreating = "CREATING"
@ -20039,6 +20164,16 @@ const (
IndexStatusActive = "ACTIVE"
)
// IndexStatus_Values returns all elements of the IndexStatus enum
func IndexStatus_Values() []string {
return []string{
IndexStatusCreating,
IndexStatusUpdating,
IndexStatusDeleting,
IndexStatusActive,
}
}
const (
// KeyTypeHash is a KeyType enum value
KeyTypeHash = "HASH"
@ -20047,6 +20182,14 @@ const (
KeyTypeRange = "RANGE"
)
// KeyType_Values returns all elements of the KeyType enum
func KeyType_Values() []string {
return []string{
KeyTypeHash,
KeyTypeRange,
}
}
const (
// PointInTimeRecoveryStatusEnabled is a PointInTimeRecoveryStatus enum value
PointInTimeRecoveryStatusEnabled = "ENABLED"
@ -20055,6 +20198,14 @@ const (
PointInTimeRecoveryStatusDisabled = "DISABLED"
)
// PointInTimeRecoveryStatus_Values returns all elements of the PointInTimeRecoveryStatus enum
func PointInTimeRecoveryStatus_Values() []string {
return []string{
PointInTimeRecoveryStatusEnabled,
PointInTimeRecoveryStatusDisabled,
}
}
const (
// ProjectionTypeAll is a ProjectionType enum value
ProjectionTypeAll = "ALL"
@ -20066,6 +20217,15 @@ const (
ProjectionTypeInclude = "INCLUDE"
)
// ProjectionType_Values returns all elements of the ProjectionType enum
func ProjectionType_Values() []string {
return []string{
ProjectionTypeAll,
ProjectionTypeKeysOnly,
ProjectionTypeInclude,
}
}
const (
// ReplicaStatusCreating is a ReplicaStatus enum value
ReplicaStatusCreating = "CREATING"
@ -20081,8 +20241,23 @@ const (
// ReplicaStatusActive is a ReplicaStatus enum value
ReplicaStatusActive = "ACTIVE"
// ReplicaStatusRegionDisabled is a ReplicaStatus enum value
ReplicaStatusRegionDisabled = "REGION_DISABLED"
)
// ReplicaStatus_Values returns all elements of the ReplicaStatus enum
func ReplicaStatus_Values() []string {
return []string{
ReplicaStatusCreating,
ReplicaStatusCreationFailed,
ReplicaStatusUpdating,
ReplicaStatusDeleting,
ReplicaStatusActive,
ReplicaStatusRegionDisabled,
}
}
// Determines the level of detail about provisioned throughput consumption that
// is returned in the response:
//
@ -20107,6 +20282,15 @@ const (
ReturnConsumedCapacityNone = "NONE"
)
// ReturnConsumedCapacity_Values returns all elements of the ReturnConsumedCapacity enum
func ReturnConsumedCapacity_Values() []string {
return []string{
ReturnConsumedCapacityIndexes,
ReturnConsumedCapacityTotal,
ReturnConsumedCapacityNone,
}
}
const (
// ReturnItemCollectionMetricsSize is a ReturnItemCollectionMetrics enum value
ReturnItemCollectionMetricsSize = "SIZE"
@ -20115,6 +20299,14 @@ const (
ReturnItemCollectionMetricsNone = "NONE"
)
// ReturnItemCollectionMetrics_Values returns all elements of the ReturnItemCollectionMetrics enum
func ReturnItemCollectionMetrics_Values() []string {
return []string{
ReturnItemCollectionMetricsSize,
ReturnItemCollectionMetricsNone,
}
}
const (
// ReturnValueNone is a ReturnValue enum value
ReturnValueNone = "NONE"
@ -20132,6 +20324,17 @@ const (
ReturnValueUpdatedNew = "UPDATED_NEW"
)
// ReturnValue_Values returns all elements of the ReturnValue enum
func ReturnValue_Values() []string {
return []string{
ReturnValueNone,
ReturnValueAllOld,
ReturnValueUpdatedOld,
ReturnValueAllNew,
ReturnValueUpdatedNew,
}
}
const (
// ReturnValuesOnConditionCheckFailureAllOld is a ReturnValuesOnConditionCheckFailure enum value
ReturnValuesOnConditionCheckFailureAllOld = "ALL_OLD"
@ -20140,6 +20343,14 @@ const (
ReturnValuesOnConditionCheckFailureNone = "NONE"
)
// ReturnValuesOnConditionCheckFailure_Values returns all elements of the ReturnValuesOnConditionCheckFailure enum
func ReturnValuesOnConditionCheckFailure_Values() []string {
return []string{
ReturnValuesOnConditionCheckFailureAllOld,
ReturnValuesOnConditionCheckFailureNone,
}
}
const (
// SSEStatusEnabling is a SSEStatus enum value
SSEStatusEnabling = "ENABLING"
@ -20157,6 +20368,17 @@ const (
SSEStatusUpdating = "UPDATING"
)
// SSEStatus_Values returns all elements of the SSEStatus enum
func SSEStatus_Values() []string {
return []string{
SSEStatusEnabling,
SSEStatusEnabled,
SSEStatusDisabling,
SSEStatusDisabled,
SSEStatusUpdating,
}
}
const (
// SSETypeAes256 is a SSEType enum value
SSETypeAes256 = "AES256"
@ -20165,6 +20387,14 @@ const (
SSETypeKms = "KMS"
)
// SSEType_Values returns all elements of the SSEType enum
func SSEType_Values() []string {
return []string{
SSETypeAes256,
SSETypeKms,
}
}
const (
// ScalarAttributeTypeS is a ScalarAttributeType enum value
ScalarAttributeTypeS = "S"
@ -20176,6 +20406,15 @@ const (
ScalarAttributeTypeB = "B"
)
// ScalarAttributeType_Values returns all elements of the ScalarAttributeType enum
func ScalarAttributeType_Values() []string {
return []string{
ScalarAttributeTypeS,
ScalarAttributeTypeN,
ScalarAttributeTypeB,
}
}
const (
// SelectAllAttributes is a Select enum value
SelectAllAttributes = "ALL_ATTRIBUTES"
@ -20190,6 +20429,16 @@ const (
SelectCount = "COUNT"
)
// Select_Values returns all elements of the Select enum
func Select_Values() []string {
return []string{
SelectAllAttributes,
SelectAllProjectedAttributes,
SelectSpecificAttributes,
SelectCount,
}
}
const (
// StreamViewTypeNewImage is a StreamViewType enum value
StreamViewTypeNewImage = "NEW_IMAGE"
@ -20204,6 +20453,16 @@ const (
StreamViewTypeKeysOnly = "KEYS_ONLY"
)
// StreamViewType_Values returns all elements of the StreamViewType enum
func StreamViewType_Values() []string {
return []string{
StreamViewTypeNewImage,
StreamViewTypeOldImage,
StreamViewTypeNewAndOldImages,
StreamViewTypeKeysOnly,
}
}
const (
// TableStatusCreating is a TableStatus enum value
TableStatusCreating = "CREATING"
@ -20227,6 +20486,19 @@ const (
TableStatusArchived = "ARCHIVED"
)
// TableStatus_Values returns all elements of the TableStatus enum
func TableStatus_Values() []string {
return []string{
TableStatusCreating,
TableStatusUpdating,
TableStatusDeleting,
TableStatusActive,
TableStatusInaccessibleEncryptionCredentials,
TableStatusArchiving,
TableStatusArchived,
}
}
const (
// TimeToLiveStatusEnabling is a TimeToLiveStatus enum value
TimeToLiveStatusEnabling = "ENABLING"
@ -20240,3 +20512,13 @@ const (
// TimeToLiveStatusDisabled is a TimeToLiveStatus enum value
TimeToLiveStatusDisabled = "DISABLED"
)
// TimeToLiveStatus_Values returns all elements of the TimeToLiveStatus enum
func TimeToLiveStatus_Values() []string {
return []string{
TimeToLiveStatusEnabling,
TimeToLiveStatusDisabling,
TimeToLiveStatusEnabled,
TimeToLiveStatusDisabled,
}
}
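// Example (sketch): the new *_Values helpers return every valid string for an
// enum, which makes validating caller-supplied values straightforward;
// BillingMode is used here purely as an illustration.
//
//    func isValidBillingMode(v string) bool {
//        for _, m := range dynamodb.BillingMode_Values() {
//            if m == v {
//                return true
//            }
//        }
//        return false
//    }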

View file

@ -88,7 +88,7 @@
// the reliance on encoding.json. `json` struct tags are still supported. In
// addition support for a new struct tag `dynamodbav` was added. Support for
// the json.Marshaler and json.Unmarshaler interfaces have been removed and
// replaced with have been replaced with dynamodbattribute.Marshaler and
// replaced with dynamodbattribute.Marshaler and
// dynamodbattribute.Unmarshaler interfaces.
//
// The Unmarshal functions are backwards compatible with data marshalled by

View file

@ -92,7 +92,7 @@ const (
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account limit of 256 tables.
// There is a soft account quota of 256 tables.
ErrCodeLimitExceededException = "LimitExceededException"
// ErrCodePointInTimeRecoveryUnavailableException for service response error code
@ -127,9 +127,9 @@ const (
// ErrCodeRequestLimitExceeded for service response error code
// "RequestLimitExceeded".
//
// Throughput exceeds the current throughput limit for your account. Please
// Throughput exceeds the current throughput quota for your account. Please
// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
// a limit increase.
// a quota increase.
ErrCodeRequestLimitExceeded = "RequestLimitExceeded"
// ErrCodeResourceInUseException for service response error code

View file

@ -78,7 +78,8 @@ func (c *DynamoDBStreams) DescribeStreamRequest(input *DescribeStreamInput) (req
//
// Returned Error Types:
// * ResourceNotFoundException
// The operation tried to access a nonexistent stream.
// The operation tried to access a nonexistent table or index. The resource
// might not be specified correctly, or its status might not be ACTIVE.
//
// * InternalServerError
// An error occurred on the server side.
@ -170,15 +171,22 @@ func (c *DynamoDBStreams) GetRecordsRequest(input *GetRecordsInput) (req *reques
//
// Returned Error Types:
// * ResourceNotFoundException
// The operation tried to access a nonexistent stream.
// The operation tried to access a nonexistent table or index. The resource
// might not be specified correctly, or its status might not be ACTIVE.
//
// * LimitExceededException
// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
// requests that receive this exception. Your request is eventually successful,
// unless your retry queue is too large to finish. Reduce the frequency of requests
// and use exponential backoff. For more information, go to Error Retries and
// Exponential Backoff (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#APIRetries)
// in the Amazon DynamoDB Developer Guide.
// There is no limit to the number of daily on-demand backups that can be taken.
//
// Up to 50 simultaneous table operations are allowed per account. These operations
// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
// and RestoreTableToPointInTime.
//
// The only exception is when you are creating a table with one or more secondary
// indexes. You can have up to 25 such requests running at a time; however,
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account quota of 256 tables.
//
// * InternalServerError
// An error occurred on the server side.
@ -283,7 +291,8 @@ func (c *DynamoDBStreams) GetShardIteratorRequest(input *GetShardIteratorInput)
//
// Returned Error Types:
// * ResourceNotFoundException
// The operation tried to access a nonexistent stream.
// The operation tried to access a nonexistent table or index. The resource
// might not be specified correctly, or its status might not be ACTIVE.
//
// * InternalServerError
// An error occurred on the server side.
@ -383,7 +392,8 @@ func (c *DynamoDBStreams) ListStreamsRequest(input *ListStreamsInput) (req *requ
//
// Returned Error Types:
// * ResourceNotFoundException
// The operation tried to access a nonexistent stream.
// The operation tried to access a nonexistent table or index. The resource
// might not be specified correctly, or its status might not be ACTIVE.
//
// * InternalServerError
// An error occurred on the server side.
@ -872,12 +882,18 @@ func (s *InternalServerError) RequestID() string {
return s.RespMetadata.RequestID
}
// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
// requests that receive this exception. Your request is eventually successful,
// unless your retry queue is too large to finish. Reduce the frequency of requests
// and use exponential backoff. For more information, go to Error Retries and
// Exponential Backoff (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#APIRetries)
// in the Amazon DynamoDB Developer Guide.
// There is no limit to the number of daily on-demand backups that can be taken.
//
// Up to 50 simultaneous table operations are allowed per account. These operations
// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
// and RestoreTableToPointInTime.
//
// The only exception is when you are creating a table with one or more secondary
// indexes. You can have up to 25 such requests running at a time; however,
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account quota of 256 tables.
type LimitExceededException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@ -1137,7 +1153,8 @@ func (s *Record) SetUserIdentity(v *Identity) *Record {
return s
}
// The operation tried to access a nonexistent stream.
// The operation tried to access a nonexistent table or index. The resource
// might not be specified correctly, or its status might not be ACTIVE.
type ResourceNotFoundException struct {
_ struct{} `type:"structure"`
RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"`
@ -1199,10 +1216,12 @@ func (s *ResourceNotFoundException) RequestID() string {
type SequenceNumberRange struct {
_ struct{} `type:"structure"`
// The last sequence number.
// The last sequence number for the stream records contained within a shard.
// String contains numeric characters only.
EndingSequenceNumber *string `min:"21" type:"string"`
// The first sequence number.
// The first sequence number for the stream records contained within a shard.
// String contains numeric characters only.
StartingSequenceNumber *string `min:"21" type:"string"`
}
@ -1624,6 +1643,14 @@ const (
KeyTypeRange = "RANGE"
)
// KeyType_Values returns all elements of the KeyType enum
func KeyType_Values() []string {
return []string{
KeyTypeHash,
KeyTypeRange,
}
}
const (
// OperationTypeInsert is a OperationType enum value
OperationTypeInsert = "INSERT"
@ -1635,6 +1662,15 @@ const (
OperationTypeRemove = "REMOVE"
)
// OperationType_Values returns all elements of the OperationType enum
func OperationType_Values() []string {
return []string{
OperationTypeInsert,
OperationTypeModify,
OperationTypeRemove,
}
}
const (
// ShardIteratorTypeTrimHorizon is a ShardIteratorType enum value
ShardIteratorTypeTrimHorizon = "TRIM_HORIZON"
@ -1649,6 +1685,16 @@ const (
ShardIteratorTypeAfterSequenceNumber = "AFTER_SEQUENCE_NUMBER"
)
// ShardIteratorType_Values returns all elements of the ShardIteratorType enum
func ShardIteratorType_Values() []string {
return []string{
ShardIteratorTypeTrimHorizon,
ShardIteratorTypeLatest,
ShardIteratorTypeAtSequenceNumber,
ShardIteratorTypeAfterSequenceNumber,
}
}
const (
// StreamStatusEnabling is a StreamStatus enum value
StreamStatusEnabling = "ENABLING"
@ -1663,6 +1709,16 @@ const (
StreamStatusDisabled = "DISABLED"
)
// StreamStatus_Values returns all elements of the StreamStatus enum
func StreamStatus_Values() []string {
return []string{
StreamStatusEnabling,
StreamStatusEnabled,
StreamStatusDisabling,
StreamStatusDisabled,
}
}
const (
// StreamViewTypeNewImage is a StreamViewType enum value
StreamViewTypeNewImage = "NEW_IMAGE"
@ -1676,3 +1732,13 @@ const (
// StreamViewTypeKeysOnly is a StreamViewType enum value
StreamViewTypeKeysOnly = "KEYS_ONLY"
)
// StreamViewType_Values returns all elements of the StreamViewType enum
func StreamViewType_Values() []string {
return []string{
StreamViewTypeNewImage,
StreamViewTypeOldImage,
StreamViewTypeNewAndOldImages,
StreamViewTypeKeysOnly,
}
}
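// Example (sketch): walking a table's stream with this client, assuming
// streamARN already holds the stream's ARN; shard and record pagination are
// omitted for brevity, and the session setup is an illustrative assumption.
//
//    svc := dynamodbstreams.New(session.Must(session.NewSession()))
//    desc, err := svc.DescribeStream(&dynamodbstreams.DescribeStreamInput{
//        StreamArn: aws.String(streamARN),
//    })
//    if err != nil {
//        // handle error
//    }
//    for _, shard := range desc.StreamDescription.Shards {
//        it, err := svc.GetShardIterator(&dynamodbstreams.GetShardIteratorInput{
//            StreamArn:         aws.String(streamARN),
//            ShardId:           shard.ShardId,
//            ShardIteratorType: aws.String(dynamodbstreams.ShardIteratorTypeTrimHorizon),
//        })
//        if err != nil {
//            // handle error
//        }
//        recs, err := svc.GetRecords(&dynamodbstreams.GetRecordsInput{ShardIterator: it.ShardIterator})
//        if err != nil {
//            // handle error
//        }
//        fmt.Println(len(recs.Records), "records in shard", aws.StringValue(shard.ShardId))
//    }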

View file

@ -5,7 +5,7 @@
//
// Amazon DynamoDB Streams provides API actions for accessing streams and processing
// stream records. To learn more about application development with Streams,
// see Capturing Table Activity with DynamoDB Streams (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html)
// see Capturing Table Activity with DynamoDB Streams (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html)
// in the Amazon DynamoDB Developer Guide.
//
// See https://docs.aws.amazon.com/goto/WebAPI/streams-dynamodb-2012-08-10 for more information on this service.

View file

@ -25,18 +25,25 @@ const (
// ErrCodeLimitExceededException for service response error code
// "LimitExceededException".
//
// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
// requests that receive this exception. Your request is eventually successful,
// unless your retry queue is too large to finish. Reduce the frequency of requests
// and use exponential backoff. For more information, go to Error Retries and
// Exponential Backoff (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ErrorHandling.html#APIRetries)
// in the Amazon DynamoDB Developer Guide.
// There is no limit to the number of daily on-demand backups that can be taken.
//
// Up to 50 simultaneous table operations are allowed per account. These operations
// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
// and RestoreTableToPointInTime.
//
// The only exception is when you are creating a table with one or more secondary
// indexes. You can have up to 25 such requests running at a time; however,
// if the table or index specifications are complex, DynamoDB might temporarily
// reduce the number of concurrent operations.
//
// There is a soft account quota of 256 tables.
ErrCodeLimitExceededException = "LimitExceededException"
// ErrCodeResourceNotFoundException for service response error code
// "ResourceNotFoundException".
//
// The operation tried to access a nonexistent stream.
// The operation tried to access a nonexistent table or index. The resource
// might not be specified correctly, or its status might not be ACTIVE.
ErrCodeResourceNotFoundException = "ResourceNotFoundException"
// ErrCodeTrimmedDataAccessException for service response error code

File diff suppressed because it is too large

View file

@ -3,8 +3,8 @@ package s3
import (
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/s3err"
"github.com/aws/aws-sdk-go/service/s3/internal/arn"
"github.com/aws/aws-sdk-go/internal/s3shared/arn"
"github.com/aws/aws-sdk-go/internal/s3shared/s3err"
)
func init() {
@ -69,6 +69,8 @@ type copySourceSSECustomerKeyGetter interface {
getCopySourceSSECustomerKey() string
}
// endpointARNGetter is an accessor interface to grab the
// field corresponding to an endpoint ARN input.
type endpointARNGetter interface {
getEndpointARN() (arn.Resource, error)
hasEndpointARN() bool

View file

@ -104,19 +104,6 @@
// content from S3. The Encryption and Decryption clients can be used concurrently
// once the client is created.
//
// sess := session.Must(session.NewSession())
//
// // Create the decryption client.
// svc := s3crypto.NewDecryptionClient(sess)
//
// // The object will be downloaded from S3 and decrypted locally. By metadata
// // about the object's encryption will instruct the decryption client how
// // decrypt the content of the object. By default KMS is used for keys.
// result, err := svc.GetObject(&s3.GetObjectInput {
// Bucket: aws.String(myBucket),
// Key: aws.String(myKey),
// })
//
// See the s3crypto package documentation for more information.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/s3crypto/
//

View file

@ -6,11 +6,9 @@ import (
"github.com/aws/aws-sdk-go/aws"
awsarn "github.com/aws/aws-sdk-go/aws/arn"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/private/protocol"
"github.com/aws/aws-sdk-go/service/s3/internal/arn"
"github.com/aws/aws-sdk-go/internal/s3shared"
"github.com/aws/aws-sdk-go/internal/s3shared/arn"
)
// Used by shapes with members decorated as endpoint ARN.
@ -22,12 +20,66 @@ func accessPointResourceParser(a awsarn.ARN) (arn.Resource, error) {
resParts := arn.SplitResource(a.Resource)
switch resParts[0] {
case "accesspoint":
if a.Service != "s3" {
return arn.AccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3"}
}
return arn.ParseAccessPointResource(a, resParts[1:])
case "outpost":
if a.Service != "s3-outposts" {
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
}
return parseOutpostAccessPointResource(a, resParts[1:])
default:
return nil, arn.InvalidARNError{ARN: a, Reason: "unknown resource type"}
}
}
// parseOutpostAccessPointResource attempts to parse the ARN's resource as an
// outpost access-point resource.
//
// Supported Outpost AccessPoint ARN format:
// - ARN format: arn:{partition}:s3-outposts:{region}:{accountId}:outpost/{outpostId}/accesspoint/{accesspointName}
// - example: arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint
//
func parseOutpostAccessPointResource(a awsarn.ARN, resParts []string) (arn.OutpostAccessPointARN, error) {
// outpost accesspoint arn is only valid if service is s3-outposts
if a.Service != "s3-outposts" {
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "service is not s3-outposts"}
}
if len(resParts) == 0 {
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
}
if len(resParts) < 3 {
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{
ARN: a, Reason: "access-point resource not set in Outpost ARN",
}
}
resID := strings.TrimSpace(resParts[0])
if len(resID) == 0 {
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "outpost resource-id not set"}
}
var outpostAccessPointARN = arn.OutpostAccessPointARN{}
switch resParts[1] {
case "accesspoint":
accessPointARN, err := arn.ParseAccessPointResource(a, resParts[2:])
if err != nil {
return arn.OutpostAccessPointARN{}, err
}
// set access-point arn
outpostAccessPointARN.AccessPointARN = accessPointARN
default:
return arn.OutpostAccessPointARN{}, arn.InvalidARNError{ARN: a, Reason: "access-point resource not set in Outpost ARN"}
}
// set outpost id
outpostAccessPointARN.OutpostID = resID
return outpostAccessPointARN, nil
}
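// Example (sketch): an Outpost access point ARN in the format above can be
// passed as the Bucket member of an S3 request and is routed through this
// parser; the ARN and key are illustrative assumptions, and svc is assumed to
// be an existing S3 client.
//
//    out, err := svc.GetObject(&s3.GetObjectInput{
//        Bucket: aws.String("arn:aws:s3-outposts:us-west-2:012345678901:outpost/op-1234567890123456/accesspoint/myaccesspoint"),
//        Key:    aws.String("exampleobject"),
//    })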
func endpointHandler(req *request.Request) {
endpoint, ok := req.Params.(endpointARNGetter)
if !ok || !endpoint.hasEndpointARN() {
@ -37,29 +89,29 @@ func endpointHandler(req *request.Request) {
resource, err := endpoint.getEndpointARN()
if err != nil {
req.Error = newInvalidARNError(nil, err)
req.Error = s3shared.NewInvalidARNError(nil, err)
return
}
resReq := resourceRequest{
resReq := s3shared.ResourceRequest{
Resource: resource,
Request: req,
}
if resReq.IsCrossPartition() {
req.Error = newClientPartitionMismatchError(resource,
req.Error = s3shared.NewClientPartitionMismatchError(resource,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
return
}
if !resReq.AllowCrossRegion() && resReq.IsCrossRegion() {
req.Error = newClientRegionMismatchError(resource,
req.Error = s3shared.NewClientRegionMismatchError(resource,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
return
}
if resReq.HasCustomEndpoint() {
req.Error = newInvalidARNWithCustomEndpointError(resource, nil)
req.Error = s3shared.NewInvalidARNWithCustomEndpointError(resource, nil)
return
}
@ -69,47 +121,22 @@ func endpointHandler(req *request.Request) {
if err != nil {
req.Error = err
}
case arn.OutpostAccessPointARN:
// outposts does not support FIPS regions
if resReq.ResourceConfiguredForFIPS() {
req.Error = s3shared.NewInvalidARNWithFIPSError(resource, nil)
return
}
err = updateRequestOutpostAccessPointEndpoint(req, tv)
if err != nil {
req.Error = err
}
default:
req.Error = newInvalidARNError(resource, nil)
req.Error = s3shared.NewInvalidARNError(resource, nil)
}
}
type resourceRequest struct {
Resource arn.Resource
Request *request.Request
}
func (r resourceRequest) ARN() awsarn.ARN {
return r.Resource.GetARN()
}
func (r resourceRequest) AllowCrossRegion() bool {
return aws.BoolValue(r.Request.Config.S3UseARNRegion)
}
func (r resourceRequest) UseFIPS() bool {
return isFIPS(aws.StringValue(r.Request.Config.Region))
}
func (r resourceRequest) IsCrossPartition() bool {
return r.Request.ClientInfo.PartitionID != r.Resource.GetARN().Partition
}
func (r resourceRequest) IsCrossRegion() bool {
return isCrossRegion(r.Request, r.Resource.GetARN().Region)
}
func (r resourceRequest) HasCustomEndpoint() bool {
return len(aws.StringValue(r.Request.Config.Endpoint)) > 0
}
func isFIPS(clientRegion string) bool {
return strings.HasPrefix(clientRegion, "fips-") || strings.HasSuffix(clientRegion, "-fips")
}
func isCrossRegion(req *request.Request, otherRegion string) bool {
return req.ClientInfo.SigningRegion != otherRegion
}
func updateBucketEndpointFromParams(r *request.Request) {
bucket, ok := bucketNameFromReqParams(r.Params)
if !ok {
@ -124,7 +151,7 @@ func updateBucketEndpointFromParams(r *request.Request) {
func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.AccessPointARN) error {
// Accelerate not supported
if aws.BoolValue(req.Config.S3UseAccelerate) {
return newClientConfiguredForAccelerateError(accessPoint,
return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
}
@ -132,7 +159,7 @@ func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.Acce
// are not supported.
req.Config.DisableEndpointHostPrefix = aws.Bool(false)
if err := accessPointEndpointBuilder(accessPoint).Build(req); err != nil {
if err := accessPointEndpointBuilder(accessPoint).build(req); err != nil {
return err
}
@ -141,93 +168,34 @@ func updateRequestAccessPointEndpoint(req *request.Request, accessPoint arn.Acce
return nil
}
func updateRequestOutpostAccessPointEndpoint(req *request.Request, accessPoint arn.OutpostAccessPointARN) error {
// Accelerate not supported
if aws.BoolValue(req.Config.S3UseAccelerate) {
return s3shared.NewClientConfiguredForAccelerateError(accessPoint,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
}
// Dualstack not supported
if aws.BoolValue(req.Config.UseDualStack) {
return s3shared.NewClientConfiguredForDualStackError(accessPoint,
req.ClientInfo.PartitionID, aws.StringValue(req.Config.Region), nil)
}
// Ignore the disable host prefix for access points since custom endpoints
// are not supported.
req.Config.DisableEndpointHostPrefix = aws.Bool(false)
if err := outpostAccessPointEndpointBuilder(accessPoint).build(req); err != nil {
return err
}
removeBucketFromPath(req.HTTPRequest.URL)
return nil
}
func removeBucketFromPath(u *url.URL) {
u.Path = strings.Replace(u.Path, "/{Bucket}", "", -1)
if u.Path == "" {
u.Path = "/"
}
}
type accessPointEndpointBuilder arn.AccessPointARN
const (
accessPointPrefixLabel = "accesspoint"
accountIDPrefixLabel = "accountID"
accesPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}."
)
func (a accessPointEndpointBuilder) Build(req *request.Request) error {
resolveRegion := arn.AccessPointARN(a).Region
cfgRegion := aws.StringValue(req.Config.Region)
if isFIPS(cfgRegion) {
if aws.BoolValue(req.Config.S3UseARNRegion) && isCrossRegion(req, resolveRegion) {
// FIPS with cross region is not supported, the SDK must fail
// because there is no well defined method for SDK to construct a
// correct FIPS endpoint.
return newClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a),
req.ClientInfo.PartitionID, cfgRegion, nil)
}
resolveRegion = cfgRegion
}
endpoint, err := resolveRegionalEndpoint(req, resolveRegion)
if err != nil {
return newFailedToResolveEndpointError(arn.AccessPointARN(a),
req.ClientInfo.PartitionID, cfgRegion, err)
}
if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
return err
}
const serviceEndpointLabel = "s3-accesspoint"
// dualstack provided by endpoint resolver
cfgHost := req.HTTPRequest.URL.Host
if strings.HasPrefix(cfgHost, "s3") {
req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:]
}
protocol.HostPrefixBuilder{
Prefix: accesPointPrefixTemplate,
LabelsFn: a.hostPrefixLabelValues,
}.Build(req)
req.ClientInfo.SigningName = endpoint.SigningName
req.ClientInfo.SigningRegion = endpoint.SigningRegion
err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
if err != nil {
return newInvalidARNError(arn.AccessPointARN(a), err)
}
return nil
}
func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
return map[string]string{
accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName,
accountIDPrefixLabel: arn.AccessPointARN(a).AccountID,
}
}
func resolveRegionalEndpoint(r *request.Request, region string) (endpoints.ResolvedEndpoint, error) {
return r.Config.EndpointResolver.EndpointFor(EndpointsID, region, func(opts *endpoints.Options) {
opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL)
opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack)
opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint
})
}
func updateRequestEndpoint(r *request.Request, endpoint string) (err error) {
endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL))
r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath)
if err != nil {
return awserr.New(request.ErrCodeSerialization,
"failed to parse endpoint URL", err)
}
return nil
}

View file

@ -0,0 +1,177 @@
package s3
import (
"net/url"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/internal/s3shared"
"github.com/aws/aws-sdk-go/internal/s3shared/arn"
"github.com/aws/aws-sdk-go/private/protocol"
)
const (
accessPointPrefixLabel = "accesspoint"
accountIDPrefixLabel = "accountID"
accessPointPrefixTemplate = "{" + accessPointPrefixLabel + "}-{" + accountIDPrefixLabel + "}."
outpostPrefixLabel = "outpost"
outpostAccessPointPrefixTemplate = accessPointPrefixTemplate + "{" + outpostPrefixLabel + "}."
)
// accessPointEndpointBuilder represents the endpoint builder for access point arn
type accessPointEndpointBuilder arn.AccessPointARN
// build builds the endpoint for corresponding access point arn
//
// For building an endpoint from access point arn, format used is:
// - Access point endpoint format : {accesspointName}-{accountId}.s3-accesspoint.{region}.{dnsSuffix}
// - example : myaccesspoint-012345678901.s3-accesspoint.us-west-2.amazonaws.com
//
// Access Point Endpoint requests are signed using "s3" as signing name.
//
func (a accessPointEndpointBuilder) build(req *request.Request) error {
resolveService := arn.AccessPointARN(a).Service
resolveRegion := arn.AccessPointARN(a).Region
cfgRegion := aws.StringValue(req.Config.Region)
if s3shared.IsFIPS(cfgRegion) {
if aws.BoolValue(req.Config.S3UseARNRegion) && s3shared.IsCrossRegion(req, resolveRegion) {
// FIPS with cross region is not supported, the SDK must fail
// because there is no well defined method for SDK to construct a
// correct FIPS endpoint.
return s3shared.NewClientConfiguredForCrossRegionFIPSError(arn.AccessPointARN(a),
req.ClientInfo.PartitionID, cfgRegion, nil)
}
resolveRegion = cfgRegion
}
endpoint, err := resolveRegionalEndpoint(req, resolveRegion, resolveService)
if err != nil {
return s3shared.NewFailedToResolveEndpointError(arn.AccessPointARN(a),
req.ClientInfo.PartitionID, cfgRegion, err)
}
if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
return err
}
const serviceEndpointLabel = "s3-accesspoint"
// dual stack provided by endpoint resolver
cfgHost := req.HTTPRequest.URL.Host
if strings.HasPrefix(cfgHost, "s3") {
req.HTTPRequest.URL.Host = serviceEndpointLabel + cfgHost[2:]
}
protocol.HostPrefixBuilder{
Prefix: accessPointPrefixTemplate,
LabelsFn: a.hostPrefixLabelValues,
}.Build(req)
// signer redirection
redirectSigner(req, endpoint.SigningName, endpoint.SigningRegion)
err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
if err != nil {
return s3shared.NewInvalidARNError(arn.AccessPointARN(a), err)
}
return nil
}
func (a accessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
return map[string]string{
accessPointPrefixLabel: arn.AccessPointARN(a).AccessPointName,
accountIDPrefixLabel: arn.AccessPointARN(a).AccountID,
}
}
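// Example (sketch): callers do not use this builder directly; passing an
// access point ARN as the Bucket member is enough and the SDK resolves the
// endpoint format shown above. The ARN, key, and session setup are
// illustrative assumptions.
//
//    svc := s3.New(session.Must(session.NewSession()))
//    out, err := svc.GetObject(&s3.GetObjectInput{
//        Bucket: aws.String("arn:aws:s3:us-west-2:012345678901:accesspoint/myaccesspoint"),
//        Key:    aws.String("exampleobject"),
//    })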
// outpostAccessPointEndpointBuilder represents the Endpoint builder for outpost access point arn.
type outpostAccessPointEndpointBuilder arn.OutpostAccessPointARN
// build builds an endpoint corresponding to the outpost access point arn.
//
// For building an endpoint from outpost access point arn, format used is:
// - Outpost access point endpoint format : {accesspointName}-{accountId}.{outpostId}.s3-outposts.{region}.{dnsSuffix}
// - example : myaccesspoint-012345678901.op-01234567890123456.s3-outposts.us-west-2.amazonaws.com
//
// Outpost AccessPoint Endpoint requests are signed using "s3-outposts" as signing name.
//
func (o outpostAccessPointEndpointBuilder) build(req *request.Request) error {
resolveRegion := o.Region
resolveService := o.Service
endpointsID := resolveService
if resolveService == "s3-outposts" {
endpointsID = "s3"
}
endpoint, err := resolveRegionalEndpoint(req, resolveRegion, endpointsID)
if err != nil {
return s3shared.NewFailedToResolveEndpointError(o,
req.ClientInfo.PartitionID, resolveRegion, err)
}
if err = updateRequestEndpoint(req, endpoint.URL); err != nil {
return err
}
// add url host as s3-outposts
cfgHost := req.HTTPRequest.URL.Host
if strings.HasPrefix(cfgHost, endpointsID) {
req.HTTPRequest.URL.Host = resolveService + cfgHost[len(endpointsID):]
}
protocol.HostPrefixBuilder{
Prefix: outpostAccessPointPrefixTemplate,
LabelsFn: o.hostPrefixLabelValues,
}.Build(req)
// set the signing region, name to resolved names from ARN
redirectSigner(req, resolveService, resolveRegion)
err = protocol.ValidateEndpointHost(req.Operation.Name, req.HTTPRequest.URL.Host)
if err != nil {
return s3shared.NewInvalidARNError(o, err)
}
return nil
}
func (o outpostAccessPointEndpointBuilder) hostPrefixLabelValues() map[string]string {
return map[string]string{
accessPointPrefixLabel: o.AccessPointName,
accountIDPrefixLabel: o.AccountID,
outpostPrefixLabel: o.OutpostID,
}
}
func resolveRegionalEndpoint(r *request.Request, region string, endpointsID string) (endpoints.ResolvedEndpoint, error) {
return r.Config.EndpointResolver.EndpointFor(endpointsID, region, func(opts *endpoints.Options) {
opts.DisableSSL = aws.BoolValue(r.Config.DisableSSL)
opts.UseDualStack = aws.BoolValue(r.Config.UseDualStack)
opts.S3UsEast1RegionalEndpoint = endpoints.RegionalS3UsEast1Endpoint
})
}
func updateRequestEndpoint(r *request.Request, endpoint string) (err error) {
endpoint = endpoints.AddScheme(endpoint, aws.BoolValue(r.Config.DisableSSL))
r.HTTPRequest.URL, err = url.Parse(endpoint + r.Operation.HTTPPath)
if err != nil {
return awserr.New(request.ErrCodeSerialization,
"failed to parse endpoint URL", err)
}
return nil
}
// redirectSigner sets signing name, signing region for a request
func redirectSigner(req *request.Request, signingName string, signingRegion string) {
req.ClientInfo.SigningName = signingName
req.ClientInfo.SigningRegion = signingRegion
}

View file

@ -1,151 +0,0 @@
package s3
import (
"fmt"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/s3/internal/arn"
)
const (
invalidARNErrorErrCode = "InvalidARNError"
configurationErrorErrCode = "ConfigurationError"
)
type invalidARNError struct {
message string
resource arn.Resource
origErr error
}
func (e invalidARNError) Error() string {
var extra string
if e.resource != nil {
extra = "ARN: " + e.resource.String()
}
return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
}
func (e invalidARNError) Code() string {
return invalidARNErrorErrCode
}
func (e invalidARNError) Message() string {
return e.message
}
func (e invalidARNError) OrigErr() error {
return e.origErr
}
func newInvalidARNError(resource arn.Resource, err error) invalidARNError {
return invalidARNError{
message: "invalid ARN",
origErr: err,
resource: resource,
}
}
func newInvalidARNWithCustomEndpointError(resource arn.Resource, err error) invalidARNError {
return invalidARNError{
message: "resource ARN not supported with custom client endpoints",
origErr: err,
resource: resource,
}
}
// ARN not supported for the target partition
func newInvalidARNWithUnsupportedPartitionError(resource arn.Resource, err error) invalidARNError {
return invalidARNError{
message: "resource ARN not supported for the target ARN partition",
origErr: err,
resource: resource,
}
}
type configurationError struct {
message string
resource arn.Resource
clientPartitionID string
clientRegion string
origErr error
}
func (e configurationError) Error() string {
extra := fmt.Sprintf("ARN: %s, client partition: %s, client region: %s",
e.resource, e.clientPartitionID, e.clientRegion)
return awserr.SprintError(e.Code(), e.Message(), extra, e.origErr)
}
func (e configurationError) Code() string {
return configurationErrorErrCode
}
func (e configurationError) Message() string {
return e.message
}
func (e configurationError) OrigErr() error {
return e.origErr
}
func newClientPartitionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
return configurationError{
message: "client partition does not match provided ARN partition",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
func newClientRegionMismatchError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
return configurationError{
message: "client region does not match provided ARN region",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
func newFailedToResolveEndpointError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
return configurationError{
message: "endpoint resolver failed to find an endpoint for the provided ARN region",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
func newClientConfiguredForFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
return configurationError{
message: "client configured for fips but cross-region resource ARN provided",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
func newClientConfiguredForAccelerateError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
return configurationError{
message: "client configured for S3 Accelerate but is supported with resource ARN",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}
func newClientConfiguredForCrossRegionFIPSError(resource arn.Resource, clientPartitionID, clientRegion string, err error) configurationError {
return configurationError{
message: "client configured for FIPS with cross-region enabled but is supported with cross-region resource ARN",
origErr: err,
resource: resource,
clientPartitionID: clientPartitionID,
clientRegion: clientRegion,
}
}

View file

@ -8,7 +8,7 @@ const (
// "BucketAlreadyExists".
//
// The requested bucket name is not available. The bucket namespace is shared
// by all users of the system. Please select a different name and try again.
// by all users of the system. Select a different name and try again.
ErrCodeBucketAlreadyExists = "BucketAlreadyExists"
// ErrCodeBucketAlreadyOwnedByYou for service response error code

View file

@ -108,6 +108,10 @@ type S3API interface {
DeleteBucketMetricsConfigurationWithContext(aws.Context, *s3.DeleteBucketMetricsConfigurationInput, ...request.Option) (*s3.DeleteBucketMetricsConfigurationOutput, error)
DeleteBucketMetricsConfigurationRequest(*s3.DeleteBucketMetricsConfigurationInput) (*request.Request, *s3.DeleteBucketMetricsConfigurationOutput)
DeleteBucketOwnershipControls(*s3.DeleteBucketOwnershipControlsInput) (*s3.DeleteBucketOwnershipControlsOutput, error)
DeleteBucketOwnershipControlsWithContext(aws.Context, *s3.DeleteBucketOwnershipControlsInput, ...request.Option) (*s3.DeleteBucketOwnershipControlsOutput, error)
DeleteBucketOwnershipControlsRequest(*s3.DeleteBucketOwnershipControlsInput) (*request.Request, *s3.DeleteBucketOwnershipControlsOutput)
DeleteBucketPolicy(*s3.DeleteBucketPolicyInput) (*s3.DeleteBucketPolicyOutput, error)
DeleteBucketPolicyWithContext(aws.Context, *s3.DeleteBucketPolicyInput, ...request.Option) (*s3.DeleteBucketPolicyOutput, error)
DeleteBucketPolicyRequest(*s3.DeleteBucketPolicyInput) (*request.Request, *s3.DeleteBucketPolicyOutput)
@ -192,6 +196,10 @@ type S3API interface {
GetBucketNotificationConfigurationWithContext(aws.Context, *s3.GetBucketNotificationConfigurationRequest, ...request.Option) (*s3.NotificationConfiguration, error)
GetBucketNotificationConfigurationRequest(*s3.GetBucketNotificationConfigurationRequest) (*request.Request, *s3.NotificationConfiguration)
GetBucketOwnershipControls(*s3.GetBucketOwnershipControlsInput) (*s3.GetBucketOwnershipControlsOutput, error)
GetBucketOwnershipControlsWithContext(aws.Context, *s3.GetBucketOwnershipControlsInput, ...request.Option) (*s3.GetBucketOwnershipControlsOutput, error)
GetBucketOwnershipControlsRequest(*s3.GetBucketOwnershipControlsInput) (*request.Request, *s3.GetBucketOwnershipControlsOutput)
GetBucketPolicy(*s3.GetBucketPolicyInput) (*s3.GetBucketPolicyOutput, error)
GetBucketPolicyWithContext(aws.Context, *s3.GetBucketPolicyInput, ...request.Option) (*s3.GetBucketPolicyOutput, error)
GetBucketPolicyRequest(*s3.GetBucketPolicyInput) (*request.Request, *s3.GetBucketPolicyOutput)
@ -359,6 +367,10 @@ type S3API interface {
PutBucketNotificationConfigurationWithContext(aws.Context, *s3.PutBucketNotificationConfigurationInput, ...request.Option) (*s3.PutBucketNotificationConfigurationOutput, error)
PutBucketNotificationConfigurationRequest(*s3.PutBucketNotificationConfigurationInput) (*request.Request, *s3.PutBucketNotificationConfigurationOutput)
PutBucketOwnershipControls(*s3.PutBucketOwnershipControlsInput) (*s3.PutBucketOwnershipControlsOutput, error)
PutBucketOwnershipControlsWithContext(aws.Context, *s3.PutBucketOwnershipControlsInput, ...request.Option) (*s3.PutBucketOwnershipControlsOutput, error)
PutBucketOwnershipControlsRequest(*s3.PutBucketOwnershipControlsInput) (*request.Request, *s3.PutBucketOwnershipControlsOutput)
PutBucketPolicy(*s3.PutBucketPolicyInput) (*s3.PutBucketPolicyOutput, error)
PutBucketPolicyWithContext(aws.Context, *s3.PutBucketPolicyInput, ...request.Option) (*s3.PutBucketPolicyOutput, error)
PutBucketPolicyRequest(*s3.PutBucketPolicyInput) (*request.Request, *s3.PutBucketPolicyOutput)
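The added *OwnershipControls methods expand the s3iface.S3API interface. As a hedged sketch (hypothetical bucket name, minimal error handling), code written against the interface picks up the new methods automatically because the concrete client still satisfies it:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3iface"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))

	// The concrete S3 client satisfies the expanded interface.
	var api s3iface.S3API = s3.New(sess)

	out, err := api.GetBucketOwnershipControls(&s3.GetBucketOwnershipControlsInput{
		Bucket: aws.String("example-bucket"), // hypothetical bucket name
	})
	if err != nil {
		fmt.Println("get ownership controls failed:", err)
		return
	}
	fmt.Println(out)
}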

View file

@ -3,6 +3,7 @@ package s3manager
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/service/s3"
@ -35,6 +36,30 @@ import (
// }
// fmt.Printf("Bucket %s is in %s region\n", bucket, region)
//
// By default the request will be made to the Amazon S3 endpoint using the Path
// style addressing.
//
// s3.us-west-2.amazonaws.com/bucketname
//
// This is not compatible with Amazon S3's FIPS endpoints. To override this
// behavior to use Virtual Host style addressing, provide a functional option
// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
//
// region, err := s3manager.GetBucketRegion(ctx, sess, "bucketname", "us-west-2", func(r *request.Request) {
// r.S3ForcePathStyle = aws.Bool(false)
// })
//
// To configure the GetBucketRegion to make a request via the Amazon
// S3 FIPS endpoints directly when a FIPS region name is not available (e.g.
// fips-us-gov-west-1), set the Config.Endpoint on the Session or client the
// utility is called with. The hint region will be ignored if an endpoint URL
// is configured on the session or client.
//
// sess, err := session.NewSession(&aws.Config{
// Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
// })
//
// region, err := s3manager.GetBucketRegion(context.Background(), sess, "bucketname", "")
func GetBucketRegion(ctx aws.Context, c client.ConfigProvider, bucket, regionHint string, opts ...request.Option) (string, error) {
var cfg aws.Config
if len(regionHint) != 0 {
@ -50,12 +75,38 @@ const bucketRegionHeader = "X-Amz-Bucket-Region"
// that it takes a S3 service client instead of a Session. The regionHint is
// derived from the region the S3 service client was created in.
//
// By default the request will be made to the Amazon S3 endpoint using the Path
// style addressing.
//
// s3.us-west-2.amazonaws.com/bucketname
//
// This is not compatible with Amazon S3's FIPS endpoints. To override this
// behavior to use Virtual Host style addressing, provide a functional option
// that will set the Request's Config.S3ForcePathStyle to aws.Bool(false).
//
// region, err := s3manager.GetBucketRegionWithClient(ctx, client, "bucketname", func(r *request.Request) {
// r.S3ForcePathStyle = aws.Bool(false)
// })
//
// To configure the GetBucketRegion to make a request via the Amazon
// S3 FIPS endpoints directly when a FIPS region name is not available (e.g.
// fips-us-gov-west-1), set the Config.Endpoint on the Session or client the
// utility is called with. The hint region will be ignored if an endpoint URL
// is configured on the session or client.
//
// region, err := s3manager.GetBucketRegionWithClient(context.Background(),
// s3.New(sess, &aws.Config{
// Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
// }),
// "bucketname")
//
// See GetBucketRegion for more information.
func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string, opts ...request.Option) (string, error) {
req, _ := svc.HeadBucketRequest(&s3.HeadBucketInput{
Bucket: aws.String(bucket),
})
req.Config.S3ForcePathStyle = aws.Bool(true)
req.Config.Credentials = credentials.AnonymousCredentials
req.SetContext(ctx)
@ -75,6 +126,16 @@ func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string
r.HTTPResponse.Status = "OK"
r.Error = nil
})
// Replace the endpoint validation handler to not require a region if an
// endpoint URL was specified. Since these requests are not authenticated,
// requiring a region is not needed when an endpoint URL is provided.
req.Handlers.Validate.Swap(
corehandlers.ValidateEndpointHandler.Name,
request.NamedHandler{
Name: "validateEndpointWithoutRegion",
Fn: validateEndpointWithoutRegion,
},
)
req.ApplyOptions(opts...)
@ -86,3 +147,13 @@ func GetBucketRegionWithClient(ctx aws.Context, svc s3iface.S3API, bucket string
return bucketRegion, nil
}
func validateEndpointWithoutRegion(r *request.Request) {
// Check if the caller provided an explicit URL instead of one derived by
// the SDK's endpoint resolver. For GetBucketRegion, with an explicit
// endpoint URL, a region is not needed. If no endpoint URL is provided,
// fall back to the SDK's standard endpoint validation handler.
if len(aws.StringValue(r.Config.Endpoint)) == 0 {
corehandlers.ValidateEndpointHandler.Fn(r)
}
}
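
A minimal usage sketch tying the pieces above together, following the doc comment's own FIPS example; the bucket name is a placeholder.

package main

import (
	"context"
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	// Client with an explicit endpoint; the swapped-in validation handler
	// above means no region is required for this anonymous HeadBucket request.
	sess := session.Must(session.NewSession())
	client := s3.New(sess, &aws.Config{
		Endpoint: aws.String("https://s3-fips.us-west-2.amazonaws.com"),
	})

	region, err := s3manager.GetBucketRegionWithClient(context.Background(), client, "example-bucket")
	if err != nil {
		fmt.Println("bucket region lookup failed:", err)
		return
	}
	fmt.Println("bucket region:", region)
}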

View file

@ -60,6 +60,14 @@ func (p *maxSlicePool) Get(ctx aws.Context) (*[]byte, error) {
return nil, errZeroCapacity
}
return bs, nil
case <-ctx.Done():
p.mtx.RUnlock()
return nil, ctx.Err()
default:
// pass
}
select {
case _, ok := <-p.allocations:
p.mtx.RUnlock()
if !ok {

View file

@ -16,20 +16,30 @@ type UploadInput struct {
// The canned ACL to apply to the object. For more information, see Canned ACL
// (https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL).
//
// This action is not supported by Amazon S3 on Outposts.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`
// The readable body payload to send to S3.
Body io.Reader
// Bucket name to which the PUT operation was initiated.
// The bucket name to which the PUT operation was initiated.
//
// When using this API with an access point, you must direct requests to the
// access point hostname. The access point hostname takes the form AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com.
// When using this operation using an access point through the AWS SDKs, you
// When using this operation with an access point through the AWS SDKs, you
// provide the access point ARN in place of the bucket name. For more information
// about access point ARNs, see Using Access Points (https://docs.aws.amazon.com/AmazonS3/latest/dev/using-access-points.html)
// in the Amazon Simple Storage Service Developer Guide.
//
// When using this API with Amazon S3 on Outposts, you must direct requests
// to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
// AccessPointName-AccountId.outpostID.s3-outposts.Region.amazonaws.com. When
// using this operation with S3 on Outposts through the AWS SDKs, you provide
// the Outposts bucket ARN in place of the bucket name. For more information
// about S3 on Outposts ARNs, see Using S3 on Outposts (https://docs.aws.amazon.com/AmazonS3/latest/dev/S3onOutposts.html)
// in the Amazon Simple Storage Service Developer Guide.
//
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@ -63,20 +73,33 @@ type UploadInput struct {
// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.17).
ContentType *string `location:"header" locationName:"Content-Type" type:"string"`
// The account id of the expected bucket owner. If the bucket is owned by a
// different account, the request will fail with an HTTP 403 (Access Denied)
// error.
ExpectedBucketOwner *string `location:"header" locationName:"x-amz-expected-bucket-owner" type:"string"`
// The date and time at which the object is no longer cacheable. For more information,
// see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21 (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21).
Expires *time.Time `location:"header" locationName:"Expires" type:"timestamp"`
// Gives the grantee READ, READ_ACP, and WRITE_ACP permissions on the object.
//
// This action is not supported by Amazon S3 on Outposts.
GrantFullControl *string `location:"header" locationName:"x-amz-grant-full-control" type:"string"`
// Allows grantee to read the object data and its metadata.
//
// This action is not supported by Amazon S3 on Outposts.
GrantRead *string `location:"header" locationName:"x-amz-grant-read" type:"string"`
// Allows grantee to read the object ACL.
//
// This action is not supported by Amazon S3 on Outposts.
GrantReadACP *string `location:"header" locationName:"x-amz-grant-read-acp" type:"string"`
// Allows grantee to write the ACL for the applicable object.
//
// This action is not supported by Amazon S3 on Outposts.
GrantWriteACP *string `location:"header" locationName:"x-amz-grant-write-acp" type:"string"`
// Object key for which the PUT operation was initiated.
@ -111,7 +134,7 @@ type UploadInput struct {
// Specifies the customer-provided encryption key for Amazon S3 to use in encrypting
// data. This value is used to store the object and then it is discarded; Amazon
// S3 does not store the encryption key. The key must be appropriate for use
// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// with the algorithm specified in the x-amz-server-side-encryption-customer-algorithm
// header.
SSECustomerKey *string `marshal-as:"blob" location:"header" locationName:"x-amz-server-side-encryption-customer-key" type:"string" sensitive:"true"`
@ -141,8 +164,12 @@ type UploadInput struct {
// S3 (for example, AES256, aws:kms).
ServerSideEncryption *string `location:"header" locationName:"x-amz-server-side-encryption" type:"string" enum:"ServerSideEncryption"`
// If you don't specify, S3 Standard is the default storage class. Amazon S3
// supports other storage classes.
// By default, Amazon S3 uses the STANDARD Storage Class to store newly created
// objects. The STANDARD storage class provides high durability and high availability.
// Depending on performance needs, you can specify a different Storage Class.
// Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information,
// see Storage Classes (https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
// in the Amazon S3 Service Developer Guide.
StorageClass *string `location:"header" locationName:"x-amz-storage-class" type:"string" enum:"StorageClass"`
// The tag-set for the object. The tag-set must be encoded as URL Query parameters.
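To ground the UploadInput fields documented above, here is a hedged sketch of a plain s3manager upload; the bucket, key, and storage class values are illustrative only.

package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-west-2")}))
	uploader := s3manager.NewUploader(sess)

	out, err := uploader.Upload(&s3manager.UploadInput{
		Bucket:       aws.String("example-bucket"), // or an access point / Outposts ARN
		Key:          aws.String("docs/example.txt"),
		Body:         strings.NewReader("hello from s3manager"),
		ContentType:  aws.String("text/plain"),
		StorageClass: aws.String("STANDARD_IA"), // OUTPOSTS is the only class for S3 on Outposts
	})
	if err != nil {
		fmt.Println("upload failed:", err)
		return
	}
	fmt.Println("uploaded to:", out.Location)
}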

View file

@ -69,7 +69,7 @@ func computeKeyMD5(keyHeader, keyMD5Header, key string, r *http.Request) {
return
}
// In backwards compatiable, the header's value is not base64 encoded,
// For backwards compatibility, the header's value is not base64 encoded,
// and needs to be encoded and updated by the SDK's customizations.
b64Key := base64.StdEncoding.EncodeToString([]byte(key))
r.Header.Set(keyHeader, b64Key)

View file

@ -207,6 +207,10 @@ func (c *STS) AssumeRoleRequest(input *AssumeRoleInput) (req *request.Request, o
// and Deactivating AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// * ErrCodeExpiredTokenException "ExpiredTokenException"
// The web identity token that was passed is expired or is not valid. Get a
// new identity token from the identity provider and then retry the request.
//
// See also, https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15/AssumeRole
func (c *STS) AssumeRole(input *AssumeRoleInput) (*AssumeRoleOutput, error) {
req, out := c.AssumeRoleRequest(input)
@ -626,7 +630,7 @@ func (c *STS) AssumeRoleWithWebIdentityRequest(input *AssumeRoleWithWebIdentityI
// * Using Web Identity Federation API Operations for Mobile Apps (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_oidc_manual.html)
// and Federation Through a Web-based Identity Provider (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_request.html#api_assumerolewithwebidentity).
//
// * Web Identity Federation Playground (https://web-identity-federation-playground.s3.amazonaws.com/index.html).
// * Web Identity Federation Playground (https://aws.amazon.com/blogs/aws/the-aws-web-identity-federation-playground/).
// Walk through the process of authenticating through Login with Amazon,
// Facebook, or Google, getting temporary security credentials, and then
// using those credentials to make a request to AWS.
@ -1788,7 +1792,7 @@ type AssumeRoleWithSAMLInput struct {
// in the IAM User Guide.
//
// SAMLAssertion is a required field
SAMLAssertion *string `min:"4" type:"string" required:"true" sensitive:"true"`
SAMLAssertion *string `min:"4" type:"string" required:"true"`
}
// String returns the string representation
@ -2100,7 +2104,7 @@ type AssumeRoleWithWebIdentityInput struct {
// the application makes an AssumeRoleWithWebIdentity call.
//
// WebIdentityToken is a required field
WebIdentityToken *string `min:"4" type:"string" required:"true" sensitive:"true"`
WebIdentityToken *string `min:"4" type:"string" required:"true"`
}
// String returns the string representation
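
Since the AssumeRole error documentation above is abstract, a hedged sketch of a plain AssumeRole call follows; the role ARN and session name are placeholders, and the error handling shows how the documented ExpiredTokenException code might be checked.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/sts"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := sts.New(sess)

	out, err := svc.AssumeRole(&sts.AssumeRoleInput{
		RoleArn:         aws.String("arn:aws:iam::123456789012:role/example-role"), // placeholder
		RoleSessionName: aws.String("example-session"),
	})
	if err != nil {
		// ErrCodeExpiredTokenException is one of the error codes documented above.
		if aerr, ok := err.(awserr.Error); ok && aerr.Code() == sts.ErrCodeExpiredTokenException {
			fmt.Println("token expired; fetch a new one and retry")
			return
		}
		fmt.Println("assume role failed:", err)
		return
	}
	fmt.Println("temporary access key:", aws.StringValue(out.Credentials.AccessKeyId))
}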

View file

@ -3,87 +3,11 @@
// Package sts provides the client and types for making API
// requests to AWS Security Token Service.
//
// The AWS Security Token Service (STS) is a web service that enables you to
// request temporary, limited-privilege credentials for AWS Identity and Access
// Management (IAM) users or for users that you authenticate (federated users).
// This guide provides descriptions of the STS API. For more detailed information
// about using this service, go to Temporary Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// For information about setting up signatures and authorization through the
// API, go to Signing AWS API Requests (https://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
// in the AWS General Reference. For general information about the Query API,
// go to Making Query Requests (https://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
// in Using IAM. For information about using security tokens with other AWS
// products, go to AWS Services That Work with IAM (https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_aws-services-that-work-with-iam.html)
// in the IAM User Guide.
//
// If you're new to AWS and need additional technical information about a specific
// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
// (http://aws.amazon.com/documentation/).
//
// Endpoints
//
// By default, AWS Security Token Service (STS) is available as a global service,
// and all AWS STS requests go to a single endpoint at https://sts.amazonaws.com.
// Global requests map to the US East (N. Virginia) region. AWS recommends using
// Regional AWS STS endpoints instead of the global endpoint to reduce latency,
// build in redundancy, and increase session token validity. For more information,
// see Managing AWS STS in an AWS Region (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html)
// in the IAM User Guide.
//
// Most AWS Regions are enabled for operations in all AWS services by default.
// Those Regions are automatically activated for use with AWS STS. Some Regions,
// such as Asia Pacific (Hong Kong), must be manually enabled. To learn more
// about enabling and disabling AWS Regions, see Managing AWS Regions (https://docs.aws.amazon.com/general/latest/gr/rande-manage.html)
// in the AWS General Reference. When you enable these AWS Regions, they are
// automatically activated for use with AWS STS. You cannot activate the STS
// endpoint for a Region that is disabled. Tokens that are valid in all AWS
// Regions are longer than tokens that are valid in Regions that are enabled
// by default. Changing this setting might affect existing systems where you
// temporarily store tokens. For more information, see Managing Global Endpoint
// Session Tokens (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#sts-regions-manage-tokens)
// in the IAM User Guide.
//
// After you activate a Region for use with AWS STS, you can direct AWS STS
// API calls to that Region. AWS STS recommends that you provide both the Region
// and endpoint when you make calls to a Regional endpoint. You can provide
// the Region alone for manually enabled Regions, such as Asia Pacific (Hong
// Kong). In this case, the calls are directed to the STS Regional endpoint.
// However, if you provide the Region alone for Regions enabled by default,
// the calls are directed to the global endpoint of https://sts.amazonaws.com.
//
// To view the list of AWS STS endpoints and whether they are active by default,
// see Writing Code to Use AWS STS Regions (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp_enable-regions.html#id_credentials_temp_enable-regions_writing_code)
// in the IAM User Guide.
//
// Recording API requests
//
// STS supports AWS CloudTrail, which is a service that records AWS calls for
// your AWS account and delivers log files to an Amazon S3 bucket. By using
// information collected by CloudTrail, you can determine what requests were
// successfully made to STS, who made the request, when it was made, and so
// on.
//
// If you activate AWS STS endpoints in Regions other than the default global
// endpoint, then you must also turn on CloudTrail logging in those Regions.
// This is necessary to record any AWS STS API calls that are made in those
// Regions. For more information, see Turning On CloudTrail in Additional Regions
// (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/aggregating_logs_regions_turn_on_ct.html)
// in the AWS CloudTrail User Guide.
//
// AWS Security Token Service (STS) is a global service with a single endpoint
// at https://sts.amazonaws.com. Calls to this endpoint are logged as calls
// to a global service. However, because this endpoint is physically located
// in the US East (N. Virginia) Region, your logs list us-east-1 as the event
// Region. CloudTrail does not write these logs to the US East (Ohio) Region
// unless you choose to include global service logs in that Region. CloudTrail
// writes calls to all Regional endpoints to their respective Regions. For example,
// calls to sts.us-east-2.amazonaws.com are published to the US East (Ohio)
// Region and calls to sts.eu-central-1.amazonaws.com are published to the EU
// (Frankfurt) Region.
//
// To learn more about CloudTrail, including how to turn it on and find your
// log files, see the AWS CloudTrail User Guide (https://docs.aws.amazon.com/awscloudtrail/latest/userguide/what_is_cloud_trail_top_level.html).
// AWS Security Token Service (STS) enables you to request temporary, limited-privilege
// credentials for AWS Identity and Access Management (IAM) users or for users
// that you authenticate (federated users). This guide provides descriptions
// of the STS API. For more information about using this service, see Temporary
// Security Credentials (https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// See https://docs.aws.amazon.com/goto/WebAPI/sts-2011-06-15 for more information on this service.
//

View file

@ -12,6 +12,17 @@ go:
- 1.11.x
- 1.12.x
- 1.13.x
- 1.14.x
- 1.15.x
- tip
install: go get -v -t ./...
script: make test
allow_failures:
- go: tip
script: make build
matrix:
include:
- language: go
go: 1.15.x
script: make test

View file

@ -1,6 +1,8 @@
CMD = jpgo
SRC_PKGS=./ ./cmd/... ./fuzz/...
help:
@echo "Please use \`make <target>' where <target> is one of"
@echo " test to run all the tests"
@ -9,21 +11,22 @@ help:
generate:
go generate ./...
go generate ${SRC_PKGS}
build:
rm -f $(CMD)
go build ./...
go build ${SRC_PKGS}
rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
mv cmd/$(CMD)/$(CMD) .
test:
go test -v ./...
test: test-internal-testify
echo "making tests ${SRC_PKGS}"
go test -v ${SRC_PKGS}
check:
go vet ./...
@echo "golint ./..."
@lint=`golint ./...`; \
go vet ${SRC_PKGS}
@echo "golint ${SRC_PKGS}"
@lint=`golint ${SRC_PKGS}`; \
lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
echo "$$lint"; \
if [ "$$lint" != "" ]; then exit 1; fi
@ -42,3 +45,7 @@ bench:
pprof-cpu:
go tool pprof ./go-jmespath.test ./cpu.out
test-internal-testify:
cd internal/testify && go test ./...

View file

@ -2,4 +2,4 @@ module github.com/jmespath/go-jmespath
go 1.14
require github.com/stretchr/testify v1.5.1
require github.com/jmespath/go-jmespath/internal/testify v1.5.1

View file

@ -1,11 +1,11 @@
github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

12
vendor/modules.txt vendored
View file

@ -55,7 +55,7 @@ github.com/alecthomas/units
# github.com/armon/go-radix v1.0.0
## explicit
github.com/armon/go-radix
# github.com/aws/aws-sdk-go v1.32.7
# github.com/aws/aws-sdk-go v1.35.19
## explicit
github.com/aws/aws-sdk-go/aws
github.com/aws/aws-sdk-go/aws/arn
@ -79,7 +79,9 @@ github.com/aws/aws-sdk-go/aws/session
github.com/aws/aws-sdk-go/aws/signer/v4
github.com/aws/aws-sdk-go/internal/context
github.com/aws/aws-sdk-go/internal/ini
github.com/aws/aws-sdk-go/internal/s3err
github.com/aws/aws-sdk-go/internal/s3shared
github.com/aws/aws-sdk-go/internal/s3shared/arn
github.com/aws/aws-sdk-go/internal/s3shared/s3err
github.com/aws/aws-sdk-go/internal/sdkio
github.com/aws/aws-sdk-go/internal/sdkmath
github.com/aws/aws-sdk-go/internal/sdkrand
@ -98,11 +100,11 @@ github.com/aws/aws-sdk-go/private/protocol/query/queryutil
github.com/aws/aws-sdk-go/private/protocol/rest
github.com/aws/aws-sdk-go/private/protocol/restxml
github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil
github.com/aws/aws-sdk-go/service/applicationautoscaling
github.com/aws/aws-sdk-go/service/dynamodb
github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute
github.com/aws/aws-sdk-go/service/dynamodbstreams
github.com/aws/aws-sdk-go/service/s3
github.com/aws/aws-sdk-go/service/s3/internal/arn
github.com/aws/aws-sdk-go/service/s3/s3iface
github.com/aws/aws-sdk-go/service/s3/s3manager
github.com/aws/aws-sdk-go/service/sts
@ -165,6 +167,8 @@ github.com/gizak/termui
github.com/gizak/termui/widgets
# github.com/go-logr/logr v0.2.0
github.com/go-logr/logr
# github.com/go-sql-driver/mysql v1.5.0
## explicit
# github.com/gogo/protobuf v1.3.1
## explicit
github.com/gogo/protobuf/gogoproto
@ -263,7 +267,7 @@ github.com/imdario/mergo
## explicit
github.com/iovisor/gobpf/bcc
github.com/iovisor/gobpf/pkg/cpuonline
# github.com/jmespath/go-jmespath v0.3.0
# github.com/jmespath/go-jmespath v0.4.0
github.com/jmespath/go-jmespath
# github.com/johannesboyne/gofakes3 v0.0.0-20191228161223-9aee1c78a252
## explicit