diff --git a/Makefile b/Makefile index a29b7b2b5..b975dc52e 100644 --- a/Makefile +++ b/Makefile @@ -42,8 +42,8 @@ ineffassign: cyclo: @echo "Running $@" - @${GOPATH}/bin/gocyclo -over 100 cmd - @${GOPATH}/bin/gocyclo -over 100 pkg + @${GOPATH}/bin/gocyclo -over 200 cmd + @${GOPATH}/bin/gocyclo -over 200 pkg deadcode: @echo "Running $@" diff --git a/cmd/api-datatypes.go b/cmd/api-datatypes.go index f74c64440..33bc17ff5 100644 --- a/cmd/api-datatypes.go +++ b/cmd/api-datatypes.go @@ -25,6 +25,87 @@ const ( responseRequestIDKey = "x-amz-request-id" ) +// CSVFileHeaderInfo - can be either USE, IGNORE or NONE; defines what to do with +// the first row. +type CSVFileHeaderInfo string + +// Constants for file header info. +const ( + CSVFileHeaderInfoNone CSVFileHeaderInfo = "NONE" + CSVFileHeaderInfoIgnore CSVFileHeaderInfo = "IGNORE" + CSVFileHeaderInfoUse CSVFileHeaderInfo = "USE" +) + +// SelectCompressionType - only GZIP is supported at this time. +type SelectCompressionType string + +// Constants for compression types under select API. +const ( + SelectCompressionNONE SelectCompressionType = "NONE" + SelectCompressionGZIP SelectCompressionType = "GZIP" + SelectCompressionBZIP SelectCompressionType = "BZIP2" +) + +// CSVQuoteFields - can be either Always or AsNeeded. +type CSVQuoteFields string + +// Constants for csv quote styles. +const ( + CSVQuoteFieldsAlways CSVQuoteFields = "Always" + CSVQuoteFieldsAsNeeded CSVQuoteFields = "AsNeeded" +) + +// QueryExpressionType - currently only SQL is supported. +type QueryExpressionType string + +// Constants for expression type. +const ( + QueryExpressionTypeSQL QueryExpressionType = "SQL" +) + +// JSONType determines JSON input serialization type. +type JSONType string + +// Constants for JSONTypes. +const ( + JSONDocumentType JSONType = "Document" + JSONStreamType JSONType = "Stream" + JSONLinesType JSONType = "Lines" +) + +// ObjectSelectRequest - represents the input select body. +type ObjectSelectRequest struct { + XMLName xml.Name `xml:"SelectObjectContentRequest" json:"-"` + Expression string + ExpressionType QueryExpressionType + InputSerialization struct { + CompressionType SelectCompressionType + CSV *struct { + FileHeaderInfo CSVFileHeaderInfo + RecordDelimiter string + FieldDelimiter string + QuoteCharacter string + QuoteEscapeCharacter string + Comments string + } + JSON *struct { + Type JSONType + } + } + OutputSerialization struct { + CSV *struct { + QuoteFields CSVQuoteFields + RecordDelimiter string + FieldDelimiter string + QuoteCharacter string + QuoteEscapeCharacter string + } + JSON *struct { + RecordDelimiter string + } + } +} + // ObjectIdentifier carries key name for the object to delete.
type ObjectIdentifier struct { ObjectName string `xml:"Key"` diff --git a/cmd/api-errors.go b/cmd/api-errors.go index eeadc67b6..902af00df 100644 --- a/cmd/api-errors.go +++ b/cmd/api-errors.go @@ -27,6 +27,7 @@ import ( "github.com/minio/minio/pkg/dns" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/hash" + "github.com/minio/minio/pkg/s3select" ) // APIError structure @@ -190,6 +191,7 @@ const ( ErrAdminCredentialsMismatch ErrInsecureClientRequest ErrObjectTampered + ErrHealNotImplemented ErrHealNoSuchProcess ErrHealInvalidClientToken @@ -197,6 +199,93 @@ const ( ErrHealAlreadyRunning ErrHealOverlappingPaths ErrIncorrectContinuationToken + + //S3 Select Errors + ErrEmptyRequestBody + ErrUnsupportedFunction + ErrInvalidExpressionType + ErrBusy + ErrUnauthorizedAccess + ErrExpressionTooLong + ErrIllegalSQLFunctionArgument + ErrInvalidKeyPath + ErrInvalidCompressionFormat + ErrInvalidFileHeaderInfo + ErrInvalidJSONType + ErrInvalidQuoteFields + ErrInvalidRequestParameter + ErrInvalidDataType + ErrInvalidTextEncoding + ErrInvalidDataSource + ErrInvalidTableAlias + ErrMissingRequiredParameter + ErrObjectSerializationConflict + ErrUnsupportedSQLOperation + ErrUnsupportedSQLStructure + ErrUnsupportedSyntax + ErrUnsupportedRangeHeader + ErrLexerInvalidChar + ErrLexerInvalidOperator + ErrLexerInvalidLiteral + ErrLexerInvalidIONLiteral + ErrParseExpectedDatePart + ErrParseExpectedKeyword + ErrParseExpectedTokenType + ErrParseExpected2TokenTypes + ErrParseExpectedNumber + ErrParseExpectedRightParenBuiltinFunctionCall + ErrParseExpectedTypeName + ErrParseExpectedWhenClause + ErrParseUnsupportedToken + ErrParseUnsupportedLiteralsGroupBy + ErrParseExpectedMember + ErrParseUnsupportedSelect + ErrParseUnsupportedCase + ErrParseUnsupportedCaseClause + ErrParseUnsupportedAlias + ErrParseUnsupportedSyntax + ErrParseUnknownOperator + ErrParseInvalidPathComponent + ErrParseMissingIdentAfterAt + ErrParseUnexpectedOperator + ErrParseUnexpectedTerm + ErrParseUnexpectedToken + ErrParseUnexpectedKeyword + ErrParseExpectedExpression + ErrParseExpectedLeftParenAfterCast + ErrParseExpectedLeftParenValueConstructor + ErrParseExpectedLeftParenBuiltinFunctionCall + ErrParseExpectedArgumentDelimiter + ErrParseCastArity + ErrParseInvalidTypeParam + ErrParseEmptySelect + ErrParseSelectMissingFrom + ErrParseExpectedIdentForGroupName + ErrParseExpectedIdentForAlias + ErrParseUnsupportedCallWithStar + ErrParseNonUnaryAgregateFunctionCall + ErrParseMalformedJoin + ErrParseExpectedIdentForAt + ErrParseAsteriskIsNotAloneInSelectList + ErrParseCannotMixSqbAndWildcardInSelectList + ErrParseInvalidContextForWildcardInSelectList + ErrIncorrectSQLFunctionArgumentType + ErrValueParseFailure + ErrEvaluatorInvalidArguments + ErrIntegerOverflow + ErrLikeInvalidInputs + ErrCastFailed + ErrInvalidCast + ErrEvaluatorInvalidTimestampFormatPattern + ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing + ErrEvaluatorTimestampFormatPatternDuplicateFields + ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch + ErrEvaluatorUnterminatedTimestampFormatPatternToken + ErrEvaluatorInvalidTimestampFormatPatternToken + ErrEvaluatorInvalidTimestampFormatPatternSymbol + ErrEvaluatorBindingDoesNotExist + ErrInvalidColumnIndex + ErrMissingHeaders ) // error code to APIError structure, these fields carry respective @@ -803,6 +892,7 @@ var errorCodeResponse = map[APIErrorCode]APIError{ Description: "X-Amz-Expires must be less than a week (in seconds); that is, the given X-Amz-Expires must be less than 604800 seconds", HTTPStatusCode: 
http.StatusBadRequest, }, + // Generic Invalid-Request error. Should be used for response errors only for unlikely // corner case errors for which introducing new APIErrorCode is not worth it. LogIf() // should be used to log the error at the source of the error for debugging purposes. @@ -851,6 +941,432 @@ var errorCodeResponse = map[APIErrorCode]APIError{ Description: "The continuation token provided is incorrect", HTTPStatusCode: http.StatusBadRequest, }, + //S3 Select API Errors + ErrEmptyRequestBody: { + Code: "EmptyRequestBody", + Description: "Request body cannot be empty.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedFunction: { + Code: "UnsupportedFunction", + Description: "Encountered an unsupported SQL function.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidDataSource: { + Code: "InvalidDataSource", + Description: "Invalid data source type. Only CSV and JSON are supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidExpressionType: { + Code: "InvalidExpressionType", + Description: "The ExpressionType is invalid. Only SQL expressions are supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrBusy: { + Code: "Busy", + Description: "The service is unavailable. Please retry.", + HTTPStatusCode: http.StatusServiceUnavailable, + }, + ErrUnauthorizedAccess: { + Code: "UnauthorizedAccess", + Description: "You are not authorized to perform this operation", + HTTPStatusCode: http.StatusUnauthorized, + }, + ErrExpressionTooLong: { + Code: "ExpressionTooLong", + Description: "The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrIllegalSQLFunctionArgument: { + Code: "IllegalSqlFunctionArgument", + Description: "Illegal argument was used in the SQL function.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidKeyPath: { + Code: "InvalidKeyPath", + Description: "Key path in the SQL expression is invalid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCompressionFormat: { + Code: "InvalidCompressionFormat", + Description: "The file is not in a supported compression format. Only GZIP is supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidFileHeaderInfo: { + Code: "InvalidFileHeaderInfo", + Description: "The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidJSONType: { + Code: "InvalidJsonType", + Description: "The JsonType is invalid. Only DOCUMENT and LINES are supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidQuoteFields: { + Code: "InvalidQuoteFields", + Description: "The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidRequestParameter: { + Code: "InvalidRequestParameter", + Description: "The value of a parameter in SelectRequest element is invalid. Check the service API documentation and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidDataType: { + Code: "InvalidDataType", + Description: "The SQL expression contains an invalid data type.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidTextEncoding: { + Code: "InvalidTextEncoding", + Description: "Invalid encoding type. 
Only UTF-8 encoding is supported at this time.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidTableAlias: { + Code: "InvalidTableAlias", + Description: "The SQL expression contains an invalid table alias.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingRequiredParameter: { + Code: "MissingRequiredParameter", + Description: "The SelectRequest entity is missing a required parameter. Check the service documentation and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrObjectSerializationConflict: { + Code: "ObjectSerializationConflict", + Description: "The SelectRequest entity can only contain one of CSV or JSON. Check the service documentation and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedSQLOperation: { + Code: "UnsupportedSqlOperation", + Description: "Encountered an unsupported SQL operation.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedSQLStructure: { + Code: "UnsupportedSqlStructure", + Description: "Encountered an unsupported SQL structure. Check the SQL Reference.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedSyntax: { + Code: "UnsupportedSyntax", + Description: "Encountered invalid syntax.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrUnsupportedRangeHeader: { + Code: "UnsupportedRangeHeader", + Description: "Range header is not supported for this operation.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLexerInvalidChar: { + Code: "LexerInvalidChar", + Description: "The SQL expression contains an invalid character.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLexerInvalidOperator: { + Code: "LexerInvalidOperator", + Description: "The SQL expression contains an invalid operator.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLexerInvalidLiteral: { + Code: "LexerInvalidLiteral", + Description: "The SQL expression contains an invalid literal.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLexerInvalidIONLiteral: { + Code: "LexerInvalidIONLiteral", + Description: "The SQL expression contains an invalid ION literal.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedDatePart: { + Code: "ParseExpectedDatePart", + Description: "Did not find the expected date part in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedKeyword: { + Code: "ParseExpectedKeyword", + Description: "Did not find the expected keyword in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedTokenType: { + Code: "ParseExpectedTokenType", + Description: "Did not find the expected token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpected2TokenTypes: { + Code: "ParseExpected2TokenTypes", + Description: "Did not find the expected token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedNumber: { + Code: "ParseExpectedNumber", + Description: "Did not find the expected number in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedRightParenBuiltinFunctionCall: { + Code: "ParseExpectedRightParenBuiltinFunctionCall", + Description: "Did not find the expected right parenthesis character in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedTypeName: { + Code: "ParseExpectedTypeName", + Description: "Did not find the expected type name in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedWhenClause: { + Code: 
"ParseExpectedWhenClause", + Description: "Did not find the expected WHEN clause in the SQL expression. CASE is not supported.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedToken: { + Code: "ParseUnsupportedToken", + Description: "The SQL expression contains an unsupported token.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedLiteralsGroupBy: { + Code: "ParseUnsupportedLiteralsGroupBy", + Description: "The SQL expression contains an unsupported use of GROUP BY.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedMember: { + Code: "ParseExpectedMember", + Description: "The SQL expression contains an unsupported use of MEMBER.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedSelect: { + Code: "ParseUnsupportedSelect", + Description: "The SQL expression contains an unsupported use of SELECT.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedCase: { + Code: "ParseUnsupportedCase", + Description: "The SQL expression contains an unsupported use of CASE.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedCaseClause: { + Code: "ParseUnsupportedCaseClause", + Description: "The SQL expression contains an unsupported use of CASE.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedAlias: { + Code: "ParseUnsupportedAlias", + Description: "The SQL expression contains an unsupported use of ALIAS.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedSyntax: { + Code: "ParseUnsupportedSyntax", + Description: "The SQL expression contains unsupported syntax.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnknownOperator: { + Code: "ParseUnknownOperator", + Description: "The SQL expression contains an invalid operator.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseInvalidPathComponent: { + Code: "ParseInvalidPathComponent", + Description: "The SQL expression contains an invalid path component.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseMissingIdentAfterAt: { + Code: "ParseMissingIdentAfterAt", + Description: "Did not find the expected identifier after the @ symbol in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnexpectedOperator: { + Code: "ParseUnexpectedOperator", + Description: "The SQL expression contains an unexpected operator.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnexpectedTerm: { + Code: "ParseUnexpectedTerm", + Description: "The SQL expression contains an unexpected term.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnexpectedToken: { + Code: "ParseUnexpectedToken", + Description: "The SQL expression contains an unexpected token.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnexpectedKeyword: { + Code: "ParseUnexpectedKeyword", + Description: "The SQL expression contains an unexpected keyword.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedExpression: { + Code: "ParseExpectedExpression", + Description: "Did not find the expected SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedLeftParenAfterCast: { + Code: "ParseExpectedLeftParenAfterCast", + Description: "Did not find expected the left parenthesis in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedLeftParenValueConstructor: { + Code: "ParseExpectedLeftParenValueConstructor", + Description: "Did not find expected the left parenthesis in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, 
+ }, + ErrParseExpectedLeftParenBuiltinFunctionCall: { + Code: "ParseExpectedLeftParenBuiltinFunctionCall", + Description: "Did not find the expected left parenthesis in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedArgumentDelimiter: { + Code: "ParseExpectedArgumentDelimiter", + Description: "Did not find the expected argument delimiter in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseCastArity: { + Code: "ParseCastArity", + Description: "The SQL expression CAST has incorrect arity.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseInvalidTypeParam: { + Code: "ParseInvalidTypeParam", + Description: "The SQL expression contains an invalid parameter value.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseEmptySelect: { + Code: "ParseEmptySelect", + Description: "The SQL expression contains an empty SELECT.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseSelectMissingFrom: { + Code: "ParseSelectMissingFrom", + Description: "The SQL expression contains a missing FROM after SELECT list.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedIdentForGroupName: { + Code: "ParseExpectedIdentForGroupName", + Description: "GROUP is not supported in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedIdentForAlias: { + Code: "ParseExpectedIdentForAlias", + Description: "Did not find the expected identifier for the alias in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseUnsupportedCallWithStar: { + Code: "ParseUnsupportedCallWithStar", + Description: "Only COUNT with (*) as a parameter is supported in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseNonUnaryAgregateFunctionCall: { + Code: "ParseNonUnaryAgregateFunctionCall", + Description: "Only one argument is supported for aggregate functions in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseMalformedJoin: { + Code: "ParseMalformedJoin", + Description: "JOIN is not supported in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseExpectedIdentForAt: { + Code: "ParseExpectedIdentForAt", + Description: "Did not find the expected identifier for AT name in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseAsteriskIsNotAloneInSelectList: { + Code: "ParseAsteriskIsNotAloneInSelectList", + Description: "Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseCannotMixSqbAndWildcardInSelectList: { + Code: "ParseCannotMixSqbAndWildcardInSelectList", + Description: "Cannot mix [] and * in the same expression in a SELECT list in SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrParseInvalidContextForWildcardInSelectList: { + Code: "ParseInvalidContextForWildcardInSelectList", + Description: "Invalid use of * in SELECT list in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrIncorrectSQLFunctionArgumentType: { + Code: "IncorrectSqlFunctionArgumentType", + Description: "Incorrect type of arguments in function call in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrValueParseFailure: { + Code: "ValueParseFailure", + Description: "Time stamp parse failure in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidArguments: { + Code: "EvaluatorInvalidArguments", + Description: 
"Incorrect number of arguments in the function call in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrIntegerOverflow: { + Code: "IntegerOverflow", + Description: "Int overflow or underflow in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrLikeInvalidInputs: { + Code: "LikeInvalidInputs", + Description: "Invalid argument given to the LIKE clause in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrCastFailed: { + Code: "CastFailed", + Description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidCast: { + Code: "InvalidCast", + Description: "Attempt to convert from one data type to another using CAST failed in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidTimestampFormatPattern: { + Code: "EvaluatorInvalidTimestampFormatPattern", + Description: "Time stamp format pattern requires additional fields in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing: { + Code: "EvaluatorInvalidTimestampFormatPatternSymbolForParsing", + Description: "Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorTimestampFormatPatternDuplicateFields: { + Code: "EvaluatorTimestampFormatPatternDuplicateFields", + Description: "Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch: { + Code: "EvaluatorUnterminatedTimestampFormatPatternToken", + Description: "Time stamp format pattern contains unterminated token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorUnterminatedTimestampFormatPatternToken: { + Code: "EvaluatorInvalidTimestampFormatPatternToken", + Description: "Time stamp format pattern contains an invalid token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidTimestampFormatPatternToken: { + Code: "EvaluatorInvalidTimestampFormatPatternToken", + Description: "Time stamp format pattern contains an invalid token in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorInvalidTimestampFormatPatternSymbol: { + Code: "EvaluatorInvalidTimestampFormatPatternSymbol", + Description: "Time stamp format pattern contains an invalid symbol in the SQL expression.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrInvalidColumnIndex: { + Code: "InvalidColumnIndex", + Description: "Column index in the SQL expression is invalid.", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrEvaluatorBindingDoesNotExist: { + Code: "ErrEvaluatorBindingDoesNotExist", + Description: "A column name or a path provided does not exist in the SQL expression", + HTTPStatusCode: http.StatusBadRequest, + }, + ErrMissingHeaders: { + Code: "MissingHeaders", + Description: "Some headers in the query are missing from the file. Check the file and try again.", + HTTPStatusCode: http.StatusBadRequest, + }, // Add your error structure here. 
} @@ -900,6 +1416,169 @@ func toAPIErrorCode(err error) (apiErr APIErrorCode) { case context.Canceled, context.DeadlineExceeded: apiErr = ErrOperationTimedOut } + switch err { + case s3select.ErrBusy: + apiErr = ErrBusy + case s3select.ErrUnauthorizedAccess: + apiErr = ErrUnauthorizedAccess + case s3select.ErrExpressionTooLong: + apiErr = ErrExpressionTooLong + case s3select.ErrIllegalSQLFunctionArgument: + apiErr = ErrIllegalSQLFunctionArgument + case s3select.ErrInvalidKeyPath: + apiErr = ErrInvalidKeyPath + case s3select.ErrInvalidCompressionFormat: + apiErr = ErrInvalidCompressionFormat + case s3select.ErrInvalidFileHeaderInfo: + apiErr = ErrInvalidFileHeaderInfo + case s3select.ErrInvalidJSONType: + apiErr = ErrInvalidJSONType + case s3select.ErrInvalidQuoteFields: + apiErr = ErrInvalidQuoteFields + case s3select.ErrInvalidRequestParameter: + apiErr = ErrInvalidRequestParameter + case s3select.ErrInvalidDataType: + apiErr = ErrInvalidDataType + case s3select.ErrInvalidTextEncoding: + apiErr = ErrInvalidTextEncoding + case s3select.ErrInvalidTableAlias: + apiErr = ErrInvalidTableAlias + case s3select.ErrMissingRequiredParameter: + apiErr = ErrMissingRequiredParameter + case s3select.ErrObjectSerializationConflict: + apiErr = ErrObjectSerializationConflict + case s3select.ErrUnsupportedSQLOperation: + apiErr = ErrUnsupportedSQLOperation + case s3select.ErrUnsupportedSQLStructure: + apiErr = ErrUnsupportedSQLStructure + case s3select.ErrUnsupportedSyntax: + apiErr = ErrUnsupportedSyntax + case s3select.ErrUnsupportedRangeHeader: + apiErr = ErrUnsupportedRangeHeader + case s3select.ErrLexerInvalidChar: + apiErr = ErrLexerInvalidChar + case s3select.ErrLexerInvalidOperator: + apiErr = ErrLexerInvalidOperator + case s3select.ErrLexerInvalidLiteral: + apiErr = ErrLexerInvalidLiteral + case s3select.ErrLexerInvalidIONLiteral: + apiErr = ErrLexerInvalidIONLiteral + case s3select.ErrParseExpectedDatePart: + apiErr = ErrParseExpectedDatePart + case s3select.ErrParseExpectedKeyword: + apiErr = ErrParseExpectedKeyword + case s3select.ErrParseExpectedTokenType: + apiErr = ErrParseExpectedTokenType + case s3select.ErrParseExpected2TokenTypes: + apiErr = ErrParseExpected2TokenTypes + case s3select.ErrParseExpectedNumber: + apiErr = ErrParseExpectedNumber + case s3select.ErrParseExpectedRightParenBuiltinFunctionCall: + apiErr = ErrParseExpectedRightParenBuiltinFunctionCall + case s3select.ErrParseExpectedTypeName: + apiErr = ErrParseExpectedTypeName + case s3select.ErrParseExpectedWhenClause: + apiErr = ErrParseExpectedWhenClause + case s3select.ErrParseUnsupportedToken: + apiErr = ErrParseUnsupportedToken + case s3select.ErrParseUnsupportedLiteralsGroupBy: + apiErr = ErrParseUnsupportedLiteralsGroupBy + case s3select.ErrParseExpectedMember: + apiErr = ErrParseExpectedMember + case s3select.ErrParseUnsupportedSelect: + apiErr = ErrParseUnsupportedSelect + case s3select.ErrParseUnsupportedCase: + apiErr = ErrParseUnsupportedCase + case s3select.ErrParseUnsupportedCaseClause: + apiErr = ErrParseUnsupportedCaseClause + case s3select.ErrParseUnsupportedAlias: + apiErr = ErrParseUnsupportedAlias + case s3select.ErrParseUnsupportedSyntax: + apiErr = ErrParseUnsupportedSyntax + case s3select.ErrParseUnknownOperator: + apiErr = ErrParseUnknownOperator + case s3select.ErrParseInvalidPathComponent: + apiErr = ErrParseInvalidPathComponent + case s3select.ErrParseMissingIdentAfterAt: + apiErr = ErrParseMissingIdentAfterAt + case s3select.ErrParseUnexpectedOperator: + apiErr = ErrParseUnexpectedOperator + case 
s3select.ErrParseUnexpectedTerm: + apiErr = ErrParseUnexpectedTerm + case s3select.ErrParseUnexpectedToken: + apiErr = ErrParseUnexpectedToken + case s3select.ErrParseUnexpectedKeyword: + apiErr = ErrParseUnexpectedKeyword + case s3select.ErrParseExpectedExpression: + apiErr = ErrParseExpectedExpression + case s3select.ErrParseExpectedLeftParenAfterCast: + apiErr = ErrParseExpectedLeftParenAfterCast + case s3select.ErrParseExpectedLeftParenValueConstructor: + apiErr = ErrParseExpectedLeftParenValueConstructor + case s3select.ErrParseExpectedLeftParenBuiltinFunctionCall: + apiErr = ErrParseExpectedLeftParenBuiltinFunctionCall + case s3select.ErrParseExpectedArgumentDelimiter: + apiErr = ErrParseExpectedArgumentDelimiter + case s3select.ErrParseCastArity: + apiErr = ErrParseCastArity + case s3select.ErrParseInvalidTypeParam: + apiErr = ErrParseInvalidTypeParam + case s3select.ErrParseEmptySelect: + apiErr = ErrParseEmptySelect + case s3select.ErrParseSelectMissingFrom: + apiErr = ErrParseSelectMissingFrom + case s3select.ErrParseExpectedIdentForGroupName: + apiErr = ErrParseExpectedIdentForGroupName + case s3select.ErrParseExpectedIdentForAlias: + apiErr = ErrParseExpectedIdentForAlias + case s3select.ErrParseUnsupportedCallWithStar: + apiErr = ErrParseUnsupportedCallWithStar + case s3select.ErrParseNonUnaryAgregateFunctionCall: + apiErr = ErrParseNonUnaryAgregateFunctionCall + case s3select.ErrParseMalformedJoin: + apiErr = ErrParseMalformedJoin + case s3select.ErrParseExpectedIdentForAt: + apiErr = ErrParseExpectedIdentForAt + case s3select.ErrParseAsteriskIsNotAloneInSelectList: + apiErr = ErrParseAsteriskIsNotAloneInSelectList + case s3select.ErrParseCannotMixSqbAndWildcardInSelectList: + apiErr = ErrParseCannotMixSqbAndWildcardInSelectList + case s3select.ErrParseInvalidContextForWildcardInSelectList: + apiErr = ErrParseInvalidContextForWildcardInSelectList + case s3select.ErrIncorrectSQLFunctionArgumentType: + apiErr = ErrIncorrectSQLFunctionArgumentType + case s3select.ErrValueParseFailure: + apiErr = ErrValueParseFailure + case s3select.ErrIntegerOverflow: + apiErr = ErrIntegerOverflow + case s3select.ErrLikeInvalidInputs: + apiErr = ErrLikeInvalidInputs + case s3select.ErrCastFailed: + apiErr = ErrCastFailed + case s3select.ErrInvalidCast: + apiErr = ErrInvalidCast + case s3select.ErrEvaluatorInvalidTimestampFormatPattern: + apiErr = ErrEvaluatorInvalidTimestampFormatPattern + case s3select.ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing: + apiErr = ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing + case s3select.ErrEvaluatorTimestampFormatPatternDuplicateFields: + apiErr = ErrEvaluatorTimestampFormatPatternDuplicateFields + case s3select.ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch: + apiErr = ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch + case s3select.ErrEvaluatorUnterminatedTimestampFormatPatternToken: + apiErr = ErrEvaluatorUnterminatedTimestampFormatPatternToken + case s3select.ErrEvaluatorInvalidTimestampFormatPatternToken: + apiErr = ErrEvaluatorInvalidTimestampFormatPatternToken + case s3select.ErrEvaluatorInvalidTimestampFormatPatternSymbol: + apiErr = ErrEvaluatorInvalidTimestampFormatPatternSymbol + case s3select.ErrInvalidColumnIndex: + apiErr = ErrInvalidColumnIndex + case s3select.ErrEvaluatorBindingDoesNotExist: + apiErr = ErrEvaluatorBindingDoesNotExist + case s3select.ErrMissingHeaders: + apiErr = ErrMissingHeaders + + } if apiErr != ErrNone { // If there was a match in the above switch case. 
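For illustration only (this snippet is not part of the patch), the effect of the mapping above can be sketched in a few lines: a sentinel error from pkg/s3select resolves through toAPIErrorCode to an APIErrorCode, and the errorCodeResponse table then supplies the wire-level code and HTTP status. The sketch assumes it lives in the cmd package alongside api-errors.go.

```go
// Minimal sketch of the s3select error plumbing added above:
// s3select.ErrBusy -> ErrBusy -> {"Busy", 503}.
func exampleSelectErrorMapping() {
	apiErrCode := toAPIErrorCode(s3select.ErrBusy)  // matched by the new switch case
	apiErr := errorCodeResponse[apiErrCode]         // lookup in the response table
	fmt.Println(apiErr.Code, apiErr.HTTPStatusCode) // prints: Busy 503
}
```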
diff --git a/cmd/api-router.go b/cmd/api-router.go index 32e3bd1b2..c9f0696bb 100644 --- a/cmd/api-router.go +++ b/cmd/api-router.go @@ -62,6 +62,8 @@ func registerAPIRouter(router *mux.Router) { bucket.Methods("DELETE").Path("/{object:.+}").HandlerFunc(httpTraceAll(api.AbortMultipartUploadHandler)).Queries("uploadId", "{uploadId:.*}") // GetObjectACL - this is a dummy call. bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectACLHandler)).Queries("acl", "") + // SelectObjectContent + bucket.Methods("POST").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.SelectObjectContentHandler)).Queries("select", "").Queries("select-type", "2") // GetObject bucket.Methods("GET").Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectHandler)) // CopyObject diff --git a/cmd/generic-handlers.go b/cmd/generic-handlers.go index c058d401e..ef02892f0 100644 --- a/cmd/generic-handlers.go +++ b/cmd/generic-handlers.go @@ -475,7 +475,6 @@ var notimplementedObjectResourceNames = map[string]bool{ "policy": true, "tagging": true, "restore": true, - "select": true, } // Resource handler ServeHTTP() wrapper diff --git a/cmd/object-handlers.go b/cmd/object-handlers.go index 87d60ccfe..cfa9fc96b 100644 --- a/cmd/object-handlers.go +++ b/cmd/object-handlers.go @@ -30,6 +30,7 @@ import ( "net/url" "sort" "strconv" + "strings" "github.com/gorilla/mux" miniogo "github.com/minio/minio-go" @@ -40,6 +41,7 @@ import ( "github.com/minio/minio/pkg/hash" "github.com/minio/minio/pkg/ioutil" "github.com/minio/minio/pkg/policy" + "github.com/minio/minio/pkg/s3select" sha256 "github.com/minio/sha256-simd" "github.com/minio/sio" ) @@ -63,6 +65,191 @@ func setHeadGetRespHeaders(w http.ResponseWriter, reqParams url.Values) { } } +// SelectObjectContentHandler - POST Object?select +// ---------- +// This implementation of the POST operation retrieves object content based +// on an SQL expression. In the request, along with the SQL expression, you must +// also specify a data serialization format (JSON, CSV) of the object. +func (api objectAPIHandlers) SelectObjectContentHandler(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "SelectObject") + var object, bucket string + vars := mux.Vars(r) + bucket = vars["bucket"] + object = vars["object"] + + // Fetch object stat info. + objectAPI := api.ObjectAPI() + if objectAPI == nil { + writeErrorResponse(w, ErrServerNotInitialized, r.URL) + return + } + + getObjectInfo := objectAPI.GetObjectInfo + if api.CacheAPI() != nil { + getObjectInfo = api.CacheAPI().GetObjectInfo + } + + if s3Error := checkRequestAuthType(ctx, r, policy.GetObjectAction, bucket, object); s3Error != ErrNone { + if getRequestAuthType(r) == authTypeAnonymous { + // As per "Permission" section in + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectGET.html If + // the object you request does not exist, the error Amazon S3 returns + // depends on whether you also have the s3:ListBucket permission. * If you + // have the s3:ListBucket permission on the bucket, Amazon S3 will return + // an HTTP status code 404 ("no such key") error. 
* If you don’t have the + // s3:ListBucket permission, Amazon S3 will return an HTTP status code 403 + // ("access denied") error. + if globalPolicySys.IsAllowed(policy.Args{ + Action: policy.ListBucketAction, + BucketName: bucket, + ConditionValues: getConditionValues(r, ""), + IsOwner: false, + }) { + _, err := getObjectInfo(ctx, bucket, object) + if toAPIErrorCode(err) == ErrNoSuchKey { + s3Error = ErrNoSuchKey + } + } + } + writeErrorResponse(w, s3Error, r.URL) + return + } + if r.ContentLength <= 0 { + writeErrorResponse(w, ErrEmptyRequestBody, r.URL) + return + } + var selectReq ObjectSelectRequest + if err := xmlDecoder(r.Body, &selectReq, r.ContentLength); err != nil { + writeErrorResponse(w, ErrMalformedXML, r.URL) + return + } + + objInfo, err := getObjectInfo(ctx, bucket, object) + if err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + // Get request range. + rangeHeader := r.Header.Get("Range") + if rangeHeader != "" { + writeErrorResponse(w, ErrUnsupportedRangeHeader, r.URL) + return + } + + if selectReq.InputSerialization.CompressionType == SelectCompressionGZIP { + if !strings.Contains(objInfo.ContentType, "gzip") { + writeErrorResponse(w, ErrInvalidDataSource, r.URL) + return + } + } + if selectReq.InputSerialization.CompressionType == SelectCompressionBZIP { + if !strings.Contains(objInfo.ContentType, "bzip") { + writeErrorResponse(w, ErrInvalidDataSource, r.URL) + return + } + } + if selectReq.InputSerialization.CompressionType == SelectCompressionNONE || + selectReq.InputSerialization.CompressionType == "" { + selectReq.InputSerialization.CompressionType = SelectCompressionNONE + if !strings.Contains(objInfo.ContentType, "text/csv") { + writeErrorResponse(w, ErrInvalidDataSource, r.URL) + return + } + } + if !strings.EqualFold(string(selectReq.ExpressionType), "SQL") { + writeErrorResponse(w, ErrInvalidExpressionType, r.URL) + return + } + if len(selectReq.Expression) >= (256 * 1024) { + writeErrorResponse(w, ErrExpressionTooLong, r.URL) + return + } + if selectReq.InputSerialization.CSV != nil && + selectReq.InputSerialization.CSV.FileHeaderInfo != CSVFileHeaderInfoUse && + selectReq.InputSerialization.CSV.FileHeaderInfo != CSVFileHeaderInfoNone && + selectReq.InputSerialization.CSV.FileHeaderInfo != CSVFileHeaderInfoIgnore && + selectReq.InputSerialization.CSV.FileHeaderInfo != "" { + writeErrorResponse(w, ErrInvalidFileHeaderInfo, r.URL) + return + } + if selectReq.OutputSerialization.CSV != nil && + selectReq.OutputSerialization.CSV.QuoteFields != CSVQuoteFieldsAlways && + selectReq.OutputSerialization.CSV.QuoteFields != CSVQuoteFieldsAsNeeded && + selectReq.OutputSerialization.CSV.QuoteFields != "" { + writeErrorResponse(w, ErrInvalidQuoteFields, r.URL) + return + } + + getObject := objectAPI.GetObject + if api.CacheAPI() != nil && !hasSSECustomerHeader(r.Header) { + getObject = api.CacheAPI().GetObject + } + + reader, pipewriter := io.Pipe() + + // Get the object. + var startOffset int64 + length := objInfo.Size + + var writer io.Writer + writer = pipewriter + if objectAPI.IsEncryptionSupported() { + if hasSSECustomerHeader(r.Header) { + // Response writer should be limited early on for decryption up to required length, + // additionally also skipping mod(offset)64KiB boundaries. 
+ writer = ioutil.LimitedWriter(writer, startOffset%(64*1024), length) + + writer, startOffset, length, err = DecryptBlocksRequest(writer, r, bucket, + object, startOffset, length, objInfo, false) + if err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + } + } + go func() { + defer reader.Close() + if gerr := getObject(ctx, bucket, object, 0, objInfo.Size, writer, + objInfo.ETag); gerr != nil { + pipewriter.CloseWithError(gerr) + return + } + pipewriter.Close() // Close writer explicitly signaling we wrote all data. + }() + + // s3select options; both CSV serializations are dereferenced below, so guard on them first. + if selectReq.InputSerialization.CSV != nil && selectReq.OutputSerialization.CSV != nil { + if selectReq.OutputSerialization.CSV.FieldDelimiter == "" { + selectReq.OutputSerialization.CSV.FieldDelimiter = "," + } + if selectReq.InputSerialization.CSV.FileHeaderInfo == "" { + selectReq.InputSerialization.CSV.FileHeaderInfo = CSVFileHeaderInfoNone + } + options := &s3select.Options{ + HasHeader: selectReq.InputSerialization.CSV.FileHeaderInfo != CSVFileHeaderInfoNone, + FieldDelimiter: selectReq.InputSerialization.CSV.FieldDelimiter, + Comments: selectReq.InputSerialization.CSV.Comments, + Name: "S3Object", // Default table name for all objects + ReadFrom: reader, + Compressed: string(selectReq.InputSerialization.CompressionType), + Expression: selectReq.Expression, + OutputFieldDelimiter: selectReq.OutputSerialization.CSV.FieldDelimiter, + StreamSize: objInfo.Size, + HeaderOpt: selectReq.InputSerialization.CSV.FileHeaderInfo == CSVFileHeaderInfoUse, + } + s3s, err := s3select.NewInput(options) + if err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + _, _, _, _, _, _, err = s3s.ParseSelect(selectReq.Expression) + if err != nil { + writeErrorResponse(w, toAPIErrorCode(err), r.URL) + return + } + if err := s3s.Execute(w); err != nil { + logger.LogIf(ctx, err) + } + return + } +} + // GetObjectHandler - GET Object // ---------- // This implementation of the GET operation retrieves object. To use GET, diff --git a/docs/select/README.md b/docs/select/README.md new file mode 100644 index 000000000..55389e2ba --- /dev/null +++ b/docs/select/README.md @@ -0,0 +1,98 @@ +# How to use Minio S3 Select [![Slack](https://slack.minio.io/slack?type=svg)](https://slack.minio.io) + +This document explains current limitations of the Minio S3 Select support. + +## 1. Features to be implemented +1). JSON documents as supported objects + +2). CAST expression + +3). Date functions + +4). Returning types other than float from aggregation queries. + +5). Bracket and reversal notation with the SQL LIKE operator. + +6). SUBSTRING is currently not supported, and TRIM only works with its default arguments of trimming leading and trailing spaces. + +## 2. Sample Usage with AWS Boto Client ```python import boto3 from botocore.client import Config import os s3 = boto3.resource('s3', endpoint_url='ENDPOINT', aws_access_key_id='ACCESSKEY', aws_secret_access_key='SECRETKEY', config=Config(signature_version='s3v4'), region_name='us-east-1') s3_client = s3.meta.client s3_client.select_object_content( Bucket='myBucket', Key='myKey', ExpressionType='SQL', Expression = "SELECT * FROM S3OBJECT AS A", InputSerialization = {'CSV': {"FieldDelimiter": ",","FileHeaderInfo":"USE"}}, OutputSerialization = {'CSV': {}}, ) ``` ## 3. Sample Usage with Minio-Go Client ```go + // Initialize minio client object. 
+ minioClient, err := minio.New(endpoint, accessKeyID, secretAccessKey, useSSL) + if err != nil { + log.Fatalln(err) + } + input := minio.SelectObjectInput{ + RecordDelimiter: "\n", + FieldDelimiter: ",", + FileHeaderInfo: minio.CSVFileHeaderInfoUse, + } + output := minio.SelectObjectOutput{ + RecordDelimiter: "\n", + FieldDelimiter: ",", + } + opts := minio.SelectObjectOptions{ + Type: minio.SelectObjectTypeCSV, + Input: input, + Output: output, + } + myReader, err := minioClient.SelectObjectContent(ctx, "sqlselectapi", "player.csv", "Select * from S3OBJECT WHERE last_name = 'James'", opts) + if err != nil { + fmt.Println(err) + return + } + defer myReader.Close() + + results, resultWriter := io.Pipe() + go func() { + defer resultWriter.Close() + for event := range myReader.Events() { + switch e := event.(type) { + case *minio.RecordEvent: + resultWriter.Write(e.Payload) + case *minio.ProgressEvent: + fmt.Println("Progress") + case *minio.StatEvent: + fmt.Println(string(e.Payload)) + case *minio.EndEvent: + fmt.Println("Ended") + return + } + } + }() + resReader := csv.NewReader(results) + for { + record, err := resReader.Read() + if err == io.EOF { + break + } + // Print out the records + fmt.Println(record) + } + if err := myReader.Err(); err != nil { + fmt.Println(err) + } ``` diff --git a/pkg/s3select/errors.go b/pkg/s3select/errors.go new file mode 100644 index 000000000..bb66d6110 --- /dev/null +++ b/pkg/s3select/errors.go @@ -0,0 +1,486 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3select + +import "errors" + +// S3 errors below + +// ErrBusy is an error if the service is too busy. +var ErrBusy = errors.New("The service is unavailable. Please retry") + +// ErrUnauthorizedAccess is an error if you lack the appropriate credentials to +// access the object. +var ErrUnauthorizedAccess = errors.New("You are not authorized to perform this operation") + +// ErrExpressionTooLong is an error if your SQL expression is too long for +// processing. +var ErrExpressionTooLong = errors.New("The SQL expression is too long: The maximum byte-length for the SQL expression is 256 KB") + +// ErrIllegalSQLFunctionArgument is an error if you provide an illegal argument +// in the SQL function. +var ErrIllegalSQLFunctionArgument = errors.New("Illegal argument was used in the SQL function") + +// ErrInvalidColumnIndex is an error if you provide a column index which is not +// valid. +var ErrInvalidColumnIndex = errors.New("Column index in the SQL expression is invalid") + +// ErrInvalidKeyPath is an error if you provide a key in the SQL expression that +// is invalid. 
+var ErrInvalidKeyPath = errors.New("Key path in the SQL expression is invalid") + +// ErrColumnTooLong is an error if your query results in a column that exceeds +// the maximum length of 1 MB per column (maxCharsPerColumn). +var ErrColumnTooLong = errors.New("The length of a column in the result is greater than maxCharsPerColumn of 1 MB") + +// ErrOverMaxColumn is an error if the number of columns in the resulting +// query exceeds the maximum allowed (maxColumnNumber). +var ErrOverMaxColumn = errors.New("The number of columns in the result is greater than maxColumnNumber of 1 MB") + +// ErrOverMaxRecordSize is an error if the length of a record in the result is +// greater than the maximum of 1 MB per record (maxCharsPerRecord). +var ErrOverMaxRecordSize = errors.New("The length of a record in the result is greater than maxCharsPerRecord of 1 MB") + +// ErrMissingHeaders is an error if some of the headers that are requested in +// the Select Query are not present in the file. +var ErrMissingHeaders = errors.New("Some headers in the query are missing from the file. Check the file and try again") + +// ErrInvalidCompressionFormat is an error if an unsupported compression type is +// utilized with the select object query. +var ErrInvalidCompressionFormat = errors.New("The file is not in a supported compression format. Only GZIP is supported at this time") + +// ErrTruncatedInput is an error if the object is not compressed properly and an +// error occurs during decompression. +var ErrTruncatedInput = errors.New("Object decompression failed. Check that the object is properly compressed using the format specified in the request") + +// ErrInvalidFileHeaderInfo is an error if the argument provided to the +// FileHeaderInfo argument is incorrect. +var ErrInvalidFileHeaderInfo = errors.New("The FileHeaderInfo is invalid. Only NONE, USE, and IGNORE are supported") + +// ErrInvalidJSONType is an error if the JSON format provided as an argument is +// invalid. +var ErrInvalidJSONType = errors.New("The JsonType is invalid. Only DOCUMENT and LINES are supported at this time") + +// ErrInvalidQuoteFields is an error if the arguments provided to the +// QuoteFields options are not valid. +var ErrInvalidQuoteFields = errors.New("The QuoteFields is invalid. Only ALWAYS and ASNEEDED are supported") + +// ErrInvalidRequestParameter is an error if the value of a parameter in the +// request element is not valid. +var ErrInvalidRequestParameter = errors.New("The value of a parameter in Request element is invalid. Check the service API documentation and try again") + +// ErrCSVParsingError is an error if an error is encountered while parsing the +// CSV file. +var ErrCSVParsingError = errors.New("Encountered an error parsing the CSV file. Check the file and try again") + +// ErrJSONParsingError is an error if an error arises while parsing the JSON file. +var ErrJSONParsingError = errors.New("Encountered an error parsing the JSON file. Check the file and try again") + +// ErrExternalEvalException is an error that arises if the query cannot be +// evaluated. +var ErrExternalEvalException = errors.New("The query cannot be evaluated. Check the file and try again") + +// ErrInvalidDataType is an error that occurs if the SQL expression contains an +// invalid data type. +var ErrInvalidDataType = errors.New("The SQL expression contains an invalid data type") + +// ErrUnrecognizedFormatException is an error that arises if there is an invalid +// record type. 
+var ErrUnrecognizedFormatException = errors.New("Encountered an invalid record type") + +// ErrInvalidTextEncoding is an error if the text encoding is not valid. +var ErrInvalidTextEncoding = errors.New("Invalid encoding type. Only UTF-8 encoding is supported at this time") + +// ErrInvalidTableAlias is an error that arises if the table alias provided in +// the SQL expression is invalid. +var ErrInvalidTableAlias = errors.New("The SQL expression contains an invalid table alias") + +// ErrMultipleDataSourcesUnsupported is an error that arises if multiple data +// sources are provided. +var ErrMultipleDataSourcesUnsupported = errors.New("Multiple data sources are not supported") + +// ErrMissingRequiredParameter is an error that arises if a required argument +// is omitted from the Request. +var ErrMissingRequiredParameter = errors.New("The Request entity is missing a required parameter. Check the service documentation and try again") + +// ErrObjectSerializationConflict is an error that arises if an unsupported +// output serialization is provided. +var ErrObjectSerializationConflict = errors.New("The Request entity can only contain one of CSV or JSON. Check the service documentation and try again") + +// ErrUnsupportedSQLOperation is an error that arises if an unsupported SQL +// operation is used. +var ErrUnsupportedSQLOperation = errors.New("Encountered an unsupported SQL operation") + +// ErrUnsupportedSQLStructure is an error that occurs if an unsupported SQL +// structure is used. +var ErrUnsupportedSQLStructure = errors.New("Encountered an unsupported SQL structure. Check the SQL Reference") + +// ErrUnsupportedStorageClass is an error that occurs if an invalid storage +// class is present. +var ErrUnsupportedStorageClass = errors.New("Encountered an invalid storage class. Only STANDARD, STANDARD_IA, and ONEZONE_IA storage classes are supported at this time") + +// ErrUnsupportedSyntax is an error that occurs if invalid syntax is present in +// the query. +var ErrUnsupportedSyntax = errors.New("Encountered invalid syntax") + +// ErrUnsupportedRangeHeader is an error that occurs if a range header is +// provided. +var ErrUnsupportedRangeHeader = errors.New("Range header is not supported for this operation") + +// ErrLexerInvalidChar is an error that occurs if the SQL expression contains an +// invalid character. +var ErrLexerInvalidChar = errors.New("The SQL expression contains an invalid character") + +// ErrLexerInvalidOperator is an error that occurs if an invalid operator is +// used. +var ErrLexerInvalidOperator = errors.New("The SQL expression contains an invalid operator") + +// ErrLexerInvalidLiteral is an error that occurs if an invalid literal is used. +var ErrLexerInvalidLiteral = errors.New("The SQL expression contains an invalid literal") + +// ErrLexerInvalidIONLiteral is an error that occurs if an invalid ION literal +// is used. +var ErrLexerInvalidIONLiteral = errors.New("The SQL expression contains an invalid ION literal") + +// ErrParseExpectedDatePart is an error that occurs if the date part is not +// found in the SQL expression. +var ErrParseExpectedDatePart = errors.New("Did not find the expected date part in the SQL expression") + +// ErrParseExpectedKeyword is an error that occurs if the expected keyword was +// not found in the expression. +var ErrParseExpectedKeyword = errors.New("Did not find the expected keyword in the SQL expression") + +// ErrParseExpectedTokenType is an error that occurs if the expected token is +// not found in the SQL expression. 
+var ErrParseExpectedTokenType = errors.New("Did not find the expected token in the SQL expression") + +// ErrParseExpected2TokenTypes is an error that occurs if 2 token types are not +// found. +var ErrParseExpected2TokenTypes = errors.New("Did not find the expected token in the SQL expression") + +// ErrParseExpectedNumber is an error that occurs if a number is expected but +// not found in the expression. +var ErrParseExpectedNumber = errors.New("Did not find the expected number in the SQL expression") + +// ErrParseExpectedRightParenBuiltinFunctionCall is an error that occurs if a +// right parenthesis is missing. +var ErrParseExpectedRightParenBuiltinFunctionCall = errors.New("Did not find the expected right parenthesis character in the SQL expression") + +// ErrParseExpectedTypeName is an error that occurs if a type name is expected +// but not found. +var ErrParseExpectedTypeName = errors.New("Did not find the expected type name in the SQL expression") + +// ErrParseExpectedWhenClause is an error that occurs if a When clause is +// expected but not found. +var ErrParseExpectedWhenClause = errors.New("Did not find the expected WHEN clause in the SQL expression. CASE is not supported") + +// ErrParseUnsupportedToken is an error that occurs if the SQL expression +// contains an unsupported token. +var ErrParseUnsupportedToken = errors.New("The SQL expression contains an unsupported token") + +// ErrParseUnsupportedLiteralsGroupBy is an error that occurs if the SQL +// expression has an unsupported use of Group By. +var ErrParseUnsupportedLiteralsGroupBy = errors.New("The SQL expression contains an unsupported use of GROUP BY") + +// ErrParseExpectedMember is an error that occurs if there is an unsupported use +// of member in the SQL expression. +var ErrParseExpectedMember = errors.New("The SQL expression contains an unsupported use of MEMBER") + +// ErrParseUnsupportedSelect is an error that occurs if there is an unsupported +// use of Select. +var ErrParseUnsupportedSelect = errors.New("The SQL expression contains an unsupported use of SELECT") + +// ErrParseUnsupportedCase is an error that occurs if there is an unsupported +// use of case. +var ErrParseUnsupportedCase = errors.New("The SQL expression contains an unsupported use of CASE") + +// ErrParseUnsupportedCaseClause is an error that occurs if there is an +// unsupported use of case. +var ErrParseUnsupportedCaseClause = errors.New("The SQL expression contains an unsupported use of CASE") + +// ErrParseUnsupportedAlias is an error that occurs if there is an unsupported +// use of Alias. +var ErrParseUnsupportedAlias = errors.New("The SQL expression contains an unsupported use of ALIAS") + +// ErrParseUnsupportedSyntax is an error that occurs if there is an +// UnsupportedSyntax in the SQL expression. +var ErrParseUnsupportedSyntax = errors.New("The SQL expression contains unsupported syntax") + +// ErrParseUnknownOperator is an error that occurs if there is an invalid +// operator present in the SQL expression. +var ErrParseUnknownOperator = errors.New("The SQL expression contains an invalid operator") + +// ErrParseInvalidPathComponent is an error that occurs if there is an invalid +// path component. +var ErrParseInvalidPathComponent = errors.New("The SQL expression contains an invalid path component") + +// ErrParseMissingIdentAfterAt is an error that occurs if the wrong symbol +// follows the "@" symbol in the SQL expression. 
+var ErrParseMissingIdentAfterAt = errors.New("Did not find the expected identifier after the @ symbol in the SQL expression") + +// ErrParseUnexpectedOperator is an error that occurs if the SQL expression +// contains an unexpected operator. +var ErrParseUnexpectedOperator = errors.New("The SQL expression contains an unexpected operator") + +// ErrParseUnexpectedTerm is an error that occurs if the SQL expression contains +// an unexpected term. +var ErrParseUnexpectedTerm = errors.New("The SQL expression contains an unexpected term") + +// ErrParseUnexpectedToken is an error that occurs if the SQL expression +// contains an unexpected token. +var ErrParseUnexpectedToken = errors.New("The SQL expression contains an unexpected token") + +// ErrParseUnexpectedKeyword is an error that occurs if the SQL expression +// contains an unexpected keyword. +var ErrParseUnexpectedKeyword = errors.New("The SQL expression contains an unexpected keyword") + +// ErrParseExpectedExpression is an error that occurs if the SQL expression is +// not found. +var ErrParseExpectedExpression = errors.New("Did not find the expected SQL expression") + +// ErrParseExpectedLeftParenAfterCast is an error that occurs if the left +// parenthesis is missing after a cast in the SQL expression. +var ErrParseExpectedLeftParenAfterCast = errors.New("Did not find the expected left parenthesis after CAST in the SQL expression") + +// ErrParseExpectedLeftParenValueConstructor is an error that occurs if the left +// parenthesis is not found in the SQL expression. +var ErrParseExpectedLeftParenValueConstructor = errors.New("Did not find the expected left parenthesis in the SQL expression") + +// ErrParseExpectedLeftParenBuiltinFunctionCall is an error that occurs if the +// left parenthesis is not found in the SQL expression function call. +var ErrParseExpectedLeftParenBuiltinFunctionCall = errors.New("Did not find the expected left parenthesis in the SQL expression") + +// ErrParseExpectedArgumentDelimiter is an error that occurs if the argument +// delimiter for the SQL expression is not provided. +var ErrParseExpectedArgumentDelimiter = errors.New("Did not find the expected argument delimiter in the SQL expression") + +// ErrParseCastArity is an error that occurs because the CAST has incorrect +// arity. +var ErrParseCastArity = errors.New("The SQL expression CAST has incorrect arity") + +// ErrParseInvalidTypeParam is an error that occurs because there is an invalid +// parameter value. +var ErrParseInvalidTypeParam = errors.New("The SQL expression contains an invalid parameter value") + +// ErrParseEmptySelect is an error that occurs because the SQL expression +// contains an empty SELECT. +var ErrParseEmptySelect = errors.New("The SQL expression contains an empty SELECT") + +// ErrParseSelectMissingFrom is an error that occurs because there is a missing +// FROM after the SELECT list. +var ErrParseSelectMissingFrom = errors.New("The SQL expression contains a missing FROM after SELECT list") + +// ErrParseExpectedIdentForGroupName is an error that occurs because GROUP is +// not supported in the SQL expression. +var ErrParseExpectedIdentForGroupName = errors.New("GROUP is not supported in the SQL expression") + +// ErrParseExpectedIdentForAlias is an error that occurs if the expected +// identifier for the alias is not in the SQL expression. 
+var ErrParseExpectedIdentForAlias = errors.New("Did not find the expected identifier for the alias in the SQL expression")
+
+// ErrParseUnsupportedCallWithStar is an error that occurs if COUNT is used with
+// an argument other than "*".
+var ErrParseUnsupportedCallWithStar = errors.New("Only COUNT with (*) as a parameter is supported in the SQL expression")
+
+// ErrParseNonUnaryAgregateFunctionCall is an error that occurs if more than one
+// argument is provided to an aggregate function.
+var ErrParseNonUnaryAgregateFunctionCall = errors.New("Only one argument is supported for aggregate functions in the SQL expression")
+
+// ErrParseMalformedJoin is an error that occurs if a JOIN operation is
+// attempted in the SQL expression, as JOIN is not supported.
+var ErrParseMalformedJoin = errors.New("JOIN is not supported in the SQL expression")
+
+// ErrParseExpectedIdentForAt is an error that occurs if an alias identifier
+// does not follow "AT".
+var ErrParseExpectedIdentForAt = errors.New("Did not find the expected identifier for AT name in the SQL expression")
+
+// ErrParseAsteriskIsNotAloneInSelectList is an error that occurs if column
+// names are provided in addition to an asterisk in the SQL expression.
+var ErrParseAsteriskIsNotAloneInSelectList = errors.New("Other expressions are not allowed in the SELECT list when '*' is used without dot notation in the SQL expression")
+
+// ErrParseCannotMixSqbAndWildcardInSelectList is an error that occurs if list
+// indexing ([]) and an asterisk are mixed in the SQL expression.
+var ErrParseCannotMixSqbAndWildcardInSelectList = errors.New("Cannot mix [] and * in the same expression in a SELECT list in SQL expression")
+
+// ErrParseInvalidContextForWildcardInSelectList is an error that occurs if the
+// asterisk is used improperly within the SQL expression.
+var ErrParseInvalidContextForWildcardInSelectList = errors.New("Invalid use of * in SELECT list in the SQL expression")
+
+// ErrEvaluatorBindingDoesNotExist is an error that occurs if a column name or
+// path provided in the expression does not exist.
+var ErrEvaluatorBindingDoesNotExist = errors.New("A column name or a path provided does not exist in the SQL expression")
+
+// ErrIncorrectSQLFunctionArgumentType is an error that occurs if the wrong
+// argument type is provided to a SQL function.
+var ErrIncorrectSQLFunctionArgumentType = errors.New("Incorrect type of arguments in function call in the SQL expression")
+
+// ErrAmbiguousFieldName is an error that occurs if a column name, matched
+// case-insensitively, resolves to more than one column in the file.
+var ErrAmbiguousFieldName = errors.New("Field name matches to multiple fields in the file. Check the SQL expression and the file, and try again")
+
+// ErrEvaluatorInvalidArguments is an error that occurs if an incorrect number
+// of arguments is given in a function call in the SQL expression.
+var ErrEvaluatorInvalidArguments = errors.New("Incorrect number of arguments in the function call in the SQL expression")
+
+// ErrValueParseFailure is an error that occurs if a time stamp cannot be
+// parsed in the SQL expression.
+var ErrValueParseFailure = errors.New("Time stamp parse failure in the SQL expression")
+
+// ErrIntegerOverflow is an error that occurs if there is an integer overflow
+// or underflow in the SQL expression.
+var ErrIntegerOverflow = errors.New("Int overflow or underflow in the SQL expression")
+
+// ErrLikeInvalidInputs is an error that occurs if invalid inputs are provided
+// to the LIKE clause.
+var ErrLikeInvalidInputs = errors.New("Invalid argument given to the LIKE clause in the SQL expression")
+
+// ErrCastFailed occurs if the attempt to convert from one data type to another
+// using CAST fails.
+var ErrCastFailed = errors.New("Attempt to convert from one data type to another using CAST failed in the SQL expression")
+
+// ErrInvalidCast is an error that occurs if an invalid or unsupported CAST
+// between data types is attempted.
+var ErrInvalidCast = errors.New("Attempt to convert from one data type to another using CAST failed in the SQL expression")
+
+// ErrEvaluatorInvalidTimestampFormatPattern is an error that occurs if the
+// time stamp format pattern requires additional fields to be filled.
+var ErrEvaluatorInvalidTimestampFormatPattern = errors.New("Time stamp format pattern requires additional fields in the SQL expression")
+
+// ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing is an error that
+// occurs if the format of the time stamp cannot be parsed.
+var ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing = errors.New("Time stamp format pattern contains a valid format symbol that cannot be applied to time stamp parsing in the SQL expression")
+
+// ErrEvaluatorTimestampFormatPatternDuplicateFields is an error that occurs if
+// the time stamp format pattern contains multiple format specifications which
+// cannot be clearly resolved.
+var ErrEvaluatorTimestampFormatPatternDuplicateFields = errors.New("Time stamp format pattern contains multiple format specifiers representing the time stamp field in the SQL expression")
+
+// ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch is an error that
+// occurs if the time stamp format pattern contains a 12-hour hour-of-day
+// format symbol without an AM/PM field, or a 24-hour symbol with one.
+var ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch = errors.New("Time stamp format pattern contains a 12-hour hour of day format symbol but doesn't also contain an AM/PM field, or it contains a 24-hour hour of day format specifier and contains an AM/PM field in the SQL expression")
+
+// ErrEvaluatorUnterminatedTimestampFormatPatternToken is an error that occurs
+// if there is an unterminated token in the time stamp format within the SQL
+// expression.
+var ErrEvaluatorUnterminatedTimestampFormatPatternToken = errors.New("Time stamp format pattern contains unterminated token in the SQL expression")
+
+// ErrEvaluatorInvalidTimestampFormatPatternToken is an error that occurs if
+// there is an invalid token in the time stamp format within the SQL expression.
+var ErrEvaluatorInvalidTimestampFormatPatternToken = errors.New("Time stamp format pattern contains an invalid token in the SQL expression")
+
+// ErrEvaluatorInvalidTimestampFormatPatternSymbol is an error that occurs if
+// the time stamp format pattern has an invalid symbol within the SQL
+// expression.
+var ErrEvaluatorInvalidTimestampFormatPatternSymbol = errors.New("Time stamp format pattern contains an invalid symbol in the SQL expression")
+
+// errorCodeResponse maps the S3 Select errors above to their API error codes.
+// TODO: fix up the remaining error mappings.
+var errorCodeResponse = map[error]string{
+	ErrBusy: "Busy",
+	ErrUnauthorizedAccess: "UnauthorizedAccess",
+	ErrExpressionTooLong: "ExpressionTooLong",
+	ErrIllegalSQLFunctionArgument: "IllegalSqlFunctionArgument",
+	ErrInvalidColumnIndex: "InvalidColumnIndex",
+	ErrInvalidKeyPath: "InvalidKeyPath",
+	ErrColumnTooLong: "ColumnTooLong",
+	ErrOverMaxColumn: "OverMaxColumn",
+	ErrOverMaxRecordSize: "OverMaxRecordSize",
+	ErrMissingHeaders: "MissingHeaders",
+	ErrInvalidCompressionFormat: "InvalidCompressionFormat",
+	ErrTruncatedInput: "TruncatedInput",
+	ErrInvalidFileHeaderInfo: "InvalidFileHeaderInfo",
+	ErrInvalidJSONType: "InvalidJsonType",
+	ErrInvalidQuoteFields: "InvalidQuoteFields",
+	ErrInvalidRequestParameter: "InvalidRequestParameter",
+	ErrCSVParsingError: "CSVParsingError",
+	ErrJSONParsingError: "JSONParsingError",
+	ErrExternalEvalException: "ExternalEvalException",
+	ErrInvalidDataType: "InvalidDataType",
+	ErrUnrecognizedFormatException: "UnrecognizedFormatException",
+	ErrInvalidTextEncoding: "InvalidTextEncoding",
+	ErrInvalidTableAlias: "InvalidTableAlias",
+	ErrMultipleDataSourcesUnsupported: "MultipleDataSourcesUnsupported",
+	ErrMissingRequiredParameter: "MissingRequiredParameter",
+	ErrObjectSerializationConflict: "ObjectSerializationConflict",
+	ErrUnsupportedSQLOperation: "UnsupportedSqlOperation",
+	ErrUnsupportedSQLStructure: "UnsupportedSqlStructure",
+	ErrUnsupportedStorageClass: "UnsupportedStorageClass",
+	ErrUnsupportedSyntax: "UnsupportedSyntax",
+	ErrUnsupportedRangeHeader: "UnsupportedRangeHeader",
+	ErrLexerInvalidChar: "LexerInvalidChar",
+	ErrLexerInvalidOperator: "LexerInvalidOperator",
+	ErrLexerInvalidLiteral: "LexerInvalidLiteral",
+	ErrLexerInvalidIONLiteral: "LexerInvalidIONLiteral",
+	ErrParseExpectedDatePart: "ParseExpectedDatePart",
+	ErrParseExpectedKeyword: "ParseExpectedKeyword",
+	ErrParseExpectedTokenType: "ParseExpectedTokenType",
+	ErrParseExpected2TokenTypes: "ParseExpected2TokenTypes",
+	ErrParseExpectedNumber: "ParseExpectedNumber",
+	ErrParseExpectedRightParenBuiltinFunctionCall: "ParseExpectedRightParenBuiltinFunctionCall",
+	ErrParseExpectedTypeName: "ParseExpectedTypeName",
+	ErrParseExpectedWhenClause: "ParseExpectedWhenClause",
+	ErrParseUnsupportedToken: "ParseUnsupportedToken",
+	ErrParseUnsupportedLiteralsGroupBy: "ParseUnsupportedLiteralsGroupBy",
+	ErrParseExpectedMember: "ParseExpectedMember",
+	ErrParseUnsupportedSelect: "ParseUnsupportedSelect",
+	ErrParseUnsupportedCase: "ParseUnsupportedCase",
+	ErrParseUnsupportedCaseClause: "ParseUnsupportedCaseClause",
+	ErrParseUnsupportedAlias: "ParseUnsupportedAlias",
+	ErrParseUnsupportedSyntax: "ParseUnsupportedSyntax",
+	ErrParseUnknownOperator: "ParseUnknownOperator",
+	ErrParseInvalidPathComponent: "ParseInvalidPathComponent",
+	ErrParseMissingIdentAfterAt: "ParseMissingIdentAfterAt",
+	ErrParseUnexpectedOperator: "ParseUnexpectedOperator",
+	ErrParseUnexpectedTerm: "ParseUnexpectedTerm",
+	ErrParseUnexpectedToken: "ParseUnexpectedToken",
+	ErrParseUnexpectedKeyword: "ParseUnexpectedKeyword",
+	ErrParseExpectedExpression: "ParseExpectedExpression",
+	ErrParseExpectedLeftParenAfterCast: "ParseExpectedLeftParenAfterCast",
+	ErrParseExpectedLeftParenValueConstructor: "ParseExpectedLeftParenValueConstructor",
+	ErrParseExpectedLeftParenBuiltinFunctionCall: "ParseExpectedLeftParenBuiltinFunctionCall",
+	ErrParseExpectedArgumentDelimiter: "ParseExpectedArgumentDelimiter",
+	ErrParseCastArity: "ParseCastArity",
+	ErrParseInvalidTypeParam: "ParseInvalidTypeParam",
+	ErrParseEmptySelect: "ParseEmptySelect",
+	ErrParseSelectMissingFrom: "ParseSelectMissingFrom",
+	ErrParseExpectedIdentForGroupName: "ParseExpectedIdentForGroupName",
+	ErrParseExpectedIdentForAlias: "ParseExpectedIdentForAlias",
+	ErrParseUnsupportedCallWithStar: "ParseUnsupportedCallWithStar",
+	ErrParseNonUnaryAgregateFunctionCall: "ParseNonUnaryAgregateFunctionCall",
+	ErrParseMalformedJoin: "ParseMalformedJoin",
+	ErrParseExpectedIdentForAt: "ParseExpectedIdentForAt",
+	ErrParseAsteriskIsNotAloneInSelectList: "ParseAsteriskIsNotAloneInSelectList",
+	ErrParseCannotMixSqbAndWildcardInSelectList: "ParseCannotMixSqbAndWildcardInSelectList",
+	ErrParseInvalidContextForWildcardInSelectList: "ParseInvalidContextForWildcardInSelectList",
+	ErrEvaluatorBindingDoesNotExist: "EvaluatorBindingDoesNotExist",
+	ErrIncorrectSQLFunctionArgumentType: "IncorrectSqlFunctionArgumentType",
+	ErrAmbiguousFieldName: "AmbiguousFieldName",
+	ErrEvaluatorInvalidArguments: "EvaluatorInvalidArguments",
+	ErrValueParseFailure: "ValueParseFailure",
+	ErrIntegerOverflow: "IntegerOverflow",
+	ErrLikeInvalidInputs: "LikeInvalidInputs",
+	ErrCastFailed: "CastFailed",
+	ErrInvalidCast: "InvalidCast",
+	ErrEvaluatorInvalidTimestampFormatPattern: "EvaluatorInvalidTimestampFormatPattern",
+	ErrEvaluatorInvalidTimestampFormatPatternSymbolForParsing: "EvaluatorInvalidTimestampFormatPatternSymbolForParsing",
+	ErrEvaluatorTimestampFormatPatternDuplicateFields: "EvaluatorTimestampFormatPatternDuplicateFields",
+	ErrEvaluatorTimestampFormatPatternHourClockAmPmMismatch: "EvaluatorTimestampFormatPatternHourClockAmPmMismatch",
+	ErrEvaluatorUnterminatedTimestampFormatPatternToken: "EvaluatorUnterminatedTimestampFormatPatternToken",
+	ErrEvaluatorInvalidTimestampFormatPatternToken: "EvaluatorInvalidTimestampFormatPatternToken",
+	ErrEvaluatorInvalidTimestampFormatPatternSymbol: "EvaluatorInvalidTimestampFormatPatternSymbol",
+}
diff --git a/pkg/s3select/funcEval.go b/pkg/s3select/funcEval.go
new file mode 100644
index 000000000..e299e8601
--- /dev/null
+++ b/pkg/s3select/funcEval.go
@@ -0,0 +1,231 @@
+/*
+ * Minio Cloud Storage, (C) 2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3select
+
+import (
+	"strings"
+
+	"github.com/xwb1989/sqlparser"
+)
+
+// stringOps is a function which handles the case in a clause where a string
+// function needs to be applied.
+func stringOps(myFunc *sqlparser.FuncExpr, record []string, myReturnVal string, columnsMap map[string]int) string {
+	var value string
+	funcName := myFunc.Name.CompliantName()
+	switch tempArg := myFunc.Exprs[0].(type) {
+	case *sqlparser.AliasedExpr:
+		switch col := tempArg.Expr.(type) {
+		case *sqlparser.FuncExpr:
+			// myReturnVal is actually the tail recursive value being used in the eval func.
+			return applyStrFunc(myReturnVal, funcName)
+		case *sqlparser.ColName:
+			value = applyStrFunc(record[columnsMap[col.Name.CompliantName()]], funcName)
+		case *sqlparser.SQLVal:
+			value = applyStrFunc(string(col.Val), funcName)
+		}
+	}
+	return value
+}
+
+// coalOps is a function which evaluates a COALESCE func expr by decomposing it
+// into its arguments.
+func coalOps(myFunc *sqlparser.FuncExpr, record []string, myReturnVal string, columnsMap map[string]int) string {
+	myArgs := make([]string, len(myFunc.Exprs))
+
+	for i := 0; i < len(myFunc.Exprs); i++ {
+		switch tempArg := myFunc.Exprs[i].(type) {
+		case *sqlparser.AliasedExpr:
+			switch col := tempArg.Expr.(type) {
+			case *sqlparser.FuncExpr:
+				// myReturnVal is actually the tail recursive value being used in the eval func.
+				return myReturnVal
+			case *sqlparser.ColName:
+				myArgs[i] = record[columnsMap[col.Name.CompliantName()]]
+			case *sqlparser.SQLVal:
+				myArgs[i] = string(col.Val)
+			}
+		}
+	}
+	return processCoalNoIndex(myArgs)
+}
+
+// nullOps is a function which evaluates a NULLIF func expr by decomposing it
+// into its two arguments.
+func nullOps(myFunc *sqlparser.FuncExpr, record []string, myReturnVal string, columnsMap map[string]int) string {
+	myArgs := make([]string, 2)
+
+	for i := 0; i < len(myFunc.Exprs); i++ {
+		switch tempArg := myFunc.Exprs[i].(type) {
+		case *sqlparser.AliasedExpr:
+			switch col := tempArg.Expr.(type) {
+			case *sqlparser.FuncExpr:
+				return myReturnVal
+			case *sqlparser.ColName:
+				myArgs[i] = record[columnsMap[col.Name.CompliantName()]]
+			case *sqlparser.SQLVal:
+				myArgs[i] = string(col.Val)
+			}
+		}
+	}
+	return processNullIf(myArgs)
+}
+
+// isValidFunc is a function that ensures the current index is one with a
+// string function.
+func isValidFunc(myList []int, index int) bool {
+	if myList == nil {
+		return false
+	}
+	for i := 0; i < len(myList); i++ {
+		if myList[i] == index {
+			return true
+		}
+	}
+	return false
+}
+
+// processNullIf is a function that evaluates a given NULLIF clause.
+func processNullIf(nullStore []string) string {
+	nullValOne := nullStore[0]
+	nullValTwo := nullStore[1]
+	if nullValOne == nullValTwo {
+		return ""
+	}
+	return nullValOne
+}
+
+// processCoalNoIndex is a function which evaluates a given COALESCE clause.
+func processCoalNoIndex(coalStore []string) string {
+	for i := 0; i < len(coalStore); i++ {
+		if coalStore[i] != "null" && coalStore[i] != "missing" && coalStore[i] != "" {
+			return coalStore[i]
+		}
+	}
+	return "null"
+}
+
+// evaluateFuncExpr is a function that allows for tail recursive evaluation of
+// nested function expressions.
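To make the tail recursion above concrete, here is a minimal editorial sketch (not part of the patch) of how a nested call such as UPPER(TRIM(name)) unwinds: the innermost function is applied first and its result is threaded back up as myReturnVal. It assumes it lives in package s3select next to applyStrFunc; the record layout is hypothetical.

func exampleNestedEval() string {
	record := []string{"  jane  "}
	columnsMap := map[string]int{"name": 0}
	// Innermost call first: TRIM(name) -> "jane".
	inner := applyStrFunc(record[columnsMap["name"]], "TRIM")
	// The inner result is threaded back up, so UPPER sees "jane".
	return applyStrFunc(inner, "UPPER") // "JANE"
}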
+func evaluateFuncExpr(myVal *sqlparser.FuncExpr, myReturnVal string, myRecord []string, columnsMap map[string]int) string { + if myVal == nil { + return myReturnVal + } + // retrieve all the relevant arguments of the function + var mySubFunc []*sqlparser.FuncExpr + mySubFunc = make([]*sqlparser.FuncExpr, len(myVal.Exprs)) + for i := 0; i < len(myVal.Exprs); i++ { + switch col := myVal.Exprs[i].(type) { + case *sqlparser.AliasedExpr: + switch temp := col.Expr.(type) { + case *sqlparser.FuncExpr: + mySubFunc[i] = temp + } + } + } + // Need to do tree recursion so as to explore all possible directions of the + // nested function recursion + for i := 0; i < len(mySubFunc); i++ { + if supportedString(myVal.Name.CompliantName()) { + if mySubFunc != nil { + return stringOps(myVal, myRecord, evaluateFuncExpr(mySubFunc[i], myReturnVal, myRecord, columnsMap), columnsMap) + } + return stringOps(myVal, myRecord, myReturnVal, columnsMap) + } else if strings.ToUpper(myVal.Name.CompliantName()) == "NULLIF" { + if mySubFunc != nil { + return nullOps(myVal, myRecord, evaluateFuncExpr(mySubFunc[i], myReturnVal, myRecord, columnsMap), columnsMap) + } + return nullOps(myVal, myRecord, myReturnVal, columnsMap) + } else if strings.ToUpper(myVal.Name.CompliantName()) == "COALESCE" { + if mySubFunc != nil { + return coalOps(myVal, myRecord, evaluateFuncExpr(mySubFunc[i], myReturnVal, myRecord, columnsMap), columnsMap) + } + return coalOps(myVal, myRecord, myReturnVal, columnsMap) + } + } + return "" +} + +// evaluateFuncErr is a function that flags errors in nested functions. +func (reader *Input) evaluateFuncErr(myVal *sqlparser.FuncExpr) error { + if myVal == nil { + return nil + } + if !supportedFunc(myVal.Name.CompliantName()) { + return ErrUnsupportedSQLOperation + } + for i := 0; i < len(myVal.Exprs); i++ { + switch tempArg := myVal.Exprs[i].(type) { + case *sqlparser.StarExpr: + return ErrParseUnsupportedCallWithStar + case *sqlparser.AliasedExpr: + switch col := tempArg.Expr.(type) { + case *sqlparser.FuncExpr: + if err := reader.evaluateFuncErr(col); err != nil { + return err + } + case *sqlparser.ColName: + if err := reader.colNameErrs([]string{col.Name.CompliantName()}); err != nil { + return err + } + } + } + } + return nil +} + +// evaluateIsExpr is a function for evaluating expressions of the form "column +// is ...." 
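Because a CSV cell has no native null, the evaluator below treats the empty string as null, so IS NULL and IS NOT NULL reduce to empty-string checks. A minimal sketch of that reduction (editorial, assuming package s3select):

func exampleIsNull(val, operator string) (bool, error) {
	switch strings.ToLower(operator) {
	case "is null":
		return val == "", nil // an empty cell stands in for NULL
	case "is not null":
		return val != "", nil
	}
	return false, ErrUnsupportedSQLOperation
}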
+func evaluateIsExpr(myFunc *sqlparser.IsExpr, row []string, columnNames map[string]int, alias string) (bool, error) { + operator := myFunc.Operator + var colName string + var myVal string + switch myIs := myFunc.Expr.(type) { + // case for literal val + case *sqlparser.SQLVal: + myVal = string(myIs.Val) + // case for nested func val + case *sqlparser.FuncExpr: + myVal = evaluateFuncExpr(myIs, "", row, columnNames) + // case for col val + case *sqlparser.ColName: + colName = cleanCol(myIs.Name.CompliantName(), alias) + } + // case if it is a col val + if colName != "" { + myVal = row[columnNames[colName]] + } + // case to evaluate is null + if strings.ToLower(operator) == "is null" { + return myVal == "", nil + } + // case to evaluate is not null + if strings.ToLower(operator) == "is not null" { + return myVal != "", nil + } + return false, ErrUnsupportedSQLOperation +} + +// supportedString is a function that checks whether the function is a supported +// string one +func supportedString(strFunc string) bool { + return stringInSlice(strings.ToUpper(strFunc), []string{"TRIM", "SUBSTRING", "CHAR_LENGTH", "CHARACTER_LENGTH", "LOWER", "UPPER"}) +} + +// supportedFunc is a function that checks whether the function is a supported +// S3 one. +func supportedFunc(strFunc string) bool { + return stringInSlice(strings.ToUpper(strFunc), []string{"TRIM", "SUBSTRING", "CHAR_LENGTH", "CHARACTER_LENGTH", "LOWER", "UPPER", "COALESCE", "NULLIF"}) +} diff --git a/pkg/s3select/helpers.go b/pkg/s3select/helpers.go new file mode 100644 index 000000000..6a58be133 --- /dev/null +++ b/pkg/s3select/helpers.go @@ -0,0 +1,754 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3select + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "github.com/xwb1989/sqlparser" +) + +// This function processes size so that we can calculate bytes BytesProcessed. +func processSize(myrecord []string) int64 { + if len(myrecord) > 0 { + var size int64 + size = int64(len(myrecord)-1) + 1 + for i := range myrecord { + size += int64(len(myrecord[i])) + } + + return size + } + return 0 +} + +// This function finds whether a string is in a list +func stringInSlice(x string, list []string) bool { + for _, y := range list { + if x == y { + return true + } + } + return false +} + +// This function returns the index of a string in a list +func stringIndex(a string, list []string) int { + for i := range list { + if list[i] == a { + return i + } + } + return -1 +} + +// Returns a true or false, whether a string can be represented as an int. 
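This check is also how positional column references such as SELECT s._1 are told apart from named ones; S3 Select positions are 1-based, which is why the comparison code below subtracts one. A hedged sketch (the helper name is hypothetical):

func exampleColumnLookup(row []string, columnNames map[string]int, col string) string {
	if n, err := strconv.Atoi(col); err == nil {
		return row[n-1] // positional reference: S3 Select indexes start at 1
	}
	return row[columnNames[col]] // named reference resolved via the header map
}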
+func representsInt(s string) bool {
+	if _, err := strconv.Atoi(s); err == nil {
+		return true
+	}
+	return false
+}
+
+// matchesMyWhereClause processes the where clause into an actual boolean,
+// given a row.
+func matchesMyWhereClause(row []string, columnNames map[string]int, alias string, whereClause interface{}) (bool, error) {
+	// This particular logic deals with the details of casting, e.g. if we have
+	// to cast a column of string numbers into ints for comparison.
+	var conversionColumn string
+	var operator string
+	var operand interface{}
+	if fmt.Sprintf("%v", whereClause) == "false" {
+		return false, nil
+	}
+	switch expr := whereClause.(type) {
+	case *sqlparser.IsExpr:
+		return evaluateIsExpr(expr, row, columnNames, alias)
+	case *sqlparser.RangeCond:
+		operator = expr.Operator
+		if operator != "between" && operator != "not between" {
+			return false, ErrUnsupportedSQLOperation
+		}
+		if operator == "not between" {
+			myResult, err := evaluateBetween(expr, alias, row, columnNames)
+			if err != nil {
+				return false, err
+			}
+			return !myResult, nil
+		}
+		myResult, err := evaluateBetween(expr, alias, row, columnNames)
+		if err != nil {
+			return false, err
+		}
+		return myResult, nil
+	case *sqlparser.ComparisonExpr:
+		operator = expr.Operator
+		switch right := expr.Right.(type) {
+		case *sqlparser.FuncExpr:
+			operand = evaluateFuncExpr(right, "", row, columnNames)
+		case *sqlparser.SQLVal:
+			var err error
+			operand, err = evaluateParserType(right)
+			if err != nil {
+				return false, err
+			}
+		}
+		var myVal string
+		myVal = ""
+		switch left := expr.Left.(type) {
+		case *sqlparser.FuncExpr:
+			myVal = evaluateFuncExpr(left, "", row, columnNames)
+			conversionColumn = ""
+		case *sqlparser.ColName:
+			conversionColumn = cleanCol(left.Name.CompliantName(), alias)
+		}
+		if representsInt(conversionColumn) {
+			intCol, err := strconv.Atoi(conversionColumn)
+			if err != nil {
+				return false, err
+			}
+			// Subtract 1 out because the index starts at 1 for Amazon instead of 0.
+ return evaluateOperator(row[intCol-1], operator, operand) + } + if myVal != "" { + return evaluateOperator(myVal, operator, operand) + } + return evaluateOperator(row[columnNames[conversionColumn]], operator, operand) + case *sqlparser.AndExpr: + var leftVal bool + var rightVal bool + switch left := expr.Left.(type) { + case *sqlparser.ComparisonExpr: + temp, err := matchesMyWhereClause(row, columnNames, alias, left) + if err != nil { + return false, err + } + leftVal = temp + } + switch right := expr.Right.(type) { + case *sqlparser.ComparisonExpr: + temp, err := matchesMyWhereClause(row, columnNames, alias, right) + if err != nil { + return false, err + } + rightVal = temp + } + return (rightVal && leftVal), nil + case *sqlparser.OrExpr: + var leftVal bool + var rightVal bool + switch left := expr.Left.(type) { + case *sqlparser.ComparisonExpr: + leftVal, _ = matchesMyWhereClause(row, columnNames, alias, left) + + } + switch right := expr.Right.(type) { + case *sqlparser.ComparisonExpr: + rightVal, _ = matchesMyWhereClause(row, columnNames, alias, right) + } + return (rightVal || leftVal), nil + + } + return true, nil +} +func applyStrFunc(rawArg string, funcName string) string { + switch strings.ToUpper(funcName) { + case "TRIM": + // parser has an issue which does not allow it to support Trim with other + // arguments + return strings.Trim(rawArg, " ") + case "SUBSTRING": + // TODO parser has an issue which does not support substring + return rawArg + case "CHAR_LENGTH": + return strconv.Itoa(len(rawArg)) + case "CHARACTER_LENGTH": + return strconv.Itoa(len(rawArg)) + case "LOWER": + return strings.ToLower(rawArg) + case "UPPER": + return strings.ToUpper(rawArg) + } + return rawArg + +} + +// This is a really important function it actually evaluates the boolean +// statement and therefore actually returns a bool, it functions as the lowest +// level of the state machine. +func evaluateOperator(myTblVal string, operator string, operand interface{}) (bool, error) { + if err := checkValidOperator(operator); err != nil { + return false, err + } + myRecordVal := checkStringType(myTblVal) + myVal := reflect.ValueOf(myRecordVal) + myOp := reflect.ValueOf(operand) + + switch { + case myVal.Kind() == reflect.String && myOp.Kind() == reflect.String: + return stringEval(myVal.String(), operator, myOp.String()) + case myVal.Kind() == reflect.Float64 && myOp.Kind() == reflect.Float64: + return floatEval(myVal.Float(), operator, myOp.Float()) + case myVal.Kind() == reflect.Int && myOp.Kind() == reflect.Int: + return intEval(myVal.Int(), operator, myOp.Int()) + case myVal.Kind() == reflect.Int && myOp.Kind() == reflect.String: + stringVs := strconv.Itoa(int(myVal.Int())) + return stringEval(stringVs, operator, myOp.String()) + case myVal.Kind() == reflect.Float64 && myOp.Kind() == reflect.String: + stringVs := strconv.FormatFloat(myVal.Float(), 'f', 6, 64) + return stringEval(stringVs, operator, myOp.String()) + case myVal.Kind() != myOp.Kind(): + return false, nil + } + return false, ErrUnsupportedSyntax +} + +// checkValidOperator ensures that the current operator is supported +func checkValidOperator(operator string) error { + listOfOps := []string{">", "<", "=", "<=", ">=", "!=", "like"} + for i := range listOfOps { + if operator == listOfOps[i] { + return nil + } + } + return ErrParseUnknownOperator +} + +// checkStringType converts the value from the csv to the appropriate one. 
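The coercion order matters: an integer parse is attempted before a float parse, and anything unparseable stays a string. A small editorial sketch of the resulting dynamic types (assumes fmt is imported in this package):

func exampleCoercion() {
	fmt.Printf("%T\n", checkStringType("42"))    // int
	fmt.Printf("%T\n", checkStringType("42.5"))  // float64
	fmt.Printf("%T\n", checkStringType("hello")) // string
}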
+func checkStringType(myTblVal string) interface{} { + myInt, isInt := strconv.Atoi(myTblVal) + myFloat, isFloat := strconv.ParseFloat(myTblVal, 64) + if isInt == nil { + return myInt + } else if isFloat == nil { + return myFloat + } else { + return myTblVal + } +} + +// stringEval is for evaluating the state of string comparison. +func stringEval(myRecordVal string, operator string, myOperand string) (bool, error) { + switch operator { + case ">": + return myRecordVal > myOperand, nil + case "<": + return myRecordVal < myOperand, nil + case "=": + return myRecordVal == myOperand, nil + case "<=": + return myRecordVal <= myOperand, nil + case ">=": + return myRecordVal >= myOperand, nil + case "!=": + return myRecordVal != myOperand, nil + case "like": + return likeConvert(myOperand, myRecordVal) + } + return false, ErrUnsupportedSyntax +} + +// intEval is for evaluating integer comparisons. +func intEval(myRecordVal int64, operator string, myOperand int64) (bool, error) { + + switch operator { + case ">": + return myRecordVal > myOperand, nil + case "<": + return myRecordVal < myOperand, nil + case "=": + return myRecordVal == myOperand, nil + case "<=": + return myRecordVal <= myOperand, nil + case ">=": + return myRecordVal >= myOperand, nil + case "!=": + return myRecordVal != myOperand, nil + } + return false, ErrUnsupportedSyntax +} + +// floatEval is for evaluating the comparison of floats. +func floatEval(myRecordVal float64, operator string, myOperand float64) (bool, error) { + // Basically need some logic thats like, if the types dont match check for a cast + switch operator { + case ">": + return myRecordVal > myOperand, nil + case "<": + return myRecordVal < myOperand, nil + case "=": + return myRecordVal == myOperand, nil + case "<=": + return myRecordVal <= myOperand, nil + case ">=": + return myRecordVal >= myOperand, nil + case "!=": + return myRecordVal != myOperand, nil + } + return false, ErrUnsupportedSyntax +} + +// prefixMatch allows for matching a prefix only like query e.g a% +func prefixMatch(pattern string, record string) bool { + for i := 0; i < len(pattern)-1; i++ { + if pattern[i] != record[i] && pattern[i] != byte('_') { + return false + } + } + return true +} + +// suffixMatch allows for matching a suffix only like query e.g %an +func suffixMatch(pattern string, record string) bool { + for i := len(pattern) - 1; i > 0; i-- { + if pattern[i] != record[len(record)-(len(pattern)-i)] && pattern[i] != byte('_') { + return false + } + } + return true +} + +// This function is for evaluating select statements which are case sensitive +func likeConvert(pattern string, record string) (bool, error) { + // If pattern is empty just return false + if pattern == "" || record == "" { + return false, nil + } + // for suffix match queries e.g %a + if len(pattern) >= 2 && pattern[0] == byte('%') && strings.Count(pattern, "%") == 1 { + return suffixMatch(pattern, record), nil + } + // for prefix match queries e.g a% + if len(pattern) >= 2 && pattern[len(pattern)-1] == byte('%') && strings.Count(pattern, "%") == 1 { + return prefixMatch(pattern, record), nil + } + charCount := 0 + currPos := 0 + // Loop through the pattern so that a boolean can be returned + for i := 0; i < len(pattern); i++ { + if pattern[i] == byte('_') { + // if its an underscore it can be anything so shift current position for + // pattern and string + charCount++ + // if there have been more characters in the pattern than record, clearly + // there should be a return + if i != len(pattern)-1 { + if 
pattern[i+1] != byte('%') && pattern[i+1] != byte('_') { + if currPos != len(record)-1 && pattern[i+1] != record[currPos+1] { + return false, nil + } + } + } + if charCount > len(record) { + return false, nil + } + // if the pattern has been fully evaluated, then just return. + if len(pattern) == i+1 { + return true, nil + } + i++ + currPos++ + } + if pattern[i] == byte('%') || pattern[i] == byte('*') { + // if there is a wildcard then want to return true if its last and flag it. + if currPos == len(record) { + return false, nil + } + if i+1 == len(pattern) { + return true, nil + } + } else { + charCount++ + matched := false + // iterate through the pattern and check if there is a match for the + // character + for currPos < len(record) { + if record[currPos] == pattern[i] || pattern[i] == byte('_') { + matched = true + break + } + currPos++ + } + currPos++ + // if the character did not match then return should occur. + if !matched { + return false, nil + } + } + } + if charCount > len(record) { + return false, nil + } + if currPos < len(record) { + return false, nil + } + return true, nil +} + +// TrimQuotes allows the following to occur select "name", we need to trim the +// quotes to reference our map of columnNames. +func trimQuotes(s string) string { + if len(s) >= 2 { + if c := s[len(s)-1]; s[0] == c && (c == '"') { + return s[1 : len(s)-1] + } + } + return s +} + +// cleanCol cleans a column name from the parser so that the name is returned to +// original. +func cleanCol(myCol string, alias string) string { + if len(myCol) <= 0 { + return myCol + } + if !strings.HasPrefix(myCol, alias) && myCol[0] == '_' { + myCol = alias + myCol + } + + if strings.Contains(myCol, ".") { + myCol = strings.Replace(myCol, alias+"._", "", len(myCol)) + } + myCol = strings.Replace(myCol, alias+"_", "", len(myCol)) + return myCol +} + +// evaluateBetween is a function which evaluates a Between Clause. 
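The function first orders the two bounds (the toGreater check below), so conceptually x BETWEEN lo AND hi reduces to two comparisons ANDed together. An editorial sketch of the reduction:

func exampleBetween(x, lo, hi int64) bool {
	// evalBetweenGreater performs these two checks via evaluateOperator;
	// "not between" simply negates the result.
	return lo <= x && x <= hi
}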
+func evaluateBetween(betweenExpr *sqlparser.RangeCond, alias string, record []string, columnNames map[string]int) (bool, error) { + var colToVal interface{} + var colFromVal interface{} + var conversionColumn string + var funcName string + switch colTo := betweenExpr.To.(type) { + case sqlparser.Expr: + switch colToMyVal := colTo.(type) { + case *sqlparser.FuncExpr: + var temp string + temp = stringOps(colToMyVal, record, "", columnNames) + colToVal = []byte(temp) + case *sqlparser.SQLVal: + var err error + colToVal, err = evaluateParserType(colToMyVal) + if err != nil { + return false, err + } + } + } + switch colFrom := betweenExpr.From.(type) { + case sqlparser.Expr: + switch colFromMyVal := colFrom.(type) { + case *sqlparser.FuncExpr: + colFromVal = stringOps(colFromMyVal, record, "", columnNames) + case *sqlparser.SQLVal: + var err error + colFromVal, err = evaluateParserType(colFromMyVal) + if err != nil { + return false, err + } + } + } + var myFuncVal string + myFuncVal = "" + switch left := betweenExpr.Left.(type) { + case *sqlparser.FuncExpr: + myFuncVal = evaluateFuncExpr(left, "", record, columnNames) + conversionColumn = "" + case *sqlparser.ColName: + conversionColumn = cleanCol(left.Name.CompliantName(), alias) + } + + toGreater, err := evaluateOperator(fmt.Sprintf("%v", colToVal), ">", colFromVal) + if err != nil { + return false, err + } + if toGreater { + return evalBetweenGreater(conversionColumn, record, funcName, columnNames, colFromVal, colToVal, myFuncVal) + } + return evalBetweenLess(conversionColumn, record, funcName, columnNames, colFromVal, colToVal, myFuncVal) +} + +// evalBetweenLess is a function which evaluates the between given that the +// FROM is > than the TO. +func evalBetweenLess(conversionColumn string, record []string, funcName string, columnNames map[string]int, colFromVal interface{}, colToVal interface{}, myCoalVal string) (bool, error) { + if representsInt(conversionColumn) { + myIndex, _ := strconv.Atoi(conversionColumn) + // Subtract 1 out because the index starts at 1 for Amazon instead of 0. + myVal, err := evaluateOperator(record[myIndex-1], "<=", colFromVal) + if err != nil { + return false, err + } + var myOtherVal bool + myOtherVal, err = evaluateOperator(fmt.Sprintf("%v", colToVal), "<=", checkStringType(record[myIndex-1])) + if err != nil { + return false, err + } + return (myVal && myOtherVal), nil + } + if myCoalVal != "" { + myVal, err := evaluateOperator(myCoalVal, "<=", colFromVal) + if err != nil { + return false, err + } + var myOtherVal bool + myOtherVal, err = evaluateOperator(fmt.Sprintf("%v", colToVal), "<=", checkStringType(myCoalVal)) + if err != nil { + return false, err + } + return (myVal && myOtherVal), nil + } + myVal, err := evaluateOperator(record[columnNames[conversionColumn]], "<=", colFromVal) + if err != nil { + return false, err + } + var myOtherVal bool + myOtherVal, err = evaluateOperator(fmt.Sprintf("%v", colToVal), "<=", checkStringType(record[columnNames[conversionColumn]])) + if err != nil { + return false, err + } + return (myVal && myOtherVal), nil +} + +// evalBetweenGreater is a function which evaluates the between given that the +// TO is > than the FROM. 
+func evalBetweenGreater(conversionColumn string, record []string, funcName string, columnNames map[string]int, colFromVal interface{}, colToVal interface{}, myCoalVal string) (bool, error) { + if representsInt(conversionColumn) { + myIndex, _ := strconv.Atoi(conversionColumn) + myVal, err := evaluateOperator(record[myIndex-1], ">=", colFromVal) + if err != nil { + return false, err + } + var myOtherVal bool + myOtherVal, err = evaluateOperator(fmt.Sprintf("%v", colToVal), ">=", checkStringType(record[myIndex-1])) + if err != nil { + return false, err + } + return (myVal && myOtherVal), nil + } + if myCoalVal != "" { + myVal, err := evaluateOperator(myCoalVal, ">=", colFromVal) + if err != nil { + return false, err + } + var myOtherVal bool + myOtherVal, err = evaluateOperator(fmt.Sprintf("%v", colToVal), ">=", checkStringType(myCoalVal)) + if err != nil { + return false, err + } + return (myVal && myOtherVal), nil + } + myVal, err := evaluateOperator(record[columnNames[conversionColumn]], ">=", colFromVal) + if err != nil { + return false, err + } + var myOtherVal bool + myOtherVal, err = evaluateOperator(fmt.Sprintf("%v", colToVal), ">=", checkStringType(record[columnNames[conversionColumn]])) + if err != nil { + return false, err + } + return (myVal && myOtherVal), nil +} + +// whereClauseNameErrs is a function which returns an error if there is a column +// in the where clause which does not exist. +func (reader *Input) whereClauseNameErrs(whereClause interface{}, alias string) error { + var conversionColumn string + switch expr := whereClause.(type) { + // case for checking errors within a clause of the form "col_name is ..." + case *sqlparser.IsExpr: + switch myCol := expr.Expr.(type) { + case *sqlparser.FuncExpr: + if err := reader.evaluateFuncErr(myCol); err != nil { + return err + } + case *sqlparser.ColName: + conversionColumn = cleanCol(myCol.Name.CompliantName(), alias) + } + case *sqlparser.RangeCond: + switch left := expr.Left.(type) { + case *sqlparser.FuncExpr: + if err := reader.evaluateFuncErr(left); err != nil { + return err + } + case *sqlparser.ColName: + conversionColumn = cleanCol(left.Name.CompliantName(), alias) + } + case *sqlparser.ComparisonExpr: + switch left := expr.Left.(type) { + case *sqlparser.FuncExpr: + if err := reader.evaluateFuncErr(left); err != nil { + return err + } + case *sqlparser.ColName: + conversionColumn = cleanCol(left.Name.CompliantName(), alias) + } + case *sqlparser.AndExpr: + switch left := expr.Left.(type) { + case *sqlparser.ComparisonExpr: + return reader.whereClauseNameErrs(left, alias) + } + switch right := expr.Right.(type) { + case *sqlparser.ComparisonExpr: + return reader.whereClauseNameErrs(right, alias) + } + case *sqlparser.OrExpr: + switch left := expr.Left.(type) { + case *sqlparser.ComparisonExpr: + return reader.whereClauseNameErrs(left, alias) + } + switch right := expr.Right.(type) { + case *sqlparser.ComparisonExpr: + return reader.whereClauseNameErrs(right, alias) + } + } + if conversionColumn != "" { + return reader.colNameErrs([]string{conversionColumn}) + } + return nil +} + +// qualityCheck ensures the row has enough separators. +func qualityCheck(row string, amountOfSep int, sep string) string { + for i := 0; i < amountOfSep; i++ { + row = row + sep + } + return row +} + +// writeRow helps to write the row regardless of how many entries. 
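A usage sketch (editorial, not part of the patch) of the behavior described above: the delimiter is only inserted between fields, never before the first one.

func exampleWriteRow() string {
	row := ""
	row = writeRow(row, "a", ",", 3) // "a"
	row = writeRow(row, "b", ",", 3) // "a,b"
	row = writeRow(row, "c", ",", 3) // "a,b,c"
	return row
}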
+func writeRow(myRow string, myEntry string, delimiter string, numOfReqCols int) string { + if myEntry == "" && len(myRow) == 0 && numOfReqCols == 1 { + return myEntry + } + if myEntry == "" && len(myRow) == 0 { + return myEntry + delimiter + } + if len(myRow) == 1 && myRow[0] == ',' { + return myRow + myEntry + } + if len(myRow) == 0 { + return myEntry + } + return myRow + delimiter + myEntry +} + +// colNameErrs is a function which makes sure that the headers are requested are +// present in the file otherwise it throws an error. +func (reader *Input) colNameErrs(columnNames []string) error { + for i := 0; i < len(columnNames); i++ { + if columnNames[i] == "" { + continue + } + if !representsInt(columnNames[i]) && !reader.options.HeaderOpt { + return ErrInvalidColumnIndex + } + if representsInt(columnNames[i]) { + tempInt, _ := strconv.Atoi(columnNames[i]) + if tempInt > len(reader.Header()) || tempInt == 0 { + return ErrInvalidColumnIndex + } + } else { + if reader.options.HeaderOpt && !stringInSlice(columnNames[i], reader.Header()) { + return ErrMissingHeaders + } + } + } + return nil +} + +// aggFuncToStr converts an array of floats into a properly formatted string. +func (reader *Input) aggFuncToStr(myAggVals []float64) string { + myRow := strconv.FormatFloat(myAggVals[0], 'f', 6, 64) + for i := 1; i < len(myAggVals); i++ { + aggregateval := strconv.FormatFloat(myAggVals[i], 'f', 6, 64) + myRow = myRow + reader.options.OutputFieldDelimiter + aggregateval + } + return myRow +} + +// checkForDuplicates ensures we do not have an ambigious column name. +func checkForDuplicates(columns []string, columnsMap map[string]int, hasDuplicates map[string]bool, lowercaseColumnsMap map[string]int) error { + for i := 0; i < len(columns); i++ { + columns[i] = strings.Replace(columns[i], " ", "_", len(columns[i])) + if _, exist := columnsMap[columns[i]]; exist { + return ErrAmbiguousFieldName + } + columnsMap[columns[i]] = i + // This checks that if a key has already been put into the map, that we're + // setting its appropriate value in has duplicates to be true. + if _, exist := lowercaseColumnsMap[strings.ToLower(columns[i])]; exist { + hasDuplicates[strings.ToLower(columns[i])] = true + } else { + lowercaseColumnsMap[strings.ToLower(columns[i])] = i + } + } + return nil +} + +// evaluateParserType is a function that takes a SQL value and returns it as an +// interface converted into the appropriate value. +func evaluateParserType(col *sqlparser.SQLVal) (interface{}, error) { + colDataType := col.Type + var val interface{} + switch colDataType { + case 0: + val = string(col.Val) + case 1: + intVersion, isInt := strconv.Atoi(string(col.Val)) + if isInt != nil { + return nil, ErrIntegerOverflow + } + val = intVersion + case 2: + floatVersion, isFloat := strconv.ParseFloat(string(col.Val), 64) + if isFloat != nil { + return nil, ErrIntegerOverflow + } + val = floatVersion + } + return val, nil +} + +// parseErrs is the function which handles all the errors that could occur +// through use of function arguments such as column names in NULLIF +func (reader *Input) parseErrs(columnNames []string, whereClause interface{}, alias string, myFuncs *SelectFuncs) error { + // Below code cleans up column names. + reader.processColumnNames(columnNames, alias) + if columnNames[0] != "*" { + if err := reader.colNameErrs(columnNames); err != nil { + return err + } + } + // Below code ensures the whereClause has no errors. 
+ if whereClause != nil { + tempClause := whereClause + if err := reader.whereClauseNameErrs(tempClause, alias); err != nil { + return err + } + } + for i := 0; i < len(myFuncs.funcExpr); i++ { + if myFuncs.funcExpr[i] == nil { + continue + } + if err := reader.evaluateFuncErr(myFuncs.funcExpr[i]); err != nil { + return err + } + } + return nil +} diff --git a/pkg/s3select/input.go b/pkg/s3select/input.go new file mode 100644 index 000000000..8f37aed12 --- /dev/null +++ b/pkg/s3select/input.go @@ -0,0 +1,381 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3select + +import ( + "bytes" + "compress/bzip2" + "encoding/csv" + "encoding/xml" + "io" + "strconv" + "strings" + "time" + + "net/http" + + gzip "github.com/klauspost/pgzip" +) + +const ( + // progressTime is the time interval for which a progress message is sent. + progressTime time.Duration = 60 * time.Second + // continuationTime is the time interval for which a continuation message is + // sent. + continuationTime time.Duration = 5 * time.Second +) + +// progress represents a struct that represents the format for XML of the +// progress messages +type progress struct { + BytesScanned int64 `xml:"BytesScanned"` + BytesProcessed int64 `xml:"BytesProcessed"` + BytesReturned int64 `xml:"BytesReturned"` + Xmlns string `xml:"xmlns,attr"` +} + +// stats represents a struct that represents the format for XML of the stat +// messages +type stats struct { + BytesScanned int64 `xml:"BytesScanned"` + BytesProcessed int64 `xml:"BytesProcessed"` + BytesReturned int64 `xml:"BytesReturned"` + Xmlns string `xml:"xmlns,attr"` +} + +// StatInfo is a struct that represents the +type statInfo struct { + BytesScanned int64 + BytesReturned int64 + BytesProcessed int64 +} + +// Input represents a record producing input from a formatted file or pipe. +type Input struct { + options *Options + reader *csv.Reader + firstRow []string + header []string + minOutputLength int + stats *statInfo +} + +// Options options are passed to the underlying encoding/csv reader. +type Options struct { + // HasHeader when true, will treat the first row as a header row. + HasHeader bool + + // FieldDelimiter is the string that fields are delimited by. + FieldDelimiter string + + // Comments is the string the first character of a line of + // text matches the comment character. + Comments string + + // Name of the table that is used for querying + Name string + + // ReadFrom is where the data will be read from. + ReadFrom io.Reader + + // If true then we need to add gzip or bzip reader. + // to extract the csv. + Compressed string + + // SQL expression meant to be evaluated. + Expression string + + // What the outputted CSV will be delimited by . + OutputFieldDelimiter string + + // Size of incoming object + StreamSize int64 + + // Whether Header is "USE" or another + HeaderOpt bool +} + +// NewInput sets up a new Input, the first row is read when this is run. 
+// If there is a problem with reading the first row, the error is returned.
+// Otherwise, the returned reader can be reliably consumed with ReadRecord()
+// until ReadRecord() returns nil.
+func NewInput(opts *Options) (*Input, error) {
+	myReader := opts.ReadFrom
+	var tempBytesScanned int64
+	tempBytesScanned = 0
+	if opts.Compressed == "GZIP" {
+		tempBytesScanned = opts.StreamSize
+		var err error
+		if myReader, err = gzip.NewReader(opts.ReadFrom); err != nil {
+			return nil, ErrTruncatedInput
+		}
+	} else if opts.Compressed == "BZIP2" {
+		tempBytesScanned = opts.StreamSize
+		myReader = bzip2.NewReader(opts.ReadFrom)
+	}
+
+	progress := &statInfo{
+		BytesScanned:   tempBytesScanned,
+		BytesProcessed: 0,
+		BytesReturned:  0,
+	}
+	reader := &Input{
+		options: opts,
+		reader:  csv.NewReader(myReader),
+		stats:   progress,
+	}
+	reader.firstRow = nil
+
+	reader.reader.FieldsPerRecord = -1
+	if reader.options.FieldDelimiter != "" {
+		reader.reader.Comma = rune(reader.options.FieldDelimiter[0])
+	}
+
+	if reader.options.Comments != "" {
+		reader.reader.Comment = rune(reader.options.Comments[0])
+	}
+
+	// QuoteCharacter - " (defaulted currently)
+	reader.reader.LazyQuotes = true
+
+	if err := reader.readHeader(); err != nil {
+		return nil, err
+	}
+
+	return reader, nil
+}
+
+// ReadRecord reads a single record from the underlying reader and always
+// returns successfully. If the record is empty, an empty []string is returned.
+// Records expand to match the current row size, adding blank fields as needed;
+// a record never has fewer fields than the first row. Returns nil on EOF.
+// In the event of a parse error due to an invalid record, it is logged, and
+// an empty []string is returned with the number of fields in the first row,
+// as if the record were empty.
+//
+// In general, this reader is very tolerant of problems.
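A usage sketch tying NewInput and ReadRecord together (editorial, not part of the patch; the option values are illustrative, and fmt/io are assumed imported):

func exampleReadAll(src io.Reader, size int64) error {
	in, err := NewInput(&Options{
		HasHeader:            true,
		FieldDelimiter:       ",",
		OutputFieldDelimiter: ",",
		Compressed:           "NONE",
		Expression:           "SELECT * FROM S3OBJECT",
		ReadFrom:             src,
		StreamSize:           size,
		HeaderOpt:            true,
	})
	if err != nil {
		return err
	}
	for row := in.ReadRecord(); row != nil; row = in.ReadRecord() {
		fmt.Println(row)
	}
	return nil
}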
+func (reader *Input) ReadRecord() []string { + var row []string + var fileErr error + + if reader.firstRow != nil { + row = reader.firstRow + reader.firstRow = nil + return row + } + + row, fileErr = reader.reader.Read() + emptysToAppend := reader.minOutputLength - len(row) + if fileErr == io.EOF || fileErr == io.ErrClosedPipe { + return nil + } else if _, ok := fileErr.(*csv.ParseError); ok { + emptysToAppend = reader.minOutputLength + } + + if emptysToAppend > 0 { + for counter := 0; counter < emptysToAppend; counter++ { + row = append(row, "") + } + } + + return row +} + +// convertMySQL Replaces double quote escape for column names with backtick for +// the MySQL parser +func convertMySQL(random string) string { + return strings.Replace(random, "\"", "`", len(random)) +} + +// readHeader reads the header into the header variable if the header is present +// as the first row of the csv +func (reader *Input) readHeader() error { + var readErr error + if reader.options.HasHeader { + reader.firstRow, readErr = reader.reader.Read() + if readErr != nil { + return ErrCSVParsingError + } + reader.header = reader.firstRow + reader.firstRow = nil + reader.minOutputLength = len(reader.header) + } else { + reader.firstRow, readErr = reader.reader.Read() + reader.header = make([]string, len(reader.firstRow)) + for i := 0; i < reader.minOutputLength; i++ { + reader.header[i] = strconv.Itoa(i) + } + + } + return nil +} + +// createStatXML is the function which does the marshaling from the stat +// structs into XML so that the progress and stat message can be sent +func (reader *Input) createStatXML() (string, error) { + if reader.options.Compressed == "NONE" { + reader.stats.BytesProcessed = reader.options.StreamSize + reader.stats.BytesScanned = reader.stats.BytesProcessed + } + statXML := stats{ + BytesScanned: reader.stats.BytesScanned, + BytesProcessed: reader.stats.BytesProcessed, + BytesReturned: reader.stats.BytesReturned, + Xmlns: "", + } + out, err := xml.Marshal(statXML) + if err != nil { + return "", err + } + return xml.Header + string(out), nil +} + +// createProgressXML is the function which does the marshaling from the progress structs into XML so that the progress and stat message can be sent +func (reader *Input) createProgressXML() (string, error) { + if reader.options.HasHeader { + reader.stats.BytesProcessed += processSize(reader.header) + } + if !(reader.options.Compressed != "NONE") { + reader.stats.BytesScanned = reader.stats.BytesProcessed + } + progressXML := &progress{ + BytesScanned: reader.stats.BytesScanned, + BytesProcessed: reader.stats.BytesProcessed, + BytesReturned: reader.stats.BytesReturned, + Xmlns: "", + } + out, err := xml.Marshal(progressXML) + if err != nil { + return "", err + } + return xml.Header + string(out), nil +} + +// Header returns the header of the reader. Either the first row if a header +// set in the options, or c#, where # is the column number, starting with 0. +func (reader *Input) Header() []string { + return reader.header +} + +// Row is a Struct for keeping track of key aspects of a row. +type Row struct { + record string + err error +} + +// Execute is the function where all the blocking occurs, It writes to the HTTP +// response writer in a streaming fashion so that the client can actively use +// the results before the query is finally finished executing. 
The +func (reader *Input) Execute(writer io.Writer) error { + myRow := make(chan *Row) + curBuf := bytes.NewBuffer(make([]byte, 1000000)) + curBuf.Reset() + progressTicker := time.NewTicker(progressTime) + continuationTimer := time.NewTimer(continuationTime) + defer progressTicker.Stop() + defer continuationTimer.Stop() + go reader.runSelectParser(convertMySQL(reader.options.Expression), myRow) + for { + select { + case row, ok := <-myRow: + if ok && row.err != nil { + errorMessage := reader.writeErrorMessage(row.err, curBuf) + _, err := errorMessage.WriteTo(writer) + flusher, okFlush := writer.(http.Flusher) + if okFlush { + flusher.Flush() + } + if err != nil { + return err + } + curBuf.Reset() + close(myRow) + return nil + } else if ok { + message := reader.writeRecordMessage(row.record, curBuf) + _, err := message.WriteTo(writer) + flusher, okFlush := writer.(http.Flusher) + if okFlush { + flusher.Flush() + } + if err != nil { + return err + } + curBuf.Reset() + reader.stats.BytesReturned += int64(len(row.record)) + if !continuationTimer.Stop() { + <-continuationTimer.C + } + continuationTimer.Reset(continuationTime) + } else if !ok { + statPayload, err := reader.createStatXML() + if err != nil { + return err + } + statMessage := reader.writeStatMessage(statPayload, curBuf) + _, err = statMessage.WriteTo(writer) + flusher, ok := writer.(http.Flusher) + if ok { + flusher.Flush() + } + if err != nil { + return err + } + curBuf.Reset() + message := reader.writeEndMessage(curBuf) + _, err = message.WriteTo(writer) + flusher, ok = writer.(http.Flusher) + if ok { + flusher.Flush() + } + if err != nil { + return err + } + return nil + } + + case <-progressTicker.C: + progressPayload, err := reader.createProgressXML() + if err != nil { + return err + } + progressMessage := reader.writeProgressMessage(progressPayload, curBuf) + _, err = progressMessage.WriteTo(writer) + flusher, ok := writer.(http.Flusher) + if ok { + flusher.Flush() + } + if err != nil { + return err + } + curBuf.Reset() + case <-continuationTimer.C: + message := reader.writeContinuationMessage(curBuf) + _, err := message.WriteTo(writer) + flusher, ok := writer.(http.Flusher) + if ok { + flusher.Flush() + } + if err != nil { + return err + } + curBuf.Reset() + continuationTimer.Reset(continuationTime) + } + } +} diff --git a/pkg/s3select/output.go b/pkg/s3select/output.go new file mode 100644 index 000000000..b40048e69 --- /dev/null +++ b/pkg/s3select/output.go @@ -0,0 +1,460 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// DO NOT EDIT THIS PACKAGE DIRECTLY: This follows the protocol defined by +// AmazonS3 found at +// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html +// Consult the Spec before making direct edits. 
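For orientation, the wire framing this file implements (an editorial summary of the AWS event-stream specification linked above, not part of the patch):

// Each event-stream message has the shape:
//
//   [4 bytes] total message length (big endian)
//   [4 bytes] headers length       (big endian)
//   [4 bytes] prelude CRC32        (over the 8 bytes above)
//   [N bytes] headers: 1-byte name length, name, type byte 7,
//             2-byte value length, value
//   [M bytes] payload
//   [4 bytes] message CRC32        (over everything above)
//
// which is why writePayloadSize below adds 16: the 12-byte prelude plus the
// 4-byte trailing CRC surround the headers and the payload.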
+ +package s3select + +import ( + "bytes" + "encoding/binary" + "hash/crc32" +) + +// Record Headers +// -11 -event type - 7 - 7 "Records" +// -13 -content-type -7 -24 "application/octet-stream" +// -13 -message-type -7 5 "event" +// This is predefined from AMZ protocol found here: +// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html +var recordHeaders []byte + +// End Headers +// -13 -message-type -7 -5 "event" +// -11 -:event-type -7 -3 "End" +// This is predefined from AMZ protocol found here: +// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html +var endHeaders []byte + +// Continuation Headers +// -13 -message-type -7 -5 "event" +// -11 -:event-type -7 -4 "Cont" +// This is predefined from AMZ protocol found here: +// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html +var contHeaders []byte + +// Stat Headers +// -11 -event type - 7 - 5 "Stat" -20 +// -13 -content-type -7 -8 "text/xml" -25 +// -13 -message-type -7 -5 "event" -22 +// This is predefined from AMZ protocol found here: +// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html +var statHeaders []byte + +// Progress Headers +// -11 -event type - 7 - 8 "Progress" -23 +// -13 -content-type -7 -8 "text/xml" -25 +// -13 -message-type -7 -5 "event" -22 +// This is predefined from AMZ protocol found here: +// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html +var progressHeaders []byte + +// The length of the nonvariable portion of the ErrHeaders +// The below are the specifications of the header for a "error" event +// -11 -error-code - 7 - DEFINED "DEFINED" +// -14 -error-message -7 -DEFINED "DEFINED" +// -13 -message-type -7 -5 "error" +// This is predefined from AMZ protocol found here: +// https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html +var errHdrLen int + +func init() { + recordHeaders = writeRecordHeader() + endHeaders = writeEndHeader() + contHeaders = writeContHeader() + statHeaders = writeStatHeader() + progressHeaders = writeProgressHeader() + errHdrLen = 55 + +} + +// encodeString encodes a string in a []byte, lenBytes is the number of bytes +// used to encode the length of the string. +func encodeHeaderStringValue(s string) []byte { + n := uint16(len(s)) + lenSlice := make([]byte, 2) + binary.BigEndian.PutUint16(lenSlice[0:], n) + return append(lenSlice, []byte(s)...) +} +func encodeHeaderStringName(s string) []byte { + lenSlice := make([]byte, 1) + lenSlice[0] = byte(len(s)) + return append(lenSlice, []byte(s)...) +} + +// encodeNumber encodes a number in a []byte, lenBytes is the number of bytes +// used to encode the length of the string. +func encodeNumber(n byte, lenBytes int) []byte { + lenSlice := make([]byte, lenBytes) + lenSlice[0] = n + return lenSlice +} + +// writePayloadSize writes the 4byte payload size portion of the protocol. +func writePayloadSize(payloadSize int, headerLength int) []byte { + totalByteLen := make([]byte, 4) + totalMsgLen := uint32(payloadSize + headerLength + 16) + binary.BigEndian.PutUint32(totalByteLen, totalMsgLen) + return totalByteLen +} + +// writeHeaderSize writes the 4byte header size portion of the protocol. +func writeHeaderSize(headerLength int) []byte { + totalHeaderLen := make([]byte, 4) + totalLen := uint32(headerLength) + binary.BigEndian.PutUint32(totalHeaderLen, totalLen) + return totalHeaderLen +} + +// writeCRC writes the CRC for both the prelude and and the end of the protocol. 
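The prelude CRC covers only the two length words, which lets a reader validate the frame sizes before consuming the rest of the message. An editorial sketch of how the three size/CRC helpers compose:

func exampleAssemblePrelude(payloadLen, headerLen int) []byte {
	prelude := append(writePayloadSize(payloadLen, headerLen), writeHeaderSize(headerLen)...)
	return append(prelude, writeCRC(prelude)...) // 4+4+4 = 12-byte prelude
}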
+// writeCRC writes the CRC for both the prelude and the end of the protocol.
+func writeCRC(myBuffer []byte) []byte {
+ // Calculate the CRC here:
+ myCRC := make([]byte, 4)
+ cksum := crc32.ChecksumIEEE(myBuffer)
+ binary.BigEndian.PutUint32(myCRC, cksum)
+ return myCRC
+}
+
+// writePayload writes the payload for those messages which require one.
+func writePayload(myPayload string) []byte {
+ convertedPayload := []byte(myPayload)
+ payloadStore := make([]byte, len(convertedPayload))
+ copy(payloadStore[0:], myPayload)
+ return payloadStore
+}
+
+// writeRecordHeader is a function which writes the headers for the record
+// message.
+func writeRecordHeader() []byte {
+ // This is predefined from AMZ protocol found here:
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
+ var currentMessage = &bytes.Buffer{}
+ // 11 -event type - 7 - 7 "Records"
+ // header name
+ currentMessage.Write(encodeHeaderStringName(":event-type"))
+ // header type
+ currentMessage.Write(encodeNumber(7, 1))
+ // header value and header value length
+ currentMessage.Write(encodeHeaderStringValue("Records"))
+ // Creation of the Header for Content-Type // 13 -content-type -7 -24
+ // "application/octet-stream"
+ // header name
+ currentMessage.Write(encodeHeaderStringName(":content-type"))
+ // header type
+ currentMessage.Write(encodeNumber(7, 1))
+ // header value and header value length
+ currentMessage.Write(encodeHeaderStringValue("application/octet-stream"))
+ // Creation of the Header for message-type 13 -message-type -7 5 "event"
+ // header name
+ currentMessage.Write(encodeHeaderStringName(":message-type"))
+ // header type
+ currentMessage.Write(encodeNumber(7, 1))
+ // header value and header value length
+ currentMessage.Write(encodeHeaderStringValue("event"))
+ return currentMessage.Bytes()
+}
+
+// writeEndHeader is a function which writes the headers for the End
+// message.
+func writeEndHeader() []byte {
+ // This is predefined from AMZ protocol found here:
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
+ var currentMessage = &bytes.Buffer{}
+ // header name
+ currentMessage.Write(encodeHeaderStringName(":event-type"))
+ // header type
+ currentMessage.Write(encodeNumber(7, 1))
+ // header value and header value length
+ currentMessage.Write(encodeHeaderStringValue("End"))
+
+ // Creation of the Header for message-type 13 -message-type -7 5 "event"
+ // header name
+ currentMessage.Write(encodeHeaderStringName(":message-type"))
+ // header type
+ currentMessage.Write(encodeNumber(7, 1))
+ // header value and header value length
+ currentMessage.Write(encodeHeaderStringValue("event"))
+ return currentMessage.Bytes()
+}
+
+// writeContHeader is a function which writes the headers for the continuation
+// message.
+func writeContHeader() []byte {
+ // This is predefined from AMZ protocol found here:
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
+ var currentMessage = &bytes.Buffer{}
+ // header name
+ currentMessage.Write(encodeHeaderStringName(":event-type"))
+ // header type
+ currentMessage.Write(encodeNumber(7, 1))
+ // header value and header value length
+ currentMessage.Write(encodeHeaderStringValue("Cont"))
+
+ // Creation of the Header for message-type 13 -message-type -7 5 "event"
+ // header name
+ currentMessage.Write(encodeHeaderStringName(":message-type"))
+ // header type
+ currentMessage.Write(encodeNumber(7, 1))
+ // header value and header value length
+ currentMessage.Write(encodeHeaderStringValue("event"))
+ return
currentMessage.Bytes() + +} + +// writeStatHeader is a function which writes the headers for the Stat +// Message +func writeStatHeader() []byte { + // This is predefined from AMZ protocol found here: + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html + var currentMessage = &bytes.Buffer{} + // header name + currentMessage.Write(encodeHeaderStringName(":event-type")) + // header type + currentMessage.Write(encodeNumber(7, 1)) + // header value and header value length + currentMessage.Write(encodeHeaderStringValue("Stats")) + // Creation of the Header for Content-Type // 13 -content-type -7 -8 + // "text/xml" + // header name + currentMessage.Write(encodeHeaderStringName(":content-type")) + // header type + currentMessage.Write(encodeNumber(7, 1)) + // header value and header value length + currentMessage.Write(encodeHeaderStringValue("text/xml")) + + // Creation of the Header for message-type 13 -message-type -7 5 "event" + currentMessage.Write(encodeHeaderStringName(":message-type")) + // header type + currentMessage.Write(encodeNumber(7, 1)) + // header value and header value length + currentMessage.Write(encodeHeaderStringValue("event")) + return currentMessage.Bytes() + +} + +// writeProgressHeader is a function which writes the headers for the Progress +// Message +func writeProgressHeader() []byte { + // This is predefined from AMZ protocol found here: + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html + var currentMessage = &bytes.Buffer{} + // header name + currentMessage.Write(encodeHeaderStringName(":event-type")) + // header type + currentMessage.Write(encodeNumber(7, 1)) + // header value and header value length + currentMessage.Write(encodeHeaderStringValue("Progress")) + // Creation of the Header for Content-Type // 13 -content-type -7 -8 + // "text/xml" + // header name + currentMessage.Write(encodeHeaderStringName(":content-type")) + // header type + currentMessage.Write(encodeNumber(7, 1)) + // header value and header value length + currentMessage.Write(encodeHeaderStringValue("text/xml")) + + // Creation of the Header for message-type 13 -message-type -7 5 "event" + // header name + currentMessage.Write(encodeHeaderStringName(":message-type")) + // header type + currentMessage.Write(encodeNumber(7, 1)) + // header value and header value length + currentMessage.Write(encodeHeaderStringValue("event")) + return currentMessage.Bytes() + +} + +// writeRecordMessage is the function which constructs the binary message for a +// record message to be sent. +func (csvOutput *Input) writeRecordMessage(payload string, currentMessage *bytes.Buffer) *bytes.Buffer { + // The below are the specifications of the header for a "record" event + // 11 -event type - 7 - 7 "Records" + // 13 -content-type -7 -24 "application/octet-stream" + // 13 -message-type -7 5 "event" + // This is predefined from AMZ protocol found here: + // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html + headerLen := len(recordHeaders) + // Writes the total size of the message. + currentMessage.Write(writePayloadSize(len(payload), headerLen)) + // Writes the total size of the header. 
+ currentMessage.Write(writeHeaderSize(headerLen))
+ // Writes the CRC of the Prelude
+ currentMessage.Write(writeCRC(currentMessage.Bytes()))
+ currentMessage.Write(recordHeaders)
+
+ // This part is where the payload is written; this will be only one row, since
+ // we're sending one message at a time
+ currentMessage.Write(writePayload(payload))
+
+ // Now we do a CRC check on the entire message
+ currentMessage.Write(writeCRC(currentMessage.Bytes()))
+ return currentMessage
+}
+
+// writeContinuationMessage is the function which constructs the binary message
+// for a continuation message to be sent.
+func (csvOutput *Input) writeContinuationMessage(currentMessage *bytes.Buffer) *bytes.Buffer {
+ // 11 -event type - 7 - 4 "Cont"
+ // 13 -message-type -7 5 "event"
+ // This is predefined from AMZ protocol found here:
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
+ headerLen := len(contHeaders)
+ currentMessage.Write(writePayloadSize(0, headerLen))
+
+ currentMessage.Write(writeHeaderSize(headerLen))
+
+ // Calculate the Prelude CRC here:
+ currentMessage.Write(writeCRC(currentMessage.Bytes()))
+
+ currentMessage.Write(contHeaders)
+
+ // Now we do a CRC check on the entire message
+ currentMessage.Write(writeCRC(currentMessage.Bytes()))
+ return currentMessage
+}
+
+// writeEndMessage is the function which constructs the binary message
+// for an end message to be sent.
+func (csvOutput *Input) writeEndMessage(currentMessage *bytes.Buffer) *bytes.Buffer {
+ // 11 -event type - 7 - 3 "End"
+ // 13 -message-type -7 5 "event"
+ // This is predefined from AMZ protocol found here:
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
+ headerLen := len(endHeaders)
+ currentMessage.Write(writePayloadSize(0, headerLen))
+
+ currentMessage.Write(writeHeaderSize(headerLen))
+
+ // Calculate the Prelude CRC here:
+ currentMessage.Write(writeCRC(currentMessage.Bytes()))
+
+ currentMessage.Write(endHeaders)
+
+ // Now we do a CRC check on the entire message
+ currentMessage.Write(writeCRC(currentMessage.Bytes()))
+ return currentMessage
+}
+
+// writeStatMessage is the function which constructs the binary message for a
+// stats message to be sent.
+func (csvOutput *Input) writeStatMessage(payload string, currentMessage *bytes.Buffer) *bytes.Buffer {
+ // 11 -event type - 7 - 5 "Stat" 20
+ // 13 -content-type -7 -8 "text/xml" 25
+ // 13 -message-type -7 5 "event" 22
+ // This is predefined from AMZ protocol found here:
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
+ headerLen := len(statHeaders)
+
+ currentMessage.Write(writePayloadSize(len(payload), headerLen))
+
+ currentMessage.Write(writeHeaderSize(headerLen))
+
+ currentMessage.Write(writeCRC(currentMessage.Bytes()))
+
+ currentMessage.Write(statHeaders)
+
+ // This part is where the payload is written; this will be only one row, since
+ // we're sending one message at a time
+ currentMessage.Write(writePayload(payload))
+
+ // Now we do a CRC check on the entire message
+ currentMessage.Write(writeCRC(currentMessage.Bytes()))
+ return currentMessage
+}
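+
+// verifyMessageCRCs is an illustrative sketch of how a client would re-check
+// a finished message (it is not called by the writers in this file): the
+// prelude CRC covers the first 8 bytes, and the message CRC covers everything
+// up to the final 4 bytes.
+func verifyMessageCRCs(msg []byte) bool {
+ if len(msg) < 16 {
+ return false
+ }
+ preludeCRC := binary.BigEndian.Uint32(msg[8:12])
+ messageCRC := binary.BigEndian.Uint32(msg[len(msg)-4:])
+ return crc32.ChecksumIEEE(msg[:8]) == preludeCRC &&
+ crc32.ChecksumIEEE(msg[:len(msg)-4]) == messageCRC
+}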
+
+// writeProgressMessage is the function which constructs the binary message for
+// a progress message to be sent.
+func (csvOutput *Input) writeProgressMessage(payload string, currentMessage *bytes.Buffer) *bytes.Buffer {
+ // The below are the specifications of the header for a "Progress" event
+ // 11 -event type - 7 - 8 "Progress" 23
+ // 13 -content-type -7 -8 "text/xml" 25
+ // 13 -message-type -7 5 "event" 22
+ // This is predefined from AMZ protocol found here:
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
+ headerLen := len(progressHeaders)
+
+ currentMessage.Write(writePayloadSize(len(payload), headerLen))
+
+ currentMessage.Write(writeHeaderSize(headerLen))
+
+ currentMessage.Write(writeCRC(currentMessage.Bytes()))
+
+ currentMessage.Write(progressHeaders)
+
+ // This part is where the payload is written; this will be only one row, since
+ // we're sending one message at a time
+ currentMessage.Write(writePayload(payload))
+
+ // Now we do a CRC check on the entire message
+ currentMessage.Write(writeCRC(currentMessage.Bytes()))
+ return currentMessage
+}
+
+// writeErrorMessage is the function which constructs the binary message for an
+// error message to be sent.
+func (csvOutput *Input) writeErrorMessage(errorMessage error, currentMessage *bytes.Buffer) *bytes.Buffer {
+ // The below are the specifications of the header for an "error" event
+ // 11 -error-code - 7 - DEFINED "DEFINED"
+ // 14 -error-message -7 -DEFINED "DEFINED"
+ // 13 -message-type -7 5 "error"
+ // This is predefined from AMZ protocol found here:
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectSELECTContent.html
+ sizeOfErrorCode := len(errorCodeResponse[errorMessage])
+ sizeOfErrorMessage := len(errorMessage.Error())
+ headerLen := errHdrLen + sizeOfErrorCode + sizeOfErrorMessage
+
+ currentMessage.Write(writePayloadSize(0, headerLen))
+
+ currentMessage.Write(writeHeaderSize(headerLen))
+
+ currentMessage.Write(writeCRC(currentMessage.Bytes()))
+ // header name
+ currentMessage.Write(encodeHeaderStringName(":error-code"))
+ // header type
+ currentMessage.Write(encodeNumber(7, 1))
+ // header value and header value length
+ currentMessage.Write(encodeHeaderStringValue(errorCodeResponse[errorMessage]))
+
+ // 14 -error-message -7 -DEFINED "DEFINED"
+ // header name
+ currentMessage.Write(encodeHeaderStringName(":error-message"))
+ // header type
+ currentMessage.Write(encodeNumber(7, 1))
+ // header value and header value length
+ currentMessage.Write(encodeHeaderStringValue(errorMessage.Error()))
+ // Creation of the Header for message-type 13 -message-type -7 5 "error"
+ // header name
+ currentMessage.Write(encodeHeaderStringName(":message-type"))
+ // header type
+ currentMessage.Write(encodeNumber(7, 1))
+ // header value and header value length
+ currentMessage.Write(encodeHeaderStringValue("error"))
+
+ // Now we do a CRC check on the entire message
+ currentMessage.Write(writeCRC(currentMessage.Bytes()))
+ return currentMessage
+}
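+
+// exampleErrHdrLen re-derives the fixed 55 bytes assigned to errHdrLen in
+// init (illustrative only, not called by the implementation): each header
+// contributes a 1-byte name length, the name itself, a 1-byte type, and a
+// 2-byte value length; the ":message-type" header additionally carries its
+// constant 5-byte "error" value.
+func exampleErrHdrLen() int {
+ fixed := func(name string) int { return 1 + len(name) + 1 + 2 }
+ // 15 + 18 + 17 + 5 == 55
+ return fixed(":error-code") + fixed(":error-message") + fixed(":message-type") + len("error")
+}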
diff --git a/pkg/s3select/select.go b/pkg/s3select/select.go new file mode 100644 index 000000000..e3c79d8e4 --- /dev/null +++ b/pkg/s3select/select.go @@ -0,0 +1,415 @@ +/*
+ * Minio Cloud Storage, (C) 2018 Minio, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package s3select
+
+import (
+ "math"
+ "strconv"
+ "strings"
+
+ "github.com/xwb1989/sqlparser"
+)
+
+// SelectFuncs contains the relevant values from the parser for S3 Select
+// functions.
+type SelectFuncs struct {
+ funcExpr []*sqlparser.FuncExpr
+ index []int
+}
+
+// runSelectParser bundles the parsing and processing functions in this file
+// and runs them in the appropriate order.
+func (reader *Input) runSelectParser(selectExpression string, myRow chan *Row) {
+ reqCols, alias, myLimit, whereClause, aggFunctionNames, myFuncs, myErr := reader.ParseSelect(selectExpression)
+ if myErr != nil {
+ rowStruct := &Row{
+ err: myErr,
+ }
+ myRow <- rowStruct
+ return
+ }
+ reader.processSelectReq(reqCols, alias, whereClause, myLimit, aggFunctionNames, myRow, myFuncs)
+}
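+
+// exampleParse is an illustrative sketch (not called by the implementation)
+// of the underlying github.com/xwb1989/sqlparser call that ParseSelect below
+// builds on: the query text becomes an AST whose Select node carries the
+// expressions, alias, where clause, and limit that we extract.
+func exampleParse() (*sqlparser.Select, error) {
+ stmt, err := sqlparser.Parse("SELECT s._1 FROM S3Object s LIMIT 5")
+ if err != nil {
+ return nil, err
+ }
+ sel, ok := stmt.(*sqlparser.Select)
+ if !ok {
+ return nil, ErrUnsupportedSyntax
+ }
+ return sel, nil
+}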
+
+// ParseSelect parses the SELECT expression and effectively tokenizes it into
+// its separate parts. It returns the requested column names, the table alias,
+// the limit of records, the where clause, the aggregation function names, any
+// supported select functions, and an error.
+func (reader *Input) ParseSelect(sqlInput string) ([]string, string, int, interface{}, []string, *SelectFuncs, error) {
+ stmt, err := sqlparser.Parse(sqlInput)
+ var whereClause interface{}
+ var alias string
+ var limit int
+ myFuncs := &SelectFuncs{}
+ // TODO: inspect the parser errors to map them to more specific S3 errors.
+ if err != nil {
+ return nil, "", 0, nil, nil, nil, ErrLexerInvalidChar
+ }
+ switch stmt := stmt.(type) {
+ case *sqlparser.Select:
+ // evaluates the where clause
+ functionNames := make([]string, len(stmt.SelectExprs))
+ columnNames := make([]string, len(stmt.SelectExprs))
+
+ if stmt.Where != nil {
+ switch expr := stmt.Where.Expr.(type) {
+ default:
+ whereClause = expr
+ case *sqlparser.ComparisonExpr:
+ whereClause = expr
+ }
+ }
+ if stmt.SelectExprs != nil {
+ for i := 0; i < len(stmt.SelectExprs); i++ {
+ switch expr := stmt.SelectExprs[i].(type) {
+ case *sqlparser.StarExpr:
+ columnNames[0] = "*"
+ case *sqlparser.AliasedExpr:
+ switch smallerexpr := expr.Expr.(type) {
+ case *sqlparser.FuncExpr:
+ if smallerexpr.IsAggregate() {
+ functionNames[i] = smallerexpr.Name.CompliantName()
+ // Will return function name
+ // Case to deal with if we have functions and not an asterisk
+ switch tempagg := smallerexpr.Exprs[0].(type) {
+ case *sqlparser.StarExpr:
+ columnNames[0] = "*"
+ if smallerexpr.Name.CompliantName() != "count" {
+ return nil, "", 0, nil, nil, nil, ErrParseUnsupportedCallWithStar
+ }
+ case *sqlparser.AliasedExpr:
+ switch col := tempagg.Expr.(type) {
+ case *sqlparser.BinaryExpr:
+ return nil, "", 0, nil, nil, nil, ErrParseNonUnaryAgregateFunctionCall
+ case *sqlparser.ColName:
+ columnNames[i] = col.Name.CompliantName()
+ }
+ }
+ // Case to deal with COALESCE and other supported non-aggregate functions.
+ } else if supportedFunc(smallerexpr.Name.CompliantName()) {
+ if myFuncs.funcExpr == nil {
+ myFuncs.funcExpr = make([]*sqlparser.FuncExpr, len(stmt.SelectExprs))
+ myFuncs.index = make([]int, len(stmt.SelectExprs))
+ }
+ myFuncs.funcExpr[i] = smallerexpr
+ myFuncs.index[i] = i
+ } else {
+ return nil, "", 0, nil, nil, nil, ErrUnsupportedSQLOperation
+ }
+ case *sqlparser.ColName:
+ columnNames[i] = smallerexpr.Name.CompliantName()
+ }
+ }
+ }
+ }
+
+ // This code retrieves the alias and makes sure it is set to the correct
+ // value; if it is not set, it falls back to the table name.
+ if (stmt.From) != nil {
+ for i := 0; i < len(stmt.From); i++ {
+ switch smallerexpr := stmt.From[i].(type) {
+ case *sqlparser.JoinTableExpr:
+ return nil, "", 0, nil, nil, nil, ErrParseMalformedJoin
+ case *sqlparser.AliasedTableExpr:
+ alias = smallerexpr.As.CompliantName()
+ if alias == "" {
+ alias = sqlparser.GetTableName(smallerexpr.Expr).CompliantName()
+ }
+ }
+ }
+ }
+ if stmt.Limit != nil {
+ switch expr := stmt.Limit.Rowcount.(type) {
+ case *sqlparser.SQLVal:
+ // The number of rows to limit the result to.
+ limit, _ = strconv.Atoi(string(expr.Val))
+ }
+ }
+ if stmt.GroupBy != nil {
+ return nil, "", 0, nil, nil, nil, ErrParseUnsupportedLiteralsGroupBy
+ }
+ if stmt.OrderBy != nil {
+ return nil, "", 0, nil, nil, nil, ErrParseUnsupportedToken
+ }
+ if err := reader.parseErrs(columnNames, whereClause, alias, myFuncs); err != nil {
+ return nil, "", 0, nil, nil, nil, err
+ }
+ return columnNames, alias, limit, whereClause, functionNames, myFuncs, nil
+ }
+ return nil, "", 0, nil, nil, nil, nil
+}
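+
+// Illustrative usage of ParseSelect, mirroring the unit tests (not part of
+// the implementation):
+//
+// reqCols, alias, limit, whereClause, aggFns, selectFns, err :=
+// reader.ParseSelect("SELECT col_name FROM S3OBJECT AS A LIMIT 5")
+// // reqCols == []string{"col_name"}, alias == "A", limit == 5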
+
+// processSelectReq is the main interpreter function. It goes row by row and,
+// for records which satisfy the where clause, emits the appropriate row given
+// the requested columns.
+func (reader *Input) processSelectReq(reqColNames []string, alias string, whereClause interface{}, limitOfRecords int, functionNames []string, myRow chan *Row, myFunc *SelectFuncs) {
+ counter := -1
+ filtrCount := 0
+ functionFlag := false
+ // myAggVals stores our aggregation values when aggregation functions are used.
+ myAggVals := make([]float64, len(reqColNames))
+ var columns []string
+ // lowercaseColumnsMap is used together with hasDuplicates so that we can
+ // raise an "ambiguous" error when a case-insensitive column name matches
+ // multiple headers.
+ lowercaseColumnsMap := make(map[string]int)
+ hasDuplicates := make(map[string]bool)
+ // columnsMap stores our columns and their indices.
+ columnsMap := make(map[string]int)
+ if limitOfRecords == 0 {
+ limitOfRecords = math.MaxInt64
+ }
+
+ for {
+ record := reader.ReadRecord()
+ reader.stats.BytesProcessed += processSize(record)
+ if record == nil {
+ if functionFlag {
+ rowStruct := &Row{
+ record: reader.aggFuncToStr(myAggVals) + "\n",
+ }
+ myRow <- rowStruct
+ }
+ close(myRow)
+ return
+ }
+ if counter == -1 && reader.options.HeaderOpt && len(reader.header) > 0 {
+ columns = reader.Header()
+ myErr := checkForDuplicates(columns, columnsMap, hasDuplicates, lowercaseColumnsMap)
+ if myErr != nil {
+ rowStruct := &Row{
+ err: myErr,
+ }
+ myRow <- rowStruct
+ return
+ }
+ } else if counter == -1 && len(reader.header) > 0 {
+ columns = reader.Header()
+ }
+ // Once we have emitted the number of rows the user asked for, we
+ // terminate the interpreter.
+ if filtrCount == limitOfRecords && limitOfRecords != 0 {
+ close(myRow)
+ return
+ }
+ // The call to the where clause function ensures that the rows we emit
+ // match the where clause.
+ condition, myErr := matchesMyWhereClause(record, columnsMap, alias, whereClause)
+ if myErr != nil {
+ rowStruct := &Row{
+ err: myErr,
+ }
+ myRow <- rowStruct
+ return
+ }
+ if condition {
+ // if it's an asterisk we just print everything in the row
+ if reqColNames[0] == "*" && functionNames[0] == "" {
+ rowStruct := &Row{
+ record: reader.printAsterix(record) + "\n",
+ }
+ myRow <- rowStruct
+ } else if alias != "" {
+ // This deals with a request for a column by index, e.g. A._1.
+ if representsInt(reqColNames[0]) {
+ // This checks whether an aggregation function was called; if so we
+ // no longer print each row, and only print at the end.
+ if len(functionNames) > 0 && functionNames[0] != "" {
+ functionFlag = true
+ aggregationFunctions(counter, filtrCount, myAggVals, columnsMap, reqColNames, functionNames, record)
+ } else {
+ // The code below finds the appropriate columns of the row given the
+ // indices provided in the SQL request and utilizes the map to
+ // retrieve the correct part of the row.
+ myQueryRow, myErr := reader.processColNameIndex(record, reqColNames, columns)
+ if myErr != nil {
+ rowStruct := &Row{
+ err: myErr,
+ }
+ myRow <- rowStruct
+ return
+ }
+ rowStruct := &Row{
+ record: myQueryRow + "\n",
+ }
+ myRow <- rowStruct
+ }
+ } else {
+ // This code does aggregation if we were provided column names in the
+ // form of actual names rather than indices.
+ if len(functionNames) > 0 && functionNames[0] != "" {
+ functionFlag = true
+ aggregationFunctions(counter, filtrCount, myAggVals, columnsMap, reqColNames, functionNames, record)
+ } else {
+ // This code prints the appropriate part of the row given the filter
+ // and select request, if the select request was based on column
+ // names rather than indices.
+ myQueryRow, myErr := reader.processColNameLiteral(record, reqColNames, columns, columnsMap, myFunc)
+ if myErr != nil {
+ rowStruct := &Row{
+ err: myErr,
+ }
+ myRow <- rowStruct
+ return
+ }
+ rowStruct := &Row{
+ record: myQueryRow + "\n",
+ }
+ myRow <- rowStruct
+ }
+ }
+ }
+ filtrCount++
+ }
+ counter++
+ }
+}
+
+// printAsterix prints out the entire row when an asterisk is used.
+func (reader *Input) printAsterix(record []string) string {
+ myRow := record[0]
+ for i := 1; i < len(record); i++ {
+ myRow = myRow + reader.options.OutputFieldDelimiter + record[i]
+ }
+ return myRow
+}
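+
+// printAsterixJoin is an illustrative equivalent of printAsterix above (it is
+// not called anywhere): the hand-rolled loop is a strings.Join over the
+// record with the output field delimiter.
+func (reader *Input) printAsterixJoin(record []string) string {
+ return strings.Join(record, reader.options.OutputFieldDelimiter)
+}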
+
+// processColumnNames is a function which allows for cleaning of column names.
+func (reader *Input) processColumnNames(reqColNames []string, alias string) error {
+ for i := 0; i < len(reqColNames); i++ {
+ // The code below cleans the column name of its alias and other syntax,
+ // so that we can extract its pure name.
+ reqColNames[i] = cleanCol(reqColNames[i], alias)
+ }
+ return nil
+}
+
+// processColNameIndex is the function which creates the row for an index-based
+// query.
+func (reader *Input) processColNameIndex(record []string, reqColNames []string, columns []string) (string, error) {
+ myRow := ""
+ for i := 0; i < len(reqColNames); i++ {
+ // COALESCE and NULLIF do not support index-based access.
+ if reqColNames[i] == "0" {
+ return "", ErrInvalidColumnIndex
+ }
+ mytempindex, err := strconv.Atoi(reqColNames[i])
+ if err != nil {
+ return "", ErrMissingHeaders
+ }
+ // Subtract 1 because AWS indexing is 1-based, not 0-based; after the
+ // subtraction anything below zero or at/beyond len(columns) is out of
+ // range.
+ mytempindex = mytempindex - 1
+ if mytempindex < 0 || mytempindex >= len(columns) {
+ return "", ErrInvalidColumnIndex
+ }
+ myRow = writeRow(myRow, record[mytempindex], reader.options.OutputFieldDelimiter, len(reqColNames))
+ }
+ if len(myRow) > 1000000 {
+ return "", ErrOverMaxRecordSize
+ }
+ if strings.Count(myRow, reader.options.OutputFieldDelimiter) != len(reqColNames)-1 {
+ myRow = qualityCheck(myRow, len(reqColNames)-1-strings.Count(myRow, reader.options.OutputFieldDelimiter), reader.options.OutputFieldDelimiter)
+ }
+ return myRow, nil
+}
+
+// processColNameLiteral is the function which creates the row for a name-based
+// query.
+func (reader *Input) processColNameLiteral(record []string, reqColNames []string, columns []string, columnsMap map[string]int, myFunc *SelectFuncs) (string, error) {
+ myRow := ""
+ for i := 0; i < len(reqColNames); i++ {
+ // This is the case to deal with COALESCE.
+ if reqColNames[i] == "" && isValidFunc(myFunc.index, i) {
+ myVal := evaluateFuncExpr(myFunc.funcExpr[i], "", record, columnsMap)
+ myRow = writeRow(myRow, myVal, reader.options.OutputFieldDelimiter, len(reqColNames))
+ continue
+ }
+ myTempIndex, found := columnsMap[trimQuotes(reqColNames[i])]
+ if !found {
+ return "", ErrMissingHeaders
+ }
+ myRow = writeRow(myRow, record[myTempIndex], reader.options.OutputFieldDelimiter, len(reqColNames))
+ }
+ if len(myRow) > 1000000 {
+ return "", ErrOverMaxRecordSize
+ }
+ if strings.Count(myRow, reader.options.OutputFieldDelimiter) != len(reqColNames)-1 {
+ myRow = qualityCheck(myRow, len(reqColNames)-1-strings.Count(myRow, reader.options.OutputFieldDelimiter), reader.options.OutputFieldDelimiter)
+ }
+ return myRow, nil
+}
+
+// aggregationFunctions is a function which performs the actual aggregation
+// methods on the given row; it uses an array defined in the main parsing
+// function to keep track of values.
+func aggregationFunctions(counter int, filtrCount int, myAggVals []float64, columnsMap map[string]int, storeReqCols []string, storeFunctions []string, record []string) error {
+ for i := 0; i < len(storeFunctions); i++ {
+ if storeFunctions[i] == "" {
+ // Skip columns which have no aggregation function attached.
+ continue
+ } else if storeFunctions[i] == "count" {
+ myAggVals[i]++
+ } else {
+ // If the column is referenced by index, take this branch; otherwise
+ // fall through to the named-column branch below.
+ var convAggFloat float64
+ if representsInt(storeReqCols[i]) {
+ myIndex, _ := strconv.Atoi(storeReqCols[i])
+ convAggFloat, _ = strconv.ParseFloat(record[myIndex], 64)
+ } else {
+ // Case where the columns are referenced by name rather than by index.
+ convAggFloat, _ = strconv.ParseFloat(record[columnsMap[trimQuotes(storeReqCols[i])]], 64)
+ }
+ // This if statement is for calculating the min.
+ if storeFunctions[i] == "min" {
+ if counter == -1 {
+ myAggVals[i] = math.MaxFloat64
+ }
+ if convAggFloat < myAggVals[i] {
+ myAggVals[i] = convAggFloat
+ }
+ } else if storeFunctions[i] == "max" {
+ // This if statement is for calculating the max.
+ if counter == -1 {
+ myAggVals[i] = math.SmallestNonzeroFloat64
+ }
+ if convAggFloat > myAggVals[i] {
+ myAggVals[i] = convAggFloat
+ }
+ } else if storeFunctions[i] == "sum" {
+ // This if statement is for calculating the sum.
+ myAggVals[i] += convAggFloat
+ } else if storeFunctions[i] == "avg" {
+ // This if statement is for calculating the average.
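+ // Incremental mean: with n = filtrCount rows aggregated so far,
+ // avg_{n+1} = (x + n*avg_n) / (n+1), so no per-row history is kept.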
+ if filtrCount == 0 { + myAggVals[i] = convAggFloat + } else { + myAggVals[i] = (convAggFloat + (myAggVals[i] * float64(filtrCount))) / float64((filtrCount + 1)) + } + } else { + return ErrParseNonUnaryAgregateFunctionCall + } + } + } + return nil +} diff --git a/pkg/s3select/select_test.go b/pkg/s3select/select_test.go new file mode 100644 index 000000000..20125dc85 --- /dev/null +++ b/pkg/s3select/select_test.go @@ -0,0 +1,1070 @@ +/* + * Minio Cloud Storage, (C) 2018 Minio, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package s3select + +import ( + "bytes" + "fmt" + "reflect" + "testing" +) + +// Unit Test for the checkForDuplicates function. +func TestCheckForDuplicates(t *testing.T) { + tables := []struct { + myReq []string + myHeaders map[string]int + myDup map[string]bool + myLow map[string]int + myErr error + }{ + {[]string{"name", "id", "last_name", "last_name"}, make(map[string]int), make(map[string]bool), make(map[string]int), ErrAmbiguousFieldName}, + {[]string{"name", "id", "last_name", "another_name"}, make(map[string]int), make(map[string]bool), make(map[string]int), nil}, + } + for _, table := range tables { + err := checkForDuplicates(table.myReq, table.myHeaders, table.myDup, table.myLow) + if err != table.myErr { + t.Error() + } + } +} + +// Test for the function which processes columnnames to make sure that they are +// compatible with spaces. 
+func TestMyProcessing(t *testing.T) {
+ options := &Options{
+ HasHeader: false,
+ FieldDelimiter: ",",
+ Comments: "",
+ Name: "S3Object", // Default table name for all objects
+ ReadFrom: bytes.NewReader([]byte("Here , is, a, string" + "\n" + "random,random,stuff,stuff")),
+ Compressed: "",
+ Expression: "",
+ OutputFieldDelimiter: ",",
+ StreamSize: 20,
+ }
+ s3s, err := NewInput(options)
+ if err != nil {
+ t.Error(err)
+ }
+ tables := []struct {
+ myReq []string
+ myHeaders map[string]int
+ myDup map[string]bool
+ myLow map[string]int
+ myOpts *Options
+ input *Input
+ length int
+ testOutput string
+ myErr error
+ }{
+ {[]string{"name", "id", "last_name", "CAST"}, make(map[string]int), make(map[string]bool), make(map[string]int), options, s3s, 4, "CAST", nil},
+ {[]string{"name", "id", "last_name", "another_name"}, make(map[string]int), make(map[string]bool), make(map[string]int), options, s3s, 4, "another_name", nil},
+ {[]string{"name", "id", "last_name", "another_name"}, make(map[string]int), make(map[string]bool), make(map[string]int), options, s3s, 4, "another_name", nil},
+ {[]string{"name", "id", "random_name", "fame_name", "another_col"}, make(map[string]int), make(map[string]bool), make(map[string]int), options, s3s, 5, "fame_name", nil},
+ }
+ for _, table := range tables {
+ err = checkForDuplicates(table.myReq, table.myHeaders, table.myDup, table.myLow)
+ if err != table.myErr {
+ t.Error()
+ }
+ if len(table.myReq) != table.length {
+ t.Errorf("UnexpectedError")
+ }
+ if table.myReq[3] != table.testOutput {
+ t.Error()
+ }
+ }
+}
+
+// TestMyRowIndexResults is a unit test which makes sure that the rows that are
+// being printed are appropriate to the query being requested.
+func TestMyRowIndexResults(t *testing.T) {
+ options := &Options{
+ HasHeader: false,
+ FieldDelimiter: ",",
+ Comments: "",
+ Name: "S3Object", // Default table name for all objects
+ ReadFrom: bytes.NewReader([]byte("Here , is, a, string" + "\n" + "random,random,stuff,stuff")),
+ Compressed: "",
+ Expression: "",
+ OutputFieldDelimiter: ",",
+ StreamSize: 20,
+ }
+ s3s, err := NewInput(options)
+ if err != nil {
+ t.Error(err)
+ }
+ tables := []struct {
+ myReq []string
+ myHeaders map[string]int
+ myDup map[string]bool
+ myLow map[string]int
+ myOpts *Options
+ input *Input
+ myRecord []string
+ myTarget string
+ myAsterix string
+ columns []string
+ err error
+ }{
+ {[]string{"1", "2"}, make(map[string]int), make(map[string]bool), make(map[string]int), options, s3s, []string{"target", "random", "hello", "stuff"}, "target,random", "target,random,hello,stuff", []string{"1", "2", "3", "4"}, nil},
+ {[]string{"2", "3", "4"}, make(map[string]int), make(map[string]bool), make(map[string]int), options, s3s, []string{"random", "hullo", "thing", "stuff"}, "hullo,thing,stuff", "random,hullo,thing,stuff", []string{"1", "2", "3", "4"}, nil},
+ {[]string{"3", "2"}, make(map[string]int), make(map[string]bool), make(map[string]int), options, s3s, []string{"random", "hullo", "thing", "stuff"}, "thing,hullo", "random,hullo,thing,stuff", []string{"1", "2", "3", "4"}, nil},
+ {[]string{"11", "1"}, make(map[string]int), make(map[string]bool), make(map[string]int), options, s3s, []string{"random", "hullo", "thing", "stuff"}, "", "random,hullo,thing,stuff", []string{"1", "2", "3", "4"}, ErrInvalidColumnIndex},
+ }
+ for _, table := range tables {
+ checkForDuplicates(table.columns, table.myHeaders, table.myDup, table.myLow)
+ myRow, err := s3s.processColNameIndex(table.myRecord, table.myReq, table.columns)
+ if err !=
table.err { + t.Error() + } + if myRow != table.myTarget { + t.Error() + } + myRow = table.input.printAsterix(table.myRecord) + if myRow != table.myAsterix { + t.Error() + } + } +} + +// TestMyHelperFunctions is a unit test which tests some small helper string +// functions. +func TestMyHelperFunctions(t *testing.T) { + tables := []struct { + myReq string + myList []string + myIndex int + expected bool + }{ + {"test1", []string{"test1", "test2", "test3", "test4", "test5"}, 0, true}, + {"random", []string{"test1", "test2", "test3", "test4", "test5"}, -1, false}, + {"test3", []string{"test1", "test2", "test3", "test4", "test5"}, 2, true}, + } + for _, table := range tables { + if stringInSlice(table.myReq, table.myList) != table.expected { + t.Error() + } + if stringIndex(table.myReq, table.myList) != table.myIndex { + t.Error() + } + } +} + +// TestMyStateMachine is a unit test which ensures that the lowest level of the +// interpreter is converting properly. +func TestMyStateMachine(t *testing.T) { + tables := []struct { + operand interface{} + operator string + leftArg string + err error + expected bool + }{ + {"2005", ">", "2012", nil, true}, + {2005, ">", "2012", nil, true}, + {2012.0000, ">", "2014.000", nil, true}, + {"NA", ">", "2014.000", nil, false}, + {2014, ">", "Random", nil, false}, + {"test3", ">", "aandom", nil, false}, + } + for _, table := range tables { + val, err := evaluateOperator(table.leftArg, table.operator, table.operand) + if err != table.err { + t.Error() + } + if val != table.expected { + t.Error() + } + } +} + +// TestMyOperators is a unit test which ensures that the appropriate values are +// being returned from the operators functions. +func TestMyOperators(t *testing.T) { + tables := []struct { + operator string + err error + }{ + {">", nil}, + {"%", ErrParseUnknownOperator}, + } + for _, table := range tables { + err := checkValidOperator(table.operator) + if err != table.err { + t.Error() + } + } +} + +// TestMyConversion ensures that the conversion of the value from the csv +// happens correctly. +func TestMyConversion(t *testing.T) { + tables := []struct { + myTblVal string + expected reflect.Kind + }{ + {"2014", reflect.Int}, + {"2014.000", reflect.Float64}, + {"String!!!", reflect.String}, + } + for _, table := range tables { + val := reflect.ValueOf(checkStringType(table.myTblVal)).Kind() + if val != table.expected { + t.Error() + } + } +} + +// Unit Tests for Parser. 
+func TestMyParser(t *testing.T) { + tables := []struct { + myQuery string + err error + reqCols []string + alias string + myLimit int + aggFuncs []string + header []string + }{ + {"SELECT * FROM S3OBJECT", nil, []string{"*"}, "S3OBJECT", 0, make([]string, 1), []string{"name1", "name2", "name3", "name4"}}, + {"SELECT * FROM S3OBJECT AS A", nil, []string{"*"}, "A", 0, make([]string, 1), []string{"name1", "name2", "name3", "name4"}}, + {"SELECT col_name FROM S3OBJECT AS A", nil, []string{"col_name"}, "A", 0, make([]string, 1), []string{"col_name", "name2", "name3", "name4"}}, + {"SELECT col_name,col_other FROM S3OBJECT AS A LIMIT 5", nil, []string{"col_name", "col_other"}, "A", 5, make([]string, 2), []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT col_name,col_other FROM S3OBJECT AS A WHERE col_name = 'Name' LIMIT 5", nil, []string{"col_name", "col_other"}, "A", 5, make([]string, 2), []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT col_name,col_other FROM S3OBJECT AS A WHERE col_name = 'Name LIMIT 5", ErrLexerInvalidChar, nil, "", 0, nil, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT count(*) FROM S3OBJECT AS A WHERE col_name = 'Name' LIMIT 5", nil, []string{"*"}, "A", 5, []string{"count"}, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT sum(col_name),sum(col_other) FROM S3OBJECT AS A WHERE col_name = 'Name' LIMIT 5", nil, []string{"col_name", "col_other"}, "A", 5, []string{"sum", "sum"}, []string{"col_name", "col_other"}}, + {"SELECT A.col_name FROM S3OBJECT AS A", nil, []string{"col_name"}, "A", 0, make([]string, 1), []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT A._col_name FROM S3OBJECT AS A", nil, []string{"col_name"}, "A", 0, make([]string, 1), []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT A._col_name FROM S3OBJECT AS A WHERE randomname > 5", ErrMissingHeaders, nil, "", 0, nil, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT A._col_name FROM S3OBJECT AS A WHERE A._11 > 5", ErrInvalidColumnIndex, nil, "", 0, nil, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT COALESCE(col_name,col_other) FROM S3OBJECT AS A WHERE A._3 > 5", nil, []string{""}, "A", 0, []string{""}, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT COALESCE(col_name,col_other),COALESCE(col_name,col_other) FROM S3OBJECT AS A WHERE A._3 > 5", nil, []string{"", ""}, "A", 0, []string{"", ""}, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT COALESCE(col_name,col_other) ,col_name , COALESCE(col_name,col_other) FROM S3OBJECT AS A WHERE col_name > 5", nil, []string{"", "col_name", ""}, "A", 0, []string{"", "", ""}, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT NULLIF(col_name,col_other) ,col_name , COALESCE(col_name,col_other) FROM S3OBJECT AS A WHERE col_name > 5", nil, []string{"", "col_name", ""}, "A", 0, []string{"", "", ""}, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT NULLIF(col_name,col_other) FROM S3OBJECT AS A WHERE col_name > 5", nil, []string{""}, "A", 0, []string{""}, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT NULLIF(randomname,col_other) FROM S3OBJECT AS A WHERE col_name > 5", ErrMissingHeaders, nil, "", 0, nil, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT col_name FROM S3OBJECT AS A WHERE COALESCE(random,5) > 5", ErrMissingHeaders, nil, "", 0, nil, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT col_name FROM S3OBJECT AS A WHERE NULLIF(random,5) > 5", 
ErrMissingHeaders, nil, "", 0, nil, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT col_name FROM S3OBJECT AS A WHERE LOWER(col_name) BETWEEN 5 AND 7", nil, []string{"col_name"}, "A", 0, []string{""}, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT UPPER(col_name) FROM S3OBJECT AS A WHERE LOWER(col_name) BETWEEN 5 AND 7", nil, []string{""}, "A", 0, []string{""}, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT UPPER(*) FROM S3OBJECT AS A WHERE LOWER(col_name) BETWEEN 5 AND 7", ErrParseUnsupportedCallWithStar, nil, "", 0, nil, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT NULLIF(col_name,col_name) FROM S3OBJECT AS A WHERE NULLIF(LOWER(col_name),col_name) BETWEEN 5 AND 7", nil, []string{""}, "A", 0, []string{""}, []string{"col_name", "col_other", "name3", "name4"}}, + {"SELECT COALESCE(col_name,col_name) FROM S3OBJECT AS A WHERE NULLIF(LOWER(col_name),col_name) BETWEEN 5 AND 7", nil, []string{""}, "A", 0, []string{""}, []string{"col_name", "col_other", "name3", "name4"}}, + } + for _, table := range tables { + options := &Options{ + HasHeader: false, + FieldDelimiter: ",", + Comments: "", + Name: "S3Object", // Default table name for all objects + ReadFrom: bytes.NewReader([]byte("name1,name2,name3,name4" + "\n" + "5,is,a,string" + "\n" + "random,random,stuff,stuff")), + Compressed: "", + Expression: "", + OutputFieldDelimiter: ",", + StreamSize: 20, + HeaderOpt: true, + } + s3s, err := NewInput(options) + if err != nil { + t.Error(err) + } + s3s.header = table.header + reqCols, alias, myLimit, _, aggFunctionNames, _, err := s3s.ParseSelect(table.myQuery) + if table.err != err { + t.Error() + } + if !reflect.DeepEqual(reqCols, table.reqCols) { + t.Error() + } + if alias != table.alias { + t.Error() + } + if myLimit != table.myLimit { + t.Error() + } + if !reflect.DeepEqual(table.aggFuncs, aggFunctionNames) { + t.Error() + } + } +} + +// Unit tests for the main function that performs aggreggation. 
+func TestMyAggregationFunc(t *testing.T) {
+ columnsMap := make(map[string]int)
+ columnsMap["Col1"] = 0
+ columnsMap["Col2"] = 1
+ tables := []struct {
+ counter int
+ filtrCount int
+ myAggVals []float64
+ columnsMap map[string]int
+ storeReqCols []string
+ storeFunctions []string
+ record []string
+ err error
+ expectedVal float64
+ }{
+ {10, 5, []float64{10}, columnsMap, []string{"Col1"}, []string{"count"}, []string{"1", "2"}, nil, 11},
+ {10, 5, []float64{10}, columnsMap, []string{"Col1"}, []string{"min"}, []string{"1", "2"}, nil, 1},
+ {10, 5, []float64{10}, columnsMap, []string{"Col1"}, []string{"max"}, []string{"1", "2"}, nil, 10},
+ {10, 5, []float64{10}, columnsMap, []string{"Col1"}, []string{"sum"}, []string{"1", "2"}, nil, 11},
+ {1, 1, []float64{10}, columnsMap, []string{"Col1"}, []string{"avg"}, []string{"1", "2"}, nil, 5.500},
+ {10, 5, []float64{0.000}, columnsMap, []string{"Col1"}, []string{"random"}, []string{"1", "2"}, ErrParseNonUnaryAgregateFunctionCall, 0},
+ {0, 5, []float64{0}, columnsMap, []string{"0"}, []string{"count"}, []string{"1", "2"}, nil, 1},
+ {10, 5, []float64{10}, columnsMap, []string{"1"}, []string{"min"}, []string{"1", "12"}, nil, 10},
+ }
+ for _, table := range tables {
+ err := aggregationFunctions(table.counter, table.filtrCount, table.myAggVals, table.columnsMap, table.storeReqCols, table.storeFunctions, table.record)
+ if table.err != err {
+ t.Error()
+ }
+ if table.myAggVals[0] != table.expectedVal {
+ t.Error()
+ }
+ }
+}
+
+// Unit Tests for the function which converts a float array to string.
+func TestToStringAgg(t *testing.T) {
+ options := &Options{
+ HasHeader: false,
+ FieldDelimiter: ",",
+ Comments: "",
+ Name: "S3Object", // Default table name for all objects
+ ReadFrom: bytes.NewReader([]byte("Here , is, a, string" + "\n" + "random,random,stuff,stuff")),
+ Compressed: "",
+ Expression: "",
+ OutputFieldDelimiter: ",",
+ StreamSize: 20,
+ HeaderOpt: true,
+ }
+ s3s, err := NewInput(options)
+ if err != nil {
+ t.Error(err)
+ }
+ tables := []struct {
+ myAggVal []float64
+ expected string
+ }{
+ {[]float64{10, 11, 12, 13, 14}, "10.000000,11.000000,12.000000,13.000000,14.000000"},
+ {[]float64{10}, "10.000000"},
+ }
+ for _, table := range tables {
+ val := s3s.aggFuncToStr(table.myAggVal)
+ if val != table.expected {
+ t.Error()
+ }
+ }
+}
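+
+// TestRunningAvgSketch is an illustrative check of the incremental-average
+// identity used by aggregationFunctions: avg_{n+1} = (x + n*avg_n) / (n+1).
+func TestRunningAvgSketch(t *testing.T) {
+ avg := 0.0
+ for n, x := range []float64{10, 11, 12, 13, 14} {
+ avg = (x + avg*float64(n)) / float64(n+1)
+ }
+ if avg != 12.0 {
+ t.Errorf("expected 12.0, got %f", avg)
+ }
+}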
+
+// TestMyRowColLiteralResults is a unit test which makes sure that the rows that
+// are being printed are appropriate to the query being requested.
+func TestMyRowColLiteralResults(t *testing.T) {
+ options := &Options{
+ HasHeader: false,
+ FieldDelimiter: ",",
+ Comments: "",
+ Name: "S3Object", // Default table name for all objects
+ ReadFrom: bytes.NewReader([]byte("Here , is, a, string" + "\n" + "random,random,stuff,stuff")),
+ Compressed: "",
+ Expression: "",
+ OutputFieldDelimiter: ",",
+ StreamSize: 20,
+ HeaderOpt: true,
+ }
+ s3s, err := NewInput(options)
+ if err != nil {
+ t.Error(err)
+ }
+ tables := []struct {
+ myReq []string
+ myHeaders map[string]int
+ myDup map[string]bool
+ myLow map[string]int
+ myOpts *Options
+ tempList []string
+ input *Input
+ myRecord []string
+ myTarget string
+ columns []string
+ err error
+ }{
+ {[]string{"draft", "year"}, make(map[string]int), make(map[string]bool), make(map[string]int), options, []string{"draft", "year"}, s3s, []string{"target", "random", "hello", "stuff"}, "target,random", []string{"draft", "year", "random", "another"}, nil},
+ {[]string{"year", "draft"}, make(map[string]int), make(map[string]bool), make(map[string]int), options, []string{"year", "draft"}, s3s, []string{"draft", "2012", "thing", "stuff"}, "2012,draft", []string{"draft", "year", "random", "another"}, nil},
+ {[]string{"yearrandomstuff", "draft"}, make(map[string]int), make(map[string]bool), make(map[string]int), options, []string{"yearrandomstuff", "draft"}, s3s, []string{"draft", "2012", "thing", "stuff"}, "", []string{"draft", "year", "random", "another"}, ErrMissingHeaders},
+ {[]string{"draft", "randomstuff"}, make(map[string]int), make(map[string]bool), make(map[string]int), options, []string{"yearrandomstuff", "draft"}, s3s, []string{"draft", "2012", "thing", "stuff"}, "", []string{"draft", "year", "random", "another"}, ErrMissingHeaders},
+ }
+ for _, table := range tables {
+ checkForDuplicates(table.columns, table.myHeaders, table.myDup, table.myLow)
+ myRow, err := table.input.processColNameLiteral(table.myRecord, table.myReq, table.tempList, table.myHeaders, nil)
+ if err != table.err {
+ t.Error()
+ }
+ if myRow != table.myTarget {
+ t.Error()
+ }
+ }
+}
+
+// TestMyWhereEval is a function which provides unit tests for the function
+// which evaluates the where clause.
+func TestMyWhereEval(t *testing.T) { + columnsMap := make(map[string]int) + columnsMap["Col1"] = 0 + columnsMap["Col2"] = 1 + tables := []struct { + myQuery string + record []string + err error + expected bool + header []string + }{ + {"SELECT * FROM S3OBJECT", []string{"record_1,record_2,record_3,record_4"}, nil, true, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT WHERE Col1 < -1", []string{"0", "1"}, nil, false, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT WHERE Col1 < -1 OR Col2 > 15", []string{"151", "12"}, nil, false, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT WHERE Col1 > -1 AND Col2 > 15", []string{"151", "12"}, nil, false, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT WHERE Col1 > 1.00", []string{"151.0000", "12"}, nil, true, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT WHERE Col1 > 100", []string{"random", "12"}, nil, false, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT WHERE Col1 BETWEEN 100 AND 0", []string{"151", "12"}, nil, false, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT WHERE Col1 BETWEEN 100.0 AND 0.0", []string{"151", "12"}, nil, false, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT AS A WHERE A.1 BETWEEN 160 AND 150", []string{"151", "12"}, nil, true, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT AS A WHERE A._1 BETWEEN 160 AND 0", []string{"151", "12"}, nil, true, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT AS A WHERE A._1 BETWEEN 0 AND 160", []string{"151", "12"}, nil, true, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT A._1 LIKE 'r%'", []string{"record_1,record_2,record_3,record_4"}, nil, true, []string{"Col1", "Col2"}}, + {"SELECT s._2 FROM S3Object s WHERE s._2 = 'Steven'", []string{"record_1", "Steven", "Steven", "record_4"}, nil, true, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT AS A WHERE Col1 BETWEEN 0 AND 160", []string{"151", "12"}, nil, true, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT AS A WHERE Col1 BETWEEN 160 AND 0", []string{"151", "12"}, nil, true, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT AS A WHERE UPPER(Col1) BETWEEN 160 AND 0", []string{"151", "12"}, nil, true, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT AS A WHERE UPPER(Col1) = 'RANDOM'", []string{"random", "12"}, nil, true, []string{"Col1", "Col2"}}, + {"SELECT * FROM S3OBJECT AS A WHERE LOWER(UPPER(Col1) = 'random'", []string{"random", "12"}, nil, true, []string{"Col1", "Col2"}}, + } + for _, table := range tables { + options := &Options{ + HasHeader: false, + FieldDelimiter: ",", + Comments: "", + Name: "S3Object", // Default table name for all objects + ReadFrom: bytes.NewReader([]byte("name1,name2,name3,name4" + "\n" + "5,is,a,string" + "\n" + "random,random,stuff,stuff")), + Compressed: "", + Expression: "", + OutputFieldDelimiter: ",", + StreamSize: 20, + HeaderOpt: true, + } + s3s, err := NewInput(options) + s3s.header = table.header + + if err != nil { + t.Error(err) + } + _, alias, _, whereClause, _, _, _ := s3s.ParseSelect(table.myQuery) + myVal, err := matchesMyWhereClause(table.record, columnsMap, alias, whereClause) + if table.err != err { + t.Error() + } + if myVal != table.expected { + t.Error() + } + } +} + +// TestMyStringComparator is a unit test which ensures that the appropriate +// values are being compared for strings. 
+func TestMyStringComparator(t *testing.T) { + tables := []struct { + operand string + operator string + myVal string + expected bool + err error + }{ + {"random", ">", "myName", "random" > "myName", nil}, + {"12", "!=", "myName", "12" != "myName", nil}, + {"12", "=", "myName", "12" == "myName", nil}, + {"12", "<=", "myName", "12" <= "myName", nil}, + {"12", ">=", "myName", "12" >= "myName", nil}, + {"12", "<", "myName", "12" < "myName", nil}, + {"name", "like", "_x%", false, nil}, + {"12", "randomoperator", "myName", false, ErrUnsupportedSyntax}, + } + for _, table := range tables { + myVal, err := stringEval(table.operand, table.operator, table.myVal) + if err != table.err { + t.Error() + } + if myVal != table.expected { + t.Error() + } + } +} + +// TestMyFloatComparator is a unit test which ensures that the appropriate +// values are being compared for floats. +func TestMyFloatComparator(t *testing.T) { + tables := []struct { + operand float64 + operator string + myVal float64 + expected bool + err error + }{ + {12.000, ">", 13.000, 12.000 > 13.000, nil}, + {1000.000, "!=", 1000.000, 1000.000 != 1000.000, nil}, + {1000.000, "<", 1000.000, 1000.000 < 1000.000, nil}, + {1000.000, "<=", 1000.000, 1000.000 <= 1000.000, nil}, + {1000.000, ">=", 1000.000, 1000.000 >= 1000.000, nil}, + {1000.000, "=", 1000.000, 1000.000 == 1000.000, nil}, + {17.000, "randomoperator", 0.0, false, ErrUnsupportedSyntax}, + } + for _, table := range tables { + myVal, err := floatEval(table.operand, table.operator, table.myVal) + if err != table.err { + t.Error() + } + if myVal != table.expected { + t.Error() + } + } +} + +// TestMyIntComparator is a unit test which ensures that the appropriate values +// are being compared for ints. +func TestMyIntComparator(t *testing.T) { + tables := []struct { + operand int64 + operator string + myVal int64 + expected bool + err error + }{ + {12, ">", 13, 12.000 > 13.000, nil}, + {1000, "!=", 1000, 1000.000 != 1000.000, nil}, + {1000, "<", 1000, 1000.000 < 1000.000, nil}, + {1000, "<=", 1000, 1000.000 <= 1000.000, nil}, + {1000, ">=", 1000, 1000.000 >= 1000.000, nil}, + {1000, "=", 1000, 1000.000 >= 1000.000, nil}, + {17, "randomoperator", 0, false, ErrUnsupportedSyntax}, + } + for _, table := range tables { + myVal, err := intEval(table.operand, table.operator, table.myVal) + if err != table.err { + t.Error() + } + if myVal != table.expected { + t.Error() + } + } +} + +// TestMySizeFunction is a function which provides unit testing for the function +// which calculates size. +func TestMySizeFunction(t *testing.T) { + tables := []struct { + myRecord []string + expected int64 + }{ + {[]string{"test1", "test2", "test3", "test4", "test5"}, 30}, + } + for _, table := range tables { + if processSize(table.myRecord) != table.expected { + t.Error() + } + + } +} + +// TestInterpreter is a function which provides unit testing for the main +// interpreter function. 
+func TestInterpreter(t *testing.T) { + tables := []struct { + myQuery string + myChan chan *Row + err error + header []string + }{ + {"Select random from S3OBJECT", make(chan *Row), ErrMissingHeaders, []string{"name1", "name2", "name3", "name4"}}, + {"Select * from S3OBJECT as A WHERE name2 > 5.00", make(chan *Row), nil, []string{"name1", "name2", "name3", "name4"}}, + {"Select * from S3OBJECT", make(chan *Row), nil, []string{"name1", "name2", "name3", "name4"}}, + {"Select A_1 from S3OBJECT as A", make(chan *Row), nil, []string{"1", "2", "3", "4"}}, + {"Select count(*) from S3OBJECT", make(chan *Row), nil, []string{"name1", "name2", "name3", "name4"}}, + {"Select * from S3OBJECT WHERE name1 > 5.00", make(chan *Row), nil, []string{"name1", "name2", "name3", "name4"}}, + } + for _, table := range tables { + options := &Options{ + HasHeader: false, + FieldDelimiter: ",", + Comments: "", + Name: "S3Object", // Default table name for all objects + ReadFrom: bytes.NewReader([]byte("name1,name2,name3,name4" + "\n" + "5,is,a,string" + "\n" + "random,random,stuff,stuff")), + Compressed: "", + Expression: "", + OutputFieldDelimiter: ",", + StreamSize: 20, + HeaderOpt: true, + } + s3s, err := NewInput(options) + if err != nil { + t.Error(err) + } + s3s.header = table.header + reqCols, alias, myLimit, whereClause, aggFunctionNames, _, err := s3s.ParseSelect(table.myQuery) + if err != table.err { + t.Fatal() + } + if err == nil { + go s3s.processSelectReq(reqCols, alias, whereClause, myLimit, aggFunctionNames, table.myChan, nil) + select { + case row, ok := <-table.myChan: + if ok && len(row.record) > 0 { + } else if ok && row.err != nil { + if row.err != table.err { + t.Error() + } + close(table.myChan) + } else if !ok { + } + } + } + } +} + +// TestMyXMLFunction is a function that provides unit testing for the XML +// creating function. +func TestMyXMLFunction(t *testing.T) { + options := &Options{ + HasHeader: false, + FieldDelimiter: ",", + Comments: "", + Name: "S3Object", // Default table name for all objects + ReadFrom: bytes.NewReader([]byte("name1,name2,name3,name4" + "\n" + "5,is,a,string" + "\n" + "random,random,stuff,stuff")), + Compressed: "", + Expression: "", + OutputFieldDelimiter: ",", + StreamSize: 20, + HeaderOpt: true, + } + s3s, err := NewInput(options) + if err != nil { + t.Error(err) + } + tables := []struct { + expectedStat int + expectedProgress int + }{ + {159, 165}, + } + for _, table := range tables { + myVal, _ := s3s.createStatXML() + myOtherVal, _ := s3s.createProgressXML() + if len(myVal) != table.expectedStat { + t.Error() + } + if len(myOtherVal) != table.expectedProgress { + fmt.Println(len(myOtherVal)) + t.Error() + } + } +} + +// TestMyProtocolFunction is a function which provides unit testing for several +// of the functions which write the binary protocol. 
+func TestMyProtocolFunction(t *testing.T) { + options := &Options{ + HasHeader: false, + FieldDelimiter: ",", + Comments: "", + Name: "S3Object", // Default table name for all objects + ReadFrom: bytes.NewReader([]byte("name1,name2,name3,name4" + "\n" + "5,is,a,string" + "\n" + "random,random,stuff,stuff")), + Compressed: "", + Expression: "", + OutputFieldDelimiter: ",", + StreamSize: 20, + HeaderOpt: true, + } + s3s, err := NewInput(options) + if err != nil { + t.Error(err) + } + tables := []struct { + payloadMsg string + expectedRecord int + expectedEnd int + }{ + {"random payload", 115, 56}, + } + for _, table := range tables { + var currentMessage = &bytes.Buffer{} + if len(s3s.writeRecordMessage(table.payloadMsg, currentMessage).Bytes()) != table.expectedRecord { + t.Error() + } + currentMessage.Reset() + if len(s3s.writeEndMessage(currentMessage).Bytes()) != table.expectedEnd { + t.Error() + } + currentMessage.Reset() + if len(s3s.writeContinuationMessage(currentMessage).Bytes()) != 57 { + t.Error() + } + currentMessage.Reset() + } +} + +// TestMyInfoProtocolFunctions is a function which provides unit testing for the +// stat and progress messages of the protocols. +func TestMyInfoProtocolFunctions(t *testing.T) { + options := &Options{ + HasHeader: true, + FieldDelimiter: ",", + Comments: "", + Name: "S3Object", // Default table name for all objects + ReadFrom: bytes.NewReader([]byte("name1,name2,name3,name4" + "\n" + "5,is,a,string" + "\n" + "random,random,stuff,stuff")), + Compressed: "", + Expression: "", + OutputFieldDelimiter: ",", + StreamSize: 20, + } + s3s, err := NewInput(options) + if err != nil { + t.Error(err) + } + myVal, _ := s3s.createStatXML() + myOtherVal, _ := s3s.createProgressXML() + + tables := []struct { + payloadStatMsg string + payloadProgressMsg string + expectedStat int + expectedProgress int + }{ + {myVal, myOtherVal, 242, 252}, + } + for _, table := range tables { + var currBuf = &bytes.Buffer{} + if len(s3s.writeStatMessage(table.payloadStatMsg, currBuf).Bytes()) != table.expectedStat { + t.Error() + } + currBuf.Reset() + if len(s3s.writeProgressMessage(table.payloadProgressMsg, currBuf).Bytes()) != table.expectedProgress { + t.Error() + } + } +} + +// TestMyErrorProtocolFunctions is a function which provides unit testing for +// the error message type of protocol. +func TestMyErrorProtocolFunctions(t *testing.T) { + options := &Options{ + HasHeader: false, + FieldDelimiter: ",", + Comments: "", + Name: "S3Object", // Default table name for all objects + ReadFrom: bytes.NewReader([]byte("name1,name2,name3,name4" + "\n" + "5,is,a,string" + "\n" + "random,random,stuff,stuff")), + Compressed: "", + Expression: "", + OutputFieldDelimiter: ",", + StreamSize: 20, + HeaderOpt: true, + } + s3s, err := NewInput(options) + if err != nil { + t.Error(err) + } + tables := []struct { + err error + expectedError int + }{ + {ErrInvalidCast, 248}, + {ErrTruncatedInput, 200}, + {ErrUnsupportedSyntax, 114}, + {ErrCSVParsingError, 157}, + } + for _, table := range tables { + var currentMessage = &bytes.Buffer{} + if len(s3s.writeErrorMessage(table.err, currentMessage).Bytes()) != table.expectedError { + t.Error() + } + + } +} +func TestMatch(t *testing.T) { + testCases := []struct { + pattern string + text string + matched bool + }{ + // Test case - 1. + // Test case so that the match occurs on the opening letter. + { + pattern: "a%", + text: "apple", + matched: true, + }, + // Test case - 2. + // Test case so that the ending letter is true. 
+		{
+			pattern: "%m",
+			text:    "random",
+			matched: true,
+		},
+		// Test case - 3.
+		// Test case so that a character is at the appropriate position.
+		{
+			pattern: "_d%",
+			text:    "adam",
+			matched: true,
+		},
+		// Test case - 4.
+		// Test case so that a character is at the appropriate position.
+		{
+			pattern: "_d%",
+			text:    "apple",
+			matched: false,
+		},
+		// Test case - 5.
+		// Test case checking that the text is at least three characters long.
+		{
+			pattern: "a_%_%",
+			text:    "ap",
+			matched: false,
+		},
+		{
+			pattern: "a_%_%",
+			text:    "apple",
+			matched: true,
+		},
+		{
+			pattern: "%or%",
+			text:    "orphan",
+			matched: true,
+		},
+		{
+			pattern: "%or%",
+			text:    "dolphin",
+			matched: false,
+		},
+		{
+			pattern: "%or%",
+			text:    "dorlphin",
+			matched: true,
+		},
+		{
+			pattern: "2__3",
+			text:    "2003",
+			matched: true,
+		},
+		{
+			pattern: "_YYYY_",
+			text:    "aYYYYa",
+			matched: true,
+		},
+		{
+			pattern: "C%",
+			text:    "CA",
+			matched: true,
+		},
+		{
+			pattern: "C%",
+			text:    "SC",
+			matched: false,
+		},
+		{
+			pattern: "%C",
+			text:    "SC",
+			matched: true,
+		},
+		{
+			pattern: "%C",
+			text:    "CA",
+			matched: false,
+		},
+		{
+			pattern: "%C",
+			text:    "ACCC",
+			matched: true,
+		},
+		{
+			pattern: "C%",
+			text:    "CCC",
+			matched: true,
+		},
+		{
+			pattern: "j%",
+			text:    "mejri",
+			matched: false,
+		},
+		{
+			pattern: "a%o",
+			text:    "ando",
+			matched: true,
+		},
+		{
+			pattern: "%j",
+			text:    "mejri",
+			matched: false,
+		},
+		{
+			pattern: "%ja",
+			text:    "mejrija",
+			matched: true,
+		},
+		{
+			pattern: "ja%",
+			text:    "jamal",
+			matched: true,
+		},
+		{
+			pattern: "a%o",
+			text:    "andp",
+			matched: false,
+		},
+		{
+			pattern: "_r%",
+			text:    "arpa",
+			matched: true,
+		},
+		{
+			pattern: "_r%",
+			text:    "apra",
+			matched: false,
+		},
+		{
+			pattern: "a_%_%",
+			text:    "appple",
+			matched: true,
+		},
+		{
+			pattern: "l_b%",
+			text:    "lebron",
+			matched: true,
+		},
+		{
+			pattern: "leb%",
+			text:    "Dalembert",
+			matched: false,
+		},
+		{
+			pattern: "leb%",
+			text:    "Landesberg",
+			matched: false,
+		},
+		{
+			pattern: "leb%",
+			text:    "Mccalebb",
+			matched: false,
+		},
+		{
+			pattern: "%lebb",
+			text:    "Mccalebb",
+			matched: true,
+		},
+	}
+	// Iterate over the test cases, call the function under test and assert the output.
+	for i, testCase := range testCases {
+		actualResult, err := likeConvert(testCase.pattern, testCase.text)
+		if err != nil {
+			t.Error(err)
+		}
+		if testCase.matched != actualResult {
+			t.Errorf("Test %d: pattern %q against text %q: expected the result to be `%v`, but instead found it to be `%v`", i+1, testCase.pattern, testCase.text, testCase.matched, actualResult)
+		}
+	}
+}
+
+// TestMyValids is a unit test which ensures that the appropriate values are
+// being returned from the isValid... functions.
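+// The queries exercise nested SQL functions (UPPER over NULLIF); the second
+// case references a column that is not in the header and is therefore
+// expected to fail with ErrMissingHeaders.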
+func TestMyValids(t *testing.T) {
+	tables := []struct {
+		myQuery    string
+		indexList  []int
+		myIndex    int
+		myValIndex bool
+		header     []string
+		err        error
+	}{
+		{"SELECT UPPER(NULLIF(draft_year,random_name))", []int{3, 5, 6, 7, 8, 9}, 3, true, []string{"draft_year", "random_name"}, nil},
+		{"SELECT UPPER(NULLIF(draft_year,xandom_name))", []int{3, 5, 6, 7, 8, 9}, 3, true, []string{"draft_year", "random_name"}, ErrMissingHeaders},
+	}
+	for _, table := range tables {
+		options := &Options{
+			HasHeader:            false,
+			FieldDelimiter:       ",",
+			Comments:             "",
+			Name:                 "S3Object", // Default table name for all objects
+			ReadFrom:             bytes.NewReader([]byte("name1,name2,name3,name4" + "\n" + "5,is,a,string" + "\n" + "random,random,stuff,stuff")),
+			Compressed:           "",
+			Expression:           "",
+			OutputFieldDelimiter: ",",
+			StreamSize:           20,
+			HeaderOpt:            true,
+		}
+		s3s, err := NewInput(options)
+		if err != nil {
+			t.Error(err)
+		}
+		s3s.header = table.header
+		_, _, _, _, _, _, err = s3s.ParseSelect(table.myQuery)
+		if err != table.err {
+			t.Fatalf("Query %q: expected error %v, got %v", table.myQuery, table.err, err)
+		}
+		myVal := isValidFunc(table.indexList, table.myIndex)
+		if myVal != table.myValIndex {
+			t.Errorf("Expected isValidFunc to return %v", table.myValIndex)
+		}
+	}
+}
+
+// TestMyFuncProcessing is a unit test which ensures that the appropriate values
+// are being returned from the processing functions.
+func TestMyFuncProcessing(t *testing.T) {
+	tables := []struct {
+		myString    string
+		nullList    []string
+		coalList    []string
+		myValString string
+		myValCoal   string
+		myValNull   string
+		stringFunc  string
+	}{
+		{"lower", []string{"yo", "yo"}, []string{"random", "hello", "random"}, "LOWER", "random", "", "UPPER"},
+		{"LOWER", []string{"null", "random"}, []string{"missing", "hello", "random"}, "lower", "hello", "null", "LOWER"},
+	}
+	for _, table := range tables {
+		if table.coalList != nil {
+			myVal := processCoalNoIndex(table.coalList)
+			if myVal != table.myValCoal {
+				t.Errorf("Expected processCoalNoIndex to return %q, got %q", table.myValCoal, myVal)
+			}
+		}
+		if table.nullList != nil {
+			myVal := processNullIf(table.nullList)
+			if myVal != table.myValNull {
+				t.Errorf("Expected processNullIf to return %q, got %q", table.myValNull, myVal)
+			}
+		}
+		myVal := applyStrFunc(table.myString, table.stringFunc)
+		if myVal != table.myValString {
+			t.Errorf("Expected applyStrFunc to return %q, got %q", table.myValString, myVal)
+		}
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/LICENSE b/vendor/github.com/klauspost/compress/LICENSE
new file mode 100644
index 000000000..744875676
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/compress/flate/copy.go b/vendor/github.com/klauspost/compress/flate/copy.go
new file mode 100644
index 000000000..a3200a8f4
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/copy.go
@@ -0,0 +1,32 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// forwardCopy is like the built-in copy function except that it always goes
+// forward from the start, even if the dst and src overlap.
+// It is equivalent to:
+//	for i := 0; i < n; i++ {
+//		mem[dst+i] = mem[src+i]
+//	}
+func forwardCopy(mem []byte, dst, src, n int) {
+	if dst <= src {
+		copy(mem[dst:dst+n], mem[src:src+n])
+		return
+	}
+	for {
+		if dst >= src+n {
+			copy(mem[dst:dst+n], mem[src:src+n])
+			return
+		}
+		// There is some forward overlap. The destination
+		// will be filled with a repeated pattern of mem[src:src+k].
+		// We copy one instance of the pattern here, then repeat.
+		// Each time around this loop k will double.
+		k := dst - src
+		copy(mem[dst:dst+k], mem[src:src+k])
+		n -= k
+		dst += k
+	}
+}
diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.go b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go
new file mode 100644
index 000000000..70a6095e6
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/crc32_amd64.go
@@ -0,0 +1,41 @@
+//+build !noasm
+//+build !appengine
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
+
+package flate
+
+import (
+	"github.com/klauspost/cpuid"
+)
+
+// crc32sse returns a hash for the first 4 bytes of the slice.
+// len(a) must be >= 4.
+//go:noescape
+func crc32sse(a []byte) uint32
+
+// crc32sseAll calculates hashes for each 4-byte set in a.
+// dst must be at least len(a) - 4 in size.
+// The size is not checked by the assembly.
+//go:noescape
+func crc32sseAll(a []byte, dst []uint32)
+
+// matchLenSSE4 returns the number of matching bytes in a and b
+// up to length 'max'. Both slices must be at least 'max'
+// bytes in size.
+//
+// TODO: drop the "SSE4" name, since it doesn't use any SSE instructions.
+//
+//go:noescape
+func matchLenSSE4(a, b []byte, max int) int
+
+// histogram accumulates a histogram of b in h.
+// h must be at least 256 entries in length,
+// and must be cleared before calling this function.
+//go:noescape
+func histogram(b []byte, h []int32)
+
+// Detect SSE 4.2 feature.
+func init() {
+	useSSE42 = cpuid.CPU.SSE42()
+}
diff --git a/vendor/github.com/klauspost/compress/flate/crc32_amd64.s b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s
new file mode 100644
index 000000000..2fb2079b9
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/crc32_amd64.s
@@ -0,0 +1,213 @@
+//+build !noasm
+//+build !appengine
+
+// Copyright 2015, Klaus Post, see LICENSE for details.
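+
+// Note on the frames below: in the Go assembler's calling convention a slice
+// argument occupies three words (pointer, length, capacity) on the caller's
+// frame, so for crc32sse(a []byte) the data pointer is at a+0(FP), the
+// length at a_len+8(FP), and the uint32 result at ret+24(FP).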
+ +// func crc32sse(a []byte) uint32 +TEXT ·crc32sse(SB), 4, $0 + MOVQ a+0(FP), R10 + XORQ BX, BX + + // CRC32 dword (R10), EBX + BYTE $0xF2; BYTE $0x41; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0x1a + + MOVL BX, ret+24(FP) + RET + +// func crc32sseAll(a []byte, dst []uint32) +TEXT ·crc32sseAll(SB), 4, $0 + MOVQ a+0(FP), R8 // R8: src + MOVQ a_len+8(FP), R10 // input length + MOVQ dst+24(FP), R9 // R9: dst + SUBQ $4, R10 + JS end + JZ one_crc + MOVQ R10, R13 + SHRQ $2, R10 // len/4 + ANDQ $3, R13 // len&3 + XORQ BX, BX + ADDQ $1, R13 + TESTQ R10, R10 + JZ rem_loop + +crc_loop: + MOVQ (R8), R11 + XORQ BX, BX + XORQ DX, DX + XORQ DI, DI + MOVQ R11, R12 + SHRQ $8, R11 + MOVQ R12, AX + MOVQ R11, CX + SHRQ $16, R12 + SHRQ $16, R11 + MOVQ R12, SI + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + + // CRC32 ECX, EDX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd1 + + // CRC32 ESI, EDI + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xfe + MOVL BX, (R9) + MOVL DX, 4(R9) + MOVL DI, 8(R9) + + XORQ BX, BX + MOVL R11, AX + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + MOVL BX, 12(R9) + + ADDQ $16, R9 + ADDQ $4, R8 + XORQ BX, BX + SUBQ $1, R10 + JNZ crc_loop + +rem_loop: + MOVL (R8), AX + + // CRC32 EAX, EBX + BYTE $0xF2; BYTE $0x0f + BYTE $0x38; BYTE $0xf1; BYTE $0xd8 + + MOVL BX, (R9) + ADDQ $4, R9 + ADDQ $1, R8 + XORQ BX, BX + SUBQ $1, R13 + JNZ rem_loop + +end: + RET + +one_crc: + MOVQ $1, R13 + XORQ BX, BX + JMP rem_loop + +// func matchLenSSE4(a, b []byte, max int) int +TEXT ·matchLenSSE4(SB), 4, $0 + MOVQ a_base+0(FP), SI + MOVQ b_base+24(FP), DI + MOVQ DI, DX + MOVQ max+48(FP), CX + +cmp8: + // As long as we are 8 or more bytes before the end of max, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ CX, $8 + JLT cmp1 + MOVQ (SI), AX + MOVQ (DI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, SI + ADDQ $8, DI + SUBQ $8, CX + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, DI + + // Subtract off &b[0] to convert from &b[ret] to ret, and return. + SUBQ DX, DI + MOVQ DI, ret+56(FP) + RET + +cmp1: + // In the slices' tail, compare 1 byte at a time. + CMPQ CX, $0 + JEQ matchLenEnd + MOVB (SI), AX + MOVB (DI), BX + CMPB AX, BX + JNE matchLenEnd + ADDQ $1, SI + ADDQ $1, DI + SUBQ $1, CX + JMP cmp1 + +matchLenEnd: + // Subtract off &b[0] to convert from &b[ret] to ret, and return. 
+ SUBQ DX, DI + MOVQ DI, ret+56(FP) + RET + +// func histogram(b []byte, h []int32) +TEXT ·histogram(SB), 4, $0 + MOVQ b+0(FP), SI // SI: &b + MOVQ b_len+8(FP), R9 // R9: len(b) + MOVQ h+24(FP), DI // DI: Histogram + MOVQ R9, R8 + SHRQ $3, R8 + JZ hist1 + XORQ R11, R11 + +loop_hist8: + MOVQ (SI), R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + MOVB R10, R11 + INCL (DI)(R11*4) + SHRQ $8, R10 + + INCL (DI)(R10*4) + + ADDQ $8, SI + DECQ R8 + JNZ loop_hist8 + +hist1: + ANDQ $7, R9 + JZ end_hist + XORQ R10, R10 + +loop_hist1: + MOVB (SI), R10 + INCL (DI)(R10*4) + INCQ SI + DECQ R9 + JNZ loop_hist1 + +end_hist: + RET diff --git a/vendor/github.com/klauspost/compress/flate/crc32_noasm.go b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go new file mode 100644 index 000000000..bd98bd598 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/crc32_noasm.go @@ -0,0 +1,35 @@ +//+build !amd64 noasm appengine + +// Copyright 2015, Klaus Post, see LICENSE for details. + +package flate + +func init() { + useSSE42 = false +} + +// crc32sse should never be called. +func crc32sse(a []byte) uint32 { + panic("no assembler") +} + +// crc32sseAll should never be called. +func crc32sseAll(a []byte, dst []uint32) { + panic("no assembler") +} + +// matchLenSSE4 should never be called. +func matchLenSSE4(a, b []byte, max int) int { + panic("no assembler") + return 0 +} + +// histogram accumulates a histogram of b in h. +// +// len(h) must be >= 256, and h's elements must be all zeroes. +func histogram(b []byte, h []int32) { + h = h[:256] + for _, t := range b { + h[t]++ + } +} diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go new file mode 100644 index 000000000..9e6e7ff0c --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -0,0 +1,1353 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Copyright (c) 2015 Klaus Post +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "fmt" + "io" + "math" +) + +const ( + NoCompression = 0 + BestSpeed = 1 + BestCompression = 9 + DefaultCompression = -1 + + // HuffmanOnly disables Lempel-Ziv match searching and only performs Huffman + // entropy encoding. This mode is useful in compressing data that has + // already been compressed with an LZ style algorithm (e.g. Snappy or LZ4) + // that lacks an entropy encoder. Compression gains are achieved when + // certain bytes in the input stream occur more frequently than others. + // + // Note that HuffmanOnly produces a compressed output that is + // RFC 1951 compliant. That is, any valid DEFLATE decompressor will + // continue to be able to decompress this output. + HuffmanOnly = -2 + ConstantCompression = HuffmanOnly // compatibility alias. 
+
+	logWindowSize = 15
+	windowSize    = 1 << logWindowSize
+	windowMask    = windowSize - 1
+
+	logMaxOffsetSize = 15  // Standard DEFLATE
+	minMatchLength   = 4   // The smallest match that the compressor looks for
+	maxMatchLength   = 258 // The longest match for the compressor
+	minOffsetSize    = 1   // The shortest offset that makes any sense
+
+	// The maximum number of tokens we put into a single flate block, just to
+	// stop things from getting too large.
+	maxFlateBlockTokens = 1 << 14
+	maxStoreBlockSize   = 65535
+	hashBits            = 17 // After 17 performance degrades
+	hashSize            = 1 << hashBits
+	hashMask            = (1 << hashBits) - 1
+	hashShift           = (hashBits + minMatchLength - 1) / minMatchLength
+	maxHashOffset       = 1 << 24
+
+	skipNever = math.MaxInt32
+)
+
+var useSSE42 bool
+
+type compressionLevel struct {
+	good, lazy, nice, chain, fastSkipHashing, level int
+}
+
+// Compression levels have been rebalanced from zlib deflate defaults
+// to give a bigger spread in speed and compression.
+// See https://blog.klauspost.com/rebalancing-deflate-compression-levels/
+var levels = []compressionLevel{
+	{}, // 0
+	// Levels 1-4 use a specialized algorithm - values not used
+	{0, 0, 0, 0, 0, 1},
+	{0, 0, 0, 0, 0, 2},
+	{0, 0, 0, 0, 0, 3},
+	{0, 0, 0, 0, 0, 4},
+	// For levels 5-6 we don't bother trying with lazy matches.
+	// Lazy matching is at least 30% slower, for about a 1.5% compression gain.
+	{6, 0, 12, 8, 12, 5},
+	{8, 0, 24, 16, 16, 6},
+	// Levels 7-9 use increasingly more lazy matching
+	// and increasingly stringent conditions for "good enough".
+	{8, 8, 24, 16, skipNever, 7},
+	{10, 16, 24, 64, skipNever, 8},
+	{32, 258, 258, 4096, skipNever, 9},
+}
+
+type compressor struct {
+	compressionLevel
+
+	w          *huffmanBitWriter
+	bulkHasher func([]byte, []uint32)
+
+	// compression algorithm
+	fill func(*compressor, []byte) int // copy data to window
+	step func(*compressor)             // process window
+	sync bool                          // requesting flush
+
+	// Input hash chains
+	// hashHead[hashValue] contains the largest inputIndex with the specified hash value
+	// If hashHead[hashValue] is within the current window, then
+	// hashPrev[hashHead[hashValue] & windowMask] contains the previous index
+	// with the same hash value.
+	chainHead  int
+	hashHead   [hashSize]uint32
+	hashPrev   [windowSize]uint32
+	hashOffset int
+
+	// input window: unprocessed data is window[index:windowEnd]
+	index         int
+	window        []byte
+	windowEnd     int
+	blockStart    int  // window index where current tokens start
+	byteAvailable bool // if true, still need to process window[index-1].
+
+	// queued output tokens
+	tokens tokens
+
+	// deflate state
+	length         int
+	offset         int
+	hash           uint32
+	maxInsertIndex int
+	err            error
+	ii             uint16 // position of last match, intended to overflow to reset.
+
+	snap      snappyEnc
+	hashMatch [maxMatchLength + minMatchLength]uint32
+}
+
+func (d *compressor) fillDeflate(b []byte) int {
+	if d.index >= 2*windowSize-(minMatchLength+maxMatchLength) {
+		// shift the window by windowSize
+		copy(d.window[:], d.window[windowSize:2*windowSize])
+		d.index -= windowSize
+		d.windowEnd -= windowSize
+		if d.blockStart >= windowSize {
+			d.blockStart -= windowSize
+		} else {
+			d.blockStart = math.MaxInt32
+		}
+		d.hashOffset += windowSize
+		if d.hashOffset > maxHashOffset {
+			delta := d.hashOffset - 1
+			d.hashOffset -= delta
+			d.chainHead -= delta
+			// Iterate over slices instead of arrays to avoid copying
+			// the entire table onto the stack (Issue #18625).
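+			// Rebasing by delta rather than zeroing the tables keeps chain
+			// entries that still fall inside the window valid; anything
+			// older clamps to zero, which marks it as unused.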
+			for i, v := range d.hashPrev[:] {
+				if int(v) > delta {
+					d.hashPrev[i] = uint32(int(v) - delta)
+				} else {
+					d.hashPrev[i] = 0
+				}
+			}
+			for i, v := range d.hashHead[:] {
+				if int(v) > delta {
+					d.hashHead[i] = uint32(int(v) - delta)
+				} else {
+					d.hashHead[i] = 0
+				}
+			}
+		}
+	}
+	n := copy(d.window[d.windowEnd:], b)
+	d.windowEnd += n
+	return n
+}
+
+func (d *compressor) writeBlock(tok tokens, index int, eof bool) error {
+	if index > 0 || eof {
+		var window []byte
+		if d.blockStart <= index {
+			window = d.window[d.blockStart:index]
+		}
+		d.blockStart = index
+		d.w.writeBlock(tok.tokens[:tok.n], eof, window)
+		return d.w.err
+	}
+	return nil
+}
+
+// writeBlockSkip writes the current block and uses the number of tokens
+// to determine if the block should be stored on no matches, or
+// only huffman encoded.
+func (d *compressor) writeBlockSkip(tok tokens, index int, eof bool) error {
+	if index > 0 || eof {
+		if d.blockStart <= index {
+			window := d.window[d.blockStart:index]
+			// If we removed less than a 64th of all literals
+			// we huffman compress the block.
+			if int(tok.n) > len(window)-int(tok.n>>6) {
+				d.w.writeBlockHuff(eof, window)
+			} else {
+				// Write a dynamic huffman block.
+				d.w.writeBlockDynamic(tok.tokens[:tok.n], eof, window)
+			}
+		} else {
+			d.w.writeBlock(tok.tokens[:tok.n], eof, nil)
+		}
+		d.blockStart = index
+		return d.w.err
+	}
+	return nil
+}
+
+// fillWindow will fill the current window with the supplied
+// dictionary and calculate all hashes.
+// This is much faster than doing a full encode.
+// Should only be used after a start/reset.
+func (d *compressor) fillWindow(b []byte) {
+	// Do not fill window if we are in store-only mode,
+	// use constant or Snappy compression.
+	switch d.compressionLevel.level {
+	case 0, 1, 2:
+		return
+	}
+	// If we are given too much, cut it.
+	if len(b) > windowSize {
+		b = b[len(b)-windowSize:]
+	}
+	// Add all to window.
+	n := copy(d.window[d.windowEnd:], b)
+
+	// Calculate 256 hashes at a time (more L1 cache hits)
+	loops := (n + 256 - minMatchLength) / 256
+	for j := 0; j < loops; j++ {
+		startindex := j * 256
+		end := startindex + 256 + minMatchLength - 1
+		if end > n {
+			end = n
+		}
+		tocheck := d.window[startindex:end]
+		dstSize := len(tocheck) - minMatchLength + 1
+
+		if dstSize <= 0 {
+			continue
+		}
+
+		dst := d.hashMatch[:dstSize]
+		d.bulkHasher(tocheck, dst)
+		var newH uint32
+		for i, val := range dst {
+			di := i + startindex
+			newH = val & hashMask
+			// Get previous value with the same hash.
+			// Our chain should point to the previous value.
+			d.hashPrev[di&windowMask] = d.hashHead[newH]
+			// Set the head of the hash chain to us.
+			d.hashHead[newH] = uint32(di + d.hashOffset)
+		}
+		d.hash = newH
+	}
+	// Update window information.
+	d.windowEnd += n
+	d.index = n
+}
+
+// Try to find a match starting at index whose length is greater than prevSize.
+// We only look at chainCount possibilities before giving up.
+// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead
+func (d *compressor) findMatch(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) {
+	minMatchLook := maxMatchLength
+	if lookahead < minMatchLook {
+		minMatchLook = lookahead
+	}
+
+	win := d.window[0 : pos+minMatchLook]
+
+	// We quit when we get a match that's at least nice long
+	nice := len(win) - pos
+	if d.nice < nice {
+		nice = d.nice
+	}
+
+	// If we've got a match that's good enough, only look in 1/4 the chain.
+ tries := d.chain + length = prevLength + if length >= d.good { + tries >>= 2 + } + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLen(win[i:], wPos, minMatchLook) + + if n > length && (n > minMatchLength || pos-i <= 4096) { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i == minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.hashPrev[i&windowMask]) - d.hashOffset + if i < minIndex || i < 0 { + break + } + } + return +} + +// Try to find a match starting at index whose length is greater than prevSize. +// We only look at chainCount possibilities before giving up. +// pos = d.index, prevHead = d.chainHead-d.hashOffset, prevLength=minMatchLength-1, lookahead +func (d *compressor) findMatchSSE(pos int, prevHead int, prevLength int, lookahead int) (length, offset int, ok bool) { + minMatchLook := maxMatchLength + if lookahead < minMatchLook { + minMatchLook = lookahead + } + + win := d.window[0 : pos+minMatchLook] + + // We quit when we get a match that's at least nice long + nice := len(win) - pos + if d.nice < nice { + nice = d.nice + } + + // If we've got a match that's good enough, only look in 1/4 the chain. + tries := d.chain + length = prevLength + if length >= d.good { + tries >>= 2 + } + + wEnd := win[pos+length] + wPos := win[pos:] + minIndex := pos - windowSize + + for i := prevHead; tries > 0; tries-- { + if wEnd == win[i+length] { + n := matchLenSSE4(win[i:], wPos, minMatchLook) + + if n > length && (n > minMatchLength || pos-i <= 4096) { + length = n + offset = pos - i + ok = true + if n >= nice { + // The match is good enough that we don't try to find a better one. + break + } + wEnd = win[pos+n] + } + } + if i == minIndex { + // hashPrev[i & windowMask] has already been overwritten, so stop now. + break + } + i = int(d.hashPrev[i&windowMask]) - d.hashOffset + if i < minIndex || i < 0 { + break + } + } + return +} + +func (d *compressor) writeStoredBlock(buf []byte) error { + if d.w.writeStoredHeader(len(buf), false); d.w.err != nil { + return d.w.err + } + d.w.writeBytes(buf) + return d.w.err +} + +const hashmul = 0x1e35a7bd + +// hash4 returns a hash representation of the first 4 bytes +// of the supplied slice. +// The caller must ensure that len(b) >= 4. +func hash4(b []byte) uint32 { + return ((uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24) * hashmul) >> (32 - hashBits) +} + +// bulkHash4 will compute hashes using the same +// algorithm as hash4 +func bulkHash4(b []byte, dst []uint32) { + if len(b) < minMatchLength { + return + } + hb := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24 + dst[0] = (hb * hashmul) >> (32 - hashBits) + end := len(b) - minMatchLength + 1 + for i := 1; i < end; i++ { + hb = (hb << 8) | uint32(b[i+3]) + dst[i] = (hb * hashmul) >> (32 - hashBits) + } +} + +// matchLen returns the number of matching bytes in a and b +// up to length 'max'. Both slices must be at least 'max' +// bytes in size. 
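+// For example, matchLen([]byte("abcdef"), []byte("abcxef"), 6) returns 3,
+// the length of the common prefix.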
+func matchLen(a, b []byte, max int) int { + a = a[:max] + b = b[:len(a)] + for i, av := range a { + if b[i] != av { + return i + } + } + return max +} + +func (d *compressor) initDeflate() { + d.window = make([]byte, 2*windowSize) + d.hashOffset = 1 + d.length = minMatchLength - 1 + d.offset = 0 + d.byteAvailable = false + d.index = 0 + d.hash = 0 + d.chainHead = -1 + d.bulkHasher = bulkHash4 + if useSSE42 { + d.bulkHasher = crc32sseAll + } +} + +// Assumes that d.fastSkipHashing != skipNever, +// otherwise use deflateLazy +func (d *compressor) deflate() { + + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + if d.tokens.n > 0 { + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + ch := d.hashHead[d.hash&hashMask] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset) + } + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { + if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if d.length >= minMatchLength { + d.ii = 0 + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 + d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) + d.tokens.n++ + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + if d.length <= d.fastSkipHashing { + var newIndex int + newIndex = d.index + d.length + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. 
+ d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + d.index = newIndex + } else { + // For matches this long, we don't bother inserting each individual + // item into the table. + d.index += d.length + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + } + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + d.ii++ + end := d.index + int(d.ii>>uint(d.fastSkipHashing)) + 1 + if end > d.windowEnd { + end = d.windowEnd + } + for i := d.index; i < end; i++ { + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + d.index = end + } + } +} + +// deflateLazy is the same as deflate, but with d.fastSkipHashing == skipNever, +// meaning it always has lazy matching on. +func (d *compressor) deflateLazy() { + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. + const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + // Flush current output block if any. + if d.byteAvailable { + // There is still one pending token that needs to be flushed + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + } + if d.tokens.n > 0 { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = hash4(d.window[d.index : d.index+minMatchLength]) + ch := d.hashHead[d.hash&hashMask] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash&hashMask] = uint32(d.index + d.hashOffset) + } + prevLength := d.length + prevOffset := d.offset + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy { + if newLength, newOffset, ok := d.findMatch(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if prevLength >= minMatchLength && d.length <= prevLength { + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize)) + d.tokens.n++ + + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. 
+ var newIndex int + newIndex = d.index + prevLength - 1 + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + bulkHash4(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + + d.index = newIndex + d.byteAvailable = false + d.length = minMatchLength - 1 + if d.tokens.n == maxFlateBlockTokens { + // The block includes the current character + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } else { + // Reset, if we got a match this run. + if d.length >= minMatchLength { + d.ii = 0 + } + // We have a byte waiting. Emit it. + if d.byteAvailable { + d.ii++ + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + + // If we have a long run of no matches, skip additional bytes + // Resets when d.ii overflows after 64KB. + if d.ii > 31 { + n := int(d.ii >> 5) + for j := 0; j < n; j++ { + if d.index >= d.windowEnd-1 { + break + } + + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + d.index++ + } + // Flush last byte + d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1])) + d.tokens.n++ + d.byteAvailable = false + // d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength + if d.tokens.n == maxFlateBlockTokens { + if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + } + } else { + d.index++ + d.byteAvailable = true + } + } + } +} + +// Assumes that d.fastSkipHashing != skipNever, +// otherwise use deflateLazySSE +func (d *compressor) deflateSSE() { + + // Sanity enables additional runtime tests. + // It's intended to be used during development + // to supplement the currently ad-hoc unit tests. 
+ const sanity = false + + if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync { + return + } + + d.maxInsertIndex = d.windowEnd - (minMatchLength - 1) + if d.index < d.maxInsertIndex { + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + } + + for { + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + lookahead := d.windowEnd - d.index + if lookahead < minMatchLength+maxMatchLength { + if !d.sync { + return + } + if sanity && d.index > d.windowEnd { + panic("index > windowEnd") + } + if lookahead == 0 { + if d.tokens.n > 0 { + if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil { + return + } + d.tokens.n = 0 + } + return + } + } + if d.index < d.maxInsertIndex { + // Update the hash + d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask + ch := d.hashHead[d.hash] + d.chainHead = int(ch) + d.hashPrev[d.index&windowMask] = ch + d.hashHead[d.hash] = uint32(d.index + d.hashOffset) + } + d.length = minMatchLength - 1 + d.offset = 0 + minIndex := d.index - windowSize + if minIndex < 0 { + minIndex = 0 + } + + if d.chainHead-d.hashOffset >= minIndex && lookahead > minMatchLength-1 { + if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok { + d.length = newLength + d.offset = newOffset + } + } + if d.length >= minMatchLength { + d.ii = 0 + // There was a match at the previous step, and the current match is + // not better. Output the previous match. + // "d.length-3" should NOT be "d.length-minMatchLength", since the format always assume 3 + d.tokens.tokens[d.tokens.n] = matchToken(uint32(d.length-3), uint32(d.offset-minOffsetSize)) + d.tokens.n++ + // Insert in the hash table all strings up to the end of the match. + // index and index-1 are already inserted. If there is not enough + // lookahead, the last two strings are not inserted into the hash + // table. + if d.length <= d.fastSkipHashing { + var newIndex int + newIndex = d.index + d.length + // Calculate missing hashes + end := newIndex + if end > d.maxInsertIndex { + end = d.maxInsertIndex + } + end += minMatchLength - 1 + startindex := d.index + 1 + if startindex > d.maxInsertIndex { + startindex = d.maxInsertIndex + } + tocheck := d.window[startindex:end] + dstSize := len(tocheck) - minMatchLength + 1 + if dstSize > 0 { + dst := d.hashMatch[:dstSize] + + crc32sseAll(tocheck, dst) + var newH uint32 + for i, val := range dst { + di := i + startindex + newH = val & hashMask + // Get previous value with the same hash. + // Our chain should point to the previous value. + d.hashPrev[di&windowMask] = d.hashHead[newH] + // Set the head of the hash chain to us. + d.hashHead[newH] = uint32(di + d.hashOffset) + } + d.hash = newH + } + d.index = newIndex + } else { + // For matches this long, we don't bother inserting each individual + // item into the table. 
+				d.index += d.length
+				if d.index < d.maxInsertIndex {
+					d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+				}
+			}
+			if d.tokens.n == maxFlateBlockTokens {
+				// The block includes the current character
+				if d.err = d.writeBlockSkip(d.tokens, d.index, false); d.err != nil {
+					return
+				}
+				d.tokens.n = 0
+			}
+		} else {
+			d.ii++
+			end := d.index + int(d.ii>>5) + 1
+			if end > d.windowEnd {
+				end = d.windowEnd
+			}
+			for i := d.index; i < end; i++ {
+				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[i]))
+				d.tokens.n++
+				if d.tokens.n == maxFlateBlockTokens {
+					if d.err = d.writeBlockSkip(d.tokens, i+1, false); d.err != nil {
+						return
+					}
+					d.tokens.n = 0
+				}
+			}
+			d.index = end
+		}
+	}
+}
+
+// deflateLazySSE is the same as deflateSSE, but with d.fastSkipHashing == skipNever,
+// meaning it always has lazy matching on.
+func (d *compressor) deflateLazySSE() {
+	// Sanity enables additional runtime tests.
+	// It's intended to be used during development
+	// to supplement the currently ad-hoc unit tests.
+	const sanity = false
+
+	if d.windowEnd-d.index < minMatchLength+maxMatchLength && !d.sync {
+		return
+	}
+
+	d.maxInsertIndex = d.windowEnd - (minMatchLength - 1)
+	if d.index < d.maxInsertIndex {
+		d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+	}
+
+	for {
+		if sanity && d.index > d.windowEnd {
+			panic("index > windowEnd")
+		}
+		lookahead := d.windowEnd - d.index
+		if lookahead < minMatchLength+maxMatchLength {
+			if !d.sync {
+				return
+			}
+			if sanity && d.index > d.windowEnd {
+				panic("index > windowEnd")
+			}
+			if lookahead == 0 {
+				// Flush current output block if any.
+				if d.byteAvailable {
+					// There is still one pending token that needs to be flushed
+					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+					d.tokens.n++
+					d.byteAvailable = false
+				}
+				if d.tokens.n > 0 {
+					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+						return
+					}
+					d.tokens.n = 0
+				}
+				return
+			}
+		}
+		if d.index < d.maxInsertIndex {
+			// Update the hash
+			d.hash = crc32sse(d.window[d.index:d.index+minMatchLength]) & hashMask
+			ch := d.hashHead[d.hash]
+			d.chainHead = int(ch)
+			d.hashPrev[d.index&windowMask] = ch
+			d.hashHead[d.hash] = uint32(d.index + d.hashOffset)
+		}
+		prevLength := d.length
+		prevOffset := d.offset
+		d.length = minMatchLength - 1
+		d.offset = 0
+		minIndex := d.index - windowSize
+		if minIndex < 0 {
+			minIndex = 0
+		}
+
+		if d.chainHead-d.hashOffset >= minIndex && lookahead > prevLength && prevLength < d.lazy {
+			if newLength, newOffset, ok := d.findMatchSSE(d.index, d.chainHead-d.hashOffset, minMatchLength-1, lookahead); ok {
+				d.length = newLength
+				d.offset = newOffset
+			}
+		}
+		if prevLength >= minMatchLength && d.length <= prevLength {
+			// There was a match at the previous step, and the current match is
+			// not better. Output the previous match.
+			d.tokens.tokens[d.tokens.n] = matchToken(uint32(prevLength-3), uint32(prevOffset-minOffsetSize))
+			d.tokens.n++
+
+			// Insert in the hash table all strings up to the end of the match.
+			// index and index-1 are already inserted. If there is not enough
+			// lookahead, the last two strings are not inserted into the hash
+			// table.
+			var newIndex int
+			newIndex = d.index + prevLength - 1
+			// Calculate missing hashes
+			end := newIndex
+			if end > d.maxInsertIndex {
+				end = d.maxInsertIndex
+			}
+			end += minMatchLength - 1
+			startindex := d.index + 1
+			if startindex > d.maxInsertIndex {
+				startindex = d.maxInsertIndex
+			}
+			tocheck := d.window[startindex:end]
+			dstSize := len(tocheck) - minMatchLength + 1
+			if dstSize > 0 {
+				dst := d.hashMatch[:dstSize]
+				crc32sseAll(tocheck, dst)
+				var newH uint32
+				for i, val := range dst {
+					di := i + startindex
+					newH = val & hashMask
+					// Get previous value with the same hash.
+					// Our chain should point to the previous value.
+					d.hashPrev[di&windowMask] = d.hashHead[newH]
+					// Set the head of the hash chain to us.
+					d.hashHead[newH] = uint32(di + d.hashOffset)
+				}
+				d.hash = newH
+			}
+
+			d.index = newIndex
+			d.byteAvailable = false
+			d.length = minMatchLength - 1
+			if d.tokens.n == maxFlateBlockTokens {
+				// The block includes the current character
+				if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+					return
+				}
+				d.tokens.n = 0
+			}
+		} else {
+			// Reset, if we got a match this run.
+			if d.length >= minMatchLength {
+				d.ii = 0
+			}
+			// We have a byte waiting. Emit it.
+			if d.byteAvailable {
+				d.ii++
+				d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+				d.tokens.n++
+				if d.tokens.n == maxFlateBlockTokens {
+					if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+						return
+					}
+					d.tokens.n = 0
+				}
+				d.index++
+
+				// If we have a long run of no matches, skip additional bytes
+				// Resets when d.ii overflows after 64KB.
+				if d.ii > 31 {
+					n := int(d.ii >> 6)
+					for j := 0; j < n; j++ {
+						if d.index >= d.windowEnd-1 {
+							break
+						}
+
+						d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+						d.tokens.n++
+						if d.tokens.n == maxFlateBlockTokens {
+							if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+								return
+							}
+							d.tokens.n = 0
+						}
+						d.index++
+					}
+					// Flush last byte
+					d.tokens.tokens[d.tokens.n] = literalToken(uint32(d.window[d.index-1]))
+					d.tokens.n++
+					d.byteAvailable = false
+					// d.length = minMatchLength - 1 // not needed, since d.ii is reset above, so it should never be > minMatchLength
+					if d.tokens.n == maxFlateBlockTokens {
+						if d.err = d.writeBlock(d.tokens, d.index, false); d.err != nil {
+							return
+						}
+						d.tokens.n = 0
+					}
+				}
+			} else {
+				d.index++
+				d.byteAvailable = true
+			}
+		}
+	}
+}
+
+func (d *compressor) store() {
+	if d.windowEnd > 0 && (d.windowEnd == maxStoreBlockSize || d.sync) {
+		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+		d.windowEnd = 0
+	}
+}
+
+// fillBlock will fill the buffer with data for huffman-only compression.
+// The number of bytes copied is returned.
+func (d *compressor) fillBlock(b []byte) int {
+	n := copy(d.window[d.windowEnd:], b)
+	d.windowEnd += n
+	return n
+}
+
+// storeHuff will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeHuff() {
+	if d.windowEnd < len(d.window) && !d.sync || d.windowEnd == 0 {
+		return
+	}
+	d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+	d.err = d.w.err
+	d.windowEnd = 0
+}
+
+// storeSnappy will compress and store the currently added data,
+// if enough has been accumulated or we are at the end of the stream.
+// Any error that occurred will be in d.err
+func (d *compressor) storeSnappy() {
+	// We only compress if we have maxStoreBlockSize.
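+	// On a flush of a small window the cases below apply: nothing is
+	// emitted for empty input, up to 32 bytes are cheapest as a stored
+	// block, and 33-127 bytes are Huffman-encoded without match searching.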
+	if d.windowEnd < maxStoreBlockSize {
+		if !d.sync {
+			return
+		}
+		// Handle extremely small sizes.
+		if d.windowEnd < 128 {
+			if d.windowEnd == 0 {
+				return
+			}
+			if d.windowEnd <= 32 {
+				d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+				d.tokens.n = 0
+				d.windowEnd = 0
+			} else {
+				d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+				d.err = d.w.err
+			}
+			d.tokens.n = 0
+			d.windowEnd = 0
+			d.snap.Reset()
+			return
+		}
+	}
+
+	d.snap.Encode(&d.tokens, d.window[:d.windowEnd])
+	// If we made zero matches, store the block as is.
+	if int(d.tokens.n) == d.windowEnd {
+		d.err = d.writeStoredBlock(d.window[:d.windowEnd])
+		// If we removed less than 1/16th, huffman compress the block.
+	} else if int(d.tokens.n) > d.windowEnd-(d.windowEnd>>4) {
+		d.w.writeBlockHuff(false, d.window[:d.windowEnd])
+		d.err = d.w.err
+	} else {
+		d.w.writeBlockDynamic(d.tokens.tokens[:d.tokens.n], false, d.window[:d.windowEnd])
+		d.err = d.w.err
+	}
+	d.tokens.n = 0
+	d.windowEnd = 0
+}
+
+// write will add input bytes to the stream.
+// Unless an error occurs all bytes will be consumed.
+func (d *compressor) write(b []byte) (n int, err error) {
+	if d.err != nil {
+		return 0, d.err
+	}
+	n = len(b)
+	for len(b) > 0 {
+		d.step(d)
+		b = b[d.fill(d, b):]
+		if d.err != nil {
+			return 0, d.err
+		}
+	}
+	return n, d.err
+}
+
+func (d *compressor) syncFlush() error {
+	d.sync = true
+	if d.err != nil {
+		return d.err
+	}
+	d.step(d)
+	if d.err == nil {
+		d.w.writeStoredHeader(0, false)
+		d.w.flush()
+		d.err = d.w.err
+	}
+	d.sync = false
+	return d.err
+}
+
+func (d *compressor) init(w io.Writer, level int) (err error) {
+	d.w = newHuffmanBitWriter(w)
+
+	switch {
+	case level == NoCompression:
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).store
+	case level == ConstantCompression:
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).storeHuff
+	case level >= 1 && level <= 4:
+		d.snap = newSnappy(level)
+		d.window = make([]byte, maxStoreBlockSize)
+		d.fill = (*compressor).fillBlock
+		d.step = (*compressor).storeSnappy
+	case level == DefaultCompression:
+		level = 5
+		fallthrough
+	case 5 <= level && level <= 9:
+		d.compressionLevel = levels[level]
+		d.initDeflate()
+		d.fill = (*compressor).fillDeflate
+		if d.fastSkipHashing == skipNever {
+			if useSSE42 {
+				d.step = (*compressor).deflateLazySSE
+			} else {
+				d.step = (*compressor).deflateLazy
+			}
+		} else {
+			if useSSE42 {
+				d.step = (*compressor).deflateSSE
+			} else {
+				d.step = (*compressor).deflate
+			}
+		}
+	default:
+		return fmt.Errorf("flate: invalid compression level %d: want value in range [-2, 9]", level)
+	}
+	return nil
+}
+
+// reset the state of the compressor.
+func (d *compressor) reset(w io.Writer) {
+	d.w.reset(w)
+	d.sync = false
+	d.err = nil
+	// We only need to reset a few things for Snappy.
+	if d.snap != nil {
+		d.snap.Reset()
+		d.windowEnd = 0
+		d.tokens.n = 0
+		return
+	}
+	switch d.compressionLevel.chain {
+	case 0:
+		// level was NoCompression or ConstantCompression.
+ d.windowEnd = 0 + default: + d.chainHead = -1 + for i := range d.hashHead { + d.hashHead[i] = 0 + } + for i := range d.hashPrev { + d.hashPrev[i] = 0 + } + d.hashOffset = 1 + d.index, d.windowEnd = 0, 0 + d.blockStart, d.byteAvailable = 0, false + d.tokens.n = 0 + d.length = minMatchLength - 1 + d.offset = 0 + d.hash = 0 + d.ii = 0 + d.maxInsertIndex = 0 + } +} + +func (d *compressor) close() error { + if d.err != nil { + return d.err + } + d.sync = true + d.step(d) + if d.err != nil { + return d.err + } + if d.w.writeStoredHeader(0, true); d.w.err != nil { + return d.w.err + } + d.w.flush() + return d.w.err +} + +// NewWriter returns a new Writer compressing data at the given level. +// Following zlib, levels range from 1 (BestSpeed) to 9 (BestCompression); +// higher levels typically run slower but compress more. +// Level 0 (NoCompression) does not attempt any compression; it only adds the +// necessary DEFLATE framing. +// Level -1 (DefaultCompression) uses the default compression level. +// Level -2 (ConstantCompression) will use Huffman compression only, giving +// a very fast compression for all types of input, but sacrificing considerable +// compression efficiency. +// +// If level is in the range [-2, 9] then the error returned will be nil. +// Otherwise the error returned will be non-nil. +func NewWriter(w io.Writer, level int) (*Writer, error) { + var dw Writer + if err := dw.d.init(w, level); err != nil { + return nil, err + } + return &dw, nil +} + +// NewWriterDict is like NewWriter but initializes the new +// Writer with a preset dictionary. The returned Writer behaves +// as if the dictionary had been written to it without producing +// any compressed output. The compressed data written to w +// can only be decompressed by a Reader initialized with the +// same dictionary. +func NewWriterDict(w io.Writer, level int, dict []byte) (*Writer, error) { + dw := &dictWriter{w} + zw, err := NewWriter(dw, level) + if err != nil { + return nil, err + } + zw.d.fillWindow(dict) + zw.dict = append(zw.dict, dict...) // duplicate dictionary for Reset method. + return zw, err +} + +type dictWriter struct { + w io.Writer +} + +func (w *dictWriter) Write(b []byte) (n int, err error) { + return w.w.Write(b) +} + +// A Writer takes data written to it and writes the compressed +// form of that data to an underlying writer (see NewWriter). +type Writer struct { + d compressor + dict []byte +} + +// Write writes data to w, which will eventually write the +// compressed form of data to its underlying writer. +func (w *Writer) Write(data []byte) (n int, err error) { + return w.d.write(data) +} + +// Flush flushes any pending data to the underlying writer. +// It is useful mainly in compressed network protocols, to ensure that +// a remote reader has enough data to reconstruct a packet. +// Flush does not return until the data has been written. +// Calling Flush when there is no pending data still causes the Writer +// to emit a sync marker of at least 4 bytes. +// If the underlying writer returns an error, Flush returns that error. +// +// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH. +func (w *Writer) Flush() error { + // For more about flushing: + // http://www.bolet.org/~pornin/deflate-flush.html + return w.d.syncFlush() +} + +// Close flushes and closes the writer. 
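+//
+// A minimal round trip, as a sketch (assuming buf is a bytes.Buffer and
+// data a []byte):
+//
+//	zw, _ := flate.NewWriter(&buf, flate.BestSpeed)
+//	zw.Write(data)
+//	zw.Close() // emits the final, end-of-stream block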
+func (w *Writer) Close() error { + return w.d.close() +} + +// Reset discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level and dictionary. +func (w *Writer) Reset(dst io.Writer) { + if dw, ok := w.d.w.writer.(*dictWriter); ok { + // w was created with NewWriterDict + dw.w = dst + w.d.reset(dw) + w.d.fillWindow(w.dict) + } else { + // w was created with NewWriter + w.d.reset(dst) + } +} + +// ResetDict discards the writer's state and makes it equivalent to +// the result of NewWriter or NewWriterDict called with dst +// and w's level, but sets a specific dictionary. +func (w *Writer) ResetDict(dst io.Writer, dict []byte) { + w.dict = dict + w.d.reset(dst) + w.d.fillWindow(w.dict) +} diff --git a/vendor/github.com/klauspost/compress/flate/dict_decoder.go b/vendor/github.com/klauspost/compress/flate/dict_decoder.go new file mode 100644 index 000000000..71c75a065 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/dict_decoder.go @@ -0,0 +1,184 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +// dictDecoder implements the LZ77 sliding dictionary as used in decompression. +// LZ77 decompresses data through sequences of two forms of commands: +// +// * Literal insertions: Runs of one or more symbols are inserted into the data +// stream as is. This is accomplished through the writeByte method for a +// single symbol, or combinations of writeSlice/writeMark for multiple symbols. +// Any valid stream must start with a literal insertion if no preset dictionary +// is used. +// +// * Backward copies: Runs of one or more symbols are copied from previously +// emitted data. Backward copies come as the tuple (dist, length) where dist +// determines how far back in the stream to copy from and length determines how +// many bytes to copy. Note that it is valid for the length to be greater than +// the distance. Since LZ77 uses forward copies, that situation is used to +// perform a form of run-length encoding on repeated runs of symbols. +// The writeCopy and tryWriteCopy are used to implement this command. +// +// For performance reasons, this implementation performs little to no sanity +// checks about the arguments. As such, the invariants documented for each +// method call must be respected. +type dictDecoder struct { + hist []byte // Sliding window history + + // Invariant: 0 <= rdPos <= wrPos <= len(hist) + wrPos int // Current output position in buffer + rdPos int // Have emitted hist[:rdPos] already + full bool // Has a full window length been written yet? +} + +// init initializes dictDecoder to have a sliding window dictionary of the given +// size. If a preset dict is provided, it will initialize the dictionary with +// the contents of dict. +func (dd *dictDecoder) init(size int, dict []byte) { + *dd = dictDecoder{hist: dd.hist} + + if cap(dd.hist) < size { + dd.hist = make([]byte, size) + } + dd.hist = dd.hist[:size] + + if len(dict) > len(dd.hist) { + dict = dict[len(dict)-len(dd.hist):] + } + dd.wrPos = copy(dd.hist, dict) + if dd.wrPos == len(dd.hist) { + dd.wrPos = 0 + dd.full = true + } + dd.rdPos = dd.wrPos +} + +// histSize reports the total amount of historical data in the dictionary. 
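+// Before the window has wrapped for the first time this is wrPos; once a
+// full window has been written it equals len(hist).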
+func (dd *dictDecoder) histSize() int { + if dd.full { + return len(dd.hist) + } + return dd.wrPos +} + +// availRead reports the number of bytes that can be flushed by readFlush. +func (dd *dictDecoder) availRead() int { + return dd.wrPos - dd.rdPos +} + +// availWrite reports the available amount of output buffer space. +func (dd *dictDecoder) availWrite() int { + return len(dd.hist) - dd.wrPos +} + +// writeSlice returns a slice of the available buffer to write data to. +// +// This invariant will be kept: len(s) <= availWrite() +func (dd *dictDecoder) writeSlice() []byte { + return dd.hist[dd.wrPos:] +} + +// writeMark advances the writer pointer by cnt. +// +// This invariant must be kept: 0 <= cnt <= availWrite() +func (dd *dictDecoder) writeMark(cnt int) { + dd.wrPos += cnt +} + +// writeByte writes a single byte to the dictionary. +// +// This invariant must be kept: 0 < availWrite() +func (dd *dictDecoder) writeByte(c byte) { + dd.hist[dd.wrPos] = c + dd.wrPos++ +} + +// writeCopy copies a string at a given (dist, length) to the output. +// This returns the number of bytes copied and may be less than the requested +// length if the available space in the output buffer is too small. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) writeCopy(dist, length int) int { + dstBase := dd.wrPos + dstPos := dstBase + srcPos := dstPos - dist + endPos := dstPos + length + if endPos > len(dd.hist) { + endPos = len(dd.hist) + } + + // Copy non-overlapping section after destination position. + // + // This section is non-overlapping in that the copy length for this section + // is always less than or equal to the backwards distance. This can occur + // if a distance refers to data that wraps-around in the buffer. + // Thus, a backwards copy is performed here; that is, the exact bytes in + // the source prior to the copy is placed in the destination. + if srcPos < 0 { + srcPos += len(dd.hist) + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:]) + srcPos = 0 + } + + // Copy possibly overlapping section before destination position. + // + // This section can overlap if the copy length for this section is larger + // than the backwards distance. This is allowed by LZ77 so that repeated + // strings can be succinctly represented using (dist, length) pairs. + // Thus, a forwards copy is performed here; that is, the bytes copied is + // possibly dependent on the resulting bytes in the destination as the copy + // progresses along. This is functionally equivalent to the following: + // + // for i := 0; i < endPos-dstPos; i++ { + // dd.hist[dstPos+i] = dd.hist[srcPos+i] + // } + // dstPos = endPos + // + for dstPos < endPos { + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// tryWriteCopy tries to copy a string at a given (distance, length) to the +// output. This specialized version is optimized for short distances. +// +// This method is designed to be inlined for performance reasons. +// +// This invariant must be kept: 0 < dist <= histSize() +func (dd *dictDecoder) tryWriteCopy(dist, length int) int { + dstPos := dd.wrPos + endPos := dstPos + length + if dstPos < dist || endPos > len(dd.hist) { + return 0 + } + dstBase := dstPos + srcPos := dstPos - dist + + // Copy possibly overlapping section before destination position. 
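+	// Each pass copies the pattern written so far in this call, so the
+	// copied region roughly doubles per iteration.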
+loop: + dstPos += copy(dd.hist[dstPos:endPos], dd.hist[srcPos:dstPos]) + if dstPos < endPos { + goto loop // Avoid for-loop so that this function can be inlined + } + + dd.wrPos = dstPos + return dstPos - dstBase +} + +// readFlush returns a slice of the historical buffer that is ready to be +// emitted to the user. The data returned by readFlush must be fully consumed +// before calling any other dictDecoder methods. +func (dd *dictDecoder) readFlush() []byte { + toRead := dd.hist[dd.rdPos:dd.wrPos] + dd.rdPos = dd.wrPos + if dd.wrPos == len(dd.hist) { + dd.wrPos, dd.rdPos = 0, 0 + dd.full = true + } + return toRead +} diff --git a/vendor/github.com/klauspost/compress/flate/gen.go b/vendor/github.com/klauspost/compress/flate/gen.go new file mode 100644 index 000000000..154c89a48 --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/gen.go @@ -0,0 +1,265 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build ignore + +// This program generates fixedhuff.go +// Invoke as +// +// go run gen.go -output fixedhuff.go + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "log" +) + +var filename = flag.String("output", "fixedhuff.go", "output file name") + +const maxCodeLen = 16 + +// Note: the definition of the huffmanDecoder struct is copied from +// inflate.go, as it is private to the implementation. + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + min int // the minimum code length + chunks [huffmanNumChunks]uint32 // chunks as described above + links [][]uint32 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(bits []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.min != 0 { + *h = huffmanDecoder{} + } + + // Count number of codes of each length, + // compute min and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range bits { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). 
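+ // An all-zero bits slice, for instance, leaves max == 0 and takes the
+ // early return below with every chunk still zero; a zero chunk decodes
+ // as bit count 0, which huffSym rejects as corrupt input.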
+ if max == 0 {
+ return true
+ }
+
+ code := 0
+ var nextcode [maxCodeLen]int
+ for i := min; i <= max; i++ {
+ code <<= 1
+ nextcode[i] = code
+ code += count[i]
+ }
+
+ // Check that the coding is complete (i.e., that we've
+ // assigned all 2-to-the-max possible bit sequences).
+ // Exception: To be compatible with zlib, we also need to
+ // accept degenerate single-code codings. See also
+ // TestDegenerateHuffmanCoding.
+ if code != 1<<uint(max) && !(code == 1 && max == 1) {
+ return false
+ }
+
+ h.min = min
+ if max > huffmanChunkBits {
+ numLinks := 1 << (uint(max) - huffmanChunkBits)
+ h.linkMask = uint32(numLinks - 1)
+
+ // create link tables
+ link := nextcode[huffmanChunkBits+1] >> 1
+ h.links = make([][]uint32, huffmanNumChunks-link)
+ for j := uint(link); j < huffmanNumChunks; j++ {
+ reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
+ reverse >>= uint(16 - huffmanChunkBits)
+ off := j - uint(link)
+ if sanity && h.chunks[reverse] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+ h.links[off] = make([]uint32, numLinks)
+ }
+ }
+
+ for i, n := range bits {
+ if n == 0 {
+ continue
+ }
+ code := nextcode[n]
+ nextcode[n]++
+ chunk := uint32(i<<huffmanValueShift | n)
+ reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
+ reverse >>= uint(16 - n)
+ if n <= huffmanChunkBits {
+ for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+ // We should never need to overwrite
+ // an existing chunk. Also, 0 is
+ // never a valid chunk, because the
+ // lower 4 "count" bits should be
+ // between 1 and 15.
+ if sanity && h.chunks[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[off] = chunk
+ }
+ } else {
+ j := reverse & (huffmanNumChunks - 1)
+ if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+ // Longer codes should have been
+ // associated with a link table above.
+ panic("impossible: not an indirect chunk")
+ }
+ value := h.chunks[j] >> huffmanValueShift
+ linktab := h.links[value]
+ reverse >>= huffmanChunkBits
+ for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+ if sanity && linktab[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ linktab[off] = chunk
+ }
+ }
+ }
+
+ if sanity {
+ // Above we've sanity checked that we never overwrote
+ // an existing entry. Here we additionally check that
+ // we filled the tables completely.
+ for i, chunk := range h.chunks {
+ if chunk == 0 {
+ // As an exception, in the degenerate
+ // single-code case, we allow odd
+ // chunks to be missing.
+ if code == 1 && i%2 == 1 {
+ continue
+ }
+ panic("impossible: missing chunk")
+ }
+ }
+ for _, linktab := range h.links {
+ for _, chunk := range linktab {
+ if chunk == 0 {
+ panic("impossible: missing chunk")
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+func main() {
+ flag.Parse()
+
+ var h huffmanDecoder
+ var bits [288]int
+ initReverseByte()
+ for i := 0; i < 144; i++ {
+ bits[i] = 8
+ }
+ for i := 144; i < 256; i++ {
+ bits[i] = 9
+ }
+ for i := 256; i < 280; i++ {
+ bits[i] = 7
+ }
+ for i := 280; i < 288; i++ {
+ bits[i] = 8
+ }
+ h.init(bits[:])
+ if h.links != nil {
+ log.Fatal("Unexpected links table in fixed Huffman decoder")
+ }
+
+ var buf bytes.Buffer
+
+ fmt.Fprintf(&buf, `// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.`+"\n\n") + + fmt.Fprintln(&buf, "package flate") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "// autogenerated by go run gen.go -output fixedhuff.go, DO NOT EDIT") + fmt.Fprintln(&buf) + fmt.Fprintln(&buf, "var fixedHuffmanDecoder = huffmanDecoder{") + fmt.Fprintf(&buf, "\t%d,\n", h.min) + fmt.Fprintln(&buf, "\t[huffmanNumChunks]uint32{") + for i := 0; i < huffmanNumChunks; i++ { + if i&7 == 0 { + fmt.Fprintf(&buf, "\t\t") + } else { + fmt.Fprintf(&buf, " ") + } + fmt.Fprintf(&buf, "0x%04x,", h.chunks[i]) + if i&7 == 7 { + fmt.Fprintln(&buf) + } + } + fmt.Fprintln(&buf, "\t},") + fmt.Fprintln(&buf, "\tnil, 0,") + fmt.Fprintln(&buf, "}") + + data, err := format.Source(buf.Bytes()) + if err != nil { + log.Fatal(err) + } + err = ioutil.WriteFile(*filename, data, 0644) + if err != nil { + log.Fatal(err) + } +} + +var reverseByte [256]byte + +func initReverseByte() { + for x := 0; x < 256; x++ { + var result byte + for i := uint(0); i < 8; i++ { + result |= byte(((x >> i) & 1) << (7 - i)) + } + reverseByte[x] = result + } +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go new file mode 100644 index 000000000..f9b2a699a --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -0,0 +1,701 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "io" +) + +const ( + // The largest offset code. + offsetCodeCount = 30 + + // The special code used to mark the end of a block. + endBlockMarker = 256 + + // The first length code. + lengthCodesStart = 257 + + // The number of codegen codes. + codegenCodeCount = 19 + badCode = 255 + + // bufferFlushSize indicates the buffer size + // after which bytes are flushed to the writer. + // Should preferably be a multiple of 6, since + // we accumulate 6 bytes between writes to the buffer. + bufferFlushSize = 240 + + // bufferSize is the actual output byte buffer size. + // It must have additional headroom for a flush + // which can contain up to 8 bytes. + bufferSize = bufferFlushSize + 8 +) + +// The number of extra bits needed by length code X - LENGTH_CODES_START. +var lengthExtraBits = []int8{ + /* 257 */ 0, 0, 0, + /* 260 */ 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, + /* 270 */ 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, + /* 280 */ 4, 5, 5, 5, 5, 0, +} + +// The length indicated by length code X - LENGTH_CODES_START. +var lengthBase = []uint32{ + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, + 12, 14, 16, 20, 24, 28, 32, 40, 48, 56, + 64, 80, 96, 112, 128, 160, 192, 224, 255, +} + +// offset code word extra bits. 
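+// For instance, offset code 13 has offsetBase[13] = 0x60 (96) and
+// offsetExtraBits[13] = 5, so it covers the 32 stored offsets 96..127,
+// i.e. distances 97..128 in RFC 1951 terms (tokens store distance-1).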
+var offsetExtraBits = []int8{ + 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, + 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, + 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, + /* extended window */ + 14, 14, 15, 15, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, +} + +var offsetBase = []uint32{ + /* normal deflate */ + 0x000000, 0x000001, 0x000002, 0x000003, 0x000004, + 0x000006, 0x000008, 0x00000c, 0x000010, 0x000018, + 0x000020, 0x000030, 0x000040, 0x000060, 0x000080, + 0x0000c0, 0x000100, 0x000180, 0x000200, 0x000300, + 0x000400, 0x000600, 0x000800, 0x000c00, 0x001000, + 0x001800, 0x002000, 0x003000, 0x004000, 0x006000, + + /* extended window */ + 0x008000, 0x00c000, 0x010000, 0x018000, 0x020000, + 0x030000, 0x040000, 0x060000, 0x080000, 0x0c0000, + 0x100000, 0x180000, 0x200000, 0x300000, +} + +// The odd order in which the codegen code sizes are written. +var codegenOrder = []uint32{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +type huffmanBitWriter struct { + // writer is the underlying writer. + // Do not use it directly; use the write method, which ensures + // that Write errors are sticky. + writer io.Writer + + // Data waiting to be written is bytes[0:nbytes] + // and then the low nbits of bits. + bits uint64 + nbits uint + bytes [bufferSize]byte + codegenFreq [codegenCodeCount]int32 + nbytes int + literalFreq []int32 + offsetFreq []int32 + codegen []uint8 + literalEncoding *huffmanEncoder + offsetEncoding *huffmanEncoder + codegenEncoding *huffmanEncoder + err error +} + +func newHuffmanBitWriter(w io.Writer) *huffmanBitWriter { + return &huffmanBitWriter{ + writer: w, + literalFreq: make([]int32, maxNumLit), + offsetFreq: make([]int32, offsetCodeCount), + codegen: make([]uint8, maxNumLit+offsetCodeCount+1), + literalEncoding: newHuffmanEncoder(maxNumLit), + codegenEncoding: newHuffmanEncoder(codegenCodeCount), + offsetEncoding: newHuffmanEncoder(offsetCodeCount), + } +} + +func (w *huffmanBitWriter) reset(writer io.Writer) { + w.writer = writer + w.bits, w.nbits, w.nbytes, w.err = 0, 0, 0, nil + w.bytes = [bufferSize]byte{} +} + +func (w *huffmanBitWriter) flush() { + if w.err != nil { + w.nbits = 0 + return + } + n := w.nbytes + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + if w.nbits > 8 { // Avoid underflow + w.nbits -= 8 + } else { + w.nbits = 0 + } + n++ + } + w.bits = 0 + w.write(w.bytes[:n]) + w.nbytes = 0 +} + +func (w *huffmanBitWriter) write(b []byte) { + if w.err != nil { + return + } + _, w.err = w.writer.Write(b) +} + +func (w *huffmanBitWriter) writeBits(b int32, nb uint) { + if w.err != nil { + return + } + w.bits |= uint64(b) << w.nbits + w.nbits += nb + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + w.write(w.bytes[:n]) + n = 0 + } + w.nbytes = n + } +} + +func (w *huffmanBitWriter) writeBytes(bytes []byte) { + if w.err != nil { + return + } + n := w.nbytes + if w.nbits&7 != 0 { + w.err = InternalError("writeBytes with unfinished bits") + return + } + for w.nbits != 0 { + w.bytes[n] = byte(w.bits) + w.bits >>= 8 + w.nbits -= 8 + n++ + } + if n != 0 { + w.write(w.bytes[:n]) + } + w.nbytes = 0 + w.write(bytes) +} + +// RFC 1951 3.2.7 specifies a special run-length encoding for specifying +// the literal and offset lengths arrays (which are concatenated into a single +// array). 
This method generates that run-length encoding. +// +// The result is written into the codegen array, and the frequencies +// of each code is written into the codegenFreq array. +// Codes 0-15 are single byte codes. Codes 16-18 are followed by additional +// information. Code badCode is an end marker +// +// numLiterals The number of literals in literalEncoding +// numOffsets The number of offsets in offsetEncoding +// litenc, offenc The literal and offset encoder to use +func (w *huffmanBitWriter) generateCodegen(numLiterals int, numOffsets int, litEnc, offEnc *huffmanEncoder) { + for i := range w.codegenFreq { + w.codegenFreq[i] = 0 + } + // Note that we are using codegen both as a temporary variable for holding + // a copy of the frequencies, and as the place where we put the result. + // This is fine because the output is always shorter than the input used + // so far. + codegen := w.codegen // cache + // Copy the concatenated code sizes to codegen. Put a marker at the end. + cgnl := codegen[:numLiterals] + for i := range cgnl { + cgnl[i] = uint8(litEnc.codes[i].len) + } + + cgnl = codegen[numLiterals : numLiterals+numOffsets] + for i := range cgnl { + cgnl[i] = uint8(offEnc.codes[i].len) + } + codegen[numLiterals+numOffsets] = badCode + + size := codegen[0] + count := 1 + outIndex := 0 + for inIndex := 1; size != badCode; inIndex++ { + // INVARIANT: We have seen "count" copies of size that have not yet + // had output generated for them. + nextSize := codegen[inIndex] + if nextSize == size { + count++ + continue + } + // We need to generate codegen indicating "count" of size. + if size != 0 { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + count-- + for count >= 3 { + n := 6 + if n > count { + n = count + } + codegen[outIndex] = 16 + outIndex++ + codegen[outIndex] = uint8(n - 3) + outIndex++ + w.codegenFreq[16]++ + count -= n + } + } else { + for count >= 11 { + n := 138 + if n > count { + n = count + } + codegen[outIndex] = 18 + outIndex++ + codegen[outIndex] = uint8(n - 11) + outIndex++ + w.codegenFreq[18]++ + count -= n + } + if count >= 3 { + // count >= 3 && count <= 10 + codegen[outIndex] = 17 + outIndex++ + codegen[outIndex] = uint8(count - 3) + outIndex++ + w.codegenFreq[17]++ + count = 0 + } + } + count-- + for ; count >= 0; count-- { + codegen[outIndex] = size + outIndex++ + w.codegenFreq[size]++ + } + // Set up invariant for next time through the loop. + size = nextSize + count = 1 + } + // Marker indicating the end of the codegen. + codegen[outIndex] = badCode +} + +// dynamicSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) dynamicSize(litEnc, offEnc *huffmanEncoder, extraBits int) (size, numCodegens int) { + numCodegens = len(w.codegenFreq) + for numCodegens > 4 && w.codegenFreq[codegenOrder[numCodegens-1]] == 0 { + numCodegens-- + } + header := 3 + 5 + 5 + 4 + (3 * numCodegens) + + w.codegenEncoding.bitLength(w.codegenFreq[:]) + + int(w.codegenFreq[16])*2 + + int(w.codegenFreq[17])*3 + + int(w.codegenFreq[18])*7 + size = header + + litEnc.bitLength(w.literalFreq) + + offEnc.bitLength(w.offsetFreq) + + extraBits + + return size, numCodegens +} + +// fixedSize returns the size of dynamically encoded data in bits. +func (w *huffmanBitWriter) fixedSize(extraBits int) int { + return 3 + + fixedLiteralEncoding.bitLength(w.literalFreq) + + fixedOffsetEncoding.bitLength(w.offsetFreq) + + extraBits +} + +// storedSize calculates the stored size, including header. 
+// The function returns the size in bits and whether the block +// fits inside a single block. +func (w *huffmanBitWriter) storedSize(in []byte) (int, bool) { + if in == nil { + return 0, false + } + if len(in) <= maxStoreBlockSize { + return (len(in) + 5) * 8, true + } + return 0, false +} + +func (w *huffmanBitWriter) writeCode(c hcode) { + if w.err != nil { + return + } + w.bits |= uint64(c.code) << w.nbits + w.nbits += uint(c.len) + if w.nbits >= 48 { + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + n := w.nbytes + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n >= bufferFlushSize { + w.write(w.bytes[:n]) + n = 0 + } + w.nbytes = n + } +} + +// Write the header of a dynamic Huffman block to the output stream. +// +// numLiterals The number of literals specified in codegen +// numOffsets The number of offsets specified in codegen +// numCodegens The number of codegens used in codegen +func (w *huffmanBitWriter) writeDynamicHeader(numLiterals int, numOffsets int, numCodegens int, isEof bool) { + if w.err != nil { + return + } + var firstBits int32 = 4 + if isEof { + firstBits = 5 + } + w.writeBits(firstBits, 3) + w.writeBits(int32(numLiterals-257), 5) + w.writeBits(int32(numOffsets-1), 5) + w.writeBits(int32(numCodegens-4), 4) + + for i := 0; i < numCodegens; i++ { + value := uint(w.codegenEncoding.codes[codegenOrder[i]].len) + w.writeBits(int32(value), 3) + } + + i := 0 + for { + var codeWord int = int(w.codegen[i]) + i++ + if codeWord == badCode { + break + } + w.writeCode(w.codegenEncoding.codes[uint32(codeWord)]) + + switch codeWord { + case 16: + w.writeBits(int32(w.codegen[i]), 2) + i++ + break + case 17: + w.writeBits(int32(w.codegen[i]), 3) + i++ + break + case 18: + w.writeBits(int32(w.codegen[i]), 7) + i++ + break + } + } +} + +func (w *huffmanBitWriter) writeStoredHeader(length int, isEof bool) { + if w.err != nil { + return + } + var flag int32 + if isEof { + flag = 1 + } + w.writeBits(flag, 3) + w.flush() + w.writeBits(int32(length), 16) + w.writeBits(int32(^uint16(length)), 16) +} + +func (w *huffmanBitWriter) writeFixedHeader(isEof bool) { + if w.err != nil { + return + } + // Indicate that we are a fixed Huffman block + var value int32 = 2 + if isEof { + value = 3 + } + w.writeBits(value, 3) +} + +// writeBlock will write a block of tokens with the smallest encoding. +// The original input can be supplied, and if the huffman encoded data +// is larger than the original bytes, the data will be written as a +// stored block. +// If the input is nil, the tokens will always be Huffman encoded. +func (w *huffmanBitWriter) writeBlock(tokens []token, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens = append(tokens, endBlockMarker) + numLiterals, numOffsets := w.indexTokens(tokens) + + var extraBits int + storedSize, storable := w.storedSize(input) + if storable { + // We only bother calculating the costs of the extra bits required by + // the length of offset fields (which will be the same for both fixed + // and dynamic encoding), if we need to compare those two encodings + // against stored encoding. + for lengthCode := lengthCodesStart + 8; lengthCode < numLiterals; lengthCode++ { + // First eight length codes have extra size = 0. 
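+ // (lengthExtraBits[0..7] are all zero, so length codes 257..264,
+ // covering match lengths 3..10, never contribute extra bits.)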
+ extraBits += int(w.literalFreq[lengthCode]) * int(lengthExtraBits[lengthCode-lengthCodesStart]) + } + for offsetCode := 4; offsetCode < numOffsets; offsetCode++ { + // First four offset codes have extra size = 0. + extraBits += int(w.offsetFreq[offsetCode]) * int(offsetExtraBits[offsetCode]) + } + } + + // Figure out smallest code. + // Fixed Huffman baseline. + var literalEncoding = fixedLiteralEncoding + var offsetEncoding = fixedOffsetEncoding + var size = w.fixedSize(extraBits) + + // Dynamic Huffman? + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + dynamicSize, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, extraBits) + + if dynamicSize < size { + size = dynamicSize + literalEncoding = w.literalEncoding + offsetEncoding = w.offsetEncoding + } + + // Stored bytes? + if storable && storedSize < size { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + if literalEncoding == fixedLiteralEncoding { + w.writeFixedHeader(eof) + } else { + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + } + + // Write the tokens. + w.writeTokens(tokens, literalEncoding.codes, offsetEncoding.codes) +} + +// writeBlockDynamic encodes a block using a dynamic Huffman table. +// This should be used if the symbols used have a disproportionate +// histogram distribution. +// If input is supplied and the compression savings are below 1/16th of the +// input size the block is stored. +func (w *huffmanBitWriter) writeBlockDynamic(tokens []token, eof bool, input []byte) { + if w.err != nil { + return + } + + tokens = append(tokens, endBlockMarker) + numLiterals, numOffsets := w.indexTokens(tokens) + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, w.offsetEncoding) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + size, numCodegens := w.dynamicSize(w.literalEncoding, w.offsetEncoding, 0) + + // Store bytes, if we don't get a reasonable improvement. + if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Write Huffman table. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + + // Write the tokens. + w.writeTokens(tokens, w.literalEncoding.codes, w.offsetEncoding.codes) +} + +// indexTokens indexes a slice of tokens, and updates +// literalFreq and offsetFreq, and generates literalEncoding +// and offsetEncoding. +// The number of literal and offset tokens is returned. 
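+// For example, a block with no matches at all still reports
+// numOffsets == 1, because a single dummy offset frequency is counted
+// below so that the offset Huffman tree remains encodable.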
+func (w *huffmanBitWriter) indexTokens(tokens []token) (numLiterals, numOffsets int) { + for i := range w.literalFreq { + w.literalFreq[i] = 0 + } + for i := range w.offsetFreq { + w.offsetFreq[i] = 0 + } + + for _, t := range tokens { + if t < matchType { + w.literalFreq[t.literal()]++ + continue + } + length := t.length() + offset := t.offset() + w.literalFreq[lengthCodesStart+lengthCode(length)]++ + w.offsetFreq[offsetCode(offset)]++ + } + + // get the number of literals + numLiterals = len(w.literalFreq) + for w.literalFreq[numLiterals-1] == 0 { + numLiterals-- + } + // get the number of offsets + numOffsets = len(w.offsetFreq) + for numOffsets > 0 && w.offsetFreq[numOffsets-1] == 0 { + numOffsets-- + } + if numOffsets == 0 { + // We haven't found a single match. If we want to go with the dynamic encoding, + // we should count at least one offset to be sure that the offset huffman tree could be encoded. + w.offsetFreq[0] = 1 + numOffsets = 1 + } + w.literalEncoding.generate(w.literalFreq, 15) + w.offsetEncoding.generate(w.offsetFreq, 15) + return +} + +// writeTokens writes a slice of tokens to the output. +// codes for literal and offset encoding must be supplied. +func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) { + if w.err != nil { + return + } + for _, t := range tokens { + if t < matchType { + w.writeCode(leCodes[t.literal()]) + continue + } + // Write the length + length := t.length() + lengthCode := lengthCode(length) + w.writeCode(leCodes[lengthCode+lengthCodesStart]) + extraLengthBits := uint(lengthExtraBits[lengthCode]) + if extraLengthBits > 0 { + extraLength := int32(length - lengthBase[lengthCode]) + w.writeBits(extraLength, extraLengthBits) + } + // Write the offset + offset := t.offset() + offsetCode := offsetCode(offset) + w.writeCode(oeCodes[offsetCode]) + extraOffsetBits := uint(offsetExtraBits[offsetCode]) + if extraOffsetBits > 0 { + extraOffset := int32(offset - offsetBase[offsetCode]) + w.writeBits(extraOffset, extraOffsetBits) + } + } +} + +// huffOffset is a static offset encoder used for huffman only encoding. +// It can be reused since we will not be encoding offset values. +var huffOffset *huffmanEncoder + +func init() { + w := newHuffmanBitWriter(nil) + w.offsetFreq[0] = 1 + huffOffset = newHuffmanEncoder(offsetCodeCount) + huffOffset.generate(w.offsetFreq, 15) +} + +// writeBlockHuff encodes a block of bytes as either +// Huffman encoded literals or uncompressed bytes if the +// results only gains very little from compression. +func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte) { + if w.err != nil { + return + } + + // Clear histogram + for i := range w.literalFreq { + w.literalFreq[i] = 0 + } + + // Add everything as literals + histogram(input, w.literalFreq) + + w.literalFreq[endBlockMarker] = 1 + + const numLiterals = endBlockMarker + 1 + const numOffsets = 1 + + w.literalEncoding.generate(w.literalFreq, 15) + + // Figure out smallest code. + // Always use dynamic Huffman or Store + var numCodegens int + + // Generate codegen and codegenFrequencies, which indicates how to encode + // the literalEncoding and the offsetEncoding. + w.generateCodegen(numLiterals, numOffsets, w.literalEncoding, huffOffset) + w.codegenEncoding.generate(w.codegenFreq[:], 7) + size, numCodegens := w.dynamicSize(w.literalEncoding, huffOffset, 0) + + // Store bytes, if we don't get a reasonable improvement. 
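+ // As a concrete example, a 4096-byte input has ssize = (4096+5)*8 =
+ // 32808 bits, so the Huffman path below is taken only when its
+ // estimated size plus a one-sixteenth penalty still fits under that.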
+ if ssize, storable := w.storedSize(input); storable && ssize < (size+size>>4) { + w.writeStoredHeader(len(input), eof) + w.writeBytes(input) + return + } + + // Huffman. + w.writeDynamicHeader(numLiterals, numOffsets, numCodegens, eof) + encoding := w.literalEncoding.codes[:257] + n := w.nbytes + for _, t := range input { + // Bitwriting inlined, ~30% speedup + c := encoding[t] + w.bits |= uint64(c.code) << w.nbits + w.nbits += uint(c.len) + if w.nbits < 48 { + continue + } + // Store 6 bytes + bits := w.bits + w.bits >>= 48 + w.nbits -= 48 + bytes := w.bytes[n : n+6] + bytes[0] = byte(bits) + bytes[1] = byte(bits >> 8) + bytes[2] = byte(bits >> 16) + bytes[3] = byte(bits >> 24) + bytes[4] = byte(bits >> 32) + bytes[5] = byte(bits >> 40) + n += 6 + if n < bufferFlushSize { + continue + } + w.write(w.bytes[:n]) + if w.err != nil { + return // Return early in the event of write failures + } + n = 0 + } + w.nbytes = n + w.writeCode(encoding[endBlockMarker]) +} diff --git a/vendor/github.com/klauspost/compress/flate/huffman_code.go b/vendor/github.com/klauspost/compress/flate/huffman_code.go new file mode 100644 index 000000000..bdcbd823b --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/huffman_code.go @@ -0,0 +1,344 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package flate + +import ( + "math" + "sort" +) + +// hcode is a huffman code with a bit code and bit length. +type hcode struct { + code, len uint16 +} + +type huffmanEncoder struct { + codes []hcode + freqcache []literalNode + bitCount [17]int32 + lns byLiteral // stored to avoid repeated allocation in generate + lfs byFreq // stored to avoid repeated allocation in generate +} + +type literalNode struct { + literal uint16 + freq int32 +} + +// A levelInfo describes the state of the constructed tree for a given depth. +type levelInfo struct { + // Our level. for better printing + level int32 + + // The frequency of the last node at this level + lastFreq int32 + + // The frequency of the next character to add to this level + nextCharFreq int32 + + // The frequency of the next pair (from level below) to add to this level. + // Only valid if the "needed" value of the next lower level is 0. + nextPairFreq int32 + + // The number of chains remaining to generate for this level before moving + // up to the next level + needed int32 +} + +// set sets the code and length of an hcode. +func (h *hcode) set(code uint16, length uint16) { + h.len = length + h.code = code +} + +func maxNode() literalNode { return literalNode{math.MaxUint16, math.MaxInt32} } + +func newHuffmanEncoder(size int) *huffmanEncoder { + return &huffmanEncoder{codes: make([]hcode, size)} +} + +// Generates a HuffmanCode corresponding to the fixed literal table +func generateFixedLiteralEncoding() *huffmanEncoder { + h := newHuffmanEncoder(maxNumLit) + codes := h.codes + var ch uint16 + for ch = 0; ch < maxNumLit; ch++ { + var bits uint16 + var size uint16 + switch { + case ch < 144: + // size 8, 000110000 .. 10111111 + bits = ch + 48 + size = 8 + break + case ch < 256: + // size 9, 110010000 .. 111111111 + bits = ch + 400 - 144 + size = 9 + break + case ch < 280: + // size 7, 0000000 .. 0010111 + bits = ch - 256 + size = 7 + break + default: + // size 8, 11000000 .. 
11000111 + bits = ch + 192 - 280 + size = 8 + } + codes[ch] = hcode{code: reverseBits(bits, byte(size)), len: size} + } + return h +} + +func generateFixedOffsetEncoding() *huffmanEncoder { + h := newHuffmanEncoder(30) + codes := h.codes + for ch := range codes { + codes[ch] = hcode{code: reverseBits(uint16(ch), 5), len: 5} + } + return h +} + +var fixedLiteralEncoding *huffmanEncoder = generateFixedLiteralEncoding() +var fixedOffsetEncoding *huffmanEncoder = generateFixedOffsetEncoding() + +func (h *huffmanEncoder) bitLength(freq []int32) int { + var total int + for i, f := range freq { + if f != 0 { + total += int(f) * int(h.codes[i].len) + } + } + return total +} + +const maxBitsLimit = 16 + +// Return the number of literals assigned to each bit size in the Huffman encoding +// +// This method is only called when list.length >= 3 +// The cases of 0, 1, and 2 literals are handled by special case code. +// +// list An array of the literals with non-zero frequencies +// and their associated frequencies. The array is in order of increasing +// frequency, and has as its last element a special element with frequency +// MaxInt32 +// maxBits The maximum number of bits that should be used to encode any literal. +// Must be less than 16. +// return An integer array in which array[i] indicates the number of literals +// that should be encoded in i bits. +func (h *huffmanEncoder) bitCounts(list []literalNode, maxBits int32) []int32 { + if maxBits >= maxBitsLimit { + panic("flate: maxBits too large") + } + n := int32(len(list)) + list = list[0 : n+1] + list[n] = maxNode() + + // The tree can't have greater depth than n - 1, no matter what. This + // saves a little bit of work in some small cases + if maxBits > n-1 { + maxBits = n - 1 + } + + // Create information about each of the levels. + // A bogus "Level 0" whose sole purpose is so that + // level1.prev.needed==0. This makes level1.nextPairFreq + // be a legitimate value that never gets chosen. + var levels [maxBitsLimit]levelInfo + // leafCounts[i] counts the number of literals at the left + // of ancestors of the rightmost node at level i. + // leafCounts[i][j] is the number of literals at the left + // of the level j ancestor. + var leafCounts [maxBitsLimit][maxBitsLimit]int32 + + for level := int32(1); level <= maxBits; level++ { + // For every level, the first two items are the first two characters. + // We initialize the levels as if we had already figured this out. + levels[level] = levelInfo{ + level: level, + lastFreq: list[1].freq, + nextCharFreq: list[2].freq, + nextPairFreq: list[0].freq + list[1].freq, + } + leafCounts[level][level] = 2 + if level == 1 { + levels[level].nextPairFreq = math.MaxInt32 + } + } + + // We need a total of 2*n - 2 items at top level and have already generated 2. + levels[maxBits].needed = 2*n - 4 + + level := maxBits + for { + l := &levels[level] + if l.nextPairFreq == math.MaxInt32 && l.nextCharFreq == math.MaxInt32 { + // We've run out of both leafs and pairs. + // End all calculations for this level. + // To make sure we never come back to this level or any lower level, + // set nextPairFreq impossibly large. + l.needed = 0 + levels[level+1].nextPairFreq = math.MaxInt32 + level++ + continue + } + + prevFreq := l.lastFreq + if l.nextCharFreq < l.nextPairFreq { + // The next item on this row is a leaf node. + n := leafCounts[level][level] + 1 + l.lastFreq = l.nextCharFreq + // Lower leafCounts are the same of the previous node. 
+ leafCounts[level][level] = n + l.nextCharFreq = list[n].freq + } else { + // The next item on this row is a pair from the previous row. + // nextPairFreq isn't valid until we generate two + // more values in the level below + l.lastFreq = l.nextPairFreq + // Take leaf counts from the lower level, except counts[level] remains the same. + copy(leafCounts[level][:level], leafCounts[level-1][:level]) + levels[l.level-1].needed = 2 + } + + if l.needed--; l.needed == 0 { + // We've done everything we need to do for this level. + // Continue calculating one level up. Fill in nextPairFreq + // of that level with the sum of the two nodes we've just calculated on + // this level. + if l.level == maxBits { + // All done! + break + } + levels[l.level+1].nextPairFreq = prevFreq + l.lastFreq + level++ + } else { + // If we stole from below, move down temporarily to replenish it. + for levels[level-1].needed > 0 { + level-- + } + } + } + + // Somethings is wrong if at the end, the top level is null or hasn't used + // all of the leaves. + if leafCounts[maxBits][maxBits] != n { + panic("leafCounts[maxBits][maxBits] != n") + } + + bitCount := h.bitCount[:maxBits+1] + bits := 1 + counts := &leafCounts[maxBits] + for level := maxBits; level > 0; level-- { + // chain.leafCount gives the number of literals requiring at least "bits" + // bits to encode. + bitCount[bits] = counts[level] - counts[level-1] + bits++ + } + return bitCount +} + +// Look at the leaves and assign them a bit count and an encoding as specified +// in RFC 1951 3.2.2 +func (h *huffmanEncoder) assignEncodingAndSize(bitCount []int32, list []literalNode) { + code := uint16(0) + for n, bits := range bitCount { + code <<= 1 + if n == 0 || bits == 0 { + continue + } + // The literals list[len(list)-bits] .. list[len(list)-bits] + // are encoded using "bits" bits, and get the values + // code, code + 1, .... The code values are + // assigned in literal order (not frequency order). + chunk := list[len(list)-int(bits):] + + h.lns.sort(chunk) + for _, node := range chunk { + h.codes[node.literal] = hcode{code: reverseBits(code, uint8(n)), len: uint16(n)} + code++ + } + list = list[0 : len(list)-int(bits)] + } +} + +// Update this Huffman Code object to be the minimum code for the specified frequency count. +// +// freq An array of frequencies, in which frequency[i] gives the frequency of literal i. +// maxBits The maximum number of bits to use for any literal. +func (h *huffmanEncoder) generate(freq []int32, maxBits int32) { + if h.freqcache == nil { + // Allocate a reusable buffer with the longest possible frequency table. + // Possible lengths are codegenCodeCount, offsetCodeCount and maxNumLit. + // The largest of these is maxNumLit, so we allocate for that case. + h.freqcache = make([]literalNode, maxNumLit+1) + } + list := h.freqcache[:len(freq)+1] + // Number of non-zero literals + count := 0 + // Set list to be the set of all non-zero literals and their frequencies + for i, f := range freq { + if f != 0 { + list[count] = literalNode{uint16(i), f} + count++ + } else { + list[count] = literalNode{} + h.codes[i].len = 0 + } + } + list[len(freq)] = literalNode{} + + list = list[:count] + if count <= 2 { + // Handle the small cases here, because they are awkward for the general case code. With + // two or fewer literals, everything has bit length 1. + for i, node := range list { + // "list" is in order of increasing literal value. 
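+ // With two symbols, for example, they receive the one-bit codes 0 and
+ // 1 in increasing literal order; a single symbol gets code 0 of length
+ // 1, the degenerate coding the decoder explicitly tolerates.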
+ h.codes[node.literal].set(uint16(i), 1) + } + return + } + h.lfs.sort(list) + + // Get the number of literals for each bit count + bitCount := h.bitCounts(list, maxBits) + // And do the assignment + h.assignEncodingAndSize(bitCount, list) +} + +type byLiteral []literalNode + +func (s *byLiteral) sort(a []literalNode) { + *s = byLiteral(a) + sort.Sort(s) +} + +func (s byLiteral) Len() int { return len(s) } + +func (s byLiteral) Less(i, j int) bool { + return s[i].literal < s[j].literal +} + +func (s byLiteral) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +type byFreq []literalNode + +func (s *byFreq) sort(a []literalNode) { + *s = byFreq(a) + sort.Sort(s) +} + +func (s byFreq) Len() int { return len(s) } + +func (s byFreq) Less(i, j int) bool { + if s[i].freq == s[j].freq { + return s[i].literal < s[j].literal + } + return s[i].freq < s[j].freq +} + +func (s byFreq) Swap(i, j int) { s[i], s[j] = s[j], s[i] } diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go new file mode 100644 index 000000000..075901b5f --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -0,0 +1,868 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flate implements the DEFLATE compressed data format, described in +// RFC 1951. The gzip and zlib packages implement access to DEFLATE-based file +// formats. +package flate + +import ( + "bufio" + "io" + "strconv" + "sync" +) + +const ( + maxCodeLen = 16 // max length of Huffman code + maxCodeLenMask = 15 // mask for max length of Huffman code + // The next three numbers come from the RFC section 3.2.7, with the + // additional proviso in section 3.2.5 which implies that distance codes + // 30 and 31 should never occur in compressed data. + maxNumLit = 286 + maxNumDist = 30 + numCodes = 19 // number of codes in Huffman meta-code +) + +// Initialize the fixedHuffmanDecoder only once upon first use. +var fixedOnce sync.Once +var fixedHuffmanDecoder huffmanDecoder + +// A CorruptInputError reports the presence of corrupt input at a given offset. +type CorruptInputError int64 + +func (e CorruptInputError) Error() string { + return "flate: corrupt input before offset " + strconv.FormatInt(int64(e), 10) +} + +// An InternalError reports an error in the flate code itself. +type InternalError string + +func (e InternalError) Error() string { return "flate: internal error: " + string(e) } + +// A ReadError reports an error encountered while reading input. +// +// Deprecated: No longer returned. +type ReadError struct { + Offset int64 // byte offset where error occurred + Err error // error returned by underlying Read +} + +func (e *ReadError) Error() string { + return "flate: read error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() +} + +// A WriteError reports an error encountered while writing output. +// +// Deprecated: No longer returned. +type WriteError struct { + Offset int64 // byte offset where error occurred + Err error // error returned by underlying Write +} + +func (e *WriteError) Error() string { + return "flate: write error at offset " + strconv.FormatInt(e.Offset, 10) + ": " + e.Err.Error() +} + +// Resetter resets a ReadCloser returned by NewReader or NewReaderDict to +// to switch to a new underlying Reader. This permits reusing a ReadCloser +// instead of allocating a new one. 
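+//
+// A hedged usage sketch (r1 and r2 are hypothetical readers):
+//
+//	zr := NewReader(r1)
+//	// ... read zr to EOF ...
+//	zr.(Resetter).Reset(r2, nil) // reuse the decompressor for a second stream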
+type Resetter interface { + // Reset discards any buffered data and resets the Resetter as if it was + // newly initialized with the given reader. + Reset(r io.Reader, dict []byte) error +} + +// The data structure for decoding Huffman tables is based on that of +// zlib. There is a lookup table of a fixed bit width (huffmanChunkBits), +// For codes smaller than the table width, there are multiple entries +// (each combination of trailing bits has the same value). For codes +// larger than the table width, the table contains a link to an overflow +// table. The width of each entry in the link table is the maximum code +// size minus the chunk width. +// +// Note that you can do a lookup in the table even without all bits +// filled. Since the extra bits are zero, and the DEFLATE Huffman codes +// have the property that shorter codes come before longer ones, the +// bit length estimate in the result is a lower bound on the actual +// number of bits. +// +// See the following: +// http://www.gzip.org/algorithm.txt + +// chunk & 15 is number of bits +// chunk >> 4 is value, including table link + +const ( + huffmanChunkBits = 9 + huffmanNumChunks = 1 << huffmanChunkBits + huffmanCountMask = 15 + huffmanValueShift = 4 +) + +type huffmanDecoder struct { + min int // the minimum code length + chunks *[huffmanNumChunks]uint32 // chunks as described above + links [][]uint32 // overflow links + linkMask uint32 // mask the width of the link table +} + +// Initialize Huffman decoding tables from array of code lengths. +// Following this function, h is guaranteed to be initialized into a complete +// tree (i.e., neither over-subscribed nor under-subscribed). The exception is a +// degenerate case where the tree has only a single symbol with length 1. Empty +// trees are permitted. +func (h *huffmanDecoder) init(bits []int) bool { + // Sanity enables additional runtime tests during Huffman + // table construction. It's intended to be used during + // development to supplement the currently ad-hoc unit tests. + const sanity = false + + if h.chunks == nil { + h.chunks = &[huffmanNumChunks]uint32{} + } + if h.min != 0 { + *h = huffmanDecoder{chunks: h.chunks, links: h.links} + } + + // Count number of codes of each length, + // compute min and max length. + var count [maxCodeLen]int + var min, max int + for _, n := range bits { + if n == 0 { + continue + } + if min == 0 || n < min { + min = n + } + if n > max { + max = n + } + count[n&maxCodeLenMask]++ + } + + // Empty tree. The decompressor.huffSym function will fail later if the tree + // is used. Technically, an empty tree is only valid for the HDIST tree and + // not the HCLEN and HLIT tree. However, a stream with an empty HCLEN tree + // is guaranteed to fail since it will attempt to use the tree to decode the + // codes for the HLIT and HDIST trees. Similarly, an empty HLIT tree is + // guaranteed to fail later since the compressed data section must be + // composed of at least one symbol (the end-of-block marker). + if max == 0 { + return true + } + + code := 0 + var nextcode [maxCodeLen]int + for i := min; i <= max; i++ { + code <<= 1 + nextcode[i&maxCodeLenMask] = code + code += count[i&maxCodeLenMask] + } + + // Check that the coding is complete (i.e., that we've + // assigned all 2-to-the-max possible bit sequences). + // Exception: To be compatible with zlib, we also need to + // accept degenerate single-code codings. See also + // TestDegenerateHuffmanCoding. 
+ if code != 1<<uint(max) && !(code == 1 && max == 1) {
+ return false
+ }
+
+ h.min = min
+ if max > huffmanChunkBits {
+ numLinks := 1 << (uint(max) - huffmanChunkBits)
+ h.linkMask = uint32(numLinks - 1)
+
+ // create link tables
+ link := nextcode[huffmanChunkBits+1] >> 1
+ if cap(h.links) < huffmanNumChunks-link {
+ h.links = make([][]uint32, huffmanNumChunks-link)
+ } else {
+ h.links = h.links[:huffmanNumChunks-link]
+ }
+ for j := uint(link); j < huffmanNumChunks; j++ {
+ reverse := int(reverseByte[j>>8]) | int(reverseByte[j&0xff])<<8
+ reverse >>= uint(16 - huffmanChunkBits)
+ off := j - uint(link)
+ if sanity && h.chunks[reverse] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[reverse] = uint32(off<<huffmanValueShift | (huffmanChunkBits + 1))
+ h.links[off] = make([]uint32, numLinks)
+ }
+ }
+
+ for i, n := range bits {
+ if n == 0 {
+ continue
+ }
+ code := nextcode[n&maxCodeLenMask]
+ nextcode[n&maxCodeLenMask]++
+ chunk := uint32(i<<huffmanValueShift | n)
+ reverse := int(reverseByte[code>>8]) | int(reverseByte[code&0xff])<<8
+ reverse >>= uint(16 - n)
+ if n <= huffmanChunkBits {
+ for off := reverse; off < len(h.chunks); off += 1 << uint(n) {
+ // We should never need to overwrite
+ // an existing chunk. Also, 0 is
+ // never a valid chunk, because the
+ // lower 4 "count" bits should be
+ // between 1 and 15.
+ if sanity && h.chunks[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ h.chunks[off] = chunk
+ }
+ } else {
+ j := reverse & (huffmanNumChunks - 1)
+ if sanity && h.chunks[j]&huffmanCountMask != huffmanChunkBits+1 {
+ // Longer codes should have been
+ // associated with a link table above.
+ panic("impossible: not an indirect chunk")
+ }
+ value := h.chunks[j] >> huffmanValueShift
+ linktab := h.links[value]
+ reverse >>= huffmanChunkBits
+ for off := reverse; off < len(linktab); off += 1 << uint(n-huffmanChunkBits) {
+ if sanity && linktab[off] != 0 {
+ panic("impossible: overwriting existing chunk")
+ }
+ linktab[off] = chunk
+ }
+ }
+ }
+
+ if sanity {
+ // Above we've sanity checked that we never overwrote
+ // an existing entry. Here we additionally check that
+ // we filled the tables completely.
+ for i, chunk := range h.chunks {
+ if chunk == 0 {
+ // As an exception, in the degenerate
+ // single-code case, we allow odd
+ // chunks to be missing.
+ if code == 1 && i%2 == 1 {
+ continue
+ }
+ panic("impossible: missing chunk")
+ }
+ }
+ for _, linktab := range h.links {
+ for _, chunk := range linktab {
+ if chunk == 0 {
+ panic("impossible: missing chunk")
+ }
+ }
+ }
+ }
+
+ return true
+}
+
+// The actual read interface needed by NewReader.
+// If the passed in io.Reader does not also have ReadByte,
+// the NewReader will introduce its own buffering.
+type Reader interface {
+ io.Reader
+ io.ByteReader
+}
+
+// Decompress state.
+type decompressor struct {
+ // Input source.
+ r Reader
+ roffset int64
+
+ // Input bits, in top of b.
+ b uint32
+ nb uint
+
+ // Huffman decoders for literal/length, distance.
+ h1, h2 huffmanDecoder
+
+ // Length arrays used to define Huffman codes.
+ bits *[maxNumLit + maxNumDist]int
+ codebits *[numCodes]int
+
+ // Output history, buffer.
+ dict dictDecoder
+
+ // Temporary buffer (avoids repeated allocation).
+ buf [4]byte
+
+ // Next step in the decompression,
+ // and decompression state.
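+ // The step/stepState pair below makes the decompressor a resumable
+ // state machine: Read and WriteTo invoke step, and a step such as
+ // huffmanBlock records its position in stepState whenever the output
+ // window fills, so the next call resumes exactly where it paused.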
+ step func(*decompressor) + stepState int + final bool + err error + toRead []byte + hl, hd *huffmanDecoder + copyLen int + copyDist int +} + +func (f *decompressor) nextBlock() { + for f.nb < 1+2 { + if f.err = f.moreBits(); f.err != nil { + return + } + } + f.final = f.b&1 == 1 + f.b >>= 1 + typ := f.b & 3 + f.b >>= 2 + f.nb -= 1 + 2 + switch typ { + case 0: + f.dataBlock() + case 1: + // compressed, fixed Huffman tables + f.hl = &fixedHuffmanDecoder + f.hd = nil + f.huffmanBlock() + case 2: + // compressed, dynamic Huffman tables + if f.err = f.readHuffman(); f.err != nil { + break + } + f.hl = &f.h1 + f.hd = &f.h2 + f.huffmanBlock() + default: + // 3 is reserved. + f.err = CorruptInputError(f.roffset) + } +} + +func (f *decompressor) Read(b []byte) (int, error) { + for { + if len(f.toRead) > 0 { + n := copy(b, f.toRead) + f.toRead = f.toRead[n:] + if len(f.toRead) == 0 { + return n, f.err + } + return n, nil + } + if f.err != nil { + return 0, f.err + } + f.step(f) + if f.err != nil && len(f.toRead) == 0 { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + } + } +} + +// Support the io.WriteTo interface for io.Copy and friends. +func (f *decompressor) WriteTo(w io.Writer) (int64, error) { + total := int64(0) + flushed := false + for { + if len(f.toRead) > 0 { + n, err := w.Write(f.toRead) + total += int64(n) + if err != nil { + f.err = err + return total, err + } + if n != len(f.toRead) { + return total, io.ErrShortWrite + } + f.toRead = f.toRead[:0] + } + if f.err != nil && flushed { + if f.err == io.EOF { + return total, nil + } + return total, f.err + } + if f.err == nil { + f.step(f) + } + if len(f.toRead) == 0 && f.err != nil && !flushed { + f.toRead = f.dict.readFlush() // Flush what's left in case of error + flushed = true + } + } +} + +func (f *decompressor) Close() error { + if f.err == io.EOF { + return nil + } + return f.err +} + +// RFC 1951 section 3.2.7. +// Compression with dynamic Huffman codes + +var codeOrder = [...]int{16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15} + +func (f *decompressor) readHuffman() error { + // HLIT[5], HDIST[5], HCLEN[4]. + for f.nb < 5+5+4 { + if err := f.moreBits(); err != nil { + return err + } + } + nlit := int(f.b&0x1F) + 257 + if nlit > maxNumLit { + return CorruptInputError(f.roffset) + } + f.b >>= 5 + ndist := int(f.b&0x1F) + 1 + if ndist > maxNumDist { + return CorruptInputError(f.roffset) + } + f.b >>= 5 + nclen := int(f.b&0xF) + 4 + // numCodes is 19, so nclen is always valid. + f.b >>= 4 + f.nb -= 5 + 5 + 4 + + // (HCLEN+4)*3 bits: code lengths in the magic codeOrder order. + for i := 0; i < nclen; i++ { + for f.nb < 3 { + if err := f.moreBits(); err != nil { + return err + } + } + f.codebits[codeOrder[i]] = int(f.b & 0x7) + f.b >>= 3 + f.nb -= 3 + } + for i := nclen; i < len(codeOrder); i++ { + f.codebits[codeOrder[i]] = 0 + } + if !f.h1.init(f.codebits[0:]) { + return CorruptInputError(f.roffset) + } + + // HLIT + 257 code lengths, HDIST + 1 code lengths, + // using the code length Huffman code. + for i, n := 0, nlit+ndist; i < n; { + x, err := f.huffSym(&f.h1) + if err != nil { + return err + } + if x < 16 { + // Actual length. + f.bits[i] = x + i++ + continue + } + // Repeat previous length or zero. 
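+ // For example, code 17 carries 3 extra bits and inserts a run of
+ // 3..10 zero lengths, while code 18 carries 7 extra bits and inserts
+ // 11..138 zeros; code 16 repeats the previous length 3..6 times
+ // (RFC 1951 section 3.2.7).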
+ var rep int
+ var nb uint
+ var b int
+ switch x {
+ default:
+ return InternalError("unexpected length code")
+ case 16:
+ rep = 3
+ nb = 2
+ if i == 0 {
+ return CorruptInputError(f.roffset)
+ }
+ b = f.bits[i-1]
+ case 17:
+ rep = 3
+ nb = 3
+ b = 0
+ case 18:
+ rep = 11
+ nb = 7
+ b = 0
+ }
+ for f.nb < nb {
+ if err := f.moreBits(); err != nil {
+ return err
+ }
+ }
+ rep += int(f.b & uint32(1<<nb-1))
+ f.b >>= nb
+ f.nb -= nb
+ if i+rep > n {
+ return CorruptInputError(f.roffset)
+ }
+ for j := 0; j < rep; j++ {
+ f.bits[i] = b
+ i++
+ }
+ }
+
+ if !f.h1.init(f.bits[0:nlit]) || !f.h2.init(f.bits[nlit:nlit+ndist]) {
+ return CorruptInputError(f.roffset)
+ }
+
+ // As an optimization, we can initialize the min bits to read at a time
+ // for the HLIT tree to the length of the EOB marker since we know that
+ // every block must terminate with one. This preserves the property that
+ // we never read any extra bytes after the end of the DEFLATE stream.
+ if f.h1.min < f.bits[endBlockMarker] {
+ f.h1.min = f.bits[endBlockMarker]
+ }
+
+ return nil
+}
+
+// Decode a single Huffman block from f.
+// hl and hd are the Huffman states for the lit/length values
+// and the distance values, respectively. If hd == nil, using the
+// fixed distance encoding associated with fixed Huffman blocks.
+func (f *decompressor) huffmanBlock() {
+ const (
+ stateInit = iota // Zero value must be stateInit
+ stateDict
+ )
+
+ switch f.stepState {
+ case stateInit:
+ goto readLiteral
+ case stateDict:
+ goto copyHistory
+ }
+
+readLiteral:
+ // Read literal and/or (length, distance) according to RFC section 3.2.3.
+ {
+ v, err := f.huffSym(f.hl)
+ if err != nil {
+ f.err = err
+ return
+ }
+ var n uint // number of bits extra
+ var length int
+ switch {
+ case v < 256:
+ f.dict.writeByte(byte(v))
+ if f.dict.availWrite() == 0 {
+ f.toRead = f.dict.readFlush()
+ f.step = (*decompressor).huffmanBlock
+ f.stepState = stateInit
+ return
+ }
+ goto readLiteral
+ case v == 256:
+ f.finishBlock()
+ return
+ // otherwise, reference to older data
+ case v < 265:
+ length = v - (257 - 3)
+ n = 0
+ case v < 269:
+ length = v*2 - (265*2 - 11)
+ n = 1
+ case v < 273:
+ length = v*4 - (269*4 - 19)
+ n = 2
+ case v < 277:
+ length = v*8 - (273*8 - 35)
+ n = 3
+ case v < 281:
+ length = v*16 - (277*16 - 67)
+ n = 4
+ case v < 285:
+ length = v*32 - (281*32 - 131)
+ n = 5
+ case v < maxNumLit:
+ length = 258
+ n = 0
+ default:
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+ if n > 0 {
+ for f.nb < n {
+ if err = f.moreBits(); err != nil {
+ f.err = err
+ return
+ }
+ }
+ length += int(f.b & uint32(1<<n-1))
+ f.b >>= n
+ f.nb -= n
+ }
+
+ var dist int
+ if f.hd == nil {
+ for f.nb < 5 {
+ if err = f.moreBits(); err != nil {
+ f.err = err
+ return
+ }
+ }
+ dist = int(reverseByte[(f.b&0x1F)<<3])
+ f.b >>= 5
+ f.nb -= 5
+ } else {
+ if dist, err = f.huffSym(f.hd); err != nil {
+ f.err = err
+ return
+ }
+ }
+
+ switch {
+ case dist < 4:
+ dist++
+ case dist < maxNumDist:
+ nb := uint(dist-2) >> 1
+ // have 1 bit in bottom of dist, need nb more.
+ extra := (dist & 1) << nb
+ for f.nb < nb {
+ if err = f.moreBits(); err != nil {
+ f.err = err
+ return
+ }
+ }
+ extra |= int(f.b & uint32(1<<nb-1))
+ f.b >>= nb
+ f.nb -= nb
+ dist = 1<<(nb+1) + 1 + extra
+ default:
+ f.err = CorruptInputError(f.roffset)
+ return
+ }
+
+ // No check on length; encoding can be prescient.
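+ // In particular, length may exceed dist: dist = 1 with length = 258
+ // run-length encodes 258 copies of the most recent byte. Only the
+ // distance is validated against the available history below.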
+ if dist > f.dict.histSize() { + f.err = CorruptInputError(f.roffset) + return + } + + f.copyLen, f.copyDist = length, dist + goto copyHistory + } + +copyHistory: + // Perform a backwards copy according to RFC section 3.2.3. + { + cnt := f.dict.tryWriteCopy(f.copyDist, f.copyLen) + if cnt == 0 { + cnt = f.dict.writeCopy(f.copyDist, f.copyLen) + } + f.copyLen -= cnt + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).huffmanBlock // We need to continue this work + f.stepState = stateDict + return + } + goto readLiteral + } +} + +// Copy a single uncompressed data block from input to output. +func (f *decompressor) dataBlock() { + // Uncompressed. + // Discard current half-byte. + f.nb = 0 + f.b = 0 + + // Length then ones-complement of length. + nr, err := io.ReadFull(f.r, f.buf[0:4]) + f.roffset += int64(nr) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + f.err = err + return + } + n := int(f.buf[0]) | int(f.buf[1])<<8 + nn := int(f.buf[2]) | int(f.buf[3])<<8 + if uint16(nn) != uint16(^n) { + f.err = CorruptInputError(f.roffset) + return + } + + if n == 0 { + f.toRead = f.dict.readFlush() + f.finishBlock() + return + } + + f.copyLen = n + f.copyData() +} + +// copyData copies f.copyLen bytes from the underlying reader into f.hist. +// It pauses for reads when f.hist is full. +func (f *decompressor) copyData() { + buf := f.dict.writeSlice() + if len(buf) > f.copyLen { + buf = buf[:f.copyLen] + } + + cnt, err := io.ReadFull(f.r, buf) + f.roffset += int64(cnt) + f.copyLen -= cnt + f.dict.writeMark(cnt) + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + f.err = err + return + } + + if f.dict.availWrite() == 0 || f.copyLen > 0 { + f.toRead = f.dict.readFlush() + f.step = (*decompressor).copyData + return + } + f.finishBlock() +} + +func (f *decompressor) finishBlock() { + if f.final { + if f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() + } + f.err = io.EOF + } + f.step = (*decompressor).nextBlock +} + +func (f *decompressor) moreBits() error { + c, err := f.r.ReadByte() + if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return err + } + f.roffset++ + f.b |= uint32(c) << f.nb + f.nb += 8 + return nil +} + +// Read the next Huffman-encoded symbol from f according to h. +func (f *decompressor) huffSym(h *huffmanDecoder) (int, error) { + // Since a huffmanDecoder can be empty or be composed of a degenerate tree + // with single element, huffSym must error on these two edge cases. In both + // cases, the chunks slice will be 0 for the invalid sequence, leading it + // satisfy the n == 0 check below. + n := uint(h.min) + for { + for f.nb < n { + if err := f.moreBits(); err != nil { + return 0, err + } + } + chunk := h.chunks[f.b&(huffmanNumChunks-1)] + n = uint(chunk & huffmanCountMask) + if n > huffmanChunkBits { + chunk = h.links[chunk>>huffmanValueShift][(f.b>>huffmanChunkBits)&h.linkMask] + n = uint(chunk & huffmanCountMask) + } + if n <= f.nb { + if n == 0 { + f.err = CorruptInputError(f.roffset) + return 0, f.err + } + f.b >>= n + f.nb -= n + return int(chunk >> huffmanValueShift), nil + } + } +} + +func makeReader(r io.Reader) Reader { + if rr, ok := r.(Reader); ok { + return rr + } + return bufio.NewReader(r) +} + +func fixedHuffmanDecoderInit() { + fixedOnce.Do(func() { + // These come from the RFC section 3.2.6. 
+ var bits [288]int + for i := 0; i < 144; i++ { + bits[i] = 8 + } + for i := 144; i < 256; i++ { + bits[i] = 9 + } + for i := 256; i < 280; i++ { + bits[i] = 7 + } + for i := 280; i < 288; i++ { + bits[i] = 8 + } + fixedHuffmanDecoder.init(bits[:]) + }) +} + +func (f *decompressor) Reset(r io.Reader, dict []byte) error { + *f = decompressor{ + r: makeReader(r), + bits: f.bits, + codebits: f.codebits, + h1: f.h1, + h2: f.h2, + dict: f.dict, + step: (*decompressor).nextBlock, + } + f.dict.init(maxMatchOffset, dict) + return nil +} + +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, nil) + return &f +} + +// NewReaderDict is like NewReader but initializes the reader +// with a preset dictionary. The returned Reader behaves as if +// the uncompressed data stream started with the given dictionary, +// which has already been read. NewReaderDict is typically used +// to read data compressed by NewWriterDict. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { + fixedHuffmanDecoderInit() + + var f decompressor + f.r = makeReader(r) + f.bits = new([maxNumLit + maxNumDist]int) + f.codebits = new([numCodes]int) + f.step = (*decompressor).nextBlock + f.dict.init(maxMatchOffset, dict) + return &f +} diff --git a/vendor/github.com/klauspost/compress/flate/reverse_bits.go b/vendor/github.com/klauspost/compress/flate/reverse_bits.go new file mode 100644 index 000000000..c1a02720d --- /dev/null +++ b/vendor/github.com/klauspost/compress/flate/reverse_bits.go @@ -0,0 +1,48 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package flate
+
+var reverseByte = [256]byte{
+	0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
+	0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
+	0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
+	0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
+	0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
+	0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
+	0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
+	0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
+	0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
+	0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
+	0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
+	0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
+	0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
+	0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
+	0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
+	0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
+	0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
+	0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
+	0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
+	0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
+	0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
+	0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
+	0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
+	0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
+	0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
+	0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
+	0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
+	0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
+	0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
+	0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
+	0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
+	0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff,
+}
+
+func reverseUint16(v uint16) uint16 {
+	return uint16(reverseByte[v>>8]) | uint16(reverseByte[v&0xFF])<<8
+}
+
+func reverseBits(number uint16, bitLength byte) uint16 {
+	return reverseUint16(number << uint8(16-bitLength))
+}
diff --git a/vendor/github.com/klauspost/compress/flate/snappy.go b/vendor/github.com/klauspost/compress/flate/snappy.go
new file mode 100644
index 000000000..d853320a7
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/snappy.go
@@ -0,0 +1,900 @@
+// Copyright 2011 The Snappy-Go Authors. All rights reserved.
+// Modified for deflate by Klaus Post (c) 2015.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+// emitLiteral writes the bytes in lit to dst as literal tokens.
+func emitLiteral(dst *tokens, lit []byte) {
+	ol := int(dst.n)
+	for i, v := range lit {
+		dst.tokens[(i+ol)&maxStoreBlockSize] = token(v)
+	}
+	dst.n += uint16(len(lit))
+}
+
+// emitCopy writes a single copy token for the given offset and length to dst.
+func emitCopy(dst *tokens, offset, length int) { + dst.tokens[dst.n] = matchToken(uint32(length-3), uint32(offset-minOffsetSize)) + dst.n++ +} + +type snappyEnc interface { + Encode(dst *tokens, src []byte) + Reset() +} + +func newSnappy(level int) snappyEnc { + switch level { + case 1: + return &snappyL1{} + case 2: + return &snappyL2{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} + case 3: + return &snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}} + case 4: + return &snappyL4{snappyL3{snappyGen: snappyGen{cur: maxStoreBlockSize, prev: make([]byte, 0, maxStoreBlockSize)}}} + default: + panic("invalid level specified") + } +} + +const ( + tableBits = 14 // Bits used in the table + tableSize = 1 << tableBits // Size of the table + tableMask = tableSize - 1 // Mask for table indices. Redundant, but can eliminate bounds checks. + tableShift = 32 - tableBits // Right-shift to get the tableBits most significant bits of a uint32. + baseMatchOffset = 1 // The smallest match offset + baseMatchLength = 3 // The smallest match length per the RFC section 3.2.5 + maxMatchOffset = 1 << 15 // The largest match offset +) + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +func hash(u uint32) uint32 { + return (u * 0x1e35a7bd) >> tableShift +} + +// snappyL1 encapsulates level 1 compression +type snappyL1 struct{} + +func (e *snappyL1) Reset() {} + +func (e *snappyL1) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 16 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + return + } + + // Initialize the hash table. + // + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxStoreBlockSize and maxStoreBlockSize == 65535. + var table [tableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s)) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. 
This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS)) + if s-candidate <= maxMatchOffset && load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of Snappy's: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + s1 := base + maxMatchLength + if s1 > len(src) { + s1 = len(src) + } + a := src[s:s1] + b := src[candidate+4:] + b = b[:len(a)] + l := len(a) + for i := range a { + if a[i] != b[i] { + l = i + break + } + } + s += l + + // matchToken is flate's equivalent of Snappy's emitCopy. + dst.tokens[dst.n] = matchToken(uint32(s-base-baseMatchLength), uint32(base-candidate-baseMatchOffset)) + dst.n++ + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x >> 0)) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x >> 8)) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if s-candidate > maxMatchOffset || uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x >> 16)) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + emitLiteral(dst, src[nextEmit:]) + } +} + +type tableEntry struct { + val uint32 + offset int32 +} + +func load3232(b []byte, i int32) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. 
+ return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load6432(b []byte, i int32) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// snappyGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type snappyGen struct { + prev []byte + cur int32 +} + +// snappyGen maintains the table for matches, +// and the previous byte block for level 2. +// This is the generic implementation. +type snappyL2 struct { + snappyGen + table [tableSize]tableEntry +} + +// EncodeL2 uses a similar algorithm to level 1, but is capable +// of matching across blocks giving better compression at a small slowdown. +func (e *snappyL2) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntry{} + } + e.cur = maxStoreBlockSize + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := int32(32) + + nextS := s + var candidate tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntry{offset: s + e.cur, val: cv} + nextHash = hash(now) + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || cv != candidate.val { + // Out of range or not matched. + cv = now + continue + } + break + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. 
+ emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + e.table[hash(cv)&tableMask] = tableEntry{offset: t + e.cur, val: cv} + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-1) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntry{offset: e.cur + s - 1, val: uint32(x)} + x >>= 8 + currHash := hash(uint32(x)) + candidate = e.table[currHash&tableMask] + e.table[currHash&tableMask] = tableEntry{offset: e.cur + s, val: uint32(x)} + + offset := s - (candidate.offset - e.cur) + if offset > maxMatchOffset || uint32(x) != candidate.val { + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +type tableEntryPrev struct { + Cur tableEntry + Prev tableEntry +} + +// snappyL3 +type snappyL3 struct { + snappyGen + table [tableSize]tableEntryPrev +} + +// Encode uses a similar algorithm to level 2, will check up to two candidates. +func (e *snappyL3) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 1 + minNonLiteralBlockSize = 1 + 1 + inputMargin + ) + + // Protect against e.cur wraparound. + if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. 
+ nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := int32(32) + + nextS := s + var candidate tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + nextHash = hash(now) + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + break + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + break + } + } + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + nextHash = hash(cv) + e.table[nextHash&tableMask] = tableEntryPrev{ + Prev: e.table[nextHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + t, val: cv}, + } + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-3 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. 
At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load6432(src, s-3) + prevHash := hash(uint32(x)) + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 3, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 2, val: uint32(x)}, + } + x >>= 8 + prevHash = hash(uint32(x)) + + e.table[prevHash&tableMask] = tableEntryPrev{ + Prev: e.table[prevHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + s - 1, val: uint32(x)}, + } + x >>= 8 + currHash := hash(uint32(x)) + candidates := e.table[currHash&tableMask] + cv = uint32(x) + e.table[currHash&tableMask] = tableEntryPrev{ + Prev: candidates.Cur, + Cur: tableEntry{offset: s + e.cur, val: cv}, + } + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset <= maxMatchOffset { + continue + } + } + } + cv = uint32(x >> 8) + nextHash = hash(cv) + s++ + break + } + } + +emitRemainder: + if int(nextEmit) < len(src) { + emitLiteral(dst, src[nextEmit:]) + } + e.cur += int32(len(src)) + e.prev = e.prev[:len(src)] + copy(e.prev, src) +} + +// snappyL4 +type snappyL4 struct { + snappyL3 +} + +// Encode uses a similar algorithm to level 3, +// but will check up to two candidates if first isn't long enough. +func (e *snappyL4) Encode(dst *tokens, src []byte) { + const ( + inputMargin = 8 - 3 + minNonLiteralBlockSize = 1 + 1 + inputMargin + matchLenGood = 12 + ) + + // Protect against e.cur wraparound. + if e.cur > 1<<30 { + for i := range e.table[:] { + e.table[i] = tableEntryPrev{} + } + e.snappyGen = snappyGen{cur: maxStoreBlockSize, prev: e.prev[:0]} + } + + // This check isn't in the Snappy implementation, but there, the caller + // instead of the callee handles this case. + if len(src) < minNonLiteralBlockSize { + // We do not fill the token table. + // This will be picked up by caller. + dst.n = uint16(len(src)) + e.cur += maxStoreBlockSize + e.prev = e.prev[:0] + return + } + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := int32(len(src) - inputMargin) + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := int32(0) + s := int32(0) + cv := load3232(src, s) + nextHash := hash(cv) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. 
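+		// (In the loop below the stride is skip>>5, so it stays at one byte
+		// for the first 32 probes and keeps growing until a match resets it.)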
+ // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := int32(32) + + nextS := s + var candidate tableEntry + var candidateAlt tableEntry + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidates := e.table[nextHash&tableMask] + now := load3232(src, nextS) + e.table[nextHash&tableMask] = tableEntryPrev{Prev: candidates.Cur, Cur: tableEntry{offset: s + e.cur, val: cv}} + nextHash = hash(now) + + // Check both candidates + candidate = candidates.Cur + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset { + offset = s - (candidates.Prev.offset - e.cur) + if cv == candidates.Prev.val && offset < maxMatchOffset { + candidateAlt = candidates.Prev + } + break + } + } else { + // We only check if value mismatches. + // Offset will always be invalid in other cases. + candidate = candidates.Prev + if cv == candidate.val { + offset := s - (candidate.offset - e.cur) + if offset < maxMatchOffset { + break + } + } + } + cv = now + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + emitLiteral(dst, src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + + // Extend the 4-byte match as long as possible. + // + s += 4 + t := candidate.offset - e.cur + 4 + l := e.matchlen(s, t, src) + // Try alternative candidate if match length < matchLenGood. + if l < matchLenGood-4 && candidateAlt.offset != 0 { + t2 := candidateAlt.offset - e.cur + 4 + l2 := e.matchlen(s, t2, src) + if l2 > l { + l = l2 + t = t2 + } + } + // matchToken is flate's equivalent of Snappy's emitCopy. (length,offset) + dst.tokens[dst.n] = matchToken(uint32(l+4-baseMatchLength), uint32(s-t-baseMatchOffset)) + dst.n++ + s += l + nextEmit = s + if s >= sLimit { + t += l + // Index first pair after match end. + if int(t+4) < len(src) && t > 0 { + cv := load3232(src, t) + nextHash = hash(cv) + e.table[nextHash&tableMask] = tableEntryPrev{ + Prev: e.table[nextHash&tableMask].Cur, + Cur: tableEntry{offset: e.cur + t, val: cv}, + } + } + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-3 to s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
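+			// Each x >>= 8 below slides the loaded 8-byte window one byte
+			// forward, so the table gains entries for positions s-3, s-2,
+			// s-1 and s from a single load6432.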
+			x := load6432(src, s-3)
+			prevHash := hash(uint32(x))
+			e.table[prevHash&tableMask] = tableEntryPrev{
+				Prev: e.table[prevHash&tableMask].Cur,
+				Cur:  tableEntry{offset: e.cur + s - 3, val: uint32(x)},
+			}
+			x >>= 8
+			prevHash = hash(uint32(x))
+
+			e.table[prevHash&tableMask] = tableEntryPrev{
+				Prev: e.table[prevHash&tableMask].Cur,
+				Cur:  tableEntry{offset: e.cur + s - 2, val: uint32(x)},
+			}
+			x >>= 8
+			prevHash = hash(uint32(x))
+
+			e.table[prevHash&tableMask] = tableEntryPrev{
+				Prev: e.table[prevHash&tableMask].Cur,
+				Cur:  tableEntry{offset: e.cur + s - 1, val: uint32(x)},
+			}
+			x >>= 8
+			currHash := hash(uint32(x))
+			candidates := e.table[currHash&tableMask]
+			cv = uint32(x)
+			e.table[currHash&tableMask] = tableEntryPrev{
+				Prev: candidates.Cur,
+				Cur:  tableEntry{offset: s + e.cur, val: cv},
+			}
+
+			// Check both candidates
+			candidate = candidates.Cur
+			candidateAlt = tableEntry{}
+			if cv == candidate.val {
+				offset := s - (candidate.offset - e.cur)
+				if offset <= maxMatchOffset {
+					offset = s - (candidates.Prev.offset - e.cur)
+					if cv == candidates.Prev.val && offset <= maxMatchOffset {
+						candidateAlt = candidates.Prev
+					}
+					continue
+				}
+			} else {
+				// We only check if value mismatches.
+				// Offset will always be invalid in other cases.
+				candidate = candidates.Prev
+				if cv == candidate.val {
+					offset := s - (candidate.offset - e.cur)
+					if offset <= maxMatchOffset {
+						continue
+					}
+				}
+			}
+			cv = uint32(x >> 8)
+			nextHash = hash(cv)
+			s++
+			break
+		}
+	}
+
+emitRemainder:
+	if int(nextEmit) < len(src) {
+		emitLiteral(dst, src[nextEmit:])
+	}
+	e.cur += int32(len(src))
+	e.prev = e.prev[:len(src)]
+	copy(e.prev, src)
+}
+
+func (e *snappyGen) matchlen(s, t int32, src []byte) int32 {
+	s1 := int(s) + maxMatchLength - 4
+	if s1 > len(src) {
+		s1 = len(src)
+	}
+
+	// If we are inside the current block
+	if t >= 0 {
+		b := src[t:]
+		a := src[s:s1]
+		b = b[:len(a)]
+		// Extend the match to be as long as possible.
+		for i := range a {
+			if a[i] != b[i] {
+				return int32(i)
+			}
+		}
+		return int32(len(a))
+	}
+
+	// We found a match in the previous block.
+	tp := int32(len(e.prev)) + t
+	if tp < 0 {
+		return 0
+	}
+
+	// Extend the match to be as long as possible.
+	a := src[s:s1]
+	b := e.prev[tp:]
+	if len(b) > len(a) {
+		b = b[:len(a)]
+	}
+	a = a[:len(b)]
+	for i := range b {
+		if a[i] != b[i] {
+			return int32(i)
+		}
+	}
+
+	// If we reached our limit, we matched everything we are
+	// allowed to in the previous block and we return.
+	n := int32(len(b))
+	if int(s+n) == s1 {
+		return n
+	}
+
+	// Continue looking for more matches in the current block.
+	a = src[s+n : s1]
+	b = src[:len(a)]
+	for i := range a {
+		if a[i] != b[i] {
+			return int32(i) + n
+		}
+	}
+	return int32(len(a)) + n
+}
+
+// Reset the encoding table.
+func (e *snappyGen) Reset() {
+	e.prev = e.prev[:0]
+	e.cur += maxMatchOffset
+}
diff --git a/vendor/github.com/klauspost/compress/flate/token.go b/vendor/github.com/klauspost/compress/flate/token.go
new file mode 100644
index 000000000..4f275ea61
--- /dev/null
+++ b/vendor/github.com/klauspost/compress/flate/token.go
@@ -0,0 +1,115 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flate
+
+import "fmt"
+
+const (
+	// 2 bits:   type   0 = literal  1=EOF  2=Match   3=Unused
+	// 8 bits:   xlength = length - MIN_MATCH_LENGTH
+	// 22 bits   xoffset = offset - MIN_OFFSET_SIZE, or literal
+	lengthShift = 22
+	offsetMask  = 1<<lengthShift - 1
+	typeMask    = 3 << 30
+	literalType = 0 << 30
+	matchType   = 1 << 30
+)
+
+// The length code for length X (MIN_MATCH_LENGTH <= X <= MAX_MATCH_LENGTH)
+// is lengthCodes[length - MIN_MATCH_LENGTH]
+var lengthCodes = [...]uint32{
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 8,
+	9, 9, 10, 10, 11, 11, 12, 12, 12, 12,
+	13, 13, 13, 13, 14, 14, 14, 14, 15, 15,
+	15, 15, 16, 16, 16, 16, 16, 16, 16, 16,
+	17, 17, 17, 17, 17, 17, 17, 17, 18, 18,
+	18, 18, 18, 18, 18, 18, 19, 19, 19, 19,
+	19, 19, 19, 19, 20, 20, 20, 20, 20, 20,
+	20, 20, 20, 20, 20, 20, 20, 20, 20, 20,
+	21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
+	21, 21, 21, 21, 21, 21, 22, 22, 22, 22,
+	22, 22, 22, 22, 22, 22, 22, 22, 22, 22,
+	22, 22, 23, 23, 23, 23, 23, 23, 23, 23,
+	23, 23, 23, 23, 23, 23, 23, 23, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	24, 24, 24, 24, 24, 24, 24, 24, 24, 24,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 25, 25, 25, 25, 25, 25, 25, 25,
+	25, 25, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 26, 26, 26, 26, 26, 26,
+	26, 26, 26, 26, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 27, 27, 27, 27, 27,
+	27, 27, 27, 27, 27, 28,
+}
+
+var offsetCodes = [...]uint32{
+	0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7,
+	8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9,
+	10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
+	11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11, 11,
+	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+	12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+	13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+	15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15,
+}
+
+type token uint32
+
+// Convert a literal into a literal token.
+func literalToken(literal uint32) token { return token(literalType + literal) }
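+
+// For reference, packing and unpacking a match are pure bit arithmetic
+// (a sketch; the helpers below are the real implementation):
+//
+//	tok := matchType + xlength<<lengthShift + xoffset
+//	xlength := (tok - matchType) >> lengthShift
+//	xoffset := tok & offsetMask
+//
+// e.g. a 258-byte match at offset 4096 is stored with xlength = 255 and
+// xoffset = 4095, both well inside their 8- and 22-bit fields.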
+
+// Convert a < xlength, xoffset > pair into a match token.
+func matchToken(xlength uint32, xoffset uint32) token {
+	return token(matchType + xlength<<lengthShift + xoffset)
+}
+
+func matchTokend(xlength uint32, xoffset uint32) token {
+	if xlength > maxMatchLength || xoffset > maxMatchOffset {
+		panic(fmt.Sprintf("Invalid match: len: %d, offset: %d\n", xlength, xoffset))
+		return token(matchType)
+	}
+	return token(matchType + xlength<<lengthShift + xoffset)
+}
+
+// Returns the type of a token
+func (t token) typ() uint32 { return uint32(t) & typeMask }
+
+// Returns the literal of a literal token
+func (t token) literal() uint32 { return uint32(t - literalType) }
+
+// Returns the extra offset of a match token
+func (t token) offset() uint32 { return uint32(t) & offsetMask }
+
+func (t token) length() uint32 { return uint32((t - matchType) >> lengthShift) }
+
+func lengthCode(len uint32) uint32 { return lengthCodes[len] }
+
+// Returns the offset code corresponding to a specific offset
+func offsetCode(off uint32) uint32 {
+	if off < uint32(len(offsetCodes)) {
+		return offsetCodes[off]
+	} else if off>>7 < uint32(len(offsetCodes)) {
+		return offsetCodes[off>>7] + 14
+	} else {
+		return offsetCodes[off>>14] + 28
+	}
+}
diff --git a/vendor/github.com/klauspost/pgzip/GO_LICENSE b/vendor/github.com/klauspost/pgzip/GO_LICENSE
new file mode 100644
index 000000000..744875676
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/GO_LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012 The Go Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/pgzip/LICENSE b/vendor/github.com/klauspost/pgzip/LICENSE
new file mode 100644
index 000000000..2bdc0d751
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/LICENSE
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Klaus Post
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
diff --git a/vendor/github.com/klauspost/pgzip/README.md b/vendor/github.com/klauspost/pgzip/README.md
new file mode 100644
index 000000000..81000996c
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/README.md
@@ -0,0 +1,136 @@
+pgzip
+=====
+
+Go parallel gzip compression/decompression. This is a fully gzip-compatible drop-in replacement for "compress/gzip".
+
+This will split compression into blocks that are compressed in parallel.
+This can be useful for compressing large amounts of data. The output is a standard gzip file.
+
+The gzip decompression is modified so it decompresses ahead of the current reader.
+This means that reads will be non-blocking if the decompressor can keep ahead of your code reading from it.
+CRC calculation also takes place in a separate goroutine.
+
+You should only use this if you are (de)compressing large amounts of data,
+say **more than 1MB** at a time, otherwise you will not see any benefit,
+and it will likely be faster to use the internal gzip library
+or [this package](https://github.com/klauspost/compress).
+
+It is important to note that this library creates and reads *standard gzip files*.
+You do not have to match the compressor/decompressor to get the described speedups,
+and the gzip files are fully compatible with other gzip readers/writers.
+
+A golang variant of this is [bgzf](https://godoc.org/github.com/biogo/hts/bgzf),
+which has the same features, as well as seeking in the resulting file.
+The only drawback is a slightly bigger overhead compared to this and pure gzip.
+See a comparison below.
+
+[![GoDoc][1]][2] [![Build Status][3]][4]
+
+[1]: https://godoc.org/github.com/klauspost/pgzip?status.svg
+[2]: https://godoc.org/github.com/klauspost/pgzip
+[3]: https://travis-ci.org/klauspost/pgzip.svg
+[4]: https://travis-ci.org/klauspost/pgzip
+
+Installation
+====
+```go get github.com/klauspost/pgzip/...```
+
+You might need to get/update the dependencies:
+
+```
+go get -u github.com/klauspost/compress
+go get -u github.com/klauspost/crc32
+```
+
+Usage
+====
+[Godoc Documentation](https://godoc.org/github.com/klauspost/pgzip)
+
+To use as a replacement for gzip, exchange
+
+```import "compress/gzip"```
+with
+```import gzip "github.com/klauspost/pgzip"```.
+
+# Changes
+
+* Oct 6, 2016: Fixed an issue if the destination writer returned an error.
+* Oct 6, 2016: Better buffer reuse, should now generate less garbage.
+* Oct 6, 2016: Output does not change based on write sizes.
+* Dec 8, 2015: Decoder now supports the io.WriterTo interface, giving a speedup and less GC pressure.
+* Oct 9, 2015: Reduced allocations by ~35 by using sync.Pool. ~15% overall speedup.
+
+Changes in [github.com/klauspost/compress](https://github.com/klauspost/compress#changelog) are also carried over, so see that for more changes.
+
+## Compression
+The simplest way to use this is to simply do the same as you would when using [compress/gzip](http://golang.org/pkg/compress/gzip).
+
+To change the block size, use the added (*pgzip.Writer).SetConcurrency(blockSize, blocks int) function. With this you can control the approximate size of your blocks, as well as how many you want to be processing in parallel.
+The defaults are SetConcurrency(250000, 16), meaning blocks are split at 250000 bytes and up to 16 blocks can be compressed in parallel before the writer blocks.
+
+
+Example:
+```
+var b bytes.Buffer
+w := gzip.NewWriter(&b)
+w.SetConcurrency(100000, 10)
+w.Write([]byte("hello, world\n"))
+w.Close()
+```
+
+To get any performance gains, you should be compressing at least 1 megabyte of data at a time.
+
+You should have a block size of at least 100k, and at least as many blocks as the number of cores you would like to utilize; about twice that number of blocks is best.
+
+Another side effect of this is that it is likely to speed up your other code, since writes to the compressor only block if the compressor is already compressing the number of blocks you have specified. This also means you don't have to worry about buffering input to the compressor.
+
+## Decompression
+
+Decompression works similarly to compression. That means you simply call pgzip the same way as you would call [compress/gzip](http://golang.org/pkg/compress/gzip).
+
+The only difference is that if you want to specify your own readahead, you have to use `pgzip.NewReaderN(r io.Reader, blockSize, blocks int)` to get a reader with your custom block sizes. The `blockSize` is the size of each block decoded, and `blocks` is the maximum number of blocks that are decoded ahead. A short read-side sketch follows the performance numbers below.
+
+See [Example on playground](http://play.golang.org/p/uHv1B5NbDh)
+
+Performance
+====
+## Compression
+
+See my blog post in [Benchmarks of Golang Gzip](https://blog.klauspost.com/go-gzipdeflate-benchmarks/).
+
+Compression cost is usually about 0.2% with default settings and a block size of 250k.
+
+Example with GOMAXPROCS set to 8 (quad core with 8 hyperthreads)
+
+Content is [Matt Mahoney's 10GB corpus](http://mattmahoney.net/dc/10gb.html). Compression level 6.
+
+Compressor | MB/sec | speedup | size | size overhead (lower=better)
+------------|----------|---------|------|---------
+[gzip](http://golang.org/pkg/compress/gzip) (golang) | 7.21MB/s | 1.0x | 4786608902 | 0%
+[gzip](http://github.com/klauspost/compress/gzip) (klauspost) | 10.98MB/s | 1.52x | 4781331645 | -0.11%
+[pgzip](https://github.com/klauspost/pgzip) (klauspost) | 50.76MB/s | 7.04x | 4784121440 | -0.052%
+[bgzf](https://godoc.org/github.com/biogo/hts/bgzf) (biogo) | 38.65MB/s | 5.36x | 4924899484 | 2.889%
+[pargzip](https://godoc.org/github.com/golang/build/pargzip) (builder) | 32.00MB/s | 4.44x | 4791226567 | 0.096%
+
+pgzip also contains a [linear time compression](https://github.com/klauspost/compress#linear-time-compression) mode that will allow compression at ~150MB per core per second, independent of the content.
+
+See the [complete sheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) for different content types and compression settings.
+
+## Decompression
+
+The decompression speedup comes from being able to do other work while decompression is taking place.
+
+In the example above, the numbers are as follows on a 4 CPU machine:
+
+Decompressor | Time | Speedup
+-------------|------|--------
+[gzip](http://golang.org/pkg/compress/gzip) (golang) | 1m28.85s | 0%
+[pgzip](https://github.com/klauspost/pgzip) (golang) | 43.48s | 104%
+
+But wait, since gzip decompression is inherently single-threaded (aside from CRC calculation), how can it be more than 100% faster? Because pgzip, by design, also acts as a buffer.
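+
+As promised above, a minimal read-side sketch using `NewReaderN` (the file
+name is hypothetical, the usual imports are assumed, and error handling is
+abbreviated):
+
+```
+f, _ := os.Open("in.gz")
+defer f.Close()
+
+// 250000-byte blocks, up to 16 decoded ahead of the consumer.
+r, err := gzip.NewReaderN(f, 250000, 16)
+if err != nil {
+	log.Fatal(err)
+}
+defer r.Close()
+io.Copy(os.Stdout, r)
+```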
+When using unbuffered gzip, you are also waiting for I/O when you are decompressing. If the gzip decoder can keep up, it will always have data ready for your reader, and you will not be waiting for input to the gzip decompressor to complete.
+
+This is pretty much an optimal situation for pgzip, but it reflects the most common use cases for CPU-intensive gzip usage.
+
+I haven't included [bgzf](https://godoc.org/github.com/biogo/hts/bgzf) in this comparison, since it can only decompress files created by a compatible encoder, and therefore cannot be considered a generic gzip decompressor. But if you are able to compress your files with a bgzf-compatible program, you can expect it to scale beyond 100%.
+
+# License
+This contains large portions of code from the Go repository; see GO_LICENSE for more information. The changes are released under the MIT License. See LICENSE for more information.
diff --git a/vendor/github.com/klauspost/pgzip/circle.yml b/vendor/github.com/klauspost/pgzip/circle.yml
new file mode 100644
index 000000000..67b2b1628
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/circle.yml
@@ -0,0 +1,7 @@
+test:
+  pre:
+    - go vet ./...
+
+  override:
+    - go test -v -cpu=1,2,4 .
+    - go test -v -cpu=2 -race -short .
\ No newline at end of file
diff --git a/vendor/github.com/klauspost/pgzip/gunzip.go b/vendor/github.com/klauspost/pgzip/gunzip.go
new file mode 100644
index 000000000..f0e8fcb3d
--- /dev/null
+++ b/vendor/github.com/klauspost/pgzip/gunzip.go
@@ -0,0 +1,573 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pgzip implements reading and writing of gzip format compressed files,
+// as specified in RFC 1952.
+//
+// This is a drop-in replacement for "compress/gzip".
+// This will split compression into blocks that are compressed in parallel.
+// This can be useful for compressing large amounts of data.
+// The gzip decompression has not been modified, but remains in the package,
+// so you can use it as a complete replacement for "compress/gzip".
+//
+// See more at https://github.com/klauspost/pgzip
+package pgzip
+
+import (
+	"bufio"
+	"errors"
+	"hash"
+	"io"
+	"sync"
+	"time"
+
+	"github.com/klauspost/compress/flate"
+	"github.com/klauspost/crc32"
+)
+
+const (
+	gzipID1     = 0x1f
+	gzipID2     = 0x8b
+	gzipDeflate = 8
+	flagText    = 1 << 0
+	flagHdrCrc  = 1 << 1
+	flagExtra   = 1 << 2
+	flagName    = 1 << 3
+	flagComment = 1 << 4
+)
+
+func makeReader(r io.Reader) flate.Reader {
+	if rr, ok := r.(flate.Reader); ok {
+		return rr
+	}
+	return bufio.NewReader(r)
+}
+
+var (
+	// ErrChecksum is returned when reading GZIP data that has an invalid checksum.
+	ErrChecksum = errors.New("gzip: invalid checksum")
+	// ErrHeader is returned when reading GZIP data that has an invalid header.
+	ErrHeader = errors.New("gzip: invalid header")
+)
+
+// The gzip file stores a header giving metadata about the compressed file.
+// That header is exposed as the fields of the Writer and Reader structs.
+type Header struct {
+	Comment string    // comment
+	Extra   []byte    // "extra data"
+	ModTime time.Time // modification time
+	Name    string    // file name
+	OS      byte      // operating system type
+}
+
+// A Reader is an io.Reader that can be read to retrieve
+// uncompressed data from a gzip-format compressed file.
+//
+// In general, a gzip file can be a concatenation of gzip files,
+// each with its own header. Reads from the Reader
+// return the concatenation of the uncompressed data of each.
+// Only the first header is recorded in the Reader fields. +// +// Gzip files store a length and checksum of the uncompressed data. +// The Reader will return a ErrChecksum when Read +// reaches the end of the uncompressed data if it does not +// have the expected length or checksum. Clients should treat data +// returned by Read as tentative until they receive the io.EOF +// marking the end of the data. +type Reader struct { + Header + r flate.Reader + decompressor io.ReadCloser + digest hash.Hash32 + size uint32 + flg byte + buf [512]byte + err error + closeErr chan error + multistream bool + + readAhead chan read + roff int // read offset + current []byte + closeReader chan struct{} + lastBlock bool + blockSize int + blocks int + + activeRA bool // Indication if readahead is active + mu sync.Mutex // Lock for above + + blockPool chan []byte +} + +type read struct { + b []byte + err error +} + +// NewReader creates a new Reader reading the given reader. +// The implementation buffers input and may read more data than necessary from r. +// It is the caller's responsibility to call Close on the Reader when done. +func NewReader(r io.Reader) (*Reader, error) { + z := new(Reader) + z.blocks = defaultBlocks + z.blockSize = defaultBlockSize + z.r = makeReader(r) + z.digest = crc32.NewIEEE() + z.multistream = true + z.blockPool = make(chan []byte, z.blocks) + for i := 0; i < z.blocks; i++ { + z.blockPool <- make([]byte, z.blockSize) + } + if err := z.readHeader(true); err != nil { + return nil, err + } + return z, nil +} + +// NewReaderN creates a new Reader reading the given reader. +// The implementation buffers input and may read more data than necessary from r. +// It is the caller's responsibility to call Close on the Reader when done. +// +// With this you can control the approximate size of your blocks, +// as well as how many blocks you want to have prefetched. +// +// Default values for this is blockSize = 250000, blocks = 16, +// meaning up to 16 blocks of maximum 250000 bytes will be +// prefetched. +func NewReaderN(r io.Reader, blockSize, blocks int) (*Reader, error) { + z := new(Reader) + z.blocks = blocks + z.blockSize = blockSize + z.r = makeReader(r) + z.digest = crc32.NewIEEE() + z.multistream = true + + // Account for too small values + if z.blocks <= 0 { + z.blocks = defaultBlocks + } + if z.blockSize <= 512 { + z.blockSize = defaultBlockSize + } + z.blockPool = make(chan []byte, z.blocks) + for i := 0; i < z.blocks; i++ { + z.blockPool <- make([]byte, z.blockSize) + } + if err := z.readHeader(true); err != nil { + return nil, err + } + return z, nil +} + +// Reset discards the Reader z's state and makes it equivalent to the +// result of its original state from NewReader, but reading from r instead. +// This permits reusing a Reader rather than allocating a new one. +func (z *Reader) Reset(r io.Reader) error { + z.killReadAhead() + z.r = makeReader(r) + z.digest = crc32.NewIEEE() + z.size = 0 + z.err = nil + z.multistream = true + + // Account for uninitialized values + if z.blocks <= 0 { + z.blocks = defaultBlocks + } + if z.blockSize <= 512 { + z.blockSize = defaultBlockSize + } + + if z.blockPool == nil { + z.blockPool = make(chan []byte, z.blocks) + for i := 0; i < z.blocks; i++ { + z.blockPool <- make([]byte, z.blockSize) + } + } + + return z.readHeader(true) +} + +// Multistream controls whether the reader supports multistream files. 
+// +// If enabled (the default), the Reader expects the input to be a sequence +// of individually gzipped data streams, each with its own header and +// trailer, ending at EOF. The effect is that the concatenation of a sequence +// of gzipped files is treated as equivalent to the gzip of the concatenation +// of the sequence. This is standard behavior for gzip readers. +// +// Calling Multistream(false) disables this behavior; disabling the behavior +// can be useful when reading file formats that distinguish individual gzip +// data streams or mix gzip data streams with other data streams. +// In this mode, when the Reader reaches the end of the data stream, +// Read returns io.EOF. If the underlying reader implements io.ByteReader, +// it will be left positioned just after the gzip stream. +// To start the next stream, call z.Reset(r) followed by z.Multistream(false). +// If there is no next stream, z.Reset(r) will return io.EOF. +func (z *Reader) Multistream(ok bool) { + z.multistream = ok +} + +// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950). +func get4(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func (z *Reader) readString() (string, error) { + var err error + needconv := false + for i := 0; ; i++ { + if i >= len(z.buf) { + return "", ErrHeader + } + z.buf[i], err = z.r.ReadByte() + if err != nil { + return "", err + } + if z.buf[i] > 0x7f { + needconv = true + } + if z.buf[i] == 0 { + // GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1). + if needconv { + s := make([]rune, 0, i) + for _, v := range z.buf[0:i] { + s = append(s, rune(v)) + } + return string(s), nil + } + return string(z.buf[0:i]), nil + } + } +} + +func (z *Reader) read2() (uint32, error) { + _, err := io.ReadFull(z.r, z.buf[0:2]) + if err != nil { + return 0, err + } + return uint32(z.buf[0]) | uint32(z.buf[1])<<8, nil +} + +func (z *Reader) readHeader(save bool) error { + z.killReadAhead() + + _, err := io.ReadFull(z.r, z.buf[0:10]) + if err != nil { + return err + } + if z.buf[0] != gzipID1 || z.buf[1] != gzipID2 || z.buf[2] != gzipDeflate { + return ErrHeader + } + z.flg = z.buf[3] + if save { + z.ModTime = time.Unix(int64(get4(z.buf[4:8])), 0) + // z.buf[8] is xfl, ignored + z.OS = z.buf[9] + } + z.digest.Reset() + z.digest.Write(z.buf[0:10]) + + if z.flg&flagExtra != 0 { + n, err := z.read2() + if err != nil { + return err + } + data := make([]byte, n) + if _, err = io.ReadFull(z.r, data); err != nil { + return err + } + if save { + z.Extra = data + } + } + + var s string + if z.flg&flagName != 0 { + if s, err = z.readString(); err != nil { + return err + } + if save { + z.Name = s + } + } + + if z.flg&flagComment != 0 { + if s, err = z.readString(); err != nil { + return err + } + if save { + z.Comment = s + } + } + + if z.flg&flagHdrCrc != 0 { + n, err := z.read2() + if err != nil { + return err + } + sum := z.digest.Sum32() & 0xFFFF + if n != sum { + return ErrHeader + } + } + + z.digest.Reset() + z.decompressor = flate.NewReader(z.r) + z.doReadAhead() + return nil +} + +func (z *Reader) killReadAhead() error { + z.mu.Lock() + defer z.mu.Unlock() + if z.activeRA { + if z.closeReader != nil { + close(z.closeReader) + } + + // Wait for decompressor to be closed and return error, if any. + e, ok := <-z.closeErr + z.activeRA = false + if !ok { + // Channel is closed, so if there was any error it has already been returned. + return nil + } + return e + } + return nil +} + +// Starts readahead. 
+// Will return on error (including io.EOF) +// or when z.closeReader is closed. +func (z *Reader) doReadAhead() { + z.mu.Lock() + defer z.mu.Unlock() + z.activeRA = true + + if z.blocks <= 0 { + z.blocks = defaultBlocks + } + if z.blockSize <= 512 { + z.blockSize = defaultBlockSize + } + ra := make(chan read, z.blocks) + z.readAhead = ra + closeReader := make(chan struct{}, 0) + z.closeReader = closeReader + z.lastBlock = false + closeErr := make(chan error, 1) + z.closeErr = closeErr + z.size = 0 + z.roff = 0 + z.current = nil + decomp := z.decompressor + + go func() { + defer func() { + closeErr <- decomp.Close() + close(closeErr) + close(ra) + }() + + // We hold a local reference to digest, since + // it way be changed by reset. + digest := z.digest + var wg sync.WaitGroup + for { + var buf []byte + select { + case buf = <-z.blockPool: + case <-closeReader: + return + } + buf = buf[0:z.blockSize] + // Try to fill the buffer + n, err := io.ReadFull(decomp, buf) + if err == io.ErrUnexpectedEOF { + if n > 0 { + err = nil + } else { + // If we got zero bytes, we need to establish if + // we reached end of stream or truncated stream. + _, err = decomp.Read([]byte{}) + if err == io.EOF { + err = nil + } + } + } + if n < len(buf) { + buf = buf[0:n] + } + wg.Wait() + wg.Add(1) + go func() { + digest.Write(buf) + wg.Done() + }() + z.size += uint32(n) + + // If we return any error, out digest must be ready + if err != nil { + wg.Wait() + } + select { + case z.readAhead <- read{b: buf, err: err}: + case <-closeReader: + // Sent on close, we don't care about the next results + return + } + if err != nil { + return + } + } + }() +} + +func (z *Reader) Read(p []byte) (n int, err error) { + if z.err != nil { + return 0, z.err + } + if len(p) == 0 { + return 0, nil + } + + for { + if len(z.current) == 0 && !z.lastBlock { + read := <-z.readAhead + + if read.err != nil { + // If not nil, the reader will have exited + z.closeReader = nil + + if read.err != io.EOF { + z.err = read.err + return + } + if read.err == io.EOF { + z.lastBlock = true + err = nil + } + } + z.current = read.b + z.roff = 0 + } + avail := z.current[z.roff:] + if len(p) >= len(avail) { + // If len(p) >= len(current), return all content of current + n = copy(p, avail) + z.blockPool <- z.current + z.current = nil + if z.lastBlock { + err = io.EOF + break + } + } else { + // We copy as much as there is space for + n = copy(p, avail) + z.roff += n + } + return + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + z.err = err + return 0, err + } + crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) + sum := z.digest.Sum32() + if sum != crc32 || isize != z.size { + z.err = ErrChecksum + return 0, z.err + } + + // File is ok; should we attempt reading one more? + if !z.multistream { + return 0, io.EOF + } + + // Is there another? + if err = z.readHeader(false); err != nil { + z.err = err + return + } + + // Yes. Reset and read from it. + return z.Read(p) +} + +func (z *Reader) WriteTo(w io.Writer) (n int64, err error) { + total := int64(0) + for { + if z.err != nil { + return total, z.err + } + // We write both to output and digest. 
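+		// Note: blocks arrive in order from the readahead goroutine, which
+		// also feeds the CRC-32 digest, so only the copy to w happens here
+		// and the trailer check below stays valid.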
+ for { + // Read from input + read := <-z.readAhead + if read.err != nil { + // If not nil, the reader will have exited + z.closeReader = nil + + if read.err != io.EOF { + z.err = read.err + return total, z.err + } + if read.err == io.EOF { + z.lastBlock = true + err = nil + } + } + // Write what we got + n, err := w.Write(read.b) + if n != len(read.b) { + return total, io.ErrShortWrite + } + total += int64(n) + if err != nil { + return total, err + } + // Put block back + z.blockPool <- read.b + if z.lastBlock { + break + } + } + + // Finished file; check checksum + size. + if _, err := io.ReadFull(z.r, z.buf[0:8]); err != nil { + z.err = err + return total, err + } + crc32, isize := get4(z.buf[0:4]), get4(z.buf[4:8]) + sum := z.digest.Sum32() + if sum != crc32 || isize != z.size { + z.err = ErrChecksum + return total, z.err + } + // File is ok; should we attempt reading one more? + if !z.multistream { + return total, nil + } + + // Is there another? + err = z.readHeader(false) + if err == io.EOF { + return total, nil + } + if err != nil { + z.err = err + return total, err + } + } +} + +// Close closes the Reader. It does not close the underlying io.Reader. +func (z *Reader) Close() error { + return z.killReadAhead() +} diff --git a/vendor/github.com/klauspost/pgzip/gzip.go b/vendor/github.com/klauspost/pgzip/gzip.go new file mode 100644 index 000000000..9b97a0f5a --- /dev/null +++ b/vendor/github.com/klauspost/pgzip/gzip.go @@ -0,0 +1,501 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package pgzip + +import ( + "bytes" + "errors" + "fmt" + "hash" + "io" + "sync" + "time" + + "github.com/klauspost/compress/flate" + "github.com/klauspost/crc32" +) + +const ( + defaultBlockSize = 256 << 10 + tailSize = 16384 + defaultBlocks = 16 +) + +// These constants are copied from the flate package, so that code that imports +// "compress/gzip" does not also have to import "compress/flate". +const ( + NoCompression = flate.NoCompression + BestSpeed = flate.BestSpeed + BestCompression = flate.BestCompression + DefaultCompression = flate.DefaultCompression + ConstantCompression = flate.ConstantCompression + HuffmanOnly = flate.HuffmanOnly +) + +// A Writer is an io.WriteCloser. +// Writes to a Writer are compressed and written to w. +type Writer struct { + Header + w io.Writer + level int + wroteHeader bool + blockSize int + blocks int + currentBuffer []byte + prevTail []byte + digest hash.Hash32 + size int + closed bool + buf [10]byte + errMu sync.RWMutex + err error + pushedErr chan struct{} + results chan result + dictFlatePool sync.Pool + dstPool sync.Pool + wg sync.WaitGroup +} + +type result struct { + result chan []byte + notifyWritten chan struct{} +} + +// Use SetConcurrency to finetune the concurrency level if needed. +// +// With this you can control the approximate size of your blocks, +// as well as how many you want to be processing in parallel. +// +// Default values for this is SetConcurrency(250000, 16), +// meaning blocks are split at 250000 bytes and up to 16 blocks +// can be processing at once before the writer blocks. 
+func (z *Writer) SetConcurrency(blockSize, blocks int) error {
+	if blockSize <= tailSize {
+		return fmt.Errorf("gzip: block size cannot be less than or equal to %d", tailSize)
+	}
+	if blocks <= 0 {
+		return errors.New("gzip: blocks cannot be zero or less")
+	}
+	if blockSize == z.blockSize && blocks == z.blocks {
+		return nil
+	}
+	z.blockSize = blockSize
+	z.results = make(chan result, blocks)
+	z.blocks = blocks
+	z.dstPool = sync.Pool{New: func() interface{} { return make([]byte, 0, blockSize+(blockSize)>>4) }}
+	return nil
+}
+
+// NewWriter returns a new Writer.
+// Writes to the returned writer are compressed and written to w.
+//
+// It is the caller's responsibility to call Close on the WriteCloser when done.
+// Writes may be buffered and not flushed until Close.
+//
+// Callers that wish to set the fields in Writer.Header must do so before
+// the first call to Write or Close. The Comment and Name header fields are
+// UTF-8 strings in Go, but the underlying format requires NUL-terminated ISO
+// 8859-1 (Latin-1). NUL or non-Latin-1 runes in those strings will lead to an
+// error on Write.
+func NewWriter(w io.Writer) *Writer {
+	z, _ := NewWriterLevel(w, DefaultCompression)
+	return z
+}
+
+// NewWriterLevel is like NewWriter but specifies the compression level instead
+// of assuming DefaultCompression.
+//
+// The compression level can be DefaultCompression, NoCompression, or any
+// integer value between BestSpeed and BestCompression inclusive. The error
+// returned will be nil if the level is valid.
+func NewWriterLevel(w io.Writer, level int) (*Writer, error) {
+	if level < ConstantCompression || level > BestCompression {
+		return nil, fmt.Errorf("gzip: invalid compression level: %d", level)
+	}
+	z := new(Writer)
+	z.SetConcurrency(defaultBlockSize, defaultBlocks)
+	z.init(w, level)
+	return z, nil
+}
+
+// This function must be used by goroutines to set an
+// error condition, since z.err access is restricted
+// to the caller's goroutine.
+func (z *Writer) pushError(err error) {
+	z.errMu.Lock()
+	if z.err != nil {
+		z.errMu.Unlock()
+		return
+	}
+	z.err = err
+	close(z.pushedErr)
+	z.errMu.Unlock()
+}
+
+func (z *Writer) init(w io.Writer, level int) {
+	z.wg.Wait()
+	digest := z.digest
+	if digest != nil {
+		digest.Reset()
+	} else {
+		digest = crc32.NewIEEE()
+	}
+	z.Header = Header{OS: 255}
+	z.w = w
+	z.level = level
+	z.digest = digest
+	z.pushedErr = make(chan struct{}, 0)
+	z.results = make(chan result, z.blocks)
+	z.err = nil
+	z.closed = false
+	z.Comment = ""
+	z.Extra = nil
+	z.ModTime = time.Time{}
+	z.wroteHeader = false
+	z.currentBuffer = nil
+	z.buf = [10]byte{}
+	z.prevTail = nil
+	z.size = 0
+	if z.dictFlatePool.New == nil {
+		z.dictFlatePool.New = func() interface{} {
+			f, _ := flate.NewWriterDict(w, level, nil)
+			return f
+		}
+	}
+}
+
+// Reset discards the Writer z's state and makes it equivalent to the
+// result of its original state from NewWriter or NewWriterLevel, but
+// writing to w instead. This permits reusing a Writer rather than
+// allocating a new one.
+func (z *Writer) Reset(w io.Writer) {
+	if z.results != nil && !z.closed {
+		close(z.results)
+	}
+	z.SetConcurrency(defaultBlockSize, defaultBlocks)
+	z.init(w, z.level)
+}
+
+// GZIP (RFC 1952) is little-endian, unlike ZLIB (RFC 1950).
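+//
+// For example, put4 stores v = 0x11223344 as the bytes 0x44 0x33 0x22 0x11,
+// least significant byte first.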
+func put2(p []byte, v uint16) {
+	p[0] = uint8(v >> 0)
+	p[1] = uint8(v >> 8)
+}
+
+func put4(p []byte, v uint32) {
+	p[0] = uint8(v >> 0)
+	p[1] = uint8(v >> 8)
+	p[2] = uint8(v >> 16)
+	p[3] = uint8(v >> 24)
+}
+
+// writeBytes writes a length-prefixed byte slice to z.w.
+func (z *Writer) writeBytes(b []byte) error {
+	if len(b) > 0xffff {
+		return errors.New("gzip.Write: Extra data is too large")
+	}
+	put2(z.buf[0:2], uint16(len(b)))
+	_, err := z.w.Write(z.buf[0:2])
+	if err != nil {
+		return err
+	}
+	_, err = z.w.Write(b)
+	return err
+}
+
+// writeString writes a UTF-8 string s in GZIP's format to z.w.
+// GZIP (RFC 1952) specifies that strings are NUL-terminated ISO 8859-1 (Latin-1).
+func (z *Writer) writeString(s string) (err error) {
+	// GZIP stores Latin-1 strings; error if non-Latin-1; convert if non-ASCII.
+	needconv := false
+	for _, v := range s {
+		if v == 0 || v > 0xff {
+			return errors.New("gzip.Write: non-Latin-1 header string")
+		}
+		if v > 0x7f {
+			needconv = true
+		}
+	}
+	if needconv {
+		b := make([]byte, 0, len(s))
+		for _, v := range s {
+			b = append(b, byte(v))
+		}
+		_, err = z.w.Write(b)
+	} else {
+		_, err = io.WriteString(z.w, s)
+	}
+	if err != nil {
+		return err
+	}
+	// GZIP strings are NUL-terminated.
+	z.buf[0] = 0
+	_, err = z.w.Write(z.buf[0:1])
+	return err
+}
+
+// compressCurrent will compress the data currently buffered.
+// This should only be called from the main writer/flush/closer.
+func (z *Writer) compressCurrent(flush bool) {
+	r := result{}
+	r.result = make(chan []byte, 1)
+	r.notifyWritten = make(chan struct{}, 0)
+	select {
+	case z.results <- r:
+	case <-z.pushedErr:
+		return
+	}
+
+	// If block given is more than twice the block size, split it.
+	c := z.currentBuffer
+	if len(c) > z.blockSize*2 {
+		c = c[:z.blockSize]
+		z.wg.Add(1)
+		go z.compressBlock(c, z.prevTail, r, false)
+		z.prevTail = c[len(c)-tailSize:]
+		z.currentBuffer = z.currentBuffer[z.blockSize:]
+		z.compressCurrent(flush)
+		// Last one flushes if needed
+		return
+	}
+
+	z.wg.Add(1)
+	go z.compressBlock(c, z.prevTail, r, z.closed)
+	if len(c) > tailSize {
+		z.prevTail = c[len(c)-tailSize:]
+	} else {
+		z.prevTail = nil
+	}
+	z.currentBuffer = z.dstPool.Get().([]byte)
+	z.currentBuffer = z.currentBuffer[:0]
+
+	// Wait if flushing
+	if flush {
+		<-r.notifyWritten
+	}
+}
+
+// Returns an error if one has been set.
+// Cannot be used from internal goroutines.
+func (z *Writer) checkError() error {
+	z.errMu.RLock()
+	err := z.err
+	z.errMu.RUnlock()
+	return err
+}
+
+// Write writes a compressed form of p to the underlying io.Writer. The
+// compressed bytes are not necessarily flushed to output until
+// the Writer is closed or Flush() is called.
+//
+// The function will return quickly if there are unused buffers.
+// The sent slice (p) is copied, and the caller is free to re-use the buffer
+// when the function returns.
+//
+// Errors that occur during compression will be reported later, and a nil error
+// does not signify that the compression succeeded (since it is most likely still running).
+// That means that the call that returns an error may not be the call that caused it.
+// Only the Flush and Close functions are guaranteed to return any errors up to that point.
+func (z *Writer) Write(p []byte) (int, error) {
+	if err := z.checkError(); err != nil {
+		return 0, err
+	}
+	// Write the GZIP header lazily.
+	if !z.wroteHeader {
+		z.wroteHeader = true
+		z.buf[0] = gzipID1
+		z.buf[1] = gzipID2
+		z.buf[2] = gzipDeflate
+		z.buf[3] = 0
+		if z.Extra != nil {
+			z.buf[3] |= 0x04
+		}
+		if z.Name != "" {
+			z.buf[3] |= 0x08
+		}
+		if z.Comment != "" {
+			z.buf[3] |= 0x10
+		}
+		put4(z.buf[4:8], uint32(z.ModTime.Unix()))
+		if z.level == BestCompression {
+			z.buf[8] = 2
+		} else if z.level == BestSpeed {
+			z.buf[8] = 4
+		} else {
+			z.buf[8] = 0
+		}
+		z.buf[9] = z.OS
+		var n int
+		var err error
+		n, err = z.w.Write(z.buf[0:10])
+		if err != nil {
+			z.pushError(err)
+			return n, err
+		}
+		if z.Extra != nil {
+			err = z.writeBytes(z.Extra)
+			if err != nil {
+				z.pushError(err)
+				return n, err
+			}
+		}
+		if z.Name != "" {
+			err = z.writeString(z.Name)
+			if err != nil {
+				z.pushError(err)
+				return n, err
+			}
+		}
+		if z.Comment != "" {
+			err = z.writeString(z.Comment)
+			if err != nil {
+				z.pushError(err)
+				return n, err
+			}
+		}
+		// Start receiving data from compressors
+		go func() {
+			listen := z.results
+			for {
+				r, ok := <-listen
+				// If closed, we are finished.
+				if !ok {
+					return
+				}
+				buf := <-r.result
+				n, err := z.w.Write(buf)
+				if err != nil {
+					z.pushError(err)
+					close(r.notifyWritten)
+					return
+				}
+				if n != len(buf) {
+					z.pushError(fmt.Errorf("gzip: short write %d should be %d", n, len(buf)))
+					close(r.notifyWritten)
+					return
+				}
+				z.dstPool.Put(buf)
+				close(r.notifyWritten)
+			}
+		}()
+		z.currentBuffer = make([]byte, 0, z.blockSize)
+	}
+	q := p
+	for len(q) > 0 {
+		length := len(q)
+		if length+len(z.currentBuffer) > z.blockSize {
+			length = z.blockSize - len(z.currentBuffer)
+		}
+		z.digest.Write(q[:length])
+		z.currentBuffer = append(z.currentBuffer, q[:length]...)
+		if len(z.currentBuffer) >= z.blockSize {
+			z.compressCurrent(false)
+			if err := z.checkError(); err != nil {
+				return len(p) - len(q) - length, err
+			}
+		}
+		z.size += length
+		q = q[length:]
+	}
+	return len(p), z.checkError()
+}
+
+// Step 1: compress the buffer into a fresh buffer.
+// Step 2: send the compressed result to the writer goroutine.
+// Step 3: close the result channel to indicate we are done.
+func (z *Writer) compressBlock(p, prevTail []byte, r result, closed bool) {
+	defer func() {
+		close(r.result)
+		z.wg.Done()
+	}()
+	buf := z.dstPool.Get().([]byte)
+	dest := bytes.NewBuffer(buf[:0])
+
+	compressor := z.dictFlatePool.Get().(*flate.Writer)
+	compressor.ResetDict(dest, prevTail)
+	compressor.Write(p)
+
+	err := compressor.Flush()
+	if err != nil {
+		z.pushError(err)
+		return
+	}
+	if closed {
+		err = compressor.Close()
+		if err != nil {
+			z.pushError(err)
+			return
+		}
+	}
+	z.dictFlatePool.Put(compressor)
+	// Read back buffer
+	buf = dest.Bytes()
+	r.result <- buf
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+//
+// It is useful mainly in compressed network protocols, to ensure that
+// a remote reader has enough data to reconstruct a packet. Flush does
+// not return until the data has been written. If the underlying
+// writer returns an error, Flush returns that error.
+//
+// In the terminology of the zlib library, Flush is equivalent to Z_SYNC_FLUSH.
+func (z *Writer) Flush() error {
+	if err := z.checkError(); err != nil {
+		return err
+	}
+	if z.closed {
+		return nil
+	}
+	if !z.wroteHeader {
+		_, err := z.Write(nil)
+		if err != nil {
+			return err
+		}
+	}
+	// Send the current block to compression
+	z.compressCurrent(true)
+
+	return z.checkError()
+}
+
+// UncompressedSize will return the number of bytes written.
+// pgzip only, not a function in the official gzip package.
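+//
+// Sketch: after w.Write(data) returns, w.UncompressedSize() reports the
+// number of uncompressed input bytes accepted so far (here len(data)),
+// not the compressed output size.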
+func (z *Writer) UncompressedSize() int { + return z.size +} + +// Close closes the Writer, flushing any unwritten data to the underlying +// io.Writer, but does not close the underlying io.Writer. +func (z *Writer) Close() error { + if err := z.checkError(); err != nil { + return err + } + if z.closed { + return nil + } + + z.closed = true + if !z.wroteHeader { + z.Write(nil) + if err := z.checkError(); err != nil { + return err + } + } + z.compressCurrent(true) + if err := z.checkError(); err != nil { + return err + } + close(z.results) + put4(z.buf[0:4], z.digest.Sum32()) + put4(z.buf[4:8], uint32(z.size)) + _, err := z.w.Write(z.buf[0:8]) + if err != nil { + z.pushError(err) + return err + } + return nil +} diff --git a/vendor/github.com/xwb1989/sqlparser/CONTRIBUTORS.md b/vendor/github.com/xwb1989/sqlparser/CONTRIBUTORS.md new file mode 100644 index 000000000..a44885cd9 --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/CONTRIBUTORS.md @@ -0,0 +1,9 @@ +This project is originally a fork of [https://github.com/youtube/vitess](https://github.com/youtube/vitess) +Copyright Google Inc + +# Contributors +Wenbin Xiao 2015 +Started this project and maintained it. + +Andrew Brampton 2017 +Merged in multiple upstream fixes/changes. \ No newline at end of file diff --git a/vendor/github.com/xwb1989/sqlparser/LICENSE.md b/vendor/github.com/xwb1989/sqlparser/LICENSE.md new file mode 100644 index 000000000..f49a4e16e --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/LICENSE.md @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/vendor/github.com/xwb1989/sqlparser/Makefile b/vendor/github.com/xwb1989/sqlparser/Makefile new file mode 100644 index 000000000..215f422e6 --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/Makefile @@ -0,0 +1,22 @@ +# Copyright 2017 Google Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+MAKEFLAGS = -s
+
+sql.go: sql.y
+	goyacc -o sql.go sql.y
+	gofmt -w sql.go
+
+clean:
+	rm -f y.output sql.go
diff --git a/vendor/github.com/xwb1989/sqlparser/README.md b/vendor/github.com/xwb1989/sqlparser/README.md
new file mode 100644
index 000000000..58d475967
--- /dev/null
+++ b/vendor/github.com/xwb1989/sqlparser/README.md
@@ -0,0 +1,150 @@
+# sqlparser [![Build Status](https://img.shields.io/travis/xwb1989/sqlparser.svg)](https://travis-ci.org/xwb1989/sqlparser) [![Coverage](https://img.shields.io/coveralls/xwb1989/sqlparser.svg)](https://coveralls.io/github/xwb1989/sqlparser) [![Report card](https://goreportcard.com/badge/github.com/xwb1989/sqlparser)](https://goreportcard.com/report/github.com/xwb1989/sqlparser) [![GoDoc](https://godoc.org/github.com/xwb1989/sqlparser?status.svg)](https://godoc.org/github.com/xwb1989/sqlparser)
+
+Go package for parsing MySQL SQL queries.
+
+## Notice
+
+The backbone of this repo is extracted from [vitessio/vitess](https://github.com/vitessio/vitess).
+
+Inside vitessio/vitess there is a very nicely written SQL parser. However, as it is not self-contained, I created this one.
+It applies the same LICENSE as vitessio/vitess.
+
+## Usage
+
+```go
+import (
+    "github.com/xwb1989/sqlparser"
+)
+```
+
+Then use:
+
+```go
+sql := "SELECT * FROM table WHERE a = 'abc'"
+stmt, err := sqlparser.Parse(sql)
+if err != nil {
+	// Do something with the err
+}
+
+// Otherwise do something with stmt
+switch stmt := stmt.(type) {
+case *sqlparser.Select:
+	_ = stmt
+case *sqlparser.Insert:
+}
+```
+
+Alternatively, to read many queries from an io.Reader:
+
+```go
+r := strings.NewReader("INSERT INTO table1 VALUES (1, 'a'); INSERT INTO table2 VALUES (3, 4);")
+
+tokens := sqlparser.NewTokenizer(r)
+for {
+	stmt, err := sqlparser.ParseNext(tokens)
+	if err == io.EOF {
+		break
+	}
+	// Do something with stmt or err.
+}
+```
+
+See [parse_test.go](https://github.com/xwb1989/sqlparser/blob/master/parse_test.go) for more examples, or read the [godoc](https://godoc.org/github.com/xwb1989/sqlparser).
+
+
+## Porting Instructions
+
+You only need the instructions below if you plan to keep this library up to date with [vitessio/vitess](https://github.com/vitessio/vitess).
+
+### Keeping up to date
+
+```bash
+shopt -s nullglob
+VITESS=${GOPATH?}/src/vitess.io/vitess/go/
+XWB1989=${GOPATH?}/src/github.com/xwb1989/sqlparser/
+
+# Create patches for everything that changed
+LASTIMPORT=1b7879cb91f1dfe1a2dfa06fea96e951e3a7aec5
+for path in ${VITESS?}/{vt/sqlparser,sqltypes,bytes2,hack}; do
+	cd ${path}
+	git format-patch ${LASTIMPORT?} .
+done;
+
+# Apply patches to the dependencies
+cd ${XWB1989?}
+git am --directory dependency -p2 ${VITESS?}/{sqltypes,bytes2,hack}/*.patch
+
+# Apply the main patches to the repo
+cd ${XWB1989?}
+git am -p4 ${VITESS?}/vt/sqlparser/*.patch
+
+# If you encounter diff failures, manually fix them with
+patch -p4 < .git/rebase-apply/patch
+...
+git add name_of_files
+git am --continue
+
+# Cleanup
+rm ${VITESS?}/{sqltypes,bytes2,hack}/*.patch ${VITESS?}/*.patch
+
+# Finally, update LASTIMPORT in this README.
+```
+
+### Fresh install
+
+TODO: Change these instructions to use git to copy the files; that will make later patching easier.
+
+```bash
+VITESS=${GOPATH?}/src/vitess.io/vitess/go/
+XWB1989=${GOPATH?}/src/github.com/xwb1989/sqlparser/
+
+cd ${XWB1989?}
+
+# Copy all the code
+cp -pr ${VITESS?}/vt/sqlparser/ .
+cp -pr ${VITESS?}/sqltypes dependency +cp -pr ${VITESS?}/bytes2 dependency +cp -pr ${VITESS?}/hack dependency + +# Delete some code we haven't ported +rm dependency/sqltypes/arithmetic.go dependency/sqltypes/arithmetic_test.go dependency/sqltypes/event_token.go dependency/sqltypes/event_token_test.go dependency/sqltypes/proto3.go dependency/sqltypes/proto3_test.go dependency/sqltypes/query_response.go dependency/sqltypes/result.go dependency/sqltypes/result_test.go + +# Some automated fixes + +# Fix imports +sed -i '.bak' 's_vitess.io/vitess/go/vt/proto/query_github.com/xwb1989/sqlparser/dependency/querypb_g' *.go dependency/sqltypes/*.go +sed -i '.bak' 's_vitess.io/vitess/go/_github.com/xwb1989/sqlparser/dependency/_g' *.go dependency/sqltypes/*.go + +# Copy the proto, but basically drop everything we don't want +cp -pr ${VITESS?}/vt/proto/query dependency/querypb + +sed -i '.bak' 's_.*Descriptor.*__g' dependency/querypb/*.go +sed -i '.bak' 's_.*ProtoMessage.*__g' dependency/querypb/*.go + +sed -i '.bak' 's/proto.CompactTextString(m)/"TODO"/g' dependency/querypb/*.go +sed -i '.bak' 's/proto.EnumName/EnumName/g' dependency/querypb/*.go + +sed -i '.bak' 's/proto.Equal/reflect.DeepEqual/g' dependency/sqltypes/*.go + +# Remove the error library +sed -i '.bak' 's/vterrors.Errorf([^,]*, /fmt.Errorf(/g' *.go dependency/sqltypes/*.go +sed -i '.bak' 's/vterrors.New([^,]*, /errors.New(/g' *.go dependency/sqltypes/*.go +``` + +### Testing + +```bash +VITESS=${GOPATH?}/src/vitess.io/vitess/go/ +XWB1989=${GOPATH?}/src/github.com/xwb1989/sqlparser/ + +cd ${XWB1989?} + +# Test, fix and repeat +go test ./... + +# Finally make some diffs (for later reference) +diff -u ${VITESS?}/sqltypes/ ${XWB1989?}/dependency/sqltypes/ > ${XWB1989?}/patches/sqltypes.patch +diff -u ${VITESS?}/bytes2/ ${XWB1989?}/dependency/bytes2/ > ${XWB1989?}/patches/bytes2.patch +diff -u ${VITESS?}/vt/proto/query/ ${XWB1989?}/dependency/querypb/ > ${XWB1989?}/patches/querypb.patch +diff -u ${VITESS?}/vt/sqlparser/ ${XWB1989?}/ > ${XWB1989?}/patches/sqlparser.patch +``` \ No newline at end of file diff --git a/vendor/github.com/xwb1989/sqlparser/analyzer.go b/vendor/github.com/xwb1989/sqlparser/analyzer.go new file mode 100644 index 000000000..95f97d355 --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/analyzer.go @@ -0,0 +1,343 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +// analyzer.go contains utility analysis functions. + +import ( + "errors" + "fmt" + "strconv" + "strings" + "unicode" + + "github.com/xwb1989/sqlparser/dependency/sqltypes" +) + +// These constants are used to identify the SQL statement type. +const ( + StmtSelect = iota + StmtStream + StmtInsert + StmtReplace + StmtUpdate + StmtDelete + StmtDDL + StmtBegin + StmtCommit + StmtRollback + StmtSet + StmtShow + StmtUse + StmtOther + StmtUnknown + StmtComment +) + +// Preview analyzes the beginning of the query using a simpler and faster +// textual comparison to identify the statement type. 
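+//
+// For example (illustrative inputs):
+//
+//	Preview("SELECT * FROM t")  // StmtSelect
+//	Preview("rollback")         // StmtRollback
+//	Preview("flush tables")     // StmtUnknown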
+func Preview(sql string) int { + trimmed := StripLeadingComments(sql) + + firstWord := trimmed + if end := strings.IndexFunc(trimmed, unicode.IsSpace); end != -1 { + firstWord = trimmed[:end] + } + firstWord = strings.TrimLeftFunc(firstWord, func(r rune) bool { return !unicode.IsLetter(r) }) + // Comparison is done in order of priority. + loweredFirstWord := strings.ToLower(firstWord) + switch loweredFirstWord { + case "select": + return StmtSelect + case "stream": + return StmtStream + case "insert": + return StmtInsert + case "replace": + return StmtReplace + case "update": + return StmtUpdate + case "delete": + return StmtDelete + } + // For the following statements it is not sufficient to rely + // on loweredFirstWord. This is because they are not statements + // in the grammar and we are relying on Preview to parse them. + // For instance, we don't want: "BEGIN JUNK" to be parsed + // as StmtBegin. + trimmedNoComments, _ := SplitMarginComments(trimmed) + switch strings.ToLower(trimmedNoComments) { + case "begin", "start transaction": + return StmtBegin + case "commit": + return StmtCommit + case "rollback": + return StmtRollback + } + switch loweredFirstWord { + case "create", "alter", "rename", "drop", "truncate": + return StmtDDL + case "set": + return StmtSet + case "show": + return StmtShow + case "use": + return StmtUse + case "analyze", "describe", "desc", "explain", "repair", "optimize": + return StmtOther + } + if strings.Index(trimmed, "/*!") == 0 { + return StmtComment + } + return StmtUnknown +} + +// StmtType returns the statement type as a string +func StmtType(stmtType int) string { + switch stmtType { + case StmtSelect: + return "SELECT" + case StmtStream: + return "STREAM" + case StmtInsert: + return "INSERT" + case StmtReplace: + return "REPLACE" + case StmtUpdate: + return "UPDATE" + case StmtDelete: + return "DELETE" + case StmtDDL: + return "DDL" + case StmtBegin: + return "BEGIN" + case StmtCommit: + return "COMMIT" + case StmtRollback: + return "ROLLBACK" + case StmtSet: + return "SET" + case StmtShow: + return "SHOW" + case StmtUse: + return "USE" + case StmtOther: + return "OTHER" + default: + return "UNKNOWN" + } +} + +// IsDML returns true if the query is an INSERT, UPDATE or DELETE statement. +func IsDML(sql string) bool { + switch Preview(sql) { + case StmtInsert, StmtReplace, StmtUpdate, StmtDelete: + return true + } + return false +} + +// GetTableName returns the table name from the SimpleTableExpr +// only if it's a simple expression. Otherwise, it returns "". +func GetTableName(node SimpleTableExpr) TableIdent { + if n, ok := node.(TableName); ok && n.Qualifier.IsEmpty() { + return n.Name + } + // sub-select or '.' expression + return NewTableIdent("") +} + +// IsColName returns true if the Expr is a *ColName. +func IsColName(node Expr) bool { + _, ok := node.(*ColName) + return ok +} + +// IsValue returns true if the Expr is a string, integral or value arg. +// NULL is not considered to be a value. +func IsValue(node Expr) bool { + switch v := node.(type) { + case *SQLVal: + switch v.Type { + case StrVal, HexVal, IntVal, ValArg: + return true + } + } + return false +} + +// IsNull returns true if the Expr is SQL NULL +func IsNull(node Expr) bool { + switch node.(type) { + case *NullVal: + return true + } + return false +} + +// IsSimpleTuple returns true if the Expr is a ValTuple that +// contains simple values or if it's a list arg. 
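+//
+// e.g. (sketch) the tuple ('a', 1) of literals is simple, while a
+// subquery such as (select 1) is not.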
+func IsSimpleTuple(node Expr) bool { + switch vals := node.(type) { + case ValTuple: + for _, n := range vals { + if !IsValue(n) { + return false + } + } + return true + case ListArg: + return true + } + // It's a subquery + return false +} + +// NewPlanValue builds a sqltypes.PlanValue from an Expr. +func NewPlanValue(node Expr) (sqltypes.PlanValue, error) { + switch node := node.(type) { + case *SQLVal: + switch node.Type { + case ValArg: + return sqltypes.PlanValue{Key: string(node.Val[1:])}, nil + case IntVal: + n, err := sqltypes.NewIntegral(string(node.Val)) + if err != nil { + return sqltypes.PlanValue{}, fmt.Errorf("%v", err) + } + return sqltypes.PlanValue{Value: n}, nil + case StrVal: + return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, node.Val)}, nil + case HexVal: + v, err := node.HexDecode() + if err != nil { + return sqltypes.PlanValue{}, fmt.Errorf("%v", err) + } + return sqltypes.PlanValue{Value: sqltypes.MakeTrusted(sqltypes.VarBinary, v)}, nil + } + case ListArg: + return sqltypes.PlanValue{ListKey: string(node[2:])}, nil + case ValTuple: + pv := sqltypes.PlanValue{ + Values: make([]sqltypes.PlanValue, 0, len(node)), + } + for _, val := range node { + innerpv, err := NewPlanValue(val) + if err != nil { + return sqltypes.PlanValue{}, err + } + if innerpv.ListKey != "" || innerpv.Values != nil { + return sqltypes.PlanValue{}, errors.New("unsupported: nested lists") + } + pv.Values = append(pv.Values, innerpv) + } + return pv, nil + case *NullVal: + return sqltypes.PlanValue{}, nil + } + return sqltypes.PlanValue{}, fmt.Errorf("expression is too complex '%v'", String(node)) +} + +// StringIn is a convenience function that returns +// true if str matches any of the values. +func StringIn(str string, values ...string) bool { + for _, val := range values { + if str == val { + return true + } + } + return false +} + +// SetKey is the extracted key from one SetExpr +type SetKey struct { + Key string + Scope string +} + +// ExtractSetValues returns a map of key-value pairs +// if the query is a SET statement. Values can be bool, int64 or string. +// Since set variable names are case insensitive, all keys are returned +// as lower case. 
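+//
+// A hedged sketch of the result shape (value types depend on the expression):
+//
+//	kv, _, err := ExtractSetValues("set autocommit = 1")
+//	// on success kv maps SetKey{Key: "autocommit", Scope: "session"} to int64(1)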
+func ExtractSetValues(sql string) (keyValues map[SetKey]interface{}, scope string, err error) { + stmt, err := Parse(sql) + if err != nil { + return nil, "", err + } + setStmt, ok := stmt.(*Set) + if !ok { + return nil, "", fmt.Errorf("ast did not yield *sqlparser.Set: %T", stmt) + } + result := make(map[SetKey]interface{}) + for _, expr := range setStmt.Exprs { + scope := SessionStr + key := expr.Name.Lowered() + switch { + case strings.HasPrefix(key, "@@global."): + scope = GlobalStr + key = strings.TrimPrefix(key, "@@global.") + case strings.HasPrefix(key, "@@session."): + key = strings.TrimPrefix(key, "@@session.") + case strings.HasPrefix(key, "@@"): + key = strings.TrimPrefix(key, "@@") + } + + if strings.HasPrefix(expr.Name.Lowered(), "@@") { + if setStmt.Scope != "" && scope != "" { + return nil, "", fmt.Errorf("unsupported in set: mixed using of variable scope") + } + _, out := NewStringTokenizer(key).Scan() + key = string(out) + } + + setKey := SetKey{ + Key: key, + Scope: scope, + } + + switch expr := expr.Expr.(type) { + case *SQLVal: + switch expr.Type { + case StrVal: + result[setKey] = strings.ToLower(string(expr.Val)) + case IntVal: + num, err := strconv.ParseInt(string(expr.Val), 0, 64) + if err != nil { + return nil, "", err + } + result[setKey] = num + default: + return nil, "", fmt.Errorf("invalid value type: %v", String(expr)) + } + case BoolVal: + var val int64 + if expr { + val = 1 + } + result[setKey] = val + case *ColName: + result[setKey] = expr.Name.String() + case *NullVal: + result[setKey] = nil + case *Default: + result[setKey] = "default" + default: + return nil, "", fmt.Errorf("invalid syntax: %s", String(expr)) + } + } + return result, strings.ToLower(setStmt.Scope), nil +} diff --git a/vendor/github.com/xwb1989/sqlparser/ast.go b/vendor/github.com/xwb1989/sqlparser/ast.go new file mode 100644 index 000000000..c3a0d0837 --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/ast.go @@ -0,0 +1,3450 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "fmt" + "io" + "log" + "strings" + + "github.com/xwb1989/sqlparser/dependency/querypb" + "github.com/xwb1989/sqlparser/dependency/sqltypes" +) + +// Instructions for creating new types: If a type +// needs to satisfy an interface, declare that function +// along with that interface. This will help users +// identify the list of types to which they can assert +// those interfaces. +// If the member of a type has a string with a predefined +// list of values, declare those values as const following +// the type. +// For interfaces that define dummy functions to consolidate +// a set of types, define the function as iTypeName. +// This will help avoid name collisions. + +// Parse parses the SQL in full and returns a Statement, which +// is the AST representation of the query. If a DDL statement +// is partially parsed but still contains a syntax error, the +// error is ignored and the DDL is returned anyway. 
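+//
+// For example:
+//
+//	stmt, err := Parse("select a from t where b = 1")
+//	// on success stmt is a *Select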
+func Parse(sql string) (Statement, error) {
+	tokenizer := NewStringTokenizer(sql)
+	if yyParse(tokenizer) != 0 {
+		if tokenizer.partialDDL != nil {
+			log.Printf("ignoring error parsing DDL '%s': %v", sql, tokenizer.LastError)
+			tokenizer.ParseTree = tokenizer.partialDDL
+			return tokenizer.ParseTree, nil
+		}
+		return nil, tokenizer.LastError
+	}
+	return tokenizer.ParseTree, nil
+}
+
+// ParseStrictDDL is the same as Parse except it errors on
+// partially parsed DDL statements.
+func ParseStrictDDL(sql string) (Statement, error) {
+	tokenizer := NewStringTokenizer(sql)
+	if yyParse(tokenizer) != 0 {
+		return nil, tokenizer.LastError
+	}
+	return tokenizer.ParseTree, nil
+}
+
+// ParseNext parses a single SQL statement from the tokenizer,
+// returning a Statement which is the AST representation of the query.
+// The tokenizer will always read up to the end of the statement, allowing for
+// the next call to ParseNext to parse any subsequent SQL statements. When
+// there are no more statements to parse, an io.EOF error is returned.
+func ParseNext(tokenizer *Tokenizer) (Statement, error) {
+	if tokenizer.lastChar == ';' {
+		tokenizer.next()
+		tokenizer.skipBlank()
+	}
+	if tokenizer.lastChar == eofChar {
+		return nil, io.EOF
+	}
+
+	tokenizer.reset()
+	tokenizer.multi = true
+	if yyParse(tokenizer) != 0 {
+		if tokenizer.partialDDL != nil {
+			tokenizer.ParseTree = tokenizer.partialDDL
+			return tokenizer.ParseTree, nil
+		}
+		return nil, tokenizer.LastError
+	}
+	return tokenizer.ParseTree, nil
+}
+
+// SplitStatement returns the first sql statement up to either a ; or EOF
+// and the remainder from the given buffer.
+func SplitStatement(blob string) (string, string, error) {
+	tokenizer := NewStringTokenizer(blob)
+	tkn := 0
+	for {
+		tkn, _ = tokenizer.Scan()
+		if tkn == 0 || tkn == ';' || tkn == eofChar {
+			break
+		}
+	}
+	if tokenizer.LastError != nil {
+		return "", "", tokenizer.LastError
+	}
+	if tkn == ';' {
+		return blob[:tokenizer.Position-2], blob[tokenizer.Position-1:], nil
+	}
+	return blob, "", nil
+}
+
+// SplitStatementToPieces splits a raw SQL statement that may contain multiple
+// statements into individual statements. It returns the pieces the blob
+// contains, or an error if the SQL cannot be parsed.
+func SplitStatementToPieces(blob string) (pieces []string, err error) {
+	pieces = make([]string, 0, 16)
+	tokenizer := NewStringTokenizer(blob)
+
+	tkn := 0
+	var stmt string
+	stmtBegin := 0
+	for {
+		tkn, _ = tokenizer.Scan()
+		if tkn == ';' {
+			stmt = blob[stmtBegin : tokenizer.Position-2]
+			pieces = append(pieces, stmt)
+			stmtBegin = tokenizer.Position - 1
+
+		} else if tkn == 0 || tkn == eofChar {
+			blobTail := tokenizer.Position - 2
+
+			if stmtBegin < blobTail {
+				stmt = blob[stmtBegin : blobTail+1]
+				pieces = append(pieces, stmt)
+			}
+			break
+		}
+	}
+
+	err = tokenizer.LastError
+	return
+}
+
+// SQLNode defines the interface for all nodes
+// generated by the parser.
+type SQLNode interface {
+	Format(buf *TrackedBuffer)
+	// walkSubtree calls visit on all underlying nodes
+	// of the subtree, but not the current one. Walking
+	// must be interrupted if visit returns an error.
+	walkSubtree(visit Visit) error
+}
+
+// Visit defines the signature of a function that
+// can be used to visit all nodes of a parse tree.
+type Visit func(node SQLNode) (kontinue bool, err error)
+
+// Walk calls visit on every node.
+// If visit returns true, the underlying nodes
+// are also visited. If it returns an error, walking
+// is interrupted, and the error is returned.
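+//
+// A sketch of collecting column names with Walk (variable names are
+// illustrative only):
+//
+//	var cols []string
+//	_ = Walk(func(node SQLNode) (bool, error) {
+//		if col, ok := node.(*ColName); ok {
+//			cols = append(cols, col.Name.String())
+//		}
+//		return true, nil
+//	}, stmt)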
+func Walk(visit Visit, nodes ...SQLNode) error {
+	for _, node := range nodes {
+		if node == nil {
+			continue
+		}
+		kontinue, err := visit(node)
+		if err != nil {
+			return err
+		}
+		if kontinue {
+			err = node.walkSubtree(visit)
+			if err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// String returns a string representation of an SQLNode.
+func String(node SQLNode) string {
+	if node == nil {
+		return ""
+	}
+
+	buf := NewTrackedBuffer(nil)
+	buf.Myprintf("%v", node)
+	return buf.String()
+}
+
+// Append appends the SQLNode to the buffer.
+func Append(buf *bytes.Buffer, node SQLNode) {
+	tbuf := &TrackedBuffer{
+		Buffer: buf,
+	}
+	node.Format(tbuf)
+}
+
+// Statement represents a statement.
+type Statement interface {
+	iStatement()
+	SQLNode
+}
+
+func (*Union) iStatement()      {}
+func (*Select) iStatement()     {}
+func (*Stream) iStatement()     {}
+func (*Insert) iStatement()     {}
+func (*Update) iStatement()     {}
+func (*Delete) iStatement()     {}
+func (*Set) iStatement()        {}
+func (*DBDDL) iStatement()      {}
+func (*DDL) iStatement()        {}
+func (*Show) iStatement()       {}
+func (*Use) iStatement()        {}
+func (*Begin) iStatement()      {}
+func (*Commit) iStatement()     {}
+func (*Rollback) iStatement()   {}
+func (*OtherRead) iStatement()  {}
+func (*OtherAdmin) iStatement() {}
+
+// ParenSelect cannot actually be a top-level statement,
+// but we have to allow it because it's a requirement
+// of SelectStatement.
+func (*ParenSelect) iStatement() {}
+
+// SelectStatement represents any SELECT statement.
+type SelectStatement interface {
+	iSelectStatement()
+	iStatement()
+	iInsertRows()
+	AddOrder(*Order)
+	SetLimit(*Limit)
+	SQLNode
+}
+
+func (*Select) iSelectStatement()      {}
+func (*Union) iSelectStatement()       {}
+func (*ParenSelect) iSelectStatement() {}
+
+// Select represents a SELECT statement.
+type Select struct {
+	Cache       string
+	Comments    Comments
+	Distinct    string
+	Hints       string
+	SelectExprs SelectExprs
+	From        TableExprs
+	Where       *Where
+	GroupBy     GroupBy
+	Having      *Where
+	OrderBy     OrderBy
+	Limit       *Limit
+	Lock        string
+}
+
+// Select.Distinct
+const (
+	DistinctStr      = "distinct "
+	StraightJoinHint = "straight_join "
+)
+
+// Select.Lock
+const (
+	ForUpdateStr = " for update"
+	ShareModeStr = " lock in share mode"
+)
+
+// Select.Cache
+const (
+	SQLCacheStr   = "sql_cache "
+	SQLNoCacheStr = "sql_no_cache "
+)
+
+// AddOrder adds an order by element
+func (node *Select) AddOrder(order *Order) {
+	node.OrderBy = append(node.OrderBy, order)
+}
+
+// SetLimit sets the limit clause
+func (node *Select) SetLimit(limit *Limit) {
+	node.Limit = limit
+}
+
+// Format formats the node.
+func (node *Select) Format(buf *TrackedBuffer) {
+	buf.Myprintf("select %v%s%s%s%v from %v%v%v%v%v%v%s",
+		node.Comments, node.Cache, node.Distinct, node.Hints, node.SelectExprs,
+		node.From, node.Where,
+		node.GroupBy, node.Having, node.OrderBy,
+		node.Limit, node.Lock)
+}
+
+func (node *Select) walkSubtree(visit Visit) error {
+	if node == nil {
+		return nil
+	}
+	return Walk(
+		visit,
+		node.Comments,
+		node.SelectExprs,
+		node.From,
+		node.Where,
+		node.GroupBy,
+		node.Having,
+		node.OrderBy,
+		node.Limit,
+	)
+}
+
+// AddWhere adds the boolean expression to the
+// WHERE clause as an AND condition. If the expression
+// is an OR clause, it parenthesizes it. Currently,
+// the OR operator is the only one that's lower precedence
+// than AND.
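+//
+// e.g. (sketch) adding a = 1 and then b = 2 produces WHERE a = 1 and b = 2;
+// an OR expression passed here is parenthesized first.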
+func (node *Select) AddWhere(expr Expr) {
+	if _, ok := expr.(*OrExpr); ok {
+		expr = &ParenExpr{Expr: expr}
+	}
+	if node.Where == nil {
+		node.Where = &Where{
+			Type: WhereStr,
+			Expr: expr,
+		}
+		return
+	}
+	node.Where.Expr = &AndExpr{
+		Left:  node.Where.Expr,
+		Right: expr,
+	}
+	return
+}
+
+// AddHaving adds the boolean expression to the
+// HAVING clause as an AND condition. If the expression
+// is an OR clause, it parenthesizes it. Currently,
+// the OR operator is the only one that's lower precedence
+// than AND.
+func (node *Select) AddHaving(expr Expr) {
+	if _, ok := expr.(*OrExpr); ok {
+		expr = &ParenExpr{Expr: expr}
+	}
+	if node.Having == nil {
+		node.Having = &Where{
+			Type: HavingStr,
+			Expr: expr,
+		}
+		return
+	}
+	node.Having.Expr = &AndExpr{
+		Left:  node.Having.Expr,
+		Right: expr,
+	}
+	return
+}
+
+// ParenSelect is a parenthesized SELECT statement.
+type ParenSelect struct {
+	Select SelectStatement
+}
+
+// AddOrder adds an order by element
+func (node *ParenSelect) AddOrder(order *Order) {
+	panic("unreachable")
+}
+
+// SetLimit sets the limit clause
+func (node *ParenSelect) SetLimit(limit *Limit) {
+	panic("unreachable")
+}
+
+// Format formats the node.
+func (node *ParenSelect) Format(buf *TrackedBuffer) {
+	buf.Myprintf("(%v)", node.Select)
+}
+
+func (node *ParenSelect) walkSubtree(visit Visit) error {
+	if node == nil {
+		return nil
+	}
+	return Walk(
+		visit,
+		node.Select,
+	)
+}
+
+// Union represents a UNION statement.
+type Union struct {
+	Type        string
+	Left, Right SelectStatement
+	OrderBy     OrderBy
+	Limit       *Limit
+	Lock        string
+}
+
+// Union.Type
+const (
+	UnionStr         = "union"
+	UnionAllStr      = "union all"
+	UnionDistinctStr = "union distinct"
+)
+
+// AddOrder adds an order by element
+func (node *Union) AddOrder(order *Order) {
+	node.OrderBy = append(node.OrderBy, order)
+}
+
+// SetLimit sets the limit clause
+func (node *Union) SetLimit(limit *Limit) {
+	node.Limit = limit
+}
+
+// Format formats the node.
+func (node *Union) Format(buf *TrackedBuffer) {
+	buf.Myprintf("%v %s %v%v%v%s", node.Left, node.Type, node.Right,
+		node.OrderBy, node.Limit, node.Lock)
+}
+
+func (node *Union) walkSubtree(visit Visit) error {
+	if node == nil {
+		return nil
+	}
+	return Walk(
+		visit,
+		node.Left,
+		node.Right,
+	)
+}
+
+// Stream represents a STREAM statement.
+type Stream struct {
+	Comments   Comments
+	SelectExpr SelectExpr
+	Table      TableName
+}
+
+// Format formats the node.
+func (node *Stream) Format(buf *TrackedBuffer) {
+	buf.Myprintf("stream %v%v from %v",
+		node.Comments, node.SelectExpr, node.Table)
+}
+
+func (node *Stream) walkSubtree(visit Visit) error {
+	if node == nil {
+		return nil
+	}
+	return Walk(
+		visit,
+		node.Comments,
+		node.SelectExpr,
+		node.Table,
+	)
+}
+
+// Insert represents an INSERT or REPLACE statement.
+// Per the MySQL docs, http://dev.mysql.com/doc/refman/5.7/en/replace.html
+// Replace is the counterpart to `INSERT IGNORE`, and works exactly like a
+// normal INSERT except if the row exists. In that case it first deletes
+// the row and re-inserts with new values. For that reason we keep it as an Insert struct.
+// Replaces are currently disallowed in sharded schemas because
+// of the implications the deletion part may have on vindexes.
+// If you add fields here, consider adding them to calls to validateSubquerySamePlan.
+type Insert struct {
+	Action     string
+	Comments   Comments
+	Ignore     string
+	Table      TableName
+	Partitions Partitions
+	Columns    Columns
+	Rows       InsertRows
+	OnDup      OnDup
+}
+
+// Insert.Action strings.
+const ( + InsertStr = "insert" + ReplaceStr = "replace" +) + +// Format formats the node. +func (node *Insert) Format(buf *TrackedBuffer) { + buf.Myprintf("%s %v%sinto %v%v%v %v%v", + node.Action, + node.Comments, node.Ignore, + node.Table, node.Partitions, node.Columns, node.Rows, node.OnDup) +} + +func (node *Insert) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Comments, + node.Table, + node.Columns, + node.Rows, + node.OnDup, + ) +} + +// InsertRows represents the rows for an INSERT statement. +type InsertRows interface { + iInsertRows() + SQLNode +} + +func (*Select) iInsertRows() {} +func (*Union) iInsertRows() {} +func (Values) iInsertRows() {} +func (*ParenSelect) iInsertRows() {} + +// Update represents an UPDATE statement. +// If you add fields here, consider adding them to calls to validateSubquerySamePlan. +type Update struct { + Comments Comments + TableExprs TableExprs + Exprs UpdateExprs + Where *Where + OrderBy OrderBy + Limit *Limit +} + +// Format formats the node. +func (node *Update) Format(buf *TrackedBuffer) { + buf.Myprintf("update %v%v set %v%v%v%v", + node.Comments, node.TableExprs, + node.Exprs, node.Where, node.OrderBy, node.Limit) +} + +func (node *Update) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Comments, + node.TableExprs, + node.Exprs, + node.Where, + node.OrderBy, + node.Limit, + ) +} + +// Delete represents a DELETE statement. +// If you add fields here, consider adding them to calls to validateSubquerySamePlan. +type Delete struct { + Comments Comments + Targets TableNames + TableExprs TableExprs + Partitions Partitions + Where *Where + OrderBy OrderBy + Limit *Limit +} + +// Format formats the node. +func (node *Delete) Format(buf *TrackedBuffer) { + buf.Myprintf("delete %v", node.Comments) + if node.Targets != nil { + buf.Myprintf("%v ", node.Targets) + } + buf.Myprintf("from %v%v%v%v%v", node.TableExprs, node.Partitions, node.Where, node.OrderBy, node.Limit) +} + +func (node *Delete) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Comments, + node.Targets, + node.TableExprs, + node.Where, + node.OrderBy, + node.Limit, + ) +} + +// Set represents a SET statement. +type Set struct { + Comments Comments + Exprs SetExprs + Scope string +} + +// Set.Scope or Show.Scope +const ( + SessionStr = "session" + GlobalStr = "global" +) + +// Format formats the node. +func (node *Set) Format(buf *TrackedBuffer) { + if node.Scope == "" { + buf.Myprintf("set %v%v", node.Comments, node.Exprs) + } else { + buf.Myprintf("set %v%s %v", node.Comments, node.Scope, node.Exprs) + } +} + +func (node *Set) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Comments, + node.Exprs, + ) +} + +// DBDDL represents a CREATE, DROP database statement. +type DBDDL struct { + Action string + DBName string + IfExists bool + Collate string + Charset string +} + +// Format formats the node. +func (node *DBDDL) Format(buf *TrackedBuffer) { + switch node.Action { + case CreateStr: + buf.WriteString(fmt.Sprintf("%s database %s", node.Action, node.DBName)) + case DropStr: + exists := "" + if node.IfExists { + exists = " if exists" + } + buf.WriteString(fmt.Sprintf("%s database%s %v", node.Action, exists, node.DBName)) + } +} + +// walkSubtree walks the nodes of the subtree. 
+func (node *DBDDL) walkSubtree(visit Visit) error { + return nil +} + +// DDL represents a CREATE, ALTER, DROP, RENAME or TRUNCATE statement. +// Table is set for AlterStr, DropStr, RenameStr, TruncateStr +// NewName is set for AlterStr, CreateStr, RenameStr. +// VindexSpec is set for CreateVindexStr, DropVindexStr, AddColVindexStr, DropColVindexStr +// VindexCols is set for AddColVindexStr +type DDL struct { + Action string + Table TableName + NewName TableName + IfExists bool + TableSpec *TableSpec + PartitionSpec *PartitionSpec + VindexSpec *VindexSpec + VindexCols []ColIdent +} + +// DDL strings. +const ( + CreateStr = "create" + AlterStr = "alter" + DropStr = "drop" + RenameStr = "rename" + TruncateStr = "truncate" + CreateVindexStr = "create vindex" + AddColVindexStr = "add vindex" + DropColVindexStr = "drop vindex" + + // Vindex DDL param to specify the owner of a vindex + VindexOwnerStr = "owner" +) + +// Format formats the node. +func (node *DDL) Format(buf *TrackedBuffer) { + switch node.Action { + case CreateStr: + if node.TableSpec == nil { + buf.Myprintf("%s table %v", node.Action, node.NewName) + } else { + buf.Myprintf("%s table %v %v", node.Action, node.NewName, node.TableSpec) + } + case DropStr: + exists := "" + if node.IfExists { + exists = " if exists" + } + buf.Myprintf("%s table%s %v", node.Action, exists, node.Table) + case RenameStr: + buf.Myprintf("%s table %v to %v", node.Action, node.Table, node.NewName) + case AlterStr: + if node.PartitionSpec != nil { + buf.Myprintf("%s table %v %v", node.Action, node.Table, node.PartitionSpec) + } else { + buf.Myprintf("%s table %v", node.Action, node.Table) + } + case CreateVindexStr: + buf.Myprintf("%s %v %v", node.Action, node.VindexSpec.Name, node.VindexSpec) + case AddColVindexStr: + buf.Myprintf("alter table %v %s %v (", node.Table, node.Action, node.VindexSpec.Name) + for i, col := range node.VindexCols { + if i != 0 { + buf.Myprintf(", %v", col) + } else { + buf.Myprintf("%v", col) + } + } + buf.Myprintf(")") + if node.VindexSpec.Type.String() != "" { + buf.Myprintf(" %v", node.VindexSpec) + } + case DropColVindexStr: + buf.Myprintf("alter table %v %s %v", node.Table, node.Action, node.VindexSpec.Name) + default: + buf.Myprintf("%s table %v", node.Action, node.Table) + } +} + +func (node *DDL) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Table, + node.NewName, + ) +} + +// Partition strings +const ( + ReorganizeStr = "reorganize partition" +) + +// PartitionSpec describe partition actions (for alter and create) +type PartitionSpec struct { + Action string + Name ColIdent + Definitions []*PartitionDefinition +} + +// Format formats the node. 
+func (node *PartitionSpec) Format(buf *TrackedBuffer) { + switch node.Action { + case ReorganizeStr: + buf.Myprintf("%s %v into (", node.Action, node.Name) + var prefix string + for _, pd := range node.Definitions { + buf.Myprintf("%s%v", prefix, pd) + prefix = ", " + } + buf.Myprintf(")") + default: + panic("unimplemented") + } +} + +func (node *PartitionSpec) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + if err := Walk(visit, node.Name); err != nil { + return err + } + for _, def := range node.Definitions { + if err := Walk(visit, def); err != nil { + return err + } + } + return nil +} + +// PartitionDefinition describes a very minimal partition definition +type PartitionDefinition struct { + Name ColIdent + Limit Expr + Maxvalue bool +} + +// Format formats the node +func (node *PartitionDefinition) Format(buf *TrackedBuffer) { + if !node.Maxvalue { + buf.Myprintf("partition %v values less than (%v)", node.Name, node.Limit) + } else { + buf.Myprintf("partition %v values less than (maxvalue)", node.Name) + } +} + +func (node *PartitionDefinition) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Name, + node.Limit, + ) +} + +// TableSpec describes the structure of a table from a CREATE TABLE statement +type TableSpec struct { + Columns []*ColumnDefinition + Indexes []*IndexDefinition + Options string +} + +// Format formats the node. +func (ts *TableSpec) Format(buf *TrackedBuffer) { + buf.Myprintf("(\n") + for i, col := range ts.Columns { + if i == 0 { + buf.Myprintf("\t%v", col) + } else { + buf.Myprintf(",\n\t%v", col) + } + } + for _, idx := range ts.Indexes { + buf.Myprintf(",\n\t%v", idx) + } + + buf.Myprintf("\n)%s", strings.Replace(ts.Options, ", ", ",\n ", -1)) +} + +// AddColumn appends the given column to the list in the spec +func (ts *TableSpec) AddColumn(cd *ColumnDefinition) { + ts.Columns = append(ts.Columns, cd) +} + +// AddIndex appends the given index to the list in the spec +func (ts *TableSpec) AddIndex(id *IndexDefinition) { + ts.Indexes = append(ts.Indexes, id) +} + +func (ts *TableSpec) walkSubtree(visit Visit) error { + if ts == nil { + return nil + } + + for _, n := range ts.Columns { + if err := Walk(visit, n); err != nil { + return err + } + } + + for _, n := range ts.Indexes { + if err := Walk(visit, n); err != nil { + return err + } + } + + return nil +} + +// ColumnDefinition describes a column in a CREATE TABLE statement +type ColumnDefinition struct { + Name ColIdent + Type ColumnType +} + +// Format formats the node. +func (col *ColumnDefinition) Format(buf *TrackedBuffer) { + buf.Myprintf("%v %v", col.Name, &col.Type) +} + +func (col *ColumnDefinition) walkSubtree(visit Visit) error { + if col == nil { + return nil + } + return Walk( + visit, + col.Name, + &col.Type, + ) +} + +// ColumnType represents a sql type in a CREATE TABLE statement +// All optional fields are nil if not specified +type ColumnType struct { + // The base type string + Type string + + // Generic field options. 
+ NotNull BoolVal + Autoincrement BoolVal + Default *SQLVal + OnUpdate *SQLVal + Comment *SQLVal + + // Numeric field options + Length *SQLVal + Unsigned BoolVal + Zerofill BoolVal + Scale *SQLVal + + // Text field options + Charset string + Collate string + + // Enum values + EnumValues []string + + // Key specification + KeyOpt ColumnKeyOption +} + +// Format returns a canonical string representation of the type and all relevant options +func (ct *ColumnType) Format(buf *TrackedBuffer) { + buf.Myprintf("%s", ct.Type) + + if ct.Length != nil && ct.Scale != nil { + buf.Myprintf("(%v,%v)", ct.Length, ct.Scale) + + } else if ct.Length != nil { + buf.Myprintf("(%v)", ct.Length) + } + + if ct.EnumValues != nil { + buf.Myprintf("(%s)", strings.Join(ct.EnumValues, ", ")) + } + + opts := make([]string, 0, 16) + if ct.Unsigned { + opts = append(opts, keywordStrings[UNSIGNED]) + } + if ct.Zerofill { + opts = append(opts, keywordStrings[ZEROFILL]) + } + if ct.Charset != "" { + opts = append(opts, keywordStrings[CHARACTER], keywordStrings[SET], ct.Charset) + } + if ct.Collate != "" { + opts = append(opts, keywordStrings[COLLATE], ct.Collate) + } + if ct.NotNull { + opts = append(opts, keywordStrings[NOT], keywordStrings[NULL]) + } + if ct.Default != nil { + opts = append(opts, keywordStrings[DEFAULT], String(ct.Default)) + } + if ct.OnUpdate != nil { + opts = append(opts, keywordStrings[ON], keywordStrings[UPDATE], String(ct.OnUpdate)) + } + if ct.Autoincrement { + opts = append(opts, keywordStrings[AUTO_INCREMENT]) + } + if ct.Comment != nil { + opts = append(opts, keywordStrings[COMMENT_KEYWORD], String(ct.Comment)) + } + if ct.KeyOpt == colKeyPrimary { + opts = append(opts, keywordStrings[PRIMARY], keywordStrings[KEY]) + } + if ct.KeyOpt == colKeyUnique { + opts = append(opts, keywordStrings[UNIQUE]) + } + if ct.KeyOpt == colKeyUniqueKey { + opts = append(opts, keywordStrings[UNIQUE], keywordStrings[KEY]) + } + if ct.KeyOpt == colKeySpatialKey { + opts = append(opts, keywordStrings[SPATIAL], keywordStrings[KEY]) + } + if ct.KeyOpt == colKey { + opts = append(opts, keywordStrings[KEY]) + } + + if len(opts) != 0 { + buf.Myprintf(" %s", strings.Join(opts, " ")) + } +} + +// DescribeType returns the abbreviated type information as required for +// describe table +func (ct *ColumnType) DescribeType() string { + buf := NewTrackedBuffer(nil) + buf.Myprintf("%s", ct.Type) + if ct.Length != nil && ct.Scale != nil { + buf.Myprintf("(%v,%v)", ct.Length, ct.Scale) + } else if ct.Length != nil { + buf.Myprintf("(%v)", ct.Length) + } + + opts := make([]string, 0, 16) + if ct.Unsigned { + opts = append(opts, keywordStrings[UNSIGNED]) + } + if ct.Zerofill { + opts = append(opts, keywordStrings[ZEROFILL]) + } + if len(opts) != 0 { + buf.Myprintf(" %s", strings.Join(opts, " ")) + } + return buf.String() +} + +// SQLType returns the sqltypes type code for the given column +func (ct *ColumnType) SQLType() querypb.Type { + switch ct.Type { + case keywordStrings[TINYINT]: + if ct.Unsigned { + return sqltypes.Uint8 + } + return sqltypes.Int8 + case keywordStrings[SMALLINT]: + if ct.Unsigned { + return sqltypes.Uint16 + } + return sqltypes.Int16 + case keywordStrings[MEDIUMINT]: + if ct.Unsigned { + return sqltypes.Uint24 + } + return sqltypes.Int24 + case keywordStrings[INT]: + fallthrough + case keywordStrings[INTEGER]: + if ct.Unsigned { + return sqltypes.Uint32 + } + return sqltypes.Int32 + case keywordStrings[BIGINT]: + if ct.Unsigned { + return sqltypes.Uint64 + } + return sqltypes.Int64 + case 
keywordStrings[TEXT]: + return sqltypes.Text + case keywordStrings[TINYTEXT]: + return sqltypes.Text + case keywordStrings[MEDIUMTEXT]: + return sqltypes.Text + case keywordStrings[LONGTEXT]: + return sqltypes.Text + case keywordStrings[BLOB]: + return sqltypes.Blob + case keywordStrings[TINYBLOB]: + return sqltypes.Blob + case keywordStrings[MEDIUMBLOB]: + return sqltypes.Blob + case keywordStrings[LONGBLOB]: + return sqltypes.Blob + case keywordStrings[CHAR]: + return sqltypes.Char + case keywordStrings[VARCHAR]: + return sqltypes.VarChar + case keywordStrings[BINARY]: + return sqltypes.Binary + case keywordStrings[VARBINARY]: + return sqltypes.VarBinary + case keywordStrings[DATE]: + return sqltypes.Date + case keywordStrings[TIME]: + return sqltypes.Time + case keywordStrings[DATETIME]: + return sqltypes.Datetime + case keywordStrings[TIMESTAMP]: + return sqltypes.Timestamp + case keywordStrings[YEAR]: + return sqltypes.Year + case keywordStrings[FLOAT_TYPE]: + return sqltypes.Float32 + case keywordStrings[DOUBLE]: + return sqltypes.Float64 + case keywordStrings[DECIMAL]: + return sqltypes.Decimal + case keywordStrings[BIT]: + return sqltypes.Bit + case keywordStrings[ENUM]: + return sqltypes.Enum + case keywordStrings[SET]: + return sqltypes.Set + case keywordStrings[JSON]: + return sqltypes.TypeJSON + case keywordStrings[GEOMETRY]: + return sqltypes.Geometry + case keywordStrings[POINT]: + return sqltypes.Geometry + case keywordStrings[LINESTRING]: + return sqltypes.Geometry + case keywordStrings[POLYGON]: + return sqltypes.Geometry + case keywordStrings[GEOMETRYCOLLECTION]: + return sqltypes.Geometry + case keywordStrings[MULTIPOINT]: + return sqltypes.Geometry + case keywordStrings[MULTILINESTRING]: + return sqltypes.Geometry + case keywordStrings[MULTIPOLYGON]: + return sqltypes.Geometry + } + panic("unimplemented type " + ct.Type) +} + +func (ct *ColumnType) walkSubtree(visit Visit) error { + return nil +} + +// IndexDefinition describes an index in a CREATE TABLE statement +type IndexDefinition struct { + Info *IndexInfo + Columns []*IndexColumn + Options []*IndexOption +} + +// Format formats the node. +func (idx *IndexDefinition) Format(buf *TrackedBuffer) { + buf.Myprintf("%v (", idx.Info) + for i, col := range idx.Columns { + if i != 0 { + buf.Myprintf(", %v", col.Column) + } else { + buf.Myprintf("%v", col.Column) + } + if col.Length != nil { + buf.Myprintf("(%v)", col.Length) + } + } + buf.Myprintf(")") + + for _, opt := range idx.Options { + buf.Myprintf(" %s", opt.Name) + if opt.Using != "" { + buf.Myprintf(" %s", opt.Using) + } else { + buf.Myprintf(" %v", opt.Value) + } + } +} + +func (idx *IndexDefinition) walkSubtree(visit Visit) error { + if idx == nil { + return nil + } + + for _, n := range idx.Columns { + if err := Walk(visit, n.Column); err != nil { + return err + } + } + + return nil +} + +// IndexInfo describes the name and type of an index in a CREATE TABLE statement +type IndexInfo struct { + Type string + Name ColIdent + Primary bool + Spatial bool + Unique bool +} + +// Format formats the node. 
+func (ii *IndexInfo) Format(buf *TrackedBuffer) { + if ii.Primary { + buf.Myprintf("%s", ii.Type) + } else { + buf.Myprintf("%s %v", ii.Type, ii.Name) + } +} + +func (ii *IndexInfo) walkSubtree(visit Visit) error { + return Walk(visit, ii.Name) +} + +// IndexColumn describes a column in an index definition with optional length +type IndexColumn struct { + Column ColIdent + Length *SQLVal +} + +// LengthScaleOption is used for types that have an optional length +// and scale +type LengthScaleOption struct { + Length *SQLVal + Scale *SQLVal +} + +// IndexOption is used for trailing options for indexes: COMMENT, KEY_BLOCK_SIZE, USING +type IndexOption struct { + Name string + Value *SQLVal + Using string +} + +// ColumnKeyOption indicates whether or not the given column is defined as an +// index element and contains the type of the option +type ColumnKeyOption int + +const ( + colKeyNone ColumnKeyOption = iota + colKeyPrimary + colKeySpatialKey + colKeyUnique + colKeyUniqueKey + colKey +) + +// VindexSpec defines a vindex for a CREATE VINDEX or DROP VINDEX statement +type VindexSpec struct { + Name ColIdent + Type ColIdent + Params []VindexParam +} + +// ParseParams parses the vindex parameter list, pulling out the special-case +// "owner" parameter +func (node *VindexSpec) ParseParams() (string, map[string]string) { + var owner string + params := map[string]string{} + for _, p := range node.Params { + if p.Key.Lowered() == VindexOwnerStr { + owner = p.Val + } else { + params[p.Key.String()] = p.Val + } + } + return owner, params +} + +// Format formats the node. The "CREATE VINDEX" preamble was formatted in +// the containing DDL node Format, so this just prints the type, any +// parameters, and optionally the owner +func (node *VindexSpec) Format(buf *TrackedBuffer) { + buf.Myprintf("using %v", node.Type) + + numParams := len(node.Params) + if numParams != 0 { + buf.Myprintf(" with ") + for i, p := range node.Params { + if i != 0 { + buf.Myprintf(", ") + } + buf.Myprintf("%v", p) + } + } +} + +func (node *VindexSpec) walkSubtree(visit Visit) error { + err := Walk(visit, + node.Name, + ) + + if err != nil { + return err + } + + for _, p := range node.Params { + err := Walk(visit, p) + + if err != nil { + return err + } + } + return nil +} + +// VindexParam defines a key/value parameter for a CREATE VINDEX statement +type VindexParam struct { + Key ColIdent + Val string +} + +// Format formats the node. +func (node VindexParam) Format(buf *TrackedBuffer) { + buf.Myprintf("%s=%s", node.Key.String(), node.Val) +} + +func (node VindexParam) walkSubtree(visit Visit) error { + return Walk(visit, + node.Key, + ) +} + +// Show represents a show statement. +type Show struct { + Type string + OnTable TableName + ShowTablesOpt *ShowTablesOpt + Scope string +} + +// Format formats the node. 
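+// For example (editor's sketch): a bare SHOW prints only its type,
+//
+//	_ = String(&Show{Type: "databases"}) // "show databases"
+//
+// while a non-empty Scope is printed between "show" and the type, as in
+// "show global status".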
+func (node *Show) Format(buf *TrackedBuffer) { + if node.Type == "tables" && node.ShowTablesOpt != nil { + opt := node.ShowTablesOpt + if opt.DbName != "" { + if opt.Filter != nil { + buf.Myprintf("show %s%stables from %s %v", opt.Extended, opt.Full, opt.DbName, opt.Filter) + } else { + buf.Myprintf("show %s%stables from %s", opt.Extended, opt.Full, opt.DbName) + } + } else { + if opt.Filter != nil { + buf.Myprintf("show %s%stables %v", opt.Extended, opt.Full, opt.Filter) + } else { + buf.Myprintf("show %s%stables", opt.Extended, opt.Full) + } + } + return + } + if node.Scope == "" { + buf.Myprintf("show %s", node.Type) + } else { + buf.Myprintf("show %s %s", node.Scope, node.Type) + } + if node.HasOnTable() { + buf.Myprintf(" on %v", node.OnTable) + } +} + +// HasOnTable returns true if the show statement has an "on" clause +func (node *Show) HasOnTable() bool { + return node.OnTable.Name.v != "" +} + +func (node *Show) walkSubtree(visit Visit) error { + return nil +} + +// ShowTablesOpt is show tables option +type ShowTablesOpt struct { + Extended string + Full string + DbName string + Filter *ShowFilter +} + +// ShowFilter is show tables filter +type ShowFilter struct { + Like string + Filter Expr +} + +// Format formats the node. +func (node *ShowFilter) Format(buf *TrackedBuffer) { + if node.Like != "" { + buf.Myprintf("like '%s'", node.Like) + } else { + buf.Myprintf("where %v", node.Filter) + } +} + +func (node *ShowFilter) walkSubtree(visit Visit) error { + return nil +} + +// Use represents a use statement. +type Use struct { + DBName TableIdent +} + +// Format formats the node. +func (node *Use) Format(buf *TrackedBuffer) { + if node.DBName.v != "" { + buf.Myprintf("use %v", node.DBName) + } else { + buf.Myprintf("use") + } +} + +func (node *Use) walkSubtree(visit Visit) error { + return Walk(visit, node.DBName) +} + +// Begin represents a Begin statement. +type Begin struct{} + +// Format formats the node. +func (node *Begin) Format(buf *TrackedBuffer) { + buf.WriteString("begin") +} + +func (node *Begin) walkSubtree(visit Visit) error { + return nil +} + +// Commit represents a Commit statement. +type Commit struct{} + +// Format formats the node. +func (node *Commit) Format(buf *TrackedBuffer) { + buf.WriteString("commit") +} + +func (node *Commit) walkSubtree(visit Visit) error { + return nil +} + +// Rollback represents a Rollback statement. +type Rollback struct{} + +// Format formats the node. +func (node *Rollback) Format(buf *TrackedBuffer) { + buf.WriteString("rollback") +} + +func (node *Rollback) walkSubtree(visit Visit) error { + return nil +} + +// OtherRead represents a DESCRIBE, or EXPLAIN statement. +// It should be used only as an indicator. It does not contain +// the full AST for the statement. +type OtherRead struct{} + +// Format formats the node. +func (node *OtherRead) Format(buf *TrackedBuffer) { + buf.WriteString("otherread") +} + +func (node *OtherRead) walkSubtree(visit Visit) error { + return nil +} + +// OtherAdmin represents a misc statement that relies on ADMIN privileges, +// such as REPAIR, OPTIMIZE, or TRUNCATE statement. +// It should be used only as an indicator. It does not contain +// the full AST for the statement. +type OtherAdmin struct{} + +// Format formats the node. +func (node *OtherAdmin) Format(buf *TrackedBuffer) { + buf.WriteString("otheradmin") +} + +func (node *OtherAdmin) walkSubtree(visit Visit) error { + return nil +} + +// Comments represents a list of comments. +type Comments [][]byte + +// Format formats the node. 
+func (node Comments) Format(buf *TrackedBuffer) { + for _, c := range node { + buf.Myprintf("%s ", c) + } +} + +func (node Comments) walkSubtree(visit Visit) error { + return nil +} + +// SelectExprs represents SELECT expressions. +type SelectExprs []SelectExpr + +// Format formats the node. +func (node SelectExprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +func (node SelectExprs) walkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// SelectExpr represents a SELECT expression. +type SelectExpr interface { + iSelectExpr() + SQLNode +} + +func (*StarExpr) iSelectExpr() {} +func (*AliasedExpr) iSelectExpr() {} +func (Nextval) iSelectExpr() {} + +// StarExpr defines a '*' or 'table.*' expression. +type StarExpr struct { + TableName TableName +} + +// Format formats the node. +func (node *StarExpr) Format(buf *TrackedBuffer) { + if !node.TableName.IsEmpty() { + buf.Myprintf("%v.", node.TableName) + } + buf.Myprintf("*") +} + +func (node *StarExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.TableName, + ) +} + +// AliasedExpr defines an aliased SELECT expression. +type AliasedExpr struct { + Expr Expr + As ColIdent +} + +// Format formats the node. +func (node *AliasedExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v", node.Expr) + if !node.As.IsEmpty() { + buf.Myprintf(" as %v", node.As) + } +} + +func (node *AliasedExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + node.As, + ) +} + +// Nextval defines the NEXT VALUE expression. +type Nextval struct { + Expr Expr +} + +// Format formats the node. +func (node Nextval) Format(buf *TrackedBuffer) { + buf.Myprintf("next %v values", node.Expr) +} + +func (node Nextval) walkSubtree(visit Visit) error { + return Walk(visit, node.Expr) +} + +// Columns represents an insert column list. +type Columns []ColIdent + +// Format formats the node. +func (node Columns) Format(buf *TrackedBuffer) { + if node == nil { + return + } + prefix := "(" + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } + buf.WriteString(")") +} + +func (node Columns) walkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// FindColumn finds a column in the column list, returning +// the index if it exists or -1 otherwise +func (node Columns) FindColumn(col ColIdent) int { + for i, colName := range node { + if colName.Equal(col) { + return i + } + } + return -1 +} + +// Partitions is a type alias for Columns so we can handle printing efficiently +type Partitions Columns + +// Format formats the node +func (node Partitions) Format(buf *TrackedBuffer) { + if node == nil { + return + } + prefix := " partition (" + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } + buf.WriteString(")") +} + +func (node Partitions) walkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// TableExprs represents a list of table expressions. +type TableExprs []TableExpr + +// Format formats the node. 
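+// For example (editor's sketch), elements print comma-separated:
+//
+//	exprs := TableExprs{
+//		&AliasedTableExpr{Expr: TableName{Name: NewTableIdent("t1")}, As: NewTableIdent("a")},
+//		&AliasedTableExpr{Expr: TableName{Name: NewTableIdent("t2")}},
+//	}
+//	_ = String(exprs) // "t1 as a, t2"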
+func (node TableExprs) Format(buf *TrackedBuffer) {
+	var prefix string
+	for _, n := range node {
+		buf.Myprintf("%s%v", prefix, n)
+		prefix = ", "
+	}
+}
+
+func (node TableExprs) walkSubtree(visit Visit) error {
+	for _, n := range node {
+		if err := Walk(visit, n); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// TableExpr represents a table expression.
+type TableExpr interface {
+	iTableExpr()
+	SQLNode
+}
+
+func (*AliasedTableExpr) iTableExpr() {}
+func (*ParenTableExpr) iTableExpr()   {}
+func (*JoinTableExpr) iTableExpr()    {}
+
+// AliasedTableExpr represents a table expression
+// coupled with an optional alias or index hint.
+// If As is empty, no alias was used.
+type AliasedTableExpr struct {
+	Expr       SimpleTableExpr
+	Partitions Partitions
+	As         TableIdent
+	Hints      *IndexHints
+}
+
+// Format formats the node.
+func (node *AliasedTableExpr) Format(buf *TrackedBuffer) {
+	buf.Myprintf("%v%v", node.Expr, node.Partitions)
+	if !node.As.IsEmpty() {
+		buf.Myprintf(" as %v", node.As)
+	}
+	if node.Hints != nil {
+		// Hint node provides the space padding.
+		buf.Myprintf("%v", node.Hints)
+	}
+}
+
+func (node *AliasedTableExpr) walkSubtree(visit Visit) error {
+	if node == nil {
+		return nil
+	}
+	return Walk(
+		visit,
+		node.Expr,
+		node.As,
+		node.Hints,
+	)
+}
+
+// RemoveHints returns a new AliasedTableExpr with the hints removed.
+func (node *AliasedTableExpr) RemoveHints() *AliasedTableExpr {
+	noHints := *node
+	noHints.Hints = nil
+	return &noHints
+}
+
+// SimpleTableExpr represents a simple table expression.
+type SimpleTableExpr interface {
+	iSimpleTableExpr()
+	SQLNode
+}
+
+func (TableName) iSimpleTableExpr() {}
+func (*Subquery) iSimpleTableExpr() {}
+
+// TableNames is a list of TableName.
+type TableNames []TableName
+
+// Format formats the node.
+func (node TableNames) Format(buf *TrackedBuffer) {
+	var prefix string
+	for _, n := range node {
+		buf.Myprintf("%s%v", prefix, n)
+		prefix = ", "
+	}
+}
+
+func (node TableNames) walkSubtree(visit Visit) error {
+	for _, n := range node {
+		if err := Walk(visit, n); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// TableName represents a table name.
+// Qualifier, if specified, represents a database or keyspace.
+// TableName is a value struct whose fields are case sensitive.
+// This means two TableName vars can be compared for equality
+// and a TableName can also be used as key in a map.
+type TableName struct {
+	Name, Qualifier TableIdent
+}
+
+// Format formats the node.
+func (node TableName) Format(buf *TrackedBuffer) {
+	if node.IsEmpty() {
+		return
+	}
+	if !node.Qualifier.IsEmpty() {
+		buf.Myprintf("%v.", node.Qualifier)
+	}
+	buf.Myprintf("%v", node.Name)
+}
+
+func (node TableName) walkSubtree(visit Visit) error {
+	return Walk(
+		visit,
+		node.Name,
+		node.Qualifier,
+	)
+}
+
+// IsEmpty returns true if TableName is nil or empty.
+func (node TableName) IsEmpty() bool {
+	// If Name is empty, Qualifier is also empty.
+	return node.Name.IsEmpty()
+}
+
+// ToViewName returns a TableName acceptable for use as a VIEW. VIEW names are
+// always lowercase, so ToViewName lowercases the name. Databases are
+// case-sensitive, so Qualifier is left untouched.
+func (node TableName) ToViewName() TableName {
+	return TableName{
+		Qualifier: node.Qualifier,
+		Name:      NewTableIdent(strings.ToLower(node.Name.v)),
+	}
+}
+
+// ParenTableExpr represents a parenthesized list of TableExpr.
+type ParenTableExpr struct {
+	Exprs TableExprs
+}
+
+// Format formats the node.
+func (node *ParenTableExpr) Format(buf *TrackedBuffer) {
+	buf.Myprintf("(%v)", node.Exprs)
+}
+
+func (node *ParenTableExpr) walkSubtree(visit Visit) error {
+	if node == nil {
+		return nil
+	}
+	return Walk(
+		visit,
+		node.Exprs,
+	)
+}
+
+// JoinCondition represents the join conditions (either an ON or a USING clause)
+// of a JoinTableExpr.
+type JoinCondition struct {
+	On    Expr
+	Using Columns
+}
+
+// Format formats the node.
+func (node JoinCondition) Format(buf *TrackedBuffer) {
+	if node.On != nil {
+		buf.Myprintf(" on %v", node.On)
+	}
+	if node.Using != nil {
+		buf.Myprintf(" using %v", node.Using)
+	}
+}
+
+func (node JoinCondition) walkSubtree(visit Visit) error {
+	return Walk(
+		visit,
+		node.On,
+		node.Using,
+	)
+}
+
+// JoinTableExpr represents a TableExpr that's a JOIN operation.
+type JoinTableExpr struct {
+	LeftExpr  TableExpr
+	Join      string
+	RightExpr TableExpr
+	Condition JoinCondition
+}
+
+// JoinTableExpr.Join
+const (
+	JoinStr             = "join"
+	StraightJoinStr     = "straight_join"
+	LeftJoinStr         = "left join"
+	RightJoinStr        = "right join"
+	NaturalJoinStr      = "natural join"
+	NaturalLeftJoinStr  = "natural left join"
+	NaturalRightJoinStr = "natural right join"
+)
+
+// Format formats the node.
+func (node *JoinTableExpr) Format(buf *TrackedBuffer) {
+	buf.Myprintf("%v %s %v%v", node.LeftExpr, node.Join, node.RightExpr, node.Condition)
+}
+
+func (node *JoinTableExpr) walkSubtree(visit Visit) error {
+	if node == nil {
+		return nil
+	}
+	return Walk(
+		visit,
+		node.LeftExpr,
+		node.RightExpr,
+		node.Condition,
+	)
+}
+
+// IndexHints represents a list of index hints.
+type IndexHints struct {
+	Type    string
+	Indexes []ColIdent
+}
+
+// Index hints.
+const (
+	UseStr    = "use "
+	IgnoreStr = "ignore "
+	ForceStr  = "force "
+)
+
+// Format formats the node.
+func (node *IndexHints) Format(buf *TrackedBuffer) {
+	buf.Myprintf(" %sindex ", node.Type)
+	prefix := "("
+	for _, n := range node.Indexes {
+		buf.Myprintf("%s%v", prefix, n)
+		prefix = ", "
+	}
+	buf.Myprintf(")")
+}
+
+func (node *IndexHints) walkSubtree(visit Visit) error {
+	if node == nil {
+		return nil
+	}
+	for _, n := range node.Indexes {
+		if err := Walk(visit, n); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Where represents a WHERE or HAVING clause.
+type Where struct {
+	Type string
+	Expr Expr
+}
+
+// Where.Type
+const (
+	WhereStr  = "where"
+	HavingStr = "having"
+)
+
+// NewWhere creates a WHERE or HAVING clause out
+// of an Expr. If the expression is nil, it returns nil.
+func NewWhere(typ string, expr Expr) *Where {
+	if expr == nil {
+		return nil
+	}
+	return &Where{Type: typ, Expr: expr}
+}
+
+// Format formats the node.
+func (node *Where) Format(buf *TrackedBuffer) {
+	if node == nil || node.Expr == nil {
+		return
+	}
+	buf.Myprintf(" %s %v", node.Type, node.Expr)
+}
+
+func (node *Where) walkSubtree(visit Visit) error {
+	if node == nil {
+		return nil
+	}
+	return Walk(
+		visit,
+		node.Expr,
+	)
+}
+
+// Expr represents an expression.
+type Expr interface {
+	iExpr()
+	// replace replaces any subexpression that matches
+	// from with to. The implementation can use the
+	// replaceExprs convenience function.
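+	//
+	// Editor's sketch of the contract, using the package-level
+	// ReplaceExpr defined below ("left" stands in for an existing
+	// subexpression):
+	//
+	//	var e Expr = &AndExpr{Left: left, Right: right}
+	//	e = ReplaceExpr(e, left, NewIntVal([]byte("1"))) // left branch becomes the literal 1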
+ replace(from, to Expr) bool + SQLNode +} + +func (*AndExpr) iExpr() {} +func (*OrExpr) iExpr() {} +func (*NotExpr) iExpr() {} +func (*ParenExpr) iExpr() {} +func (*ComparisonExpr) iExpr() {} +func (*RangeCond) iExpr() {} +func (*IsExpr) iExpr() {} +func (*ExistsExpr) iExpr() {} +func (*SQLVal) iExpr() {} +func (*NullVal) iExpr() {} +func (BoolVal) iExpr() {} +func (*ColName) iExpr() {} +func (ValTuple) iExpr() {} +func (*Subquery) iExpr() {} +func (ListArg) iExpr() {} +func (*BinaryExpr) iExpr() {} +func (*UnaryExpr) iExpr() {} +func (*IntervalExpr) iExpr() {} +func (*CollateExpr) iExpr() {} +func (*FuncExpr) iExpr() {} +func (*CaseExpr) iExpr() {} +func (*ValuesFuncExpr) iExpr() {} +func (*ConvertExpr) iExpr() {} +func (*SubstrExpr) iExpr() {} +func (*ConvertUsingExpr) iExpr() {} +func (*MatchExpr) iExpr() {} +func (*GroupConcatExpr) iExpr() {} +func (*Default) iExpr() {} + +// ReplaceExpr finds the from expression from root +// and replaces it with to. If from matches root, +// then to is returned. +func ReplaceExpr(root, from, to Expr) Expr { + if root == from { + return to + } + root.replace(from, to) + return root +} + +// replaceExprs is a convenience function used by implementors +// of the replace method. +func replaceExprs(from, to Expr, exprs ...*Expr) bool { + for _, expr := range exprs { + if *expr == nil { + continue + } + if *expr == from { + *expr = to + return true + } + if (*expr).replace(from, to) { + return true + } + } + return false +} + +// Exprs represents a list of value expressions. +// It's not a valid expression because it's not parenthesized. +type Exprs []Expr + +// Format formats the node. +func (node Exprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +func (node Exprs) walkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// AndExpr represents an AND expression. +type AndExpr struct { + Left, Right Expr +} + +// Format formats the node. +func (node *AndExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v and %v", node.Left, node.Right) +} + +func (node *AndExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Left, + node.Right, + ) +} + +func (node *AndExpr) replace(from, to Expr) bool { + return replaceExprs(from, to, &node.Left, &node.Right) +} + +// OrExpr represents an OR expression. +type OrExpr struct { + Left, Right Expr +} + +// Format formats the node. +func (node *OrExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v or %v", node.Left, node.Right) +} + +func (node *OrExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Left, + node.Right, + ) +} + +func (node *OrExpr) replace(from, to Expr) bool { + return replaceExprs(from, to, &node.Left, &node.Right) +} + +// NotExpr represents a NOT expression. +type NotExpr struct { + Expr Expr +} + +// Format formats the node. +func (node *NotExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("not %v", node.Expr) +} + +func (node *NotExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +func (node *NotExpr) replace(from, to Expr) bool { + return replaceExprs(from, to, &node.Expr) +} + +// ParenExpr represents a parenthesized boolean expression. +type ParenExpr struct { + Expr Expr +} + +// Format formats the node. 
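+// For instance (editor's sketch):
+//
+//	inner := &AndExpr{
+//		Left:  &ColName{Name: NewColIdent("a")},
+//		Right: &ColName{Name: NewColIdent("b")},
+//	}
+//	_ = String(&ParenExpr{Expr: inner}) // "(a and b)"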
+func (node *ParenExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("(%v)", node.Expr) +} + +func (node *ParenExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +func (node *ParenExpr) replace(from, to Expr) bool { + return replaceExprs(from, to, &node.Expr) +} + +// ComparisonExpr represents a two-value comparison expression. +type ComparisonExpr struct { + Operator string + Left, Right Expr + Escape Expr +} + +// ComparisonExpr.Operator +const ( + EqualStr = "=" + LessThanStr = "<" + GreaterThanStr = ">" + LessEqualStr = "<=" + GreaterEqualStr = ">=" + NotEqualStr = "!=" + NullSafeEqualStr = "<=>" + InStr = "in" + NotInStr = "not in" + LikeStr = "like" + NotLikeStr = "not like" + RegexpStr = "regexp" + NotRegexpStr = "not regexp" + JSONExtractOp = "->" + JSONUnquoteExtractOp = "->>" +) + +// Format formats the node. +func (node *ComparisonExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v %s %v", node.Left, node.Operator, node.Right) + if node.Escape != nil { + buf.Myprintf(" escape %v", node.Escape) + } +} + +func (node *ComparisonExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Left, + node.Right, + node.Escape, + ) +} + +func (node *ComparisonExpr) replace(from, to Expr) bool { + return replaceExprs(from, to, &node.Left, &node.Right, &node.Escape) +} + +// RangeCond represents a BETWEEN or a NOT BETWEEN expression. +type RangeCond struct { + Operator string + Left Expr + From, To Expr +} + +// RangeCond.Operator +const ( + BetweenStr = "between" + NotBetweenStr = "not between" +) + +// Format formats the node. +func (node *RangeCond) Format(buf *TrackedBuffer) { + buf.Myprintf("%v %s %v and %v", node.Left, node.Operator, node.From, node.To) +} + +func (node *RangeCond) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Left, + node.From, + node.To, + ) +} + +func (node *RangeCond) replace(from, to Expr) bool { + return replaceExprs(from, to, &node.Left, &node.From, &node.To) +} + +// IsExpr represents an IS ... or an IS NOT ... expression. +type IsExpr struct { + Operator string + Expr Expr +} + +// IsExpr.Operator +const ( + IsNullStr = "is null" + IsNotNullStr = "is not null" + IsTrueStr = "is true" + IsNotTrueStr = "is not true" + IsFalseStr = "is false" + IsNotFalseStr = "is not false" +) + +// Format formats the node. +func (node *IsExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v %s", node.Expr, node.Operator) +} + +func (node *IsExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +func (node *IsExpr) replace(from, to Expr) bool { + return replaceExprs(from, to, &node.Expr) +} + +// ExistsExpr represents an EXISTS expression. +type ExistsExpr struct { + Subquery *Subquery +} + +// Format formats the node. +func (node *ExistsExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("exists %v", node.Subquery) +} + +func (node *ExistsExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Subquery, + ) +} + +func (node *ExistsExpr) replace(from, to Expr) bool { + return false +} + +// ExprFromValue converts the given Value into an Expr or returns an error. +func ExprFromValue(value sqltypes.Value) (Expr, error) { + // The type checks here follow the rules defined in sqltypes/types.go. 
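+	// For example (editor's note): an integral value, such as one built
+	// with a sqltypes integer constructor, becomes an IntVal via
+	// NewIntVal below, and quoted values become StrVal.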
+	switch {
+	case value.Type() == sqltypes.Null:
+		return &NullVal{}, nil
+	case value.IsIntegral():
+		return NewIntVal(value.ToBytes()), nil
+	case value.IsFloat() || value.Type() == sqltypes.Decimal:
+		return NewFloatVal(value.ToBytes()), nil
+	case value.IsQuoted():
+		return NewStrVal(value.ToBytes()), nil
+	default:
+		// We cannot support sqltypes.Expression, or any other invalid type.
+		return nil, fmt.Errorf("cannot convert value %v to AST", value)
+	}
+}
+
+// ValType specifies the type for SQLVal.
+type ValType int
+
+// These are the possible ValType values.
+// HexNum represents a 0x... value. It cannot
+// be treated as a simple value because it can
+// be interpreted differently depending on the
+// context.
+const (
+	StrVal = ValType(iota)
+	IntVal
+	FloatVal
+	HexNum
+	HexVal
+	ValArg
+	BitVal
+)
+
+// SQLVal represents a single value.
+type SQLVal struct {
+	Type ValType
+	Val  []byte
+}
+
+// NewStrVal builds a new StrVal.
+func NewStrVal(in []byte) *SQLVal {
+	return &SQLVal{Type: StrVal, Val: in}
+}
+
+// NewIntVal builds a new IntVal.
+func NewIntVal(in []byte) *SQLVal {
+	return &SQLVal{Type: IntVal, Val: in}
+}
+
+// NewFloatVal builds a new FloatVal.
+func NewFloatVal(in []byte) *SQLVal {
+	return &SQLVal{Type: FloatVal, Val: in}
+}
+
+// NewHexNum builds a new HexNum.
+func NewHexNum(in []byte) *SQLVal {
+	return &SQLVal{Type: HexNum, Val: in}
+}
+
+// NewHexVal builds a new HexVal.
+func NewHexVal(in []byte) *SQLVal {
+	return &SQLVal{Type: HexVal, Val: in}
+}
+
+// NewBitVal builds a new BitVal containing a bit literal.
+func NewBitVal(in []byte) *SQLVal {
+	return &SQLVal{Type: BitVal, Val: in}
+}
+
+// NewValArg builds a new ValArg.
+func NewValArg(in []byte) *SQLVal {
+	return &SQLVal{Type: ValArg, Val: in}
+}
+
+// Format formats the node.
+func (node *SQLVal) Format(buf *TrackedBuffer) {
+	switch node.Type {
+	case StrVal:
+		sqltypes.MakeTrusted(sqltypes.VarBinary, node.Val).EncodeSQL(buf)
+	case IntVal, FloatVal, HexNum:
+		buf.Myprintf("%s", []byte(node.Val))
+	case HexVal:
+		buf.Myprintf("X'%s'", []byte(node.Val))
+	case BitVal:
+		buf.Myprintf("B'%s'", []byte(node.Val))
+	case ValArg:
+		buf.WriteArg(string(node.Val))
+	default:
+		panic("unexpected")
+	}
+}
+
+func (node *SQLVal) walkSubtree(visit Visit) error {
+	return nil
+}
+
+func (node *SQLVal) replace(from, to Expr) bool {
+	return false
+}
+
+// HexDecode decodes the hexval into bytes.
+func (node *SQLVal) HexDecode() ([]byte, error) {
+	dst := make([]byte, hex.DecodedLen(len([]byte(node.Val))))
+	_, err := hex.Decode(dst, []byte(node.Val))
+	if err != nil {
+		return nil, err
+	}
+	return dst, err
+}
+
+// NullVal represents a NULL value.
+type NullVal struct{}
+
+// Format formats the node.
+func (node *NullVal) Format(buf *TrackedBuffer) {
+	buf.Myprintf("null")
+}
+
+func (node *NullVal) walkSubtree(visit Visit) error {
+	return nil
+}
+
+func (node *NullVal) replace(from, to Expr) bool {
+	return false
+}
+
+// BoolVal is true or false.
+type BoolVal bool
+
+// Format formats the node.
+func (node BoolVal) Format(buf *TrackedBuffer) {
+	if node {
+		buf.Myprintf("true")
+	} else {
+		buf.Myprintf("false")
+	}
+}
+
+func (node BoolVal) walkSubtree(visit Visit) error {
+	return nil
+}
+
+func (node BoolVal) replace(from, to Expr) bool {
+	return false
+}
+
+// ColName represents a column name.
+type ColName struct {
+	// Metadata is not populated by the parser.
+	// It's a placeholder for analyzers to store
+	// additional data, typically info about which
+	// table or column this node references.
+ Metadata interface{} + Name ColIdent + Qualifier TableName +} + +// Format formats the node. +func (node *ColName) Format(buf *TrackedBuffer) { + if !node.Qualifier.IsEmpty() { + buf.Myprintf("%v.", node.Qualifier) + } + buf.Myprintf("%v", node.Name) +} + +func (node *ColName) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Name, + node.Qualifier, + ) +} + +func (node *ColName) replace(from, to Expr) bool { + return false +} + +// Equal returns true if the column names match. +func (node *ColName) Equal(c *ColName) bool { + // Failsafe: ColName should not be empty. + if node == nil || c == nil { + return false + } + return node.Name.Equal(c.Name) && node.Qualifier == c.Qualifier +} + +// ColTuple represents a list of column values. +// It can be ValTuple, Subquery, ListArg. +type ColTuple interface { + iColTuple() + Expr +} + +func (ValTuple) iColTuple() {} +func (*Subquery) iColTuple() {} +func (ListArg) iColTuple() {} + +// ValTuple represents a tuple of actual values. +type ValTuple Exprs + +// Format formats the node. +func (node ValTuple) Format(buf *TrackedBuffer) { + buf.Myprintf("(%v)", Exprs(node)) +} + +func (node ValTuple) walkSubtree(visit Visit) error { + return Walk(visit, Exprs(node)) +} + +func (node ValTuple) replace(from, to Expr) bool { + for i := range node { + if replaceExprs(from, to, &node[i]) { + return true + } + } + return false +} + +// Subquery represents a subquery. +type Subquery struct { + Select SelectStatement +} + +// Format formats the node. +func (node *Subquery) Format(buf *TrackedBuffer) { + buf.Myprintf("(%v)", node.Select) +} + +func (node *Subquery) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Select, + ) +} + +func (node *Subquery) replace(from, to Expr) bool { + return false +} + +// ListArg represents a named list argument. +type ListArg []byte + +// Format formats the node. +func (node ListArg) Format(buf *TrackedBuffer) { + buf.WriteArg(string(node)) +} + +func (node ListArg) walkSubtree(visit Visit) error { + return nil +} + +func (node ListArg) replace(from, to Expr) bool { + return false +} + +// BinaryExpr represents a binary value expression. +type BinaryExpr struct { + Operator string + Left, Right Expr +} + +// BinaryExpr.Operator +const ( + BitAndStr = "&" + BitOrStr = "|" + BitXorStr = "^" + PlusStr = "+" + MinusStr = "-" + MultStr = "*" + DivStr = "/" + IntDivStr = "div" + ModStr = "%" + ShiftLeftStr = "<<" + ShiftRightStr = ">>" +) + +// Format formats the node. +func (node *BinaryExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v %s %v", node.Left, node.Operator, node.Right) +} + +func (node *BinaryExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Left, + node.Right, + ) +} + +func (node *BinaryExpr) replace(from, to Expr) bool { + return replaceExprs(from, to, &node.Left, &node.Right) +} + +// UnaryExpr represents a unary value expression. +type UnaryExpr struct { + Operator string + Expr Expr +} + +// UnaryExpr.Operator +const ( + UPlusStr = "+" + UMinusStr = "-" + TildaStr = "~" + BangStr = "!" + BinaryStr = "binary " + UBinaryStr = "_binary " +) + +// Format formats the node. 
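+// Editor's sketch: nested unary operators are separated by a space so that,
+// for example, a doubly negated literal prints as "- -1" rather than "--1":
+//
+//	inner := &UnaryExpr{Operator: UMinusStr, Expr: NewIntVal([]byte("1"))}
+//	_ = String(&UnaryExpr{Operator: UMinusStr, Expr: inner}) // "- -1"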
+func (node *UnaryExpr) Format(buf *TrackedBuffer) { + if _, unary := node.Expr.(*UnaryExpr); unary { + buf.Myprintf("%s %v", node.Operator, node.Expr) + return + } + buf.Myprintf("%s%v", node.Operator, node.Expr) +} + +func (node *UnaryExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +func (node *UnaryExpr) replace(from, to Expr) bool { + return replaceExprs(from, to, &node.Expr) +} + +// IntervalExpr represents a date-time INTERVAL expression. +type IntervalExpr struct { + Expr Expr + Unit string +} + +// Format formats the node. +func (node *IntervalExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("interval %v %s", node.Expr, node.Unit) +} + +func (node *IntervalExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +func (node *IntervalExpr) replace(from, to Expr) bool { + return replaceExprs(from, to, &node.Expr) +} + +// CollateExpr represents dynamic collate operator. +type CollateExpr struct { + Expr Expr + Charset string +} + +// Format formats the node. +func (node *CollateExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v collate %s", node.Expr, node.Charset) +} + +func (node *CollateExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +func (node *CollateExpr) replace(from, to Expr) bool { + return replaceExprs(from, to, &node.Expr) +} + +// FuncExpr represents a function call. +type FuncExpr struct { + Qualifier TableIdent + Name ColIdent + Distinct bool + Exprs SelectExprs +} + +// Format formats the node. +func (node *FuncExpr) Format(buf *TrackedBuffer) { + var distinct string + if node.Distinct { + distinct = "distinct " + } + if !node.Qualifier.IsEmpty() { + buf.Myprintf("%v.", node.Qualifier) + } + // Function names should not be back-quoted even + // if they match a reserved word. So, print the + // name as is. + buf.Myprintf("%s(%s%v)", node.Name.String(), distinct, node.Exprs) +} + +func (node *FuncExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Qualifier, + node.Name, + node.Exprs, + ) +} + +func (node *FuncExpr) replace(from, to Expr) bool { + for _, sel := range node.Exprs { + aliased, ok := sel.(*AliasedExpr) + if !ok { + continue + } + if replaceExprs(from, to, &aliased.Expr) { + return true + } + } + return false +} + +// Aggregates is a map of all aggregate functions. +var Aggregates = map[string]bool{ + "avg": true, + "bit_and": true, + "bit_or": true, + "bit_xor": true, + "count": true, + "group_concat": true, + "max": true, + "min": true, + "std": true, + "stddev_pop": true, + "stddev_samp": true, + "stddev": true, + "sum": true, + "var_pop": true, + "var_samp": true, + "variance": true, +} + +// IsAggregate returns true if the function is an aggregate. 
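+// The lookup is against the lower-cased function name, so, as an editor's
+// sketch:
+//
+//	f := &FuncExpr{Name: NewColIdent("COUNT")}
+//	_ = f.IsAggregate() // true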
+func (node *FuncExpr) IsAggregate() bool {
+	return Aggregates[node.Name.Lowered()]
+}
+
+// GroupConcatExpr represents a call to GROUP_CONCAT
+type GroupConcatExpr struct {
+	Distinct  string
+	Exprs     SelectExprs
+	OrderBy   OrderBy
+	Separator string
+}
+
+// Format formats the node
+func (node *GroupConcatExpr) Format(buf *TrackedBuffer) {
+	buf.Myprintf("group_concat(%s%v%v%s)", node.Distinct, node.Exprs, node.OrderBy, node.Separator)
+}
+
+func (node *GroupConcatExpr) walkSubtree(visit Visit) error {
+	if node == nil {
+		return nil
+	}
+	return Walk(
+		visit,
+		node.Exprs,
+		node.OrderBy,
+	)
+}
+
+func (node *GroupConcatExpr) replace(from, to Expr) bool {
+	for _, sel := range node.Exprs {
+		aliased, ok := sel.(*AliasedExpr)
+		if !ok {
+			continue
+		}
+		if replaceExprs(from, to, &aliased.Expr) {
+			return true
+		}
+	}
+	for _, order := range node.OrderBy {
+		if replaceExprs(from, to, &order.Expr) {
+			return true
+		}
+	}
+	return false
+}
+
+// ValuesFuncExpr represents a function call.
+type ValuesFuncExpr struct {
+	Name *ColName
+}
+
+// Format formats the node.
+func (node *ValuesFuncExpr) Format(buf *TrackedBuffer) {
+	buf.Myprintf("values(%v)", node.Name)
+}
+
+func (node *ValuesFuncExpr) walkSubtree(visit Visit) error {
+	if node == nil {
+		return nil
+	}
+	return Walk(
+		visit,
+		node.Name,
+	)
+}
+
+func (node *ValuesFuncExpr) replace(from, to Expr) bool {
+	return false
+}
+
+// SubstrExpr represents a call to SUBSTR(column, from) or
+// SUBSTR(column, from, to); the alternative syntax
+// SUBSTR(column FROM from FOR to) is also supported.
+type SubstrExpr struct {
+	Name *ColName
+	From Expr
+	To   Expr
+}
+
+// Format formats the node.
+func (node *SubstrExpr) Format(buf *TrackedBuffer) {
+	if node.To == nil {
+		buf.Myprintf("substr(%v, %v)", node.Name, node.From)
+	} else {
+		buf.Myprintf("substr(%v, %v, %v)", node.Name, node.From, node.To)
+	}
+}
+
+func (node *SubstrExpr) replace(from, to Expr) bool {
+	return replaceExprs(from, to, &node.From, &node.To)
+}
+
+func (node *SubstrExpr) walkSubtree(visit Visit) error {
+	if node == nil {
+		return nil
+	}
+	return Walk(
+		visit,
+		node.Name,
+		node.From,
+		node.To,
+	)
+}
+
+// ConvertExpr represents a call to CONVERT(expr, type)
+// or its equivalent CAST(expr AS type). Both are rewritten to the former.
+type ConvertExpr struct {
+	Expr Expr
+	Type *ConvertType
+}
+
+// Format formats the node.
+func (node *ConvertExpr) Format(buf *TrackedBuffer) {
+	buf.Myprintf("convert(%v, %v)", node.Expr, node.Type)
+}
+
+func (node *ConvertExpr) walkSubtree(visit Visit) error {
+	if node == nil {
+		return nil
+	}
+	return Walk(
+		visit,
+		node.Expr,
+		node.Type,
+	)
+}
+
+func (node *ConvertExpr) replace(from, to Expr) bool {
+	return replaceExprs(from, to, &node.Expr)
+}
+
+// ConvertUsingExpr represents a call to CONVERT(expr USING charset).
+type ConvertUsingExpr struct {
+	Expr Expr
+	Type string
+}
+
+// Format formats the node.
+func (node *ConvertUsingExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("convert(%v using %s)", node.Expr, node.Type) +} + +func (node *ConvertUsingExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +func (node *ConvertUsingExpr) replace(from, to Expr) bool { + return replaceExprs(from, to, &node.Expr) +} + +// ConvertType represents the type in call to CONVERT(expr, type) +type ConvertType struct { + Type string + Length *SQLVal + Scale *SQLVal + Operator string + Charset string +} + +// this string is "character set" and this comment is required +const ( + CharacterSetStr = " character set" +) + +// Format formats the node. +func (node *ConvertType) Format(buf *TrackedBuffer) { + buf.Myprintf("%s", node.Type) + if node.Length != nil { + buf.Myprintf("(%v", node.Length) + if node.Scale != nil { + buf.Myprintf(", %v", node.Scale) + } + buf.Myprintf(")") + } + if node.Charset != "" { + buf.Myprintf("%s %s", node.Operator, node.Charset) + } +} + +func (node *ConvertType) walkSubtree(visit Visit) error { + return nil +} + +// MatchExpr represents a call to the MATCH function +type MatchExpr struct { + Columns SelectExprs + Expr Expr + Option string +} + +// MatchExpr.Option +const ( + BooleanModeStr = " in boolean mode" + NaturalLanguageModeStr = " in natural language mode" + NaturalLanguageModeWithQueryExpansionStr = " in natural language mode with query expansion" + QueryExpansionStr = " with query expansion" +) + +// Format formats the node +func (node *MatchExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("match(%v) against (%v%s)", node.Columns, node.Expr, node.Option) +} + +func (node *MatchExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Columns, + node.Expr, + ) +} + +func (node *MatchExpr) replace(from, to Expr) bool { + for _, sel := range node.Columns { + aliased, ok := sel.(*AliasedExpr) + if !ok { + continue + } + if replaceExprs(from, to, &aliased.Expr) { + return true + } + } + return replaceExprs(from, to, &node.Expr) +} + +// CaseExpr represents a CASE expression. +type CaseExpr struct { + Expr Expr + Whens []*When + Else Expr +} + +// Format formats the node. +func (node *CaseExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("case ") + if node.Expr != nil { + buf.Myprintf("%v ", node.Expr) + } + for _, when := range node.Whens { + buf.Myprintf("%v ", when) + } + if node.Else != nil { + buf.Myprintf("else %v ", node.Else) + } + buf.Myprintf("end") +} + +func (node *CaseExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + if err := Walk(visit, node.Expr); err != nil { + return err + } + for _, n := range node.Whens { + if err := Walk(visit, n); err != nil { + return err + } + } + return Walk(visit, node.Else) +} + +func (node *CaseExpr) replace(from, to Expr) bool { + for _, when := range node.Whens { + if replaceExprs(from, to, &when.Cond, &when.Val) { + return true + } + } + return replaceExprs(from, to, &node.Expr, &node.Else) +} + +// Default represents a DEFAULT expression. +type Default struct { + ColName string +} + +// Format formats the node. +func (node *Default) Format(buf *TrackedBuffer) { + buf.Myprintf("default") + if node.ColName != "" { + buf.Myprintf("(%s)", node.ColName) + } +} + +func (node *Default) walkSubtree(visit Visit) error { + return nil +} + +func (node *Default) replace(from, to Expr) bool { + return false +} + +// When represents a WHEN sub-expression. 
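+// For example (editor's sketch):
+//
+//	w := &When{Cond: &ColName{Name: NewColIdent("a")}, Val: NewStrVal([]byte("x"))}
+//	_ = String(w) // "when a then 'x'"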
+type When struct { + Cond Expr + Val Expr +} + +// Format formats the node. +func (node *When) Format(buf *TrackedBuffer) { + buf.Myprintf("when %v then %v", node.Cond, node.Val) +} + +func (node *When) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Cond, + node.Val, + ) +} + +// GroupBy represents a GROUP BY clause. +type GroupBy []Expr + +// Format formats the node. +func (node GroupBy) Format(buf *TrackedBuffer) { + prefix := " group by " + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +func (node GroupBy) walkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// OrderBy represents an ORDER By clause. +type OrderBy []*Order + +// Format formats the node. +func (node OrderBy) Format(buf *TrackedBuffer) { + prefix := " order by " + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +func (node OrderBy) walkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// Order represents an ordering expression. +type Order struct { + Expr Expr + Direction string +} + +// Order.Direction +const ( + AscScr = "asc" + DescScr = "desc" +) + +// Format formats the node. +func (node *Order) Format(buf *TrackedBuffer) { + if node, ok := node.Expr.(*NullVal); ok { + buf.Myprintf("%v", node) + return + } + if node, ok := node.Expr.(*FuncExpr); ok { + if node.Name.Lowered() == "rand" { + buf.Myprintf("%v", node) + return + } + } + + buf.Myprintf("%v %s", node.Expr, node.Direction) +} + +func (node *Order) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Expr, + ) +} + +// Limit represents a LIMIT clause. +type Limit struct { + Offset, Rowcount Expr +} + +// Format formats the node. +func (node *Limit) Format(buf *TrackedBuffer) { + if node == nil { + return + } + buf.Myprintf(" limit ") + if node.Offset != nil { + buf.Myprintf("%v, ", node.Offset) + } + buf.Myprintf("%v", node.Rowcount) +} + +func (node *Limit) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Offset, + node.Rowcount, + ) +} + +// Values represents a VALUES clause. +type Values []ValTuple + +// Format formats the node. +func (node Values) Format(buf *TrackedBuffer) { + prefix := "values " + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +func (node Values) walkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// UpdateExprs represents a list of update expressions. +type UpdateExprs []*UpdateExpr + +// Format formats the node. +func (node UpdateExprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +func (node UpdateExprs) walkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// UpdateExpr represents an update expression. +type UpdateExpr struct { + Name *ColName + Expr Expr +} + +// Format formats the node. 
+func (node *UpdateExpr) Format(buf *TrackedBuffer) { + buf.Myprintf("%v = %v", node.Name, node.Expr) +} + +func (node *UpdateExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Name, + node.Expr, + ) +} + +// SetExprs represents a list of set expressions. +type SetExprs []*SetExpr + +// Format formats the node. +func (node SetExprs) Format(buf *TrackedBuffer) { + var prefix string + for _, n := range node { + buf.Myprintf("%s%v", prefix, n) + prefix = ", " + } +} + +func (node SetExprs) walkSubtree(visit Visit) error { + for _, n := range node { + if err := Walk(visit, n); err != nil { + return err + } + } + return nil +} + +// SetExpr represents a set expression. +type SetExpr struct { + Name ColIdent + Expr Expr +} + +// Format formats the node. +func (node *SetExpr) Format(buf *TrackedBuffer) { + // We don't have to backtick set variable names. + if node.Name.EqualString("charset") || node.Name.EqualString("names") { + buf.Myprintf("%s %v", node.Name.String(), node.Expr) + } else { + buf.Myprintf("%s = %v", node.Name.String(), node.Expr) + } +} + +func (node *SetExpr) walkSubtree(visit Visit) error { + if node == nil { + return nil + } + return Walk( + visit, + node.Name, + node.Expr, + ) +} + +// OnDup represents an ON DUPLICATE KEY clause. +type OnDup UpdateExprs + +// Format formats the node. +func (node OnDup) Format(buf *TrackedBuffer) { + if node == nil { + return + } + buf.Myprintf(" on duplicate key update %v", UpdateExprs(node)) +} + +func (node OnDup) walkSubtree(visit Visit) error { + return Walk(visit, UpdateExprs(node)) +} + +// ColIdent is a case insensitive SQL identifier. It will be escaped with +// backquotes if necessary. +type ColIdent struct { + // This artifact prevents this struct from being compared + // with itself. It consumes no space as long as it's not the + // last field in the struct. + _ [0]struct{ _ []byte } + val, lowered string +} + +// NewColIdent makes a new ColIdent. +func NewColIdent(str string) ColIdent { + return ColIdent{ + val: str, + } +} + +// Format formats the node. +func (node ColIdent) Format(buf *TrackedBuffer) { + formatID(buf, node.val, node.Lowered()) +} + +func (node ColIdent) walkSubtree(visit Visit) error { + return nil +} + +// IsEmpty returns true if the name is empty. +func (node ColIdent) IsEmpty() bool { + return node.val == "" +} + +// String returns the unescaped column name. It must +// not be used for SQL generation. Use sqlparser.String +// instead. The Stringer conformance is for usage +// in templates. +func (node ColIdent) String() string { + return node.val +} + +// CompliantName returns a compliant id name +// that can be used for a bind var. +func (node ColIdent) CompliantName() string { + return compliantName(node.val) +} + +// Lowered returns a lower-cased column name. +// This function should generally be used only for optimizing +// comparisons. +func (node ColIdent) Lowered() string { + if node.val == "" { + return "" + } + if node.lowered == "" { + node.lowered = strings.ToLower(node.val) + } + return node.lowered +} + +// Equal performs a case-insensitive compare. +func (node ColIdent) Equal(in ColIdent) bool { + return node.Lowered() == in.Lowered() +} + +// EqualString performs a case-insensitive compare with str. +func (node ColIdent) EqualString(str string) bool { + return node.Lowered() == strings.ToLower(str) +} + +// MarshalJSON marshals into JSON. 
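+// The identifier marshals as a plain JSON string (editor's sketch):
+//
+//	b, _ := json.Marshal(NewColIdent("a")) // b == []byte(`"a"`)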
+func (node ColIdent) MarshalJSON() ([]byte, error) {
+	return json.Marshal(node.val)
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (node *ColIdent) UnmarshalJSON(b []byte) error {
+	var result string
+	err := json.Unmarshal(b, &result)
+	if err != nil {
+		return err
+	}
+	node.val = result
+	return nil
+}
+
+// TableIdent is a case sensitive SQL identifier. It will be escaped with
+// backquotes if necessary.
+type TableIdent struct {
+	v string
+}
+
+// NewTableIdent creates a new TableIdent.
+func NewTableIdent(str string) TableIdent {
+	return TableIdent{v: str}
+}
+
+// Format formats the node.
+func (node TableIdent) Format(buf *TrackedBuffer) {
+	formatID(buf, node.v, strings.ToLower(node.v))
+}
+
+func (node TableIdent) walkSubtree(visit Visit) error {
+	return nil
+}
+
+// IsEmpty returns true if TableIdent is empty.
+func (node TableIdent) IsEmpty() bool {
+	return node.v == ""
+}
+
+// String returns the unescaped table name. It must
+// not be used for SQL generation. Use sqlparser.String
+// instead. The Stringer conformance is for usage
+// in templates.
+func (node TableIdent) String() string {
+	return node.v
+}
+
+// CompliantName returns a compliant id name
+// that can be used for a bind var.
+func (node TableIdent) CompliantName() string {
+	return compliantName(node.v)
+}
+
+// MarshalJSON marshals into JSON.
+func (node TableIdent) MarshalJSON() ([]byte, error) {
+	return json.Marshal(node.v)
+}
+
+// UnmarshalJSON unmarshals from JSON.
+func (node *TableIdent) UnmarshalJSON(b []byte) error {
+	var result string
+	err := json.Unmarshal(b, &result)
+	if err != nil {
+		return err
+	}
+	node.v = result
+	return nil
+}
+
+// Backtick produces a backticked literal given an input string.
+func Backtick(in string) string {
+	var buf bytes.Buffer
+	buf.WriteByte('`')
+	for _, c := range in {
+		buf.WriteRune(c)
+		if c == '`' {
+			buf.WriteByte('`')
+		}
+	}
+	buf.WriteByte('`')
+	return buf.String()
+}
+
+func formatID(buf *TrackedBuffer, original, lowered string) {
+	isDbSystemVariable := false
+	if len(original) > 1 && original[:2] == "@@" {
+		isDbSystemVariable = true
+	}
+
+	for i, c := range original {
+		if !isLetter(uint16(c)) && (!isDbSystemVariable || !isCarat(uint16(c))) {
+			if i == 0 || !isDigit(uint16(c)) {
+				goto mustEscape
+			}
+		}
+	}
+	if _, ok := keywords[lowered]; ok {
+		goto mustEscape
+	}
+	buf.Myprintf("%s", original)
+	return
+
+mustEscape:
+	buf.WriteByte('`')
+	for _, c := range original {
+		buf.WriteRune(c)
+		if c == '`' {
+			buf.WriteByte('`')
+		}
+	}
+	buf.WriteByte('`')
+}
+
+func compliantName(in string) string {
+	var buf bytes.Buffer
+	for i, c := range in {
+		if !isLetter(uint16(c)) {
+			if i == 0 || !isDigit(uint16(c)) {
+				buf.WriteByte('_')
+				continue
+			}
+		}
+		buf.WriteRune(c)
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/xwb1989/sqlparser/comments.go b/vendor/github.com/xwb1989/sqlparser/comments.go
new file mode 100644
index 000000000..a0f7f1b45
--- /dev/null
+++ b/vendor/github.com/xwb1989/sqlparser/comments.go
@@ -0,0 +1,293 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "strconv" + "strings" + "unicode" +) + +const ( + // DirectiveMultiShardAutocommit is the query comment directive to allow + // single round trip autocommit with a multi-shard statement. + DirectiveMultiShardAutocommit = "MULTI_SHARD_AUTOCOMMIT" + // DirectiveSkipQueryPlanCache skips query plan cache when set. + DirectiveSkipQueryPlanCache = "SKIP_QUERY_PLAN_CACHE" + // DirectiveQueryTimeout sets a query timeout in vtgate. Only supported for SELECTS. + DirectiveQueryTimeout = "QUERY_TIMEOUT_MS" +) + +func isNonSpace(r rune) bool { + return !unicode.IsSpace(r) +} + +// leadingCommentEnd returns the first index after all leading comments, or +// 0 if there are no leading comments. +func leadingCommentEnd(text string) (end int) { + hasComment := false + pos := 0 + for pos < len(text) { + // Eat up any whitespace. Trailing whitespace will be considered part of + // the leading comments. + nextVisibleOffset := strings.IndexFunc(text[pos:], isNonSpace) + if nextVisibleOffset < 0 { + break + } + pos += nextVisibleOffset + remainingText := text[pos:] + + // Found visible characters. Look for '/*' at the beginning + // and '*/' somewhere after that. + if len(remainingText) < 4 || remainingText[:2] != "/*" { + break + } + commentLength := 4 + strings.Index(remainingText[2:], "*/") + if commentLength < 4 { + // Missing end comment :/ + break + } + + hasComment = true + pos += commentLength + } + + if hasComment { + return pos + } + return 0 +} + +// trailingCommentStart returns the first index of trailing comments. +// If there are no trailing comments, returns the length of the input string. +func trailingCommentStart(text string) (start int) { + hasComment := false + reducedLen := len(text) + for reducedLen > 0 { + // Eat up any whitespace. Leading whitespace will be considered part of + // the trailing comments. + nextReducedLen := strings.LastIndexFunc(text[:reducedLen], isNonSpace) + 1 + if nextReducedLen == 0 { + break + } + reducedLen = nextReducedLen + if reducedLen < 4 || text[reducedLen-2:reducedLen] != "*/" { + break + } + + // Find the beginning of the comment + startCommentPos := strings.LastIndex(text[:reducedLen-2], "/*") + if startCommentPos < 0 { + // Badly formatted sql :/ + break + } + + hasComment = true + reducedLen = startCommentPos + } + + if hasComment { + return reducedLen + } + return len(text) +} + +// MarginComments holds the leading and trailing comments that surround a query. +type MarginComments struct { + Leading string + Trailing string +} + +// SplitMarginComments pulls out any leading or trailing comments from a raw sql query. +// This function also trims leading (if there's a comment) and trailing whitespace. 
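+//
+// A worked example (editor's sketch); note that margin whitespace stays
+// attached to the comments:
+//
+//	q, c := SplitMarginComments("/* lead */ select 1 /* trail */")
+//	// q == "select 1"
+//	// c.Leading == "/* lead */ ", c.Trailing == " /* trail */"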
+func SplitMarginComments(sql string) (query string, comments MarginComments) { + trailingStart := trailingCommentStart(sql) + leadingEnd := leadingCommentEnd(sql[:trailingStart]) + comments = MarginComments{ + Leading: strings.TrimLeftFunc(sql[:leadingEnd], unicode.IsSpace), + Trailing: strings.TrimRightFunc(sql[trailingStart:], unicode.IsSpace), + } + return strings.TrimFunc(sql[leadingEnd:trailingStart], unicode.IsSpace), comments +} + +// StripLeadingComments trims the SQL string and removes any leading comments +func StripLeadingComments(sql string) string { + sql = strings.TrimFunc(sql, unicode.IsSpace) + + for hasCommentPrefix(sql) { + switch sql[0] { + case '/': + // Multi line comment + index := strings.Index(sql, "*/") + if index <= 1 { + return sql + } + // don't strip /*! ... */ or /*!50700 ... */ + if len(sql) > 2 && sql[2] == '!' { + return sql + } + sql = sql[index+2:] + case '-': + // Single line comment + index := strings.Index(sql, "\n") + if index == -1 { + return sql + } + sql = sql[index+1:] + } + + sql = strings.TrimFunc(sql, unicode.IsSpace) + } + + return sql +} + +func hasCommentPrefix(sql string) bool { + return len(sql) > 1 && ((sql[0] == '/' && sql[1] == '*') || (sql[0] == '-' && sql[1] == '-')) +} + +// ExtractMysqlComment extracts the version and SQL from a comment-only query +// such as /*!50708 sql here */ +func ExtractMysqlComment(sql string) (version string, innerSQL string) { + sql = sql[3 : len(sql)-2] + + digitCount := 0 + endOfVersionIndex := strings.IndexFunc(sql, func(c rune) bool { + digitCount++ + return !unicode.IsDigit(c) || digitCount == 6 + }) + version = sql[0:endOfVersionIndex] + innerSQL = strings.TrimFunc(sql[endOfVersionIndex:], unicode.IsSpace) + + return version, innerSQL +} + +const commentDirectivePreamble = "/*vt+" + +// CommentDirectives is the parsed representation for execution directives +// conveyed in query comments +type CommentDirectives map[string]interface{} + +// ExtractCommentDirectives parses the comment list for any execution directives +// of the form: +// +// /*vt+ OPTION_ONE=1 OPTION_TWO OPTION_THREE=abcd */ +// +// It returns the map of the directive values or nil if there aren't any. 
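+//
+// For the directive comment shown above, the returned map would be (an
+// illustrative sketch, not upstream documentation):
+//
+//	CommentDirectives{"OPTION_ONE": 1, "OPTION_TWO": true, "OPTION_THREE": "abcd"}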
+func ExtractCommentDirectives(comments Comments) CommentDirectives { + if comments == nil { + return nil + } + + var vals map[string]interface{} + + for _, comment := range comments { + commentStr := string(comment) + if commentStr[0:5] != commentDirectivePreamble { + continue + } + + if vals == nil { + vals = make(map[string]interface{}) + } + + // Split on whitespace and ignore the first and last directive + // since they contain the comment start/end + directives := strings.Fields(commentStr) + for i := 1; i < len(directives)-1; i++ { + directive := directives[i] + sep := strings.IndexByte(directive, '=') + + // No value is equivalent to a true boolean + if sep == -1 { + vals[directive] = true + continue + } + + strVal := directive[sep+1:] + directive = directive[:sep] + + intVal, err := strconv.Atoi(strVal) + if err == nil { + vals[directive] = intVal + continue + } + + boolVal, err := strconv.ParseBool(strVal) + if err == nil { + vals[directive] = boolVal + continue + } + + vals[directive] = strVal + } + } + return vals +} + +// IsSet checks the directive map for the named directive and returns +// true if the directive is set and has a true/false or 0/1 value +func (d CommentDirectives) IsSet(key string) bool { + if d == nil { + return false + } + + val, ok := d[key] + if !ok { + return false + } + + boolVal, ok := val.(bool) + if ok { + return boolVal + } + + intVal, ok := val.(int) + if ok { + return intVal == 1 + } + return false +} + +// SkipQueryPlanCacheDirective returns true if skip query plan cache directive is set to true in query. +func SkipQueryPlanCacheDirective(stmt Statement) bool { + switch stmt := stmt.(type) { + case *Select: + directives := ExtractCommentDirectives(stmt.Comments) + if directives.IsSet(DirectiveSkipQueryPlanCache) { + return true + } + case *Insert: + directives := ExtractCommentDirectives(stmt.Comments) + if directives.IsSet(DirectiveSkipQueryPlanCache) { + return true + } + case *Update: + directives := ExtractCommentDirectives(stmt.Comments) + if directives.IsSet(DirectiveSkipQueryPlanCache) { + return true + } + case *Delete: + directives := ExtractCommentDirectives(stmt.Comments) + if directives.IsSet(DirectiveSkipQueryPlanCache) { + return true + } + default: + return false + } + return false +} diff --git a/vendor/github.com/xwb1989/sqlparser/dependency/bytes2/buffer.go b/vendor/github.com/xwb1989/sqlparser/dependency/bytes2/buffer.go new file mode 100644 index 000000000..72f8fc6e4 --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/dependency/bytes2/buffer.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package bytes2 + +// Buffer implements a subset of the write portion of +// bytes.Buffer, but more efficiently. This is meant to +// be used in very high QPS operations, especially for +// WriteByte, and without abstracting it as a Writer. +// Function signatures contain errors for compatibility, +// but they do not return errors. 
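+//
+// A usage sketch (illustrative, not upstream documentation):
+//
+//	buf := NewBuffer(nil)
+//	buf.WriteString("select ")
+//	buf.WriteByte('*')
+//	_ = buf.String() // "select *"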
+type Buffer struct {
+	bytes []byte
+}
+
+// NewBuffer is equivalent to bytes.NewBuffer.
+func NewBuffer(b []byte) *Buffer {
+	return &Buffer{bytes: b}
+}
+
+// Write is equivalent to bytes.Buffer.Write.
+func (buf *Buffer) Write(b []byte) (int, error) {
+	buf.bytes = append(buf.bytes, b...)
+	return len(b), nil
+}
+
+// WriteString is equivalent to bytes.Buffer.WriteString.
+func (buf *Buffer) WriteString(s string) (int, error) {
+	buf.bytes = append(buf.bytes, s...)
+	return len(s), nil
+}
+
+// WriteByte is equivalent to bytes.Buffer.WriteByte.
+func (buf *Buffer) WriteByte(b byte) error {
+	buf.bytes = append(buf.bytes, b)
+	return nil
+}
+
+// Bytes is equivalent to bytes.Buffer.Bytes.
+func (buf *Buffer) Bytes() []byte {
+	return buf.bytes
+}
+
+// String is equivalent to bytes.Buffer.String.
+func (buf *Buffer) String() string {
+	return string(buf.bytes)
+}
+
+// Len is equivalent to bytes.Buffer.Len.
+func (buf *Buffer) Len() int {
+	return len(buf.bytes)
+}
diff --git a/vendor/github.com/xwb1989/sqlparser/dependency/hack/hack.go b/vendor/github.com/xwb1989/sqlparser/dependency/hack/hack.go
new file mode 100644
index 000000000..e6344ad99
--- /dev/null
+++ b/vendor/github.com/xwb1989/sqlparser/dependency/hack/hack.go
@@ -0,0 +1,79 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package hack gives you some efficient functionality at the cost of
+// breaking some Go rules.
+package hack
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// StringArena lets you consolidate allocations for a group of strings
+// that have similar lifetimes
+type StringArena struct {
+	buf []byte
+	str string
+}
+
+// NewStringArena creates an arena of the specified size.
+func NewStringArena(size int) *StringArena {
+	sa := &StringArena{buf: make([]byte, 0, size)}
+	pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&sa.buf))
+	pstring := (*reflect.StringHeader)(unsafe.Pointer(&sa.str))
+	pstring.Data = pbytes.Data
+	pstring.Len = pbytes.Cap
+	return sa
+}
+
+// NewString copies a byte slice into the arena and returns it as a string.
+// If the arena is full, it returns a traditional go string.
+func (sa *StringArena) NewString(b []byte) string {
+	if len(b) == 0 {
+		return ""
+	}
+	if len(sa.buf)+len(b) > cap(sa.buf) {
+		return string(b)
+	}
+	start := len(sa.buf)
+	sa.buf = append(sa.buf, b...)
+	return sa.str[start : start+len(b)]
+}
+
+// SpaceLeft returns the amount of space left in the arena.
+func (sa *StringArena) SpaceLeft() int {
+	return cap(sa.buf) - len(sa.buf)
+}
+
+// String force casts a []byte to a string.
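+// The returned string shares the backing array of b, so mutating b
+// afterwards also changes the supposedly immutable string.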
+// USE AT YOUR OWN RISK +func String(b []byte) (s string) { + if len(b) == 0 { + return "" + } + pbytes := (*reflect.SliceHeader)(unsafe.Pointer(&b)) + pstring := (*reflect.StringHeader)(unsafe.Pointer(&s)) + pstring.Data = pbytes.Data + pstring.Len = pbytes.Len + return +} + +// StringPointer returns &s[0], which is not allowed in go +func StringPointer(s string) unsafe.Pointer { + pstring := (*reflect.StringHeader)(unsafe.Pointer(&s)) + return unsafe.Pointer(pstring.Data) +} diff --git a/vendor/github.com/xwb1989/sqlparser/dependency/querypb/query.pb.go b/vendor/github.com/xwb1989/sqlparser/dependency/querypb/query.pb.go new file mode 100644 index 000000000..db9f924c1 --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/dependency/querypb/query.pb.go @@ -0,0 +1,2734 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: query.proto + +/* +Package query is a generated protocol buffer package. + +It is generated from these files: + query.proto + +It has these top-level messages: + Target + VTGateCallerID + EventToken + Value + BindVariable + BoundQuery + ExecuteOptions + Field + Row + ResultExtras + QueryResult + StreamEvent + ExecuteRequest + ExecuteResponse + ResultWithError + ExecuteBatchRequest + ExecuteBatchResponse + StreamExecuteRequest + StreamExecuteResponse + BeginRequest + BeginResponse + CommitRequest + CommitResponse + RollbackRequest + RollbackResponse + PrepareRequest + PrepareResponse + CommitPreparedRequest + CommitPreparedResponse + RollbackPreparedRequest + RollbackPreparedResponse + CreateTransactionRequest + CreateTransactionResponse + StartCommitRequest + StartCommitResponse + SetRollbackRequest + SetRollbackResponse + ConcludeTransactionRequest + ConcludeTransactionResponse + ReadTransactionRequest + ReadTransactionResponse + BeginExecuteRequest + BeginExecuteResponse + BeginExecuteBatchRequest + BeginExecuteBatchResponse + MessageStreamRequest + MessageStreamResponse + MessageAckRequest + MessageAckResponse + SplitQueryRequest + QuerySplit + SplitQueryResponse + StreamHealthRequest + RealtimeStats + StreamHealthResponse + UpdateStreamRequest + UpdateStreamResponse + TransactionMetadata +*/ +package querypb + +import "strconv" + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. 
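+// For example (illustrative): EnumName(Flag_name, 256) returns "ISINTEGRAL",
+// while EnumName(Flag_name, 3) falls back to "3".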
+func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// Flags sent from the MySQL C API +type MySqlFlag int32 + +const ( + MySqlFlag_EMPTY MySqlFlag = 0 + MySqlFlag_NOT_NULL_FLAG MySqlFlag = 1 + MySqlFlag_PRI_KEY_FLAG MySqlFlag = 2 + MySqlFlag_UNIQUE_KEY_FLAG MySqlFlag = 4 + MySqlFlag_MULTIPLE_KEY_FLAG MySqlFlag = 8 + MySqlFlag_BLOB_FLAG MySqlFlag = 16 + MySqlFlag_UNSIGNED_FLAG MySqlFlag = 32 + MySqlFlag_ZEROFILL_FLAG MySqlFlag = 64 + MySqlFlag_BINARY_FLAG MySqlFlag = 128 + MySqlFlag_ENUM_FLAG MySqlFlag = 256 + MySqlFlag_AUTO_INCREMENT_FLAG MySqlFlag = 512 + MySqlFlag_TIMESTAMP_FLAG MySqlFlag = 1024 + MySqlFlag_SET_FLAG MySqlFlag = 2048 + MySqlFlag_NO_DEFAULT_VALUE_FLAG MySqlFlag = 4096 + MySqlFlag_ON_UPDATE_NOW_FLAG MySqlFlag = 8192 + MySqlFlag_NUM_FLAG MySqlFlag = 32768 + MySqlFlag_PART_KEY_FLAG MySqlFlag = 16384 + MySqlFlag_GROUP_FLAG MySqlFlag = 32768 + MySqlFlag_UNIQUE_FLAG MySqlFlag = 65536 + MySqlFlag_BINCMP_FLAG MySqlFlag = 131072 +) + +var MySqlFlag_name = map[int32]string{ + 0: "EMPTY", + 1: "NOT_NULL_FLAG", + 2: "PRI_KEY_FLAG", + 4: "UNIQUE_KEY_FLAG", + 8: "MULTIPLE_KEY_FLAG", + 16: "BLOB_FLAG", + 32: "UNSIGNED_FLAG", + 64: "ZEROFILL_FLAG", + 128: "BINARY_FLAG", + 256: "ENUM_FLAG", + 512: "AUTO_INCREMENT_FLAG", + 1024: "TIMESTAMP_FLAG", + 2048: "SET_FLAG", + 4096: "NO_DEFAULT_VALUE_FLAG", + 8192: "ON_UPDATE_NOW_FLAG", + 32768: "NUM_FLAG", + 16384: "PART_KEY_FLAG", + // Duplicate value: 32768: "GROUP_FLAG", + 65536: "UNIQUE_FLAG", + 131072: "BINCMP_FLAG", +} +var MySqlFlag_value = map[string]int32{ + "EMPTY": 0, + "NOT_NULL_FLAG": 1, + "PRI_KEY_FLAG": 2, + "UNIQUE_KEY_FLAG": 4, + "MULTIPLE_KEY_FLAG": 8, + "BLOB_FLAG": 16, + "UNSIGNED_FLAG": 32, + "ZEROFILL_FLAG": 64, + "BINARY_FLAG": 128, + "ENUM_FLAG": 256, + "AUTO_INCREMENT_FLAG": 512, + "TIMESTAMP_FLAG": 1024, + "SET_FLAG": 2048, + "NO_DEFAULT_VALUE_FLAG": 4096, + "ON_UPDATE_NOW_FLAG": 8192, + "NUM_FLAG": 32768, + "PART_KEY_FLAG": 16384, + "GROUP_FLAG": 32768, + "UNIQUE_FLAG": 65536, + "BINCMP_FLAG": 131072, +} + +func (x MySqlFlag) String() string { + return EnumName(MySqlFlag_name, int32(x)) +} + +// Flag allows us to qualify types by their common properties. +type Flag int32 + +const ( + Flag_NONE Flag = 0 + Flag_ISINTEGRAL Flag = 256 + Flag_ISUNSIGNED Flag = 512 + Flag_ISFLOAT Flag = 1024 + Flag_ISQUOTED Flag = 2048 + Flag_ISTEXT Flag = 4096 + Flag_ISBINARY Flag = 8192 +) + +var Flag_name = map[int32]string{ + 0: "NONE", + 256: "ISINTEGRAL", + 512: "ISUNSIGNED", + 1024: "ISFLOAT", + 2048: "ISQUOTED", + 4096: "ISTEXT", + 8192: "ISBINARY", +} +var Flag_value = map[string]int32{ + "NONE": 0, + "ISINTEGRAL": 256, + "ISUNSIGNED": 512, + "ISFLOAT": 1024, + "ISQUOTED": 2048, + "ISTEXT": 4096, + "ISBINARY": 8192, +} + +func (x Flag) String() string { + return EnumName(Flag_name, int32(x)) +} + +// Type defines the various supported data types in bind vars +// and query results. +type Type int32 + +const ( + // NULL_TYPE specifies a NULL type. + Type_NULL_TYPE Type = 0 + // INT8 specifies a TINYINT type. + // Properties: 1, IsNumber. + Type_INT8 Type = 257 + // UINT8 specifies a TINYINT UNSIGNED type. + // Properties: 2, IsNumber, IsUnsigned. + Type_UINT8 Type = 770 + // INT16 specifies a SMALLINT type. + // Properties: 3, IsNumber. + Type_INT16 Type = 259 + // UINT16 specifies a SMALLINT UNSIGNED type. + // Properties: 4, IsNumber, IsUnsigned. + Type_UINT16 Type = 772 + // INT24 specifies a MEDIUMINT type. + // Properties: 5, IsNumber. 
+	Type_INT24 Type = 261
+	// UINT24 specifies a MEDIUMINT UNSIGNED type.
+	// Properties: 6, IsNumber, IsUnsigned.
+	Type_UINT24 Type = 774
+	// INT32 specifies an INTEGER type.
+	// Properties: 7, IsNumber.
+	Type_INT32 Type = 263
+	// UINT32 specifies an INTEGER UNSIGNED type.
+	// Properties: 8, IsNumber, IsUnsigned.
+	Type_UINT32 Type = 776
+	// INT64 specifies a BIGINT type.
+	// Properties: 9, IsNumber.
+	Type_INT64 Type = 265
+	// UINT64 specifies a BIGINT UNSIGNED type.
+	// Properties: 10, IsNumber, IsUnsigned.
+	Type_UINT64 Type = 778
+	// FLOAT32 specifies a FLOAT type.
+	// Properties: 11, IsFloat.
+	Type_FLOAT32 Type = 1035
+	// FLOAT64 specifies a DOUBLE or REAL type.
+	// Properties: 12, IsFloat.
+	Type_FLOAT64 Type = 1036
+	// TIMESTAMP specifies a TIMESTAMP type.
+	// Properties: 13, IsQuoted.
+	Type_TIMESTAMP Type = 2061
+	// DATE specifies a DATE type.
+	// Properties: 14, IsQuoted.
+	Type_DATE Type = 2062
+	// TIME specifies a TIME type.
+	// Properties: 15, IsQuoted.
+	Type_TIME Type = 2063
+	// DATETIME specifies a DATETIME type.
+	// Properties: 16, IsQuoted.
+	Type_DATETIME Type = 2064
+	// YEAR specifies a YEAR type.
+	// Properties: 17, IsNumber, IsUnsigned.
+	Type_YEAR Type = 785
+	// DECIMAL specifies a DECIMAL or NUMERIC type.
+	// Properties: 18, None.
+	Type_DECIMAL Type = 18
+	// TEXT specifies a TEXT type.
+	// Properties: 19, IsQuoted, IsText.
+	Type_TEXT Type = 6163
+	// BLOB specifies a BLOB type.
+	// Properties: 20, IsQuoted, IsBinary.
+	Type_BLOB Type = 10260
+	// VARCHAR specifies a VARCHAR type.
+	// Properties: 21, IsQuoted, IsText.
+	Type_VARCHAR Type = 6165
+	// VARBINARY specifies a VARBINARY type.
+	// Properties: 22, IsQuoted, IsBinary.
+	Type_VARBINARY Type = 10262
+	// CHAR specifies a CHAR type.
+	// Properties: 23, IsQuoted, IsText.
+	Type_CHAR Type = 6167
+	// BINARY specifies a BINARY type.
+	// Properties: 24, IsQuoted, IsBinary.
+	Type_BINARY Type = 10264
+	// BIT specifies a BIT type.
+	// Properties: 25, IsQuoted.
+	Type_BIT Type = 2073
+	// ENUM specifies an ENUM type.
+	// Properties: 26, IsQuoted.
+	Type_ENUM Type = 2074
+	// SET specifies a SET type.
+	// Properties: 27, IsQuoted.
+	Type_SET Type = 2075
+	// TUPLE specifies a tuple. This cannot
+	// be returned in a QueryResult, but it can
+	// be sent as a bind var.
+	// Properties: 28, None.
+	Type_TUPLE Type = 28
+	// GEOMETRY specifies a GEOMETRY type.
+	// Properties: 29, IsQuoted.
+	Type_GEOMETRY Type = 2077
+	// JSON specifies a JSON type.
+	// Properties: 30, IsQuoted.
+	Type_JSON Type = 2078
+	// EXPRESSION specifies a SQL expression.
+	// This type is for internal use only.
+	// Properties: 31, None.
+ Type_EXPRESSION Type = 31 +) + +var Type_name = map[int32]string{ + 0: "NULL_TYPE", + 257: "INT8", + 770: "UINT8", + 259: "INT16", + 772: "UINT16", + 261: "INT24", + 774: "UINT24", + 263: "INT32", + 776: "UINT32", + 265: "INT64", + 778: "UINT64", + 1035: "FLOAT32", + 1036: "FLOAT64", + 2061: "TIMESTAMP", + 2062: "DATE", + 2063: "TIME", + 2064: "DATETIME", + 785: "YEAR", + 18: "DECIMAL", + 6163: "TEXT", + 10260: "BLOB", + 6165: "VARCHAR", + 10262: "VARBINARY", + 6167: "CHAR", + 10264: "BINARY", + 2073: "BIT", + 2074: "ENUM", + 2075: "SET", + 28: "TUPLE", + 2077: "GEOMETRY", + 2078: "JSON", + 31: "EXPRESSION", +} +var Type_value = map[string]int32{ + "NULL_TYPE": 0, + "INT8": 257, + "UINT8": 770, + "INT16": 259, + "UINT16": 772, + "INT24": 261, + "UINT24": 774, + "INT32": 263, + "UINT32": 776, + "INT64": 265, + "UINT64": 778, + "FLOAT32": 1035, + "FLOAT64": 1036, + "TIMESTAMP": 2061, + "DATE": 2062, + "TIME": 2063, + "DATETIME": 2064, + "YEAR": 785, + "DECIMAL": 18, + "TEXT": 6163, + "BLOB": 10260, + "VARCHAR": 6165, + "VARBINARY": 10262, + "CHAR": 6167, + "BINARY": 10264, + "BIT": 2073, + "ENUM": 2074, + "SET": 2075, + "TUPLE": 28, + "GEOMETRY": 2077, + "JSON": 2078, + "EXPRESSION": 31, +} + +func (x Type) String() string { + return EnumName(Type_name, int32(x)) +} + +// TransactionState represents the state of a distributed transaction. +type TransactionState int32 + +const ( + TransactionState_UNKNOWN TransactionState = 0 + TransactionState_PREPARE TransactionState = 1 + TransactionState_COMMIT TransactionState = 2 + TransactionState_ROLLBACK TransactionState = 3 +) + +var TransactionState_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PREPARE", + 2: "COMMIT", + 3: "ROLLBACK", +} +var TransactionState_value = map[string]int32{ + "UNKNOWN": 0, + "PREPARE": 1, + "COMMIT": 2, + "ROLLBACK": 3, +} + +func (x TransactionState) String() string { + return EnumName(TransactionState_name, int32(x)) +} + +type ExecuteOptions_IncludedFields int32 + +const ( + ExecuteOptions_TYPE_AND_NAME ExecuteOptions_IncludedFields = 0 + ExecuteOptions_TYPE_ONLY ExecuteOptions_IncludedFields = 1 + ExecuteOptions_ALL ExecuteOptions_IncludedFields = 2 +) + +var ExecuteOptions_IncludedFields_name = map[int32]string{ + 0: "TYPE_AND_NAME", + 1: "TYPE_ONLY", + 2: "ALL", +} +var ExecuteOptions_IncludedFields_value = map[string]int32{ + "TYPE_AND_NAME": 0, + "TYPE_ONLY": 1, + "ALL": 2, +} + +func (x ExecuteOptions_IncludedFields) String() string { + return EnumName(ExecuteOptions_IncludedFields_name, int32(x)) +} + +type ExecuteOptions_Workload int32 + +const ( + ExecuteOptions_UNSPECIFIED ExecuteOptions_Workload = 0 + ExecuteOptions_OLTP ExecuteOptions_Workload = 1 + ExecuteOptions_OLAP ExecuteOptions_Workload = 2 + ExecuteOptions_DBA ExecuteOptions_Workload = 3 +) + +var ExecuteOptions_Workload_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "OLTP", + 2: "OLAP", + 3: "DBA", +} +var ExecuteOptions_Workload_value = map[string]int32{ + "UNSPECIFIED": 0, + "OLTP": 1, + "OLAP": 2, + "DBA": 3, +} + +func (x ExecuteOptions_Workload) String() string { + return EnumName(ExecuteOptions_Workload_name, int32(x)) +} + +type ExecuteOptions_TransactionIsolation int32 + +const ( + ExecuteOptions_DEFAULT ExecuteOptions_TransactionIsolation = 0 + ExecuteOptions_REPEATABLE_READ ExecuteOptions_TransactionIsolation = 1 + ExecuteOptions_READ_COMMITTED ExecuteOptions_TransactionIsolation = 2 + ExecuteOptions_READ_UNCOMMITTED ExecuteOptions_TransactionIsolation = 3 + ExecuteOptions_SERIALIZABLE ExecuteOptions_TransactionIsolation = 4 +) + 
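+// Every enum in this file follows the same generated pattern: a const block,
+// a name map, a value map, and a String method that delegates to EnumName.
+// A usage sketch (illustrative, not part of the upstream file):
+//
+//	ExecuteOptions_SERIALIZABLE.String()              // "SERIALIZABLE"
+//	ExecuteOptions_TransactionIsolation(42).String()  // "42" (no name, falls back)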
+var ExecuteOptions_TransactionIsolation_name = map[int32]string{
+	0: "DEFAULT",
+	1: "REPEATABLE_READ",
+	2: "READ_COMMITTED",
+	3: "READ_UNCOMMITTED",
+	4: "SERIALIZABLE",
+}
+var ExecuteOptions_TransactionIsolation_value = map[string]int32{
+	"DEFAULT":          0,
+	"REPEATABLE_READ":  1,
+	"READ_COMMITTED":   2,
+	"READ_UNCOMMITTED": 3,
+	"SERIALIZABLE":     4,
+}
+
+func (x ExecuteOptions_TransactionIsolation) String() string {
+	return EnumName(ExecuteOptions_TransactionIsolation_name, int32(x))
+}
+
+// The category of one statement.
+type StreamEvent_Statement_Category int32
+
+const (
+	StreamEvent_Statement_Error StreamEvent_Statement_Category = 0
+	StreamEvent_Statement_DML StreamEvent_Statement_Category = 1
+	StreamEvent_Statement_DDL StreamEvent_Statement_Category = 2
+)
+
+var StreamEvent_Statement_Category_name = map[int32]string{
+	0: "Error",
+	1: "DML",
+	2: "DDL",
+}
+var StreamEvent_Statement_Category_value = map[string]int32{
+	"Error": 0,
+	"DML":   1,
+	"DDL":   2,
+}
+
+func (x StreamEvent_Statement_Category) String() string {
+	return EnumName(StreamEvent_Statement_Category_name, int32(x))
+}
+
+type SplitQueryRequest_Algorithm int32
+
+const (
+	SplitQueryRequest_EQUAL_SPLITS SplitQueryRequest_Algorithm = 0
+	SplitQueryRequest_FULL_SCAN SplitQueryRequest_Algorithm = 1
+)
+
+var SplitQueryRequest_Algorithm_name = map[int32]string{
+	0: "EQUAL_SPLITS",
+	1: "FULL_SCAN",
+}
+var SplitQueryRequest_Algorithm_value = map[string]int32{
+	"EQUAL_SPLITS": 0,
+	"FULL_SCAN":    1,
+}
+
+func (x SplitQueryRequest_Algorithm) String() string {
+	return EnumName(SplitQueryRequest_Algorithm_name, int32(x))
+}
+
+/*
+// Target describes what the client expects the tablet to be.
+// If the tablet does not match, an error is returned.
+type Target struct {
+	Keyspace string `protobuf:"bytes,1,opt,name=keyspace" json:"keyspace,omitempty"`
+	Shard string `protobuf:"bytes,2,opt,name=shard" json:"shard,omitempty"`
+	TabletType topodata.TabletType `protobuf:"varint,3,opt,name=tablet_type,json=tabletType,enum=topodata.TabletType" json:"tablet_type,omitempty"`
+}
+
+func (m *Target) Reset() { *m = Target{} }
+func (m *Target) String() string { return "TODO" }
+
+
+
+func (m *Target) GetKeyspace() string {
+	if m != nil {
+		return m.Keyspace
+	}
+	return ""
+}
+
+func (m *Target) GetShard() string {
+	if m != nil {
+		return m.Shard
+	}
+	return ""
+}
+
+func (m *Target) GetTabletType() topodata.TabletType {
+	if m != nil {
+		return m.TabletType
+	}
+	return topodata.TabletType_UNKNOWN
+}
+
+
+// VTGateCallerID is sent by VTGate to VTTablet to describe the
+// caller. If possible, this information is secure. For instance,
+// if using unique certificates that guarantee that VTGate->VTTablet
+// traffic cannot be spoofed, then VTTablet can trust this information,
+// and VTTablet will use it for tablet ACLs, for instance.
+// Because of this security guarantee, this is different than the CallerID
+// structure, which is not secure at all, because it is provided
+// by the Vitess client.
+type VTGateCallerID struct { + Username string `protobuf:"bytes,1,opt,name=username" json:"username,omitempty"` + Groups []string `protobuf:"bytes,2,rep,name=groups" json:"groups,omitempty"` +} + +func (m *VTGateCallerID) Reset() { *m = VTGateCallerID{} } +func (m *VTGateCallerID) String() string { return "TODO" } + + + +func (m *VTGateCallerID) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *VTGateCallerID) GetGroups() []string { + if m != nil { + return m.Groups + } + return nil +} + +// EventToken is a structure that describes a point in time in a +// replication stream on one shard. The most recent known replication +// position can be retrieved from vttablet when executing a query. It +// is also sent with the replication streams from the binlog service. +type EventToken struct { + // timestamp is the MySQL timestamp of the statements. Seconds since Epoch. + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp" json:"timestamp,omitempty"` + // The shard name that applied the statements. Note this is not set when + // streaming from a vttablet. It is only used on the client -> vtgate link. + Shard string `protobuf:"bytes,2,opt,name=shard" json:"shard,omitempty"` + // The position on the replication stream after this statement was applied. + // It is not the transaction ID / GTID, but the position / GTIDSet. + Position string `protobuf:"bytes,3,opt,name=position" json:"position,omitempty"` +} + +func (m *EventToken) Reset() { *m = EventToken{} } +func (m *EventToken) String() string { return "TODO" } + + + +func (m *EventToken) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +func (m *EventToken) GetShard() string { + if m != nil { + return m.Shard + } + return "" +} + +func (m *EventToken) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} +*/ + +// Value represents a typed value. +type Value struct { + Type Type `protobuf:"varint,1,opt,name=type,enum=query.Type" json:"type,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return "TODO" } + +func (m *Value) GetType() Type { + if m != nil { + return m.Type + } + return Type_NULL_TYPE +} + +func (m *Value) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +// BindVariable represents a single bind variable in a Query. +type BindVariable struct { + Type Type `protobuf:"varint,1,opt,name=type,enum=query.Type" json:"type,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + // values are set if type is TUPLE. + Values []*Value `protobuf:"bytes,3,rep,name=values" json:"values,omitempty"` +} + +func (m *BindVariable) Reset() { *m = BindVariable{} } +func (m *BindVariable) String() string { return "TODO" } + +func (m *BindVariable) GetType() Type { + if m != nil { + return m.Type + } + return Type_NULL_TYPE +} + +func (m *BindVariable) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func (m *BindVariable) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +// BoundQuery is a query with its bind variables +type BoundQuery struct { + // sql is the SQL query to execute + Sql string `protobuf:"bytes,1,opt,name=sql" json:"sql,omitempty"` + // bind_variables is a map of all bind variables to expand in the query. + // nil values are not allowed. Use NULL_TYPE to express a NULL value. 
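+	// A construction sketch (illustrative, not upstream documentation):
+	//
+	//	q := BoundQuery{
+	//		Sql:           "select * from t where id = :id",
+	//		BindVariables: map[string]*BindVariable{"id": {Type: Type_INT64, Value: []byte("42")}},
+	//	}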
+ BindVariables map[string]*BindVariable `protobuf:"bytes,2,rep,name=bind_variables,json=bindVariables" json:"bind_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *BoundQuery) Reset() { *m = BoundQuery{} } +func (m *BoundQuery) String() string { return "TODO" } + +func (m *BoundQuery) GetSql() string { + if m != nil { + return m.Sql + } + return "" +} + +func (m *BoundQuery) GetBindVariables() map[string]*BindVariable { + if m != nil { + return m.BindVariables + } + return nil +} + +/* +// ExecuteOptions is passed around for all Execute calls. +type ExecuteOptions struct { + // If set, we will try to include an EventToken with the responses. + IncludeEventToken bool `protobuf:"varint,2,opt,name=include_event_token,json=includeEventToken" json:"include_event_token,omitempty"` + // If set, the fresher field may be set as a result comparison to this token. + // This is a shortcut so the application doesn't need to care about + // comparing EventTokens. + CompareEventToken *EventToken `protobuf:"bytes,3,opt,name=compare_event_token,json=compareEventToken" json:"compare_event_token,omitempty"` + // Controls what fields are returned in Field message responses from mysql, i.e. + // field name, table name, etc. This is an optimization for high-QPS queries where + // the client knows what it's getting + IncludedFields ExecuteOptions_IncludedFields `protobuf:"varint,4,opt,name=included_fields,json=includedFields,enum=query.ExecuteOptions_IncludedFields" json:"included_fields,omitempty"` + // client_rows_found specifies if rows_affected should return + // rows found instead of rows affected. Behavior is defined + // by MySQL's CLIENT_FOUND_ROWS flag. + ClientFoundRows bool `protobuf:"varint,5,opt,name=client_found_rows,json=clientFoundRows" json:"client_found_rows,omitempty"` + // workload specifies the type of workload: + // OLTP: DMLs allowed, results have row count limit, and + // query timeouts are shorter. + // OLAP: DMLS not allowed, no limit on row count, timeouts + // can be as high as desired. + // DBA: no limit on rowcount or timeout, all queries allowed + // but intended for long DMLs and DDLs. + Workload ExecuteOptions_Workload `protobuf:"varint,6,opt,name=workload,enum=query.ExecuteOptions_Workload" json:"workload,omitempty"` + // sql_select_limit sets an implicit limit on all select statements. Since + // vitess also sets a rowcount limit on queries, the smallest value wins. 
+	SqlSelectLimit int64 `protobuf:"varint,8,opt,name=sql_select_limit,json=sqlSelectLimit" json:"sql_select_limit,omitempty"`
+	TransactionIsolation ExecuteOptions_TransactionIsolation `protobuf:"varint,9,opt,name=transaction_isolation,json=transactionIsolation,enum=query.ExecuteOptions_TransactionIsolation" json:"transaction_isolation,omitempty"`
+}
+
+func (m *ExecuteOptions) Reset() { *m = ExecuteOptions{} }
+func (m *ExecuteOptions) String() string { return "TODO" }
+
+
+
+func (m *ExecuteOptions) GetIncludeEventToken() bool {
+	if m != nil {
+		return m.IncludeEventToken
+	}
+	return false
+}
+
+func (m *ExecuteOptions) GetCompareEventToken() *EventToken {
+	if m != nil {
+		return m.CompareEventToken
+	}
+	return nil
+}
+
+func (m *ExecuteOptions) GetIncludedFields() ExecuteOptions_IncludedFields {
+	if m != nil {
+		return m.IncludedFields
+	}
+	return ExecuteOptions_TYPE_AND_NAME
+}
+
+func (m *ExecuteOptions) GetClientFoundRows() bool {
+	if m != nil {
+		return m.ClientFoundRows
+	}
+	return false
+}
+
+func (m *ExecuteOptions) GetWorkload() ExecuteOptions_Workload {
+	if m != nil {
+		return m.Workload
+	}
+	return ExecuteOptions_UNSPECIFIED
+}
+
+func (m *ExecuteOptions) GetSqlSelectLimit() int64 {
+	if m != nil {
+		return m.SqlSelectLimit
+	}
+	return 0
+}
+
+func (m *ExecuteOptions) GetTransactionIsolation() ExecuteOptions_TransactionIsolation {
+	if m != nil {
+		return m.TransactionIsolation
+	}
+	return ExecuteOptions_DEFAULT
+}
+
+// Field describes a single column returned by a query
+type Field struct {
+	// name of the field as returned by mysql C API
+	Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// vitess-defined type. Conversion function is in sqltypes package.
+	Type Type `protobuf:"varint,2,opt,name=type,enum=query.Type" json:"type,omitempty"`
+	// Remaining fields from mysql C API.
+	// These fields are only populated when ExecuteOptions.included_fields
+	// is set to IncludedFields.ALL.
+	Table string `protobuf:"bytes,3,opt,name=table" json:"table,omitempty"`
+	OrgTable string `protobuf:"bytes,4,opt,name=org_table,json=orgTable" json:"org_table,omitempty"`
+	Database string `protobuf:"bytes,5,opt,name=database" json:"database,omitempty"`
+	OrgName string `protobuf:"bytes,6,opt,name=org_name,json=orgName" json:"org_name,omitempty"`
+	// column_length is really a uint32. All 32 bits can be used.
+	ColumnLength uint32 `protobuf:"varint,7,opt,name=column_length,json=columnLength" json:"column_length,omitempty"`
+	// charset is actually a uint16. Only the lower 16 bits are used.
+	Charset uint32 `protobuf:"varint,8,opt,name=charset" json:"charset,omitempty"`
+	// decimals is actually a uint8. Only the lower 8 bits are used.
+	Decimals uint32 `protobuf:"varint,9,opt,name=decimals" json:"decimals,omitempty"`
+	// flags is actually a uint16. Only the lower 16 bits are used.
+	Flags uint32 `protobuf:"varint,10,opt,name=flags" json:"flags,omitempty"`
+}
+
+func (m *Field) Reset() { *m = Field{} }
+func (m *Field) String() string { return "TODO" }
+
+
+
+func (m *Field) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Field) GetType() Type {
+	if m != nil {
+		return m.Type
+	}
+	return Type_NULL_TYPE
+}
+
+func (m *Field) GetTable() string {
+	if m != nil {
+		return m.Table
+	}
+	return ""
+}
+
+func (m *Field) GetOrgTable() string {
+	if m != nil {
+		return m.OrgTable
+	}
+	return ""
+}
+
+func (m *Field) GetDatabase() string {
+	if m != nil {
+		return m.Database
+	}
+	return ""
+}
+
+func (m *Field) GetOrgName() string {
+	if m != nil {
+		return m.OrgName
+	}
+	return ""
+}
+
+func (m *Field) GetColumnLength() uint32 {
+	if m != nil {
+		return m.ColumnLength
+	}
+	return 0
+}
+
+func (m *Field) GetCharset() uint32 {
+	if m != nil {
+		return m.Charset
+	}
+	return 0
+}
+
+func (m *Field) GetDecimals() uint32 {
+	if m != nil {
+		return m.Decimals
+	}
+	return 0
+}
+
+func (m *Field) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+// Row is a database row.
+type Row struct {
+	// lengths contains the length of each value in values.
+	// A length of -1 means that the field is NULL. While
+	// reading values, you have to accumulate the length
+	// to know the offset where the next value begins in values.
+	Lengths []int64 `protobuf:"zigzag64,1,rep,packed,name=lengths" json:"lengths,omitempty"`
+	// values contains a concatenation of all values in the row.
+	Values []byte `protobuf:"bytes,2,opt,name=values,proto3" json:"values,omitempty"`
+}
+
+func (m *Row) Reset() { *m = Row{} }
+func (m *Row) String() string { return "TODO" }
+
+
+
+func (m *Row) GetLengths() []int64 {
+	if m != nil {
+		return m.Lengths
+	}
+	return nil
+}
+
+func (m *Row) GetValues() []byte {
+	if m != nil {
+		return m.Values
+	}
+	return nil
+}
+
+// ResultExtras contains optional out-of-band information. Usually the
+// extras are requested by adding ExecuteOptions flags.
+type ResultExtras struct {
+	// event_token is populated if the include_event_token flag is set
+	// in ExecuteOptions.
+	EventToken *EventToken `protobuf:"bytes,1,opt,name=event_token,json=eventToken" json:"event_token,omitempty"`
+	// If set, it means the data returned with this result is fresher
+	// than the compare_token passed in the ExecuteOptions.
+	Fresher bool `protobuf:"varint,2,opt,name=fresher" json:"fresher,omitempty"`
+}
+
+func (m *ResultExtras) Reset() { *m = ResultExtras{} }
+func (m *ResultExtras) String() string { return "TODO" }
+
+
+
+func (m *ResultExtras) GetEventToken() *EventToken {
+	if m != nil {
+		return m.EventToken
+	}
+	return nil
+}
+
+func (m *ResultExtras) GetFresher() bool {
+	if m != nil {
+		return m.Fresher
+	}
+	return false
+}
+
+// QueryResult is returned by Execute and ExecuteStream.
+//
+// As returned by Execute, len(fields) is always equal to len(row)
+// (for each row in rows).
+//
+// As returned by StreamExecute, the first QueryResult has the fields
+// set, and subsequent QueryResult have rows set. And as with Execute,
+// len(QueryResult[0].fields) is always equal to len(row) (for each
+// row in rows for each QueryResult in QueryResult[1:]).
+type QueryResult struct { + Fields []*Field `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty"` + RowsAffected uint64 `protobuf:"varint,2,opt,name=rows_affected,json=rowsAffected" json:"rows_affected,omitempty"` + InsertId uint64 `protobuf:"varint,3,opt,name=insert_id,json=insertId" json:"insert_id,omitempty"` + Rows []*Row `protobuf:"bytes,4,rep,name=rows" json:"rows,omitempty"` + Extras *ResultExtras `protobuf:"bytes,5,opt,name=extras" json:"extras,omitempty"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return "TODO" } + + + +func (m *QueryResult) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *QueryResult) GetRowsAffected() uint64 { + if m != nil { + return m.RowsAffected + } + return 0 +} + +func (m *QueryResult) GetInsertId() uint64 { + if m != nil { + return m.InsertId + } + return 0 +} + +func (m *QueryResult) GetRows() []*Row { + if m != nil { + return m.Rows + } + return nil +} + +func (m *QueryResult) GetExtras() *ResultExtras { + if m != nil { + return m.Extras + } + return nil +} + +// StreamEvent describes a set of transformations that happened as a +// single transactional unit on a server. It is streamed back by the +// Update Stream calls. +type StreamEvent struct { + // The statements in this transaction. + Statements []*StreamEvent_Statement `protobuf:"bytes,1,rep,name=statements" json:"statements,omitempty"` + // The Event Token for this event. + EventToken *EventToken `protobuf:"bytes,2,opt,name=event_token,json=eventToken" json:"event_token,omitempty"` +} + +func (m *StreamEvent) Reset() { *m = StreamEvent{} } +func (m *StreamEvent) String() string { return "TODO" } + + + +func (m *StreamEvent) GetStatements() []*StreamEvent_Statement { + if m != nil { + return m.Statements + } + return nil +} + +func (m *StreamEvent) GetEventToken() *EventToken { + if m != nil { + return m.EventToken + } + return nil +} + +// One individual Statement in a transaction. +type StreamEvent_Statement struct { + Category StreamEvent_Statement_Category `protobuf:"varint,1,opt,name=category,enum=query.StreamEvent_Statement_Category" json:"category,omitempty"` + // table_name, primary_key_fields and primary_key_values are set for DML. + TableName string `protobuf:"bytes,2,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + PrimaryKeyFields []*Field `protobuf:"bytes,3,rep,name=primary_key_fields,json=primaryKeyFields" json:"primary_key_fields,omitempty"` + PrimaryKeyValues []*Row `protobuf:"bytes,4,rep,name=primary_key_values,json=primaryKeyValues" json:"primary_key_values,omitempty"` + // sql is set for all queries. + // FIXME(alainjobart) we may not need it for DMLs. 
+	Sql []byte `protobuf:"bytes,5,opt,name=sql,proto3" json:"sql,omitempty"`
+}
+
+func (m *StreamEvent_Statement) Reset() { *m = StreamEvent_Statement{} }
+func (m *StreamEvent_Statement) String() string { return "TODO" }
+
+
+
+func (m *StreamEvent_Statement) GetCategory() StreamEvent_Statement_Category {
+	if m != nil {
+		return m.Category
+	}
+	return StreamEvent_Statement_Error
+}
+
+func (m *StreamEvent_Statement) GetTableName() string {
+	if m != nil {
+		return m.TableName
+	}
+	return ""
+}
+
+func (m *StreamEvent_Statement) GetPrimaryKeyFields() []*Field {
+	if m != nil {
+		return m.PrimaryKeyFields
+	}
+	return nil
+}
+
+func (m *StreamEvent_Statement) GetPrimaryKeyValues() []*Row {
+	if m != nil {
+		return m.PrimaryKeyValues
+	}
+	return nil
+}
+
+func (m *StreamEvent_Statement) GetSql() []byte {
+	if m != nil {
+		return m.Sql
+	}
+	return nil
+}
+
+
+// ExecuteRequest is the payload to Execute
+type ExecuteRequest struct {
+	EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"`
+	ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"`
+	Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"`
+	Query *BoundQuery `protobuf:"bytes,4,opt,name=query" json:"query,omitempty"`
+	TransactionId int64 `protobuf:"varint,5,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"`
+	Options *ExecuteOptions `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"`
+}
+
+func (m *ExecuteRequest) Reset() { *m = ExecuteRequest{} }
+func (m *ExecuteRequest) String() string { return "TODO" }
+
+
+
+func (m *ExecuteRequest) GetEffectiveCallerId() *vtrpc.CallerID {
+	if m != nil {
+		return m.EffectiveCallerId
+	}
+	return nil
+}
+
+func (m *ExecuteRequest) GetImmediateCallerId() *VTGateCallerID {
+	if m != nil {
+		return m.ImmediateCallerId
+	}
+	return nil
+}
+
+func (m *ExecuteRequest) GetTarget() *Target {
+	if m != nil {
+		return m.Target
+	}
+	return nil
+}
+
+func (m *ExecuteRequest) GetQuery() *BoundQuery {
+	if m != nil {
+		return m.Query
+	}
+	return nil
+}
+
+func (m *ExecuteRequest) GetTransactionId() int64 {
+	if m != nil {
+		return m.TransactionId
+	}
+	return 0
+}
+
+func (m *ExecuteRequest) GetOptions() *ExecuteOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// ExecuteResponse is the returned value from Execute
+type ExecuteResponse struct {
+	Result *QueryResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"`
+}
+
+func (m *ExecuteResponse) Reset() { *m = ExecuteResponse{} }
+func (m *ExecuteResponse) String() string { return "TODO" }
+
+
+
+func (m *ExecuteResponse) GetResult() *QueryResult {
+	if m != nil {
+		return m.Result
+	}
+	return nil
+}
+
+// ResultWithError represents a query response
+// in the form of result or error but not both.
+// TODO: To be used in ExecuteBatchResponse and BeginExecuteBatchResponse.
+type ResultWithError struct {
+	// error contains a query level error, only set if result is unset.
+	Error *vtrpc.RPCError `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"`
+	// result contains the query result, only set if error is unset.
+ Result *QueryResult `protobuf:"bytes,2,opt,name=result" json:"result,omitempty"` +} + +func (m *ResultWithError) Reset() { *m = ResultWithError{} } +func (m *ResultWithError) String() string { return "TODO" } + + + +func (m *ResultWithError) GetError() *vtrpc.RPCError { + if m != nil { + return m.Error + } + return nil +} + +func (m *ResultWithError) GetResult() *QueryResult { + if m != nil { + return m.Result + } + return nil +} + +// ExecuteBatchRequest is the payload to ExecuteBatch +type ExecuteBatchRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + Queries []*BoundQuery `protobuf:"bytes,4,rep,name=queries" json:"queries,omitempty"` + AsTransaction bool `protobuf:"varint,5,opt,name=as_transaction,json=asTransaction" json:"as_transaction,omitempty"` + TransactionId int64 `protobuf:"varint,6,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"` + Options *ExecuteOptions `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"` +} + +func (m *ExecuteBatchRequest) Reset() { *m = ExecuteBatchRequest{} } +func (m *ExecuteBatchRequest) String() string { return "TODO" } + + + +func (m *ExecuteBatchRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *ExecuteBatchRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *ExecuteBatchRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *ExecuteBatchRequest) GetQueries() []*BoundQuery { + if m != nil { + return m.Queries + } + return nil +} + +func (m *ExecuteBatchRequest) GetAsTransaction() bool { + if m != nil { + return m.AsTransaction + } + return false +} + +func (m *ExecuteBatchRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *ExecuteBatchRequest) GetOptions() *ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +// ExecuteBatchResponse is the returned value from ExecuteBatch +type ExecuteBatchResponse struct { + Results []*QueryResult `protobuf:"bytes,1,rep,name=results" json:"results,omitempty"` +} + +func (m *ExecuteBatchResponse) Reset() { *m = ExecuteBatchResponse{} } +func (m *ExecuteBatchResponse) String() string { return "TODO" } + + + +func (m *ExecuteBatchResponse) GetResults() []*QueryResult { + if m != nil { + return m.Results + } + return nil +} + +// StreamExecuteRequest is the payload to StreamExecute +type StreamExecuteRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + Query *BoundQuery `protobuf:"bytes,4,opt,name=query" json:"query,omitempty"` + Options *ExecuteOptions `protobuf:"bytes,5,opt,name=options" json:"options,omitempty"` +} + +func (m *StreamExecuteRequest) Reset() { *m = StreamExecuteRequest{} } +func (m *StreamExecuteRequest) String() string { return "TODO" } + 
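+// The generated GetX accessors that follow all guard against nil receivers,
+// so callers can chain through optional sub-messages without intermediate
+// nil checks. A usage sketch (illustrative, not part of the upstream file):
+//
+//	var req *StreamExecuteRequest  // a nil request
+//	_ = req.GetQuery().GetSql()    // yields "", never panics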
+ + +func (m *StreamExecuteRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *StreamExecuteRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *StreamExecuteRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *StreamExecuteRequest) GetQuery() *BoundQuery { + if m != nil { + return m.Query + } + return nil +} + +func (m *StreamExecuteRequest) GetOptions() *ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +// StreamExecuteResponse is the returned value from StreamExecute +type StreamExecuteResponse struct { + Result *QueryResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"` +} + +func (m *StreamExecuteResponse) Reset() { *m = StreamExecuteResponse{} } +func (m *StreamExecuteResponse) String() string { return "TODO" } + + + +func (m *StreamExecuteResponse) GetResult() *QueryResult { + if m != nil { + return m.Result + } + return nil +} + +// BeginRequest is the payload to Begin +type BeginRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + Options *ExecuteOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"` +} + +func (m *BeginRequest) Reset() { *m = BeginRequest{} } +func (m *BeginRequest) String() string { return "TODO" } + + + +func (m *BeginRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *BeginRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *BeginRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *BeginRequest) GetOptions() *ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +// BeginResponse is the returned value from Begin +type BeginResponse struct { + TransactionId int64 `protobuf:"varint,1,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"` +} + +func (m *BeginResponse) Reset() { *m = BeginResponse{} } +func (m *BeginResponse) String() string { return "TODO" } + + + +func (m *BeginResponse) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +// CommitRequest is the payload to Commit +type CommitRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"` +} + +func (m *CommitRequest) Reset() { *m = CommitRequest{} } +func (m *CommitRequest) String() string { return "TODO" } + + + +func (m *CommitRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *CommitRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil 
{ + return m.ImmediateCallerId + } + return nil +} + +func (m *CommitRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *CommitRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +// CommitResponse is the returned value from Commit +type CommitResponse struct { +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return "TODO" } + + + +// RollbackRequest is the payload to Rollback +type RollbackRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"` +} + +func (m *RollbackRequest) Reset() { *m = RollbackRequest{} } +func (m *RollbackRequest) String() string { return "TODO" } + + + +func (m *RollbackRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *RollbackRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *RollbackRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *RollbackRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +// RollbackResponse is the returned value from Rollback +type RollbackResponse struct { +} + +func (m *RollbackResponse) Reset() { *m = RollbackResponse{} } +func (m *RollbackResponse) String() string { return "TODO" } + + + +// PrepareRequest is the payload to Prepare +type PrepareRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"` + Dtid string `protobuf:"bytes,5,opt,name=dtid" json:"dtid,omitempty"` +} + +func (m *PrepareRequest) Reset() { *m = PrepareRequest{} } +func (m *PrepareRequest) String() string { return "TODO" } + + + +func (m *PrepareRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *PrepareRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *PrepareRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *PrepareRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *PrepareRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// PrepareResponse is the returned value from Prepare +type PrepareResponse struct { +} + +func (m *PrepareResponse) Reset() { *m = PrepareResponse{} } +func (m *PrepareResponse) String() string { return "TODO" } + + + +// CommitPreparedRequest is the payload to CommitPrepared +type 
CommitPreparedRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + Dtid string `protobuf:"bytes,4,opt,name=dtid" json:"dtid,omitempty"` +} + +func (m *CommitPreparedRequest) Reset() { *m = CommitPreparedRequest{} } +func (m *CommitPreparedRequest) String() string { return "TODO" } + + + +func (m *CommitPreparedRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *CommitPreparedRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *CommitPreparedRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *CommitPreparedRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// CommitPreparedResponse is the returned value from CommitPrepared +type CommitPreparedResponse struct { +} + +func (m *CommitPreparedResponse) Reset() { *m = CommitPreparedResponse{} } +func (m *CommitPreparedResponse) String() string { return "TODO" } + + + +// RollbackPreparedRequest is the payload to RollbackPrepared +type RollbackPreparedRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"` + Dtid string `protobuf:"bytes,5,opt,name=dtid" json:"dtid,omitempty"` +} + +func (m *RollbackPreparedRequest) Reset() { *m = RollbackPreparedRequest{} } +func (m *RollbackPreparedRequest) String() string { return "TODO" } + + + +func (m *RollbackPreparedRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *RollbackPreparedRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *RollbackPreparedRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *RollbackPreparedRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *RollbackPreparedRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// RollbackPreparedResponse is the returned value from RollbackPrepared +type RollbackPreparedResponse struct { +} + +func (m *RollbackPreparedResponse) Reset() { *m = RollbackPreparedResponse{} } +func (m *RollbackPreparedResponse) String() string { return "TODO" } + + + +// CreateTransactionRequest is the payload to CreateTransaction +type CreateTransactionRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target 
`protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + Dtid string `protobuf:"bytes,4,opt,name=dtid" json:"dtid,omitempty"` + Participants []*Target `protobuf:"bytes,5,rep,name=participants" json:"participants,omitempty"` +} + +func (m *CreateTransactionRequest) Reset() { *m = CreateTransactionRequest{} } +func (m *CreateTransactionRequest) String() string { return "TODO" } + + + +func (m *CreateTransactionRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *CreateTransactionRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *CreateTransactionRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *CreateTransactionRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +func (m *CreateTransactionRequest) GetParticipants() []*Target { + if m != nil { + return m.Participants + } + return nil +} + +// CreateTransactionResponse is the returned value from CreateTransaction +type CreateTransactionResponse struct { +} + +func (m *CreateTransactionResponse) Reset() { *m = CreateTransactionResponse{} } +func (m *CreateTransactionResponse) String() string { return "TODO" } + + + +// StartCommitRequest is the payload to StartCommit +type StartCommitRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"` + Dtid string `protobuf:"bytes,5,opt,name=dtid" json:"dtid,omitempty"` +} + +func (m *StartCommitRequest) Reset() { *m = StartCommitRequest{} } +func (m *StartCommitRequest) String() string { return "TODO" } + + + +func (m *StartCommitRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *StartCommitRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *StartCommitRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *StartCommitRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *StartCommitRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// StartCommitResponse is the returned value from StartCommit +type StartCommitResponse struct { +} + +func (m *StartCommitResponse) Reset() { *m = StartCommitResponse{} } +func (m *StartCommitResponse) String() string { return "TODO" } + + + +// SetRollbackRequest is the payload to SetRollback +type SetRollbackRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + TransactionId int64 `protobuf:"varint,4,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"` + Dtid string 
`protobuf:"bytes,5,opt,name=dtid" json:"dtid,omitempty"` +} + +func (m *SetRollbackRequest) Reset() { *m = SetRollbackRequest{} } +func (m *SetRollbackRequest) String() string { return "TODO" } + + + +func (m *SetRollbackRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *SetRollbackRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *SetRollbackRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *SetRollbackRequest) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +func (m *SetRollbackRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// SetRollbackResponse is the returned value from SetRollback +type SetRollbackResponse struct { +} + +func (m *SetRollbackResponse) Reset() { *m = SetRollbackResponse{} } +func (m *SetRollbackResponse) String() string { return "TODO" } + + + +// ConcludeTransactionRequest is the payload to ConcludeTransaction +type ConcludeTransactionRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + Dtid string `protobuf:"bytes,4,opt,name=dtid" json:"dtid,omitempty"` +} + +func (m *ConcludeTransactionRequest) Reset() { *m = ConcludeTransactionRequest{} } +func (m *ConcludeTransactionRequest) String() string { return "TODO" } + + + +func (m *ConcludeTransactionRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *ConcludeTransactionRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *ConcludeTransactionRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *ConcludeTransactionRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// ConcludeTransactionResponse is the returned value from ConcludeTransaction +type ConcludeTransactionResponse struct { +} + +func (m *ConcludeTransactionResponse) Reset() { *m = ConcludeTransactionResponse{} } +func (m *ConcludeTransactionResponse) String() string { return "TODO" } + + + +// ReadTransactionRequest is the payload to ReadTransaction +type ReadTransactionRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + Dtid string `protobuf:"bytes,4,opt,name=dtid" json:"dtid,omitempty"` +} + +func (m *ReadTransactionRequest) Reset() { *m = ReadTransactionRequest{} } +func (m *ReadTransactionRequest) String() string { return "TODO" } + + + +func (m *ReadTransactionRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *ReadTransactionRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} 
+ +func (m *ReadTransactionRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *ReadTransactionRequest) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +// ReadTransactionResponse is the returned value from ReadTransaction +type ReadTransactionResponse struct { + Metadata *TransactionMetadata `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` +} + +func (m *ReadTransactionResponse) Reset() { *m = ReadTransactionResponse{} } +func (m *ReadTransactionResponse) String() string { return "TODO" } + + + +func (m *ReadTransactionResponse) GetMetadata() *TransactionMetadata { + if m != nil { + return m.Metadata + } + return nil +} + +// BeginExecuteRequest is the payload to BeginExecute +type BeginExecuteRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + Query *BoundQuery `protobuf:"bytes,4,opt,name=query" json:"query,omitempty"` + Options *ExecuteOptions `protobuf:"bytes,5,opt,name=options" json:"options,omitempty"` +} + +func (m *BeginExecuteRequest) Reset() { *m = BeginExecuteRequest{} } +func (m *BeginExecuteRequest) String() string { return "TODO" } + + + +func (m *BeginExecuteRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *BeginExecuteRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *BeginExecuteRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *BeginExecuteRequest) GetQuery() *BoundQuery { + if m != nil { + return m.Query + } + return nil +} + +func (m *BeginExecuteRequest) GetOptions() *ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +// BeginExecuteResponse is the returned value from BeginExecute +type BeginExecuteResponse struct { + // error contains an application level error if necessary. Note the + // transaction_id may be set, even when an error is returned, if the begin + // worked but the execute failed. + Error *vtrpc.RPCError `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + Result *QueryResult `protobuf:"bytes,2,opt,name=result" json:"result,omitempty"` + // transaction_id might be non-zero even if an error is present. 
+ TransactionId int64 `protobuf:"varint,3,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"` +} + +func (m *BeginExecuteResponse) Reset() { *m = BeginExecuteResponse{} } +func (m *BeginExecuteResponse) String() string { return "TODO" } + + + +func (m *BeginExecuteResponse) GetError() *vtrpc.RPCError { + if m != nil { + return m.Error + } + return nil +} + +func (m *BeginExecuteResponse) GetResult() *QueryResult { + if m != nil { + return m.Result + } + return nil +} + +func (m *BeginExecuteResponse) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +// BeginExecuteBatchRequest is the payload to BeginExecuteBatch +type BeginExecuteBatchRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + Queries []*BoundQuery `protobuf:"bytes,4,rep,name=queries" json:"queries,omitempty"` + AsTransaction bool `protobuf:"varint,5,opt,name=as_transaction,json=asTransaction" json:"as_transaction,omitempty"` + Options *ExecuteOptions `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"` +} + +func (m *BeginExecuteBatchRequest) Reset() { *m = BeginExecuteBatchRequest{} } +func (m *BeginExecuteBatchRequest) String() string { return "TODO" } + + + +func (m *BeginExecuteBatchRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *BeginExecuteBatchRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *BeginExecuteBatchRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *BeginExecuteBatchRequest) GetQueries() []*BoundQuery { + if m != nil { + return m.Queries + } + return nil +} + +func (m *BeginExecuteBatchRequest) GetAsTransaction() bool { + if m != nil { + return m.AsTransaction + } + return false +} + +func (m *BeginExecuteBatchRequest) GetOptions() *ExecuteOptions { + if m != nil { + return m.Options + } + return nil +} + +// BeginExecuteBatchResponse is the returned value from BeginExecuteBatch +type BeginExecuteBatchResponse struct { + // error contains an application level error if necessary. Note the + // transaction_id may be set, even when an error is returned, if the begin + // worked but the execute failed. + Error *vtrpc.RPCError `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` + Results []*QueryResult `protobuf:"bytes,2,rep,name=results" json:"results,omitempty"` + // transaction_id might be non-zero even if an error is present. 
+ TransactionId int64 `protobuf:"varint,3,opt,name=transaction_id,json=transactionId" json:"transaction_id,omitempty"` +} + +func (m *BeginExecuteBatchResponse) Reset() { *m = BeginExecuteBatchResponse{} } +func (m *BeginExecuteBatchResponse) String() string { return "TODO" } + + + +func (m *BeginExecuteBatchResponse) GetError() *vtrpc.RPCError { + if m != nil { + return m.Error + } + return nil +} + +func (m *BeginExecuteBatchResponse) GetResults() []*QueryResult { + if m != nil { + return m.Results + } + return nil +} + +func (m *BeginExecuteBatchResponse) GetTransactionId() int64 { + if m != nil { + return m.TransactionId + } + return 0 +} + +// MessageStreamRequest is the request payload for MessageStream. +type MessageStreamRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + // name is the message table name. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` +} + +func (m *MessageStreamRequest) Reset() { *m = MessageStreamRequest{} } +func (m *MessageStreamRequest) String() string { return "TODO" } + + + +func (m *MessageStreamRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *MessageStreamRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *MessageStreamRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *MessageStreamRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// MessageStreamResponse is a response for MessageStream. +type MessageStreamResponse struct { + Result *QueryResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"` +} + +func (m *MessageStreamResponse) Reset() { *m = MessageStreamResponse{} } +func (m *MessageStreamResponse) String() string { return "TODO" } + + + +func (m *MessageStreamResponse) GetResult() *QueryResult { + if m != nil { + return m.Result + } + return nil +} + +// MessageAckRequest is the request payload for MessageAck. +type MessageAckRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + // name is the message table name. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Ids []*Value `protobuf:"bytes,5,rep,name=ids" json:"ids,omitempty"` +} + +func (m *MessageAckRequest) Reset() { *m = MessageAckRequest{} } +func (m *MessageAckRequest) String() string { return "TODO" } + + + +func (m *MessageAckRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *MessageAckRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *MessageAckRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *MessageAckRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *MessageAckRequest) GetIds() []*Value { + if m != nil { + return m.Ids + } + return nil +} + +// MessageAckResponse is the response for MessageAck. +type MessageAckResponse struct { + // result contains the result of the ack operation. + // Since this acts like a DML, only + // RowsAffected is returned in the result. + Result *QueryResult `protobuf:"bytes,1,opt,name=result" json:"result,omitempty"` +} + +func (m *MessageAckResponse) Reset() { *m = MessageAckResponse{} } +func (m *MessageAckResponse) String() string { return "TODO" } + + + +func (m *MessageAckResponse) GetResult() *QueryResult { + if m != nil { + return m.Result + } + return nil +} + +// SplitQueryRequest is the payload for SplitQuery sent by VTGate to a VTTablet. +// See vtgate.SplitQueryRequest for more details. +type SplitQueryRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + Query *BoundQuery `protobuf:"bytes,4,opt,name=query" json:"query,omitempty"` + SplitColumn []string `protobuf:"bytes,5,rep,name=split_column,json=splitColumn" json:"split_column,omitempty"` + // Exactly one of the following must be nonzero. 
+ SplitCount int64 `protobuf:"varint,6,opt,name=split_count,json=splitCount" json:"split_count,omitempty"` + NumRowsPerQueryPart int64 `protobuf:"varint,8,opt,name=num_rows_per_query_part,json=numRowsPerQueryPart" json:"num_rows_per_query_part,omitempty"` + Algorithm SplitQueryRequest_Algorithm `protobuf:"varint,9,opt,name=algorithm,enum=query.SplitQueryRequest_Algorithm" json:"algorithm,omitempty"` +} + +func (m *SplitQueryRequest) Reset() { *m = SplitQueryRequest{} } +func (m *SplitQueryRequest) String() string { return "TODO" } + + + +func (m *SplitQueryRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *SplitQueryRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *SplitQueryRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *SplitQueryRequest) GetQuery() *BoundQuery { + if m != nil { + return m.Query + } + return nil +} + +func (m *SplitQueryRequest) GetSplitColumn() []string { + if m != nil { + return m.SplitColumn + } + return nil +} + +func (m *SplitQueryRequest) GetSplitCount() int64 { + if m != nil { + return m.SplitCount + } + return 0 +} + +func (m *SplitQueryRequest) GetNumRowsPerQueryPart() int64 { + if m != nil { + return m.NumRowsPerQueryPart + } + return 0 +} + +func (m *SplitQueryRequest) GetAlgorithm() SplitQueryRequest_Algorithm { + if m != nil { + return m.Algorithm + } + return SplitQueryRequest_EQUAL_SPLITS +} + +// QuerySplit represents one query to execute on the tablet +type QuerySplit struct { + // query is the query to execute + Query *BoundQuery `protobuf:"bytes,1,opt,name=query" json:"query,omitempty"` + // row_count is the approximate row count the query will return + RowCount int64 `protobuf:"varint,2,opt,name=row_count,json=rowCount" json:"row_count,omitempty"` +} + +func (m *QuerySplit) Reset() { *m = QuerySplit{} } +func (m *QuerySplit) String() string { return "TODO" } + + + +func (m *QuerySplit) GetQuery() *BoundQuery { + if m != nil { + return m.Query + } + return nil +} + +func (m *QuerySplit) GetRowCount() int64 { + if m != nil { + return m.RowCount + } + return 0 +} + +// SplitQueryResponse is returned by SplitQuery and represents all the queries +// to execute in order to get the entire data set. +type SplitQueryResponse struct { + Queries []*QuerySplit `protobuf:"bytes,1,rep,name=queries" json:"queries,omitempty"` +} + +func (m *SplitQueryResponse) Reset() { *m = SplitQueryResponse{} } +func (m *SplitQueryResponse) String() string { return "TODO" } + + + +func (m *SplitQueryResponse) GetQueries() []*QuerySplit { + if m != nil { + return m.Queries + } + return nil +} + +// StreamHealthRequest is the payload for StreamHealth +type StreamHealthRequest struct { +} + +func (m *StreamHealthRequest) Reset() { *m = StreamHealthRequest{} } +func (m *StreamHealthRequest) String() string { return "TODO" } + + + +// RealtimeStats contains information about the tablet status +type RealtimeStats struct { + // health_error is the last error we got from health check, + // or empty if the server is healthy. This is used for subset selection; + // we do not send queries to servers that are not healthy. + HealthError string `protobuf:"bytes,1,opt,name=health_error,json=healthError" json:"health_error,omitempty"` + // seconds_behind_master is populated for slaves only. It indicates + // how far behind on (MySQL) replication a slave currently is.
It is used + // by clients for subset selection (so we don't try to send traffic + // to tablets that are too far behind). + // NOTE: This field must not be evaluated if "health_error" is not empty. + // TODO(mberlin): Let's switch it to int64 instead? + SecondsBehindMaster uint32 `protobuf:"varint,2,opt,name=seconds_behind_master,json=secondsBehindMaster" json:"seconds_behind_master,omitempty"` + // bin_log_players_count is the number of currently running binlog players. + // if the value is 0, it means that filtered replication is currently not + // running on the tablet. If >0, filtered replication is running. + // NOTE: This field must not be evaluated if "health_error" is not empty. + BinlogPlayersCount int32 `protobuf:"varint,3,opt,name=binlog_players_count,json=binlogPlayersCount" json:"binlog_players_count,omitempty"` + // seconds_behind_master_filtered_replication is populated for the receiving + // master of an ongoing filtered replication only. + // It specifies how far the receiving master lags behind the sending master. + // NOTE: This field must not be evaluated if "health_error" is not empty. + // NOTE: This field must not be evaluated if "bin_log_players_count" is 0. + SecondsBehindMasterFilteredReplication int64 `protobuf:"varint,4,opt,name=seconds_behind_master_filtered_replication,json=secondsBehindMasterFilteredReplication" json:"seconds_behind_master_filtered_replication,omitempty"` + // cpu_usage is used for load-based balancing + CpuUsage float64 `protobuf:"fixed64,5,opt,name=cpu_usage,json=cpuUsage" json:"cpu_usage,omitempty"` + // qps is the average QPS (queries per second) rate in the last XX seconds + // where XX is usually 60 (See query_service_stats.go). + Qps float64 `protobuf:"fixed64,6,opt,name=qps" json:"qps,omitempty"` +} + +func (m *RealtimeStats) Reset() { *m = RealtimeStats{} } +func (m *RealtimeStats) String() string { return "TODO" } + + + +func (m *RealtimeStats) GetHealthError() string { + if m != nil { + return m.HealthError + } + return "" +} + +func (m *RealtimeStats) GetSecondsBehindMaster() uint32 { + if m != nil { + return m.SecondsBehindMaster + } + return 0 +} + +func (m *RealtimeStats) GetBinlogPlayersCount() int32 { + if m != nil { + return m.BinlogPlayersCount + } + return 0 +} + +func (m *RealtimeStats) GetSecondsBehindMasterFilteredReplication() int64 { + if m != nil { + return m.SecondsBehindMasterFilteredReplication + } + return 0 +} + +func (m *RealtimeStats) GetCpuUsage() float64 { + if m != nil { + return m.CpuUsage + } + return 0 +} + +func (m *RealtimeStats) GetQps() float64 { + if m != nil { + return m.Qps + } + return 0 +} + +// StreamHealthResponse is streamed by StreamHealth on a regular basis +type StreamHealthResponse struct { + // target is the current server type. Only queries with that exact Target + // record will be accepted. + Target *Target `protobuf:"bytes,1,opt,name=target" json:"target,omitempty"` + // serving is true iff the tablet is serving. A tablet may not be serving + // if filtered replication is enabled on a master for instance, + // or if a replica should not be used because the keyspace is being resharded. + Serving bool `protobuf:"varint,2,opt,name=serving" json:"serving,omitempty"` + // tablet_externally_reparented_timestamp can be interpreted as the last time + // we knew that this tablet was the MASTER of this shard. + // + // It is used by vtgate when determining the current MASTER of a shard. 
+ // If vtgate sees more than one MASTER tablet, this timestamp is used + // as tiebreaker where the MASTER with the highest timestamp wins. + // Another usage of this timestamp is in go/vt/vtgate/buffer to detect the end + // of a reparent (failover) and stop buffering. + // + // In practice, this field is set to: + // a) the last time the RPC tabletmanager.TabletExternallyReparented was + // called on this tablet (usually done by an external failover tool e.g. + // Orchestrator). The failover tool can call this as long as we are the + // master i.e. even ages after the last reparent occurred. + // OR + // b) the last time an active reparent was executed through a vtctl command + // (InitShardMaster, PlannedReparentShard, EmergencyReparentShard) + // OR + // c) the last time vttablet was started and it initialized its tablet type + // as MASTER because it was recorded as the shard's current master in the + // topology (see go/vt/vttablet/tabletmanager/init_tablet.go) + // OR + // d) 0 if the vttablet was never a MASTER. + TabletExternallyReparentedTimestamp int64 `protobuf:"varint,3,opt,name=tablet_externally_reparented_timestamp,json=tabletExternallyReparentedTimestamp" json:"tablet_externally_reparented_timestamp,omitempty"` + // realtime_stats contains information about the tablet status + RealtimeStats *RealtimeStats `protobuf:"bytes,4,opt,name=realtime_stats,json=realtimeStats" json:"realtime_stats,omitempty"` + // tablet_alias is the alias of the sending tablet. The discovery/healthcheck.go + // code uses it to verify that it's talking to the correct tablet and that it + // hasn't changed in the meantime e.g. due to tablet restarts where ports or + // ips have been reused but assigned differently. + TabletAlias *topodata.TabletAlias `protobuf:"bytes,5,opt,name=tablet_alias,json=tabletAlias" json:"tablet_alias,omitempty"` +} + +func (m *StreamHealthResponse) Reset() { *m = StreamHealthResponse{} } +func (m *StreamHealthResponse) String() string { return "TODO" } + + + +func (m *StreamHealthResponse) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *StreamHealthResponse) GetServing() bool { + if m != nil { + return m.Serving + } + return false +} + +func (m *StreamHealthResponse) GetTabletExternallyReparentedTimestamp() int64 { + if m != nil { + return m.TabletExternallyReparentedTimestamp + } + return 0 +} + +func (m *StreamHealthResponse) GetRealtimeStats() *RealtimeStats { + if m != nil { + return m.RealtimeStats + } + return nil +} + +func (m *StreamHealthResponse) GetTabletAlias() *topodata.TabletAlias { + if m != nil { + return m.TabletAlias + } + return nil +} + +// UpdateStreamRequest is the payload for UpdateStream. At most one of +// position and timestamp can be set. If neither is set, we will start +// streaming from the current binlog position. +type UpdateStreamRequest struct { + EffectiveCallerId *vtrpc.CallerID `protobuf:"bytes,1,opt,name=effective_caller_id,json=effectiveCallerId" json:"effective_caller_id,omitempty"` + ImmediateCallerId *VTGateCallerID `protobuf:"bytes,2,opt,name=immediate_caller_id,json=immediateCallerId" json:"immediate_caller_id,omitempty"` + Target *Target `protobuf:"bytes,3,opt,name=target" json:"target,omitempty"` + // If position is set, we will start the streaming from that replication + // position. Incompatible with timestamp. 
+ Position string `protobuf:"bytes,4,opt,name=position" json:"position,omitempty"` + // If timestamp is set, we will start the streaming from the first + // event in the binlogs that have that timestamp. Incompatible with position. + Timestamp int64 `protobuf:"varint,5,opt,name=timestamp" json:"timestamp,omitempty"` +} + +func (m *UpdateStreamRequest) Reset() { *m = UpdateStreamRequest{} } +func (m *UpdateStreamRequest) String() string { return "TODO" } + + + +func (m *UpdateStreamRequest) GetEffectiveCallerId() *vtrpc.CallerID { + if m != nil { + return m.EffectiveCallerId + } + return nil +} + +func (m *UpdateStreamRequest) GetImmediateCallerId() *VTGateCallerID { + if m != nil { + return m.ImmediateCallerId + } + return nil +} + +func (m *UpdateStreamRequest) GetTarget() *Target { + if m != nil { + return m.Target + } + return nil +} + +func (m *UpdateStreamRequest) GetPosition() string { + if m != nil { + return m.Position + } + return "" +} + +func (m *UpdateStreamRequest) GetTimestamp() int64 { + if m != nil { + return m.Timestamp + } + return 0 +} + +// UpdateStreamResponse is returned by UpdateStream +type UpdateStreamResponse struct { + Event *StreamEvent `protobuf:"bytes,1,opt,name=event" json:"event,omitempty"` +} + +func (m *UpdateStreamResponse) Reset() { *m = UpdateStreamResponse{} } +func (m *UpdateStreamResponse) String() string { return "TODO" } + + + +func (m *UpdateStreamResponse) GetEvent() *StreamEvent { + if m != nil { + return m.Event + } + return nil +} + +// TransactionMetadata contains the metadata for a distributed transaction. +type TransactionMetadata struct { + Dtid string `protobuf:"bytes,1,opt,name=dtid" json:"dtid,omitempty"` + State TransactionState `protobuf:"varint,2,opt,name=state,enum=query.TransactionState" json:"state,omitempty"` + TimeCreated int64 `protobuf:"varint,3,opt,name=time_created,json=timeCreated" json:"time_created,omitempty"` + Participants []*Target `protobuf:"bytes,4,rep,name=participants" json:"participants,omitempty"` +} + +func (m *TransactionMetadata) Reset() { *m = TransactionMetadata{} } +func (m *TransactionMetadata) String() string { return "TODO" } + + + +func (m *TransactionMetadata) GetDtid() string { + if m != nil { + return m.Dtid + } + return "" +} + +func (m *TransactionMetadata) GetState() TransactionState { + if m != nil { + return m.State + } + return TransactionState_UNKNOWN +} + +func (m *TransactionMetadata) GetTimeCreated() int64 { + if m != nil { + return m.TimeCreated + } + return 0 +} + +func (m *TransactionMetadata) GetParticipants() []*Target { + if m != nil { + return m.Participants + } + return nil +} + +func init() { + proto.RegisterType((*Target)(nil), "query.Target") + proto.RegisterType((*VTGateCallerID)(nil), "query.VTGateCallerID") + proto.RegisterType((*EventToken)(nil), "query.EventToken") + proto.RegisterType((*Value)(nil), "query.Value") + proto.RegisterType((*BindVariable)(nil), "query.BindVariable") + proto.RegisterType((*BoundQuery)(nil), "query.BoundQuery") + proto.RegisterType((*ExecuteOptions)(nil), "query.ExecuteOptions") + proto.RegisterType((*Field)(nil), "query.Field") + proto.RegisterType((*Row)(nil), "query.Row") + proto.RegisterType((*ResultExtras)(nil), "query.ResultExtras") + proto.RegisterType((*QueryResult)(nil), "query.QueryResult") + proto.RegisterType((*StreamEvent)(nil), "query.StreamEvent") + proto.RegisterType((*StreamEvent_Statement)(nil), "query.StreamEvent.Statement") + proto.RegisterType((*ExecuteRequest)(nil), "query.ExecuteRequest") + 
proto.RegisterType((*ExecuteResponse)(nil), "query.ExecuteResponse") + proto.RegisterType((*ResultWithError)(nil), "query.ResultWithError") + proto.RegisterType((*ExecuteBatchRequest)(nil), "query.ExecuteBatchRequest") + proto.RegisterType((*ExecuteBatchResponse)(nil), "query.ExecuteBatchResponse") + proto.RegisterType((*StreamExecuteRequest)(nil), "query.StreamExecuteRequest") + proto.RegisterType((*StreamExecuteResponse)(nil), "query.StreamExecuteResponse") + proto.RegisterType((*BeginRequest)(nil), "query.BeginRequest") + proto.RegisterType((*BeginResponse)(nil), "query.BeginResponse") + proto.RegisterType((*CommitRequest)(nil), "query.CommitRequest") + proto.RegisterType((*CommitResponse)(nil), "query.CommitResponse") + proto.RegisterType((*RollbackRequest)(nil), "query.RollbackRequest") + proto.RegisterType((*RollbackResponse)(nil), "query.RollbackResponse") + proto.RegisterType((*PrepareRequest)(nil), "query.PrepareRequest") + proto.RegisterType((*PrepareResponse)(nil), "query.PrepareResponse") + proto.RegisterType((*CommitPreparedRequest)(nil), "query.CommitPreparedRequest") + proto.RegisterType((*CommitPreparedResponse)(nil), "query.CommitPreparedResponse") + proto.RegisterType((*RollbackPreparedRequest)(nil), "query.RollbackPreparedRequest") + proto.RegisterType((*RollbackPreparedResponse)(nil), "query.RollbackPreparedResponse") + proto.RegisterType((*CreateTransactionRequest)(nil), "query.CreateTransactionRequest") + proto.RegisterType((*CreateTransactionResponse)(nil), "query.CreateTransactionResponse") + proto.RegisterType((*StartCommitRequest)(nil), "query.StartCommitRequest") + proto.RegisterType((*StartCommitResponse)(nil), "query.StartCommitResponse") + proto.RegisterType((*SetRollbackRequest)(nil), "query.SetRollbackRequest") + proto.RegisterType((*SetRollbackResponse)(nil), "query.SetRollbackResponse") + proto.RegisterType((*ConcludeTransactionRequest)(nil), "query.ConcludeTransactionRequest") + proto.RegisterType((*ConcludeTransactionResponse)(nil), "query.ConcludeTransactionResponse") + proto.RegisterType((*ReadTransactionRequest)(nil), "query.ReadTransactionRequest") + proto.RegisterType((*ReadTransactionResponse)(nil), "query.ReadTransactionResponse") + proto.RegisterType((*BeginExecuteRequest)(nil), "query.BeginExecuteRequest") + proto.RegisterType((*BeginExecuteResponse)(nil), "query.BeginExecuteResponse") + proto.RegisterType((*BeginExecuteBatchRequest)(nil), "query.BeginExecuteBatchRequest") + proto.RegisterType((*BeginExecuteBatchResponse)(nil), "query.BeginExecuteBatchResponse") + proto.RegisterType((*MessageStreamRequest)(nil), "query.MessageStreamRequest") + proto.RegisterType((*MessageStreamResponse)(nil), "query.MessageStreamResponse") + proto.RegisterType((*MessageAckRequest)(nil), "query.MessageAckRequest") + proto.RegisterType((*MessageAckResponse)(nil), "query.MessageAckResponse") + proto.RegisterType((*SplitQueryRequest)(nil), "query.SplitQueryRequest") + proto.RegisterType((*QuerySplit)(nil), "query.QuerySplit") + proto.RegisterType((*SplitQueryResponse)(nil), "query.SplitQueryResponse") + proto.RegisterType((*StreamHealthRequest)(nil), "query.StreamHealthRequest") + proto.RegisterType((*RealtimeStats)(nil), "query.RealtimeStats") + proto.RegisterType((*StreamHealthResponse)(nil), "query.StreamHealthResponse") + proto.RegisterType((*UpdateStreamRequest)(nil), "query.UpdateStreamRequest") + proto.RegisterType((*UpdateStreamResponse)(nil), "query.UpdateStreamResponse") + proto.RegisterType((*TransactionMetadata)(nil), "query.TransactionMetadata") + 
proto.RegisterEnum("query.MySqlFlag", MySqlFlag_name, MySqlFlag_value) + proto.RegisterEnum("query.Flag", Flag_name, Flag_value) + proto.RegisterEnum("query.Type", Type_name, Type_value) + proto.RegisterEnum("query.TransactionState", TransactionState_name, TransactionState_value) + proto.RegisterEnum("query.ExecuteOptions_IncludedFields", ExecuteOptions_IncludedFields_name, ExecuteOptions_IncludedFields_value) + proto.RegisterEnum("query.ExecuteOptions_Workload", ExecuteOptions_Workload_name, ExecuteOptions_Workload_value) + proto.RegisterEnum("query.ExecuteOptions_TransactionIsolation", ExecuteOptions_TransactionIsolation_name, ExecuteOptions_TransactionIsolation_value) + proto.RegisterEnum("query.StreamEvent_Statement_Category", StreamEvent_Statement_Category_name, StreamEvent_Statement_Category_value) + proto.RegisterEnum("query.SplitQueryRequest_Algorithm", SplitQueryRequest_Algorithm_name, SplitQueryRequest_Algorithm_value) +} + +*/ diff --git a/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/bind_variables.go b/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/bind_variables.go new file mode 100644 index 000000000..1879e6dbb --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/bind_variables.go @@ -0,0 +1,266 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "errors" + "fmt" + "reflect" + "strconv" + + "github.com/xwb1989/sqlparser/dependency/querypb" +) + +// NullBindVariable is a bindvar with NULL value. +var NullBindVariable = &querypb.BindVariable{Type: querypb.Type_NULL_TYPE} + +// ValueToProto converts Value to a *querypb.Value. +func ValueToProto(v Value) *querypb.Value { + return &querypb.Value{Type: v.typ, Value: v.val} +} + +// ProtoToValue converts a *querypb.Value to a Value. +func ProtoToValue(v *querypb.Value) Value { + return MakeTrusted(v.Type, v.Value) +} + +// BuildBindVariables builds a map[string]*querypb.BindVariable from a map[string]interface{}. +func BuildBindVariables(in map[string]interface{}) (map[string]*querypb.BindVariable, error) { + if len(in) == 0 { + return nil, nil + } + + out := make(map[string]*querypb.BindVariable, len(in)) + for k, v := range in { + bv, err := BuildBindVariable(v) + if err != nil { + return nil, fmt.Errorf("%s: %v", k, err) + } + out[k] = bv + } + return out, nil +} + +// Int32BindVariable converts an int32 to a bind var. +func Int32BindVariable(v int32) *querypb.BindVariable { + return ValueBindVariable(NewInt32(v)) +} + +// Int64BindVariable converts an int64 to a bind var. +func Int64BindVariable(v int64) *querypb.BindVariable { + return ValueBindVariable(NewInt64(v)) +} + +// Uint64BindVariable converts a uint64 to a bind var. +func Uint64BindVariable(v uint64) *querypb.BindVariable { + return ValueBindVariable(NewUint64(v)) +} + +// Float64BindVariable converts a float64 to a bind var. 
+func Float64BindVariable(v float64) *querypb.BindVariable { + return ValueBindVariable(NewFloat64(v)) +} + +// StringBindVariable converts a string to a bind var. +func StringBindVariable(v string) *querypb.BindVariable { + return ValueBindVariable(NewVarChar(v)) +} + +// BytesBindVariable converts a []byte to a bind var. +func BytesBindVariable(v []byte) *querypb.BindVariable { + return &querypb.BindVariable{Type: VarBinary, Value: v} +} + +// ValueBindVariable converts a Value to a bind var. +func ValueBindVariable(v Value) *querypb.BindVariable { + return &querypb.BindVariable{Type: v.typ, Value: v.val} +} + +// BuildBindVariable builds a *querypb.BindVariable from a valid input type. +func BuildBindVariable(v interface{}) (*querypb.BindVariable, error) { + switch v := v.(type) { + case string: + return StringBindVariable(v), nil + case []byte: + return BytesBindVariable(v), nil + case int: + return &querypb.BindVariable{ + Type: querypb.Type_INT64, + Value: strconv.AppendInt(nil, int64(v), 10), + }, nil + case int64: + return Int64BindVariable(v), nil + case uint64: + return Uint64BindVariable(v), nil + case float64: + return Float64BindVariable(v), nil + case nil: + return NullBindVariable, nil + case Value: + return ValueBindVariable(v), nil + case *querypb.BindVariable: + return v, nil + case []interface{}: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + lbv, err := BuildBindVariable(lv) + if err != nil { + return nil, err + } + values[i].Type = lbv.Type + values[i].Value = lbv.Value + bv.Values[i] = &values[i] + } + return bv, nil + case []string: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + values[i].Type = querypb.Type_VARCHAR + values[i].Value = []byte(lv) + bv.Values[i] = &values[i] + } + return bv, nil + case [][]byte: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + values[i].Type = querypb.Type_VARBINARY + values[i].Value = lv + bv.Values[i] = &values[i] + } + return bv, nil + case []int: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + values[i].Type = querypb.Type_INT64 + values[i].Value = strconv.AppendInt(nil, int64(lv), 10) + bv.Values[i] = &values[i] + } + return bv, nil + case []int64: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + values[i].Type = querypb.Type_INT64 + values[i].Value = strconv.AppendInt(nil, lv, 10) + bv.Values[i] = &values[i] + } + return bv, nil + case []uint64: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + values[i].Type = querypb.Type_UINT64 + values[i].Value = strconv.AppendUint(nil, lv, 10) + bv.Values[i] = &values[i] + } + return bv, nil + case []float64: + bv := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + Values: make([]*querypb.Value, len(v)), + } + values := make([]querypb.Value, len(v)) + for i, lv := range v { + values[i].Type = querypb.Type_FLOAT64 + values[i].Value = 
strconv.AppendFloat(nil, lv, 'g', -1, 64) + bv.Values[i] = &values[i] + } + return bv, nil + } + return nil, fmt.Errorf("type %T not supported as bind var: %v", v, v) +} + +// ValidateBindVariables validates a map[string]*querypb.BindVariable. +func ValidateBindVariables(bv map[string]*querypb.BindVariable) error { + for k, v := range bv { + if err := ValidateBindVariable(v); err != nil { + return fmt.Errorf("%s: %v", k, err) + } + } + return nil +} + +// ValidateBindVariable returns an error if the bind variable has inconsistent +// fields. +func ValidateBindVariable(bv *querypb.BindVariable) error { + if bv == nil { + return errors.New("bind variable is nil") + } + + if bv.Type == querypb.Type_TUPLE { + if len(bv.Values) == 0 { + return errors.New("empty tuple is not allowed") + } + for _, val := range bv.Values { + if val.Type == querypb.Type_TUPLE { + return errors.New("tuple not allowed inside another tuple") + } + if err := ValidateBindVariable(&querypb.BindVariable{Type: val.Type, Value: val.Value}); err != nil { + return err + } + } + return nil + } + + // If NewValue succeeds, the value is valid. + _, err := NewValue(bv.Type, bv.Value) + return err +} + +// BindVariableToValue converts a bind var into a Value. +func BindVariableToValue(bv *querypb.BindVariable) (Value, error) { + if bv.Type == querypb.Type_TUPLE { + return NULL, errors.New("cannot convert a TUPLE bind var into a value") + } + return MakeTrusted(bv.Type, bv.Value), nil +} + +// BindVariablesEqual compares two maps of bind variables. +func BindVariablesEqual(x, y map[string]*querypb.BindVariable) bool { + return reflect.DeepEqual(&querypb.BoundQuery{BindVariables: x}, &querypb.BoundQuery{BindVariables: y}) +} + +// CopyBindVariables returns a shallow-copy of the given bindVariables map. +func CopyBindVariables(bindVariables map[string]*querypb.BindVariable) map[string]*querypb.BindVariable { + result := make(map[string]*querypb.BindVariable, len(bindVariables)) + for key, value := range bindVariables { + result[key] = value + } + return result +} diff --git a/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/plan_value.go b/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/plan_value.go new file mode 100644 index 000000000..7936a6bae --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/plan_value.go @@ -0,0 +1,259 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "encoding/json" + "errors" + "fmt" + + "github.com/xwb1989/sqlparser/dependency/querypb" +) + +// PlanValue represents a value or a list of values for +// a column that will later be resolved using bind vars and used +// to perform plan actions like generating the final query or +// deciding on a route. +// +// Plan values are typically used as a slice ([]planValue) +// where each entry is for one column. For situations where +// the required output is a list of rows (like in the case +// of multi-value inserts), the representation is pivoted. 
+// For example, a statement like this: +// INSERT INTO t VALUES (1, 2), (3, 4) +// will be represented as follows: +// []PlanValue{ +// Values: {1, 3}, +// Values: {2, 4}, +// } +// +// For WHERE clause items that contain a combination of +// equality expressions and IN clauses like this: +// WHERE pk1 = 1 AND pk2 IN (2, 3, 4) +// The plan values will be represented as follows: +// []PlanValue{ +// Value: 1, +// Values: {2, 3, 4}, +// } +// When converted into rows, columns with single values +// are replicated as the same for all rows: +// [][]Value{ +// {1, 2}, +// {1, 3}, +// {1, 4}, +// } +type PlanValue struct { + Key string + Value Value + ListKey string + Values []PlanValue +} + +// IsNull returns true if the PlanValue is NULL. +func (pv PlanValue) IsNull() bool { + return pv.Key == "" && pv.Value.IsNull() && pv.ListKey == "" && pv.Values == nil +} + +// IsList returns true if the PlanValue is a list. +func (pv PlanValue) IsList() bool { + return pv.ListKey != "" || pv.Values != nil +} + +// ResolveValue resolves a PlanValue as a single value based on the supplied bindvars. +func (pv PlanValue) ResolveValue(bindVars map[string]*querypb.BindVariable) (Value, error) { + switch { + case pv.Key != "": + bv, err := pv.lookupValue(bindVars) + if err != nil { + return NULL, err + } + return MakeTrusted(bv.Type, bv.Value), nil + case !pv.Value.IsNull(): + return pv.Value, nil + case pv.ListKey != "" || pv.Values != nil: + // This code is unreachable because the parser does not allow + // multi-value constructs where a single value is expected. + return NULL, errors.New("a list was supplied where a single value was expected") + } + return NULL, nil +} + +func (pv PlanValue) lookupValue(bindVars map[string]*querypb.BindVariable) (*querypb.BindVariable, error) { + bv, ok := bindVars[pv.Key] + if !ok { + return nil, fmt.Errorf("missing bind var %s", pv.Key) + } + if bv.Type == querypb.Type_TUPLE { + return nil, fmt.Errorf("TUPLE was supplied for single value bind var %s", pv.Key) + } + return bv, nil +} + +// ResolveList resolves a PlanValue as a list of values based on the supplied bindvars. +func (pv PlanValue) ResolveList(bindVars map[string]*querypb.BindVariable) ([]Value, error) { + switch { + case pv.ListKey != "": + bv, err := pv.lookupList(bindVars) + if err != nil { + return nil, err + } + values := make([]Value, 0, len(bv.Values)) + for _, val := range bv.Values { + values = append(values, MakeTrusted(val.Type, val.Value)) + } + return values, nil + case pv.Values != nil: + values := make([]Value, 0, len(pv.Values)) + for _, val := range pv.Values { + v, err := val.ResolveValue(bindVars) + if err != nil { + return nil, err + } + values = append(values, v) + } + return values, nil + } + // This code is unreachable because the parser does not allow + // single value constructs where multiple values are expected. + return nil, errors.New("a single value was supplied where a list was expected") +} + +func (pv PlanValue) lookupList(bindVars map[string]*querypb.BindVariable) (*querypb.BindVariable, error) { + bv, ok := bindVars[pv.ListKey] + if !ok { + return nil, fmt.Errorf("missing bind var %s", pv.ListKey) + } + if bv.Type != querypb.Type_TUPLE { + return nil, fmt.Errorf("single value was supplied for TUPLE bind var %s", pv.ListKey) + } + return bv, nil +} + +// MarshalJSON should be used only for testing.
+func (pv PlanValue) MarshalJSON() ([]byte, error) { + switch { + case pv.Key != "": + return json.Marshal(":" + pv.Key) + case !pv.Value.IsNull(): + if pv.Value.IsIntegral() { + return pv.Value.ToBytes(), nil + } + return json.Marshal(pv.Value.ToString()) + case pv.ListKey != "": + return json.Marshal("::" + pv.ListKey) + case pv.Values != nil: + return json.Marshal(pv.Values) + } + return []byte("null"), nil +} + +func rowCount(pvs []PlanValue, bindVars map[string]*querypb.BindVariable) (int, error) { + count := -1 + setCount := func(l int) error { + switch count { + case -1: + count = l + return nil + case l: + return nil + default: + return errors.New("mismatch in number of column values") + } + } + + for _, pv := range pvs { + switch { + case pv.Key != "" || !pv.Value.IsNull(): + continue + case pv.Values != nil: + if err := setCount(len(pv.Values)); err != nil { + return 0, err + } + case pv.ListKey != "": + bv, err := pv.lookupList(bindVars) + if err != nil { + return 0, err + } + if err := setCount(len(bv.Values)); err != nil { + return 0, err + } + } + } + + if count == -1 { + // If there were no lists inside, it was a single row. + // Note that count can never be 0 because there is enough + // protection at the top level: list bind vars must have + // at least one value (enforced by vtgate), and AST lists + // must have at least one value (enforced by the parser). + // Also lists created internally after vtgate validation + // ensure at least one value. + // TODO(sougou): verify and change API to enforce this. + return 1, nil + } + return count, nil +} + +// ResolveRows resolves a []PlanValue as rows based on the supplied bindvars. +func ResolveRows(pvs []PlanValue, bindVars map[string]*querypb.BindVariable) ([][]Value, error) { + count, err := rowCount(pvs, bindVars) + if err != nil { + return nil, err + } + + // Allocate the rows. + rows := make([][]Value, count) + for i := range rows { + rows[i] = make([]Value, len(pvs)) + } + + // Using j because we're resolving by columns. + for j, pv := range pvs { + switch { + case pv.Key != "": + bv, err := pv.lookupValue(bindVars) + if err != nil { + return nil, err + } + for i := range rows { + rows[i][j] = MakeTrusted(bv.Type, bv.Value) + } + case !pv.Value.IsNull(): + for i := range rows { + rows[i][j] = pv.Value + } + case pv.ListKey != "": + bv, err := pv.lookupList(bindVars) + if err != nil { + // This code is unreachable because rowCount already checks this. + return nil, err + } + for i := range rows { + rows[i][j] = MakeTrusted(bv.Values[i].Type, bv.Values[i].Value) + } + case pv.Values != nil: + for i := range rows { + rows[i][j], err = pv.Values[i].ResolveValue(bindVars) + if err != nil { + return nil, err + } + } + // default case is a NULL value, which the row values are already initialized to. + } + } + return rows, nil +} diff --git a/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/testing.go b/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/testing.go new file mode 100644 index 000000000..0acaedda7 --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/testing.go @@ -0,0 +1,154 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + querypb "github.com/xwb1989/sqlparser/dependency/querypb" +) + +// Functions in this file should only be used for testing. +// This is an experiment to see if test code bloat can be +// reduced and readability improved. + +/* +// MakeTestFields builds a []*querypb.Field for testing. +// fields := sqltypes.MakeTestFields( +// "a|b", +// "int64|varchar", +// ) +// The field types are as defined in querypb and are case +// insensitive. Column delimiters must be used only to separate +// strings and not at the beginning or the end. +func MakeTestFields(names, types string) []*querypb.Field { + n := split(names) + t := split(types) + var fields []*querypb.Field + for i := range n { + fields = append(fields, &querypb.Field{ + Name: n[i], + Type: querypb.Type(querypb.Type_value[strings.ToUpper(t[i])]), + }) + } + return fields +} + +// MakeTestResult builds a *sqltypes.Result object for testing. +// result := sqltypes.MakeTestResult( +// fields, +// " 1|a", +// "10|abcd", +// ) +// The field type values are set as the types for the rows built. +// Spaces are trimmed from row values. "null" is treated as NULL. +func MakeTestResult(fields []*querypb.Field, rows ...string) *Result { + result := &Result{ + Fields: fields, + } + if len(rows) > 0 { + result.Rows = make([][]Value, len(rows)) + } + for i, row := range rows { + result.Rows[i] = make([]Value, len(fields)) + for j, col := range split(row) { + if col == "null" { + continue + } + result.Rows[i][j] = MakeTrusted(fields[j].Type, []byte(col)) + } + } + result.RowsAffected = uint64(len(result.Rows)) + return result +} + +// MakeTestStreamingResults builds a list of results for streaming. +// results := sqltypes.MakeTestStreamingResults( +// fields, +// "1|a", +// "2|b", +// "---", +// "c|c", +// ) +// The first result contains only the fields. Subsequent results +// are built using the field types. Every input that starts with a "-" +// is treated as streaming delimiter for one result. A final +// delimiter must not be supplied. +func MakeTestStreamingResults(fields []*querypb.Field, rows ...string) []*Result { + var results []*Result + results = append(results, &Result{Fields: fields}) + start := 0 + cur := 0 + // Add a final streaming delimiter to simplify the loop below. + rows = append(rows, "-") + for cur < len(rows) { + if rows[cur][0] != '-' { + cur++ + continue + } + result := MakeTestResult(fields, rows[start:cur]...) + result.Fields = nil + result.RowsAffected = 0 + results = append(results, result) + start = cur + 1 + cur = start + } + return results +} +*/ + +// TestBindVariable makes a *querypb.BindVariable from +// an interface{}. It panics on invalid input. +// This function should only be used for testing. +func TestBindVariable(v interface{}) *querypb.BindVariable { + if v == nil { + return NullBindVariable + } + bv, err := BuildBindVariable(v) + if err != nil { + panic(err) + } + return bv +} + +// TestValue builds a Value from typ and val. +// This function should only be used for testing.
+func TestValue(typ querypb.Type, val string) Value { + return MakeTrusted(typ, []byte(val)) +} + +/* +// PrintResults prints []*Result into a string. +// This function should only be used for testing. +func PrintResults(results []*Result) string { + b := new(bytes.Buffer) + for i, r := range results { + if i == 0 { + fmt.Fprintf(b, "%v", r) + continue + } + fmt.Fprintf(b, ", %v", r) + } + return b.String() +} + +func split(str string) []string { + splits := strings.Split(str, "|") + for i, v := range splits { + splits[i] = strings.TrimSpace(v) + } + return splits +} +*/ diff --git a/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/type.go b/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/type.go new file mode 100644 index 000000000..49224e4b3 --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/type.go @@ -0,0 +1,288 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqltypes + +import ( + "fmt" + + "github.com/xwb1989/sqlparser/dependency/querypb" +) + +// This file provides wrappers and support +// functions for querypb.Type. + +// These bit flags can be used to query on the +// common properties of types. +const ( + flagIsIntegral = int(querypb.Flag_ISINTEGRAL) + flagIsUnsigned = int(querypb.Flag_ISUNSIGNED) + flagIsFloat = int(querypb.Flag_ISFLOAT) + flagIsQuoted = int(querypb.Flag_ISQUOTED) + flagIsText = int(querypb.Flag_ISTEXT) + flagIsBinary = int(querypb.Flag_ISBINARY) +) + +// IsIntegral returns true if querypb.Type is an integral +// (signed/unsigned) that can be represented using +// up to 64 binary bits. +// If you have a Value object, use its member function. +func IsIntegral(t querypb.Type) bool { + return int(t)&flagIsIntegral == flagIsIntegral +} + +// IsSigned returns true if querypb.Type is a signed integral. +// If you have a Value object, use its member function. +func IsSigned(t querypb.Type) bool { + return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral +} + +// IsUnsigned returns true if querypb.Type is an unsigned integral. +// Caution: this is not the same as !IsSigned. +// If you have a Value object, use its member function. +func IsUnsigned(t querypb.Type) bool { + return int(t)&(flagIsIntegral|flagIsUnsigned) == flagIsIntegral|flagIsUnsigned +} + +// IsFloat returns true if querypb.Type is a floating point. +// If you have a Value object, use its member function. +func IsFloat(t querypb.Type) bool { + return int(t)&flagIsFloat == flagIsFloat +} + +// IsQuoted returns true if querypb.Type is a quoted text or binary. +// If you have a Value object, use its member function. +func IsQuoted(t querypb.Type) bool { + return int(t)&flagIsQuoted == flagIsQuoted +} + +// IsText returns true if querypb.Type is a text. +// If you have a Value object, use its member function. +func IsText(t querypb.Type) bool { + return int(t)&flagIsText == flagIsText +} + +// IsBinary returns true if querypb.Type is a binary. +// If you have a Value object, use its member function.
+func IsBinary(t querypb.Type) bool { + return int(t)&flagIsBinary == flagIsBinary +} + +// isNumber returns true if the type is any type of number. +func isNumber(t querypb.Type) bool { + return IsIntegral(t) || IsFloat(t) || t == Decimal +} + +// Vitess data types. These are idiomatically +// named synonyms for the querypb.Type values. +// Although these constants are interchangeable, +// they should be treated as different from querypb.Type. +// Use the synonyms only to refer to the type in Value. +// For proto variables, use the querypb.Type constants +// instead. +// The following conditions are non-overlapping +// and cover all types: IsSigned(), IsUnsigned(), +// IsFloat(), IsQuoted(), Null, Decimal, Expression. +// Also, IsIntegral() == (IsSigned()||IsUnsigned()). +// TestCategory needs to be updated accordingly if +// you add a new type. +// If IsBinary or IsText is true, then IsQuoted is +// also true. But there are IsQuoted types that are +// neither binary nor text. +// querypb.Type_TUPLE is not included in this list +// because it's not a valid Value type. +// TODO(sougou): provide a categorization function +// that returns enums, which will allow for cleaner +// switch statements for those who want to cover types +// by their category. +const ( + Null = querypb.Type_NULL_TYPE + Int8 = querypb.Type_INT8 + Uint8 = querypb.Type_UINT8 + Int16 = querypb.Type_INT16 + Uint16 = querypb.Type_UINT16 + Int24 = querypb.Type_INT24 + Uint24 = querypb.Type_UINT24 + Int32 = querypb.Type_INT32 + Uint32 = querypb.Type_UINT32 + Int64 = querypb.Type_INT64 + Uint64 = querypb.Type_UINT64 + Float32 = querypb.Type_FLOAT32 + Float64 = querypb.Type_FLOAT64 + Timestamp = querypb.Type_TIMESTAMP + Date = querypb.Type_DATE + Time = querypb.Type_TIME + Datetime = querypb.Type_DATETIME + Year = querypb.Type_YEAR + Decimal = querypb.Type_DECIMAL + Text = querypb.Type_TEXT + Blob = querypb.Type_BLOB + VarChar = querypb.Type_VARCHAR + VarBinary = querypb.Type_VARBINARY + Char = querypb.Type_CHAR + Binary = querypb.Type_BINARY + Bit = querypb.Type_BIT + Enum = querypb.Type_ENUM + Set = querypb.Type_SET + Geometry = querypb.Type_GEOMETRY + TypeJSON = querypb.Type_JSON + Expression = querypb.Type_EXPRESSION +) + +// bit-shift the mysql flags by two bytes so we +// can merge them with the mysql or vitess types. +const ( + mysqlUnsigned = 32 + mysqlBinary = 128 + mysqlEnum = 256 + mysqlSet = 2048 +) + +// If you add to this map, make sure you add a test case +// in tabletserver/endtoend. +var mysqlToType = map[int64]querypb.Type{ + 1: Int8, + 2: Int16, + 3: Int32, + 4: Float32, + 5: Float64, + 6: Null, + 7: Timestamp, + 8: Int64, + 9: Int24, + 10: Date, + 11: Time, + 12: Datetime, + 13: Year, + 16: Bit, + 245: TypeJSON, + 246: Decimal, + 249: Text, + 250: Text, + 251: Text, + 252: Text, + 253: VarChar, + 254: Char, + 255: Geometry, +} + +// modifyType modifies the vitess type based on the +// mysql flag. The function checks specific flags based +// on the type. This allows us to ignore stray flags +// that MySQL occasionally sets.
+func modifyType(typ querypb.Type, flags int64) querypb.Type { + switch typ { + case Int8: + if flags&mysqlUnsigned != 0 { + return Uint8 + } + return Int8 + case Int16: + if flags&mysqlUnsigned != 0 { + return Uint16 + } + return Int16 + case Int32: + if flags&mysqlUnsigned != 0 { + return Uint32 + } + return Int32 + case Int64: + if flags&mysqlUnsigned != 0 { + return Uint64 + } + return Int64 + case Int24: + if flags&mysqlUnsigned != 0 { + return Uint24 + } + return Int24 + case Text: + if flags&mysqlBinary != 0 { + return Blob + } + return Text + case VarChar: + if flags&mysqlBinary != 0 { + return VarBinary + } + return VarChar + case Char: + if flags&mysqlBinary != 0 { + return Binary + } + if flags&mysqlEnum != 0 { + return Enum + } + if flags&mysqlSet != 0 { + return Set + } + return Char + } + return typ +} + +// MySQLToType computes the vitess type from mysql type and flags. +func MySQLToType(mysqlType, flags int64) (typ querypb.Type, err error) { + result, ok := mysqlToType[mysqlType] + if !ok { + return 0, fmt.Errorf("unsupported type: %d", mysqlType) + } + return modifyType(result, flags), nil +} + +// typeToMySQL is the reverse of mysqlToType. +var typeToMySQL = map[querypb.Type]struct { + typ int64 + flags int64 +}{ + Int8: {typ: 1}, + Uint8: {typ: 1, flags: mysqlUnsigned}, + Int16: {typ: 2}, + Uint16: {typ: 2, flags: mysqlUnsigned}, + Int32: {typ: 3}, + Uint32: {typ: 3, flags: mysqlUnsigned}, + Float32: {typ: 4}, + Float64: {typ: 5}, + Null: {typ: 6, flags: mysqlBinary}, + Timestamp: {typ: 7}, + Int64: {typ: 8}, + Uint64: {typ: 8, flags: mysqlUnsigned}, + Int24: {typ: 9}, + Uint24: {typ: 9, flags: mysqlUnsigned}, + Date: {typ: 10, flags: mysqlBinary}, + Time: {typ: 11, flags: mysqlBinary}, + Datetime: {typ: 12, flags: mysqlBinary}, + Year: {typ: 13, flags: mysqlUnsigned}, + Bit: {typ: 16, flags: mysqlUnsigned}, + TypeJSON: {typ: 245}, + Decimal: {typ: 246}, + Text: {typ: 252}, + Blob: {typ: 252, flags: mysqlBinary}, + VarChar: {typ: 253}, + VarBinary: {typ: 253, flags: mysqlBinary}, + Char: {typ: 254}, + Binary: {typ: 254, flags: mysqlBinary}, + Enum: {typ: 254, flags: mysqlEnum}, + Set: {typ: 254, flags: mysqlSet}, + Geometry: {typ: 255}, +} + +// TypeToMySQL returns the equivalent mysql type and flag for a vitess type. +func TypeToMySQL(typ querypb.Type) (mysqlType, flags int64) { + val := typeToMySQL[typ] + return val.typ, val.flags +} diff --git a/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/value.go b/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/value.go new file mode 100644 index 000000000..a3bceeef4 --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/dependency/sqltypes/value.go @@ -0,0 +1,376 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package sqltypes implements interfaces and types that represent SQL values. 
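Before value.go begins, a round-trip through the type.go mapping above may help; this is my illustration, built only from functions and constants in the vendored file, not code from the diff:

    package main

    import (
        "fmt"

        "github.com/xwb1989/sqlparser/dependency/sqltypes"
    )

    func main() {
        // MySQL wire type 3 is INT; with the unsigned flag (32) set,
        // modifyType promotes the mapped Int32 to Uint32.
        t, err := sqltypes.MySQLToType(3, 32)
        if err != nil {
            panic(err)
        }
        fmt.Println(t == sqltypes.Uint32) // true

        // TypeToMySQL recovers the original type/flag pair.
        mt, flags := sqltypes.TypeToMySQL(sqltypes.Uint32)
        fmt.Println(mt, flags) // 3 32
    }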
+package sqltypes
+
+import (
+    "encoding/base64"
+    "encoding/json"
+    "fmt"
+    "strconv"
+
+    "github.com/xwb1989/sqlparser/dependency/bytes2"
+    "github.com/xwb1989/sqlparser/dependency/hack"
+
+    "github.com/xwb1989/sqlparser/dependency/querypb"
+)
+
+var (
+    // NULL represents the NULL value.
+    NULL = Value{}
+
+    // DontEscape tells you if a character should not be escaped.
+    DontEscape = byte(255)
+
+    nullstr = []byte("null")
+)
+
+// BinWriter interface is used for encoding values.
+// Types like bytes.Buffer conform to this interface.
+// We expect the writer objects to be in-memory buffers.
+// So, we don't expect the write operations to fail.
+type BinWriter interface {
+    Write([]byte) (int, error)
+}
+
+// Value can store any SQL value. If the value represents
+// an integral type, the bytes are always stored as a canonical
+// representation that matches how MySQL returns such values.
+type Value struct {
+    typ querypb.Type
+    val []byte
+}
+
+// NewValue builds a Value using typ and val. If the value and typ
+// don't match, it returns an error.
+func NewValue(typ querypb.Type, val []byte) (v Value, err error) {
+    switch {
+    case IsSigned(typ):
+        if _, err := strconv.ParseInt(string(val), 0, 64); err != nil {
+            return NULL, err
+        }
+        return MakeTrusted(typ, val), nil
+    case IsUnsigned(typ):
+        if _, err := strconv.ParseUint(string(val), 0, 64); err != nil {
+            return NULL, err
+        }
+        return MakeTrusted(typ, val), nil
+    case IsFloat(typ) || typ == Decimal:
+        if _, err := strconv.ParseFloat(string(val), 64); err != nil {
+            return NULL, err
+        }
+        return MakeTrusted(typ, val), nil
+    case IsQuoted(typ) || typ == Null:
+        return MakeTrusted(typ, val), nil
+    }
+    // All other types are unsafe or invalid.
+    return NULL, fmt.Errorf("invalid type specified for MakeValue: %v", typ)
+}
+
+// MakeTrusted makes a new Value based on the type.
+// This function should only be used if you know the value
+// and type conform to the rules. Every place this function is
+// called, a comment is needed that explains why it's justified.
+// Exceptions: The current package and mysql package do not need
+// comments. Other packages can also use the function to create
+// VarBinary or VarChar values.
+func MakeTrusted(typ querypb.Type, val []byte) Value {
+    if typ == Null {
+        return NULL
+    }
+    return Value{typ: typ, val: val}
+}
+
+// NewInt64 builds an Int64 Value.
+func NewInt64(v int64) Value {
+    return MakeTrusted(Int64, strconv.AppendInt(nil, v, 10))
+}
+
+// NewInt32 builds an Int32 Value.
+func NewInt32(v int32) Value {
+    return MakeTrusted(Int32, strconv.AppendInt(nil, int64(v), 10))
+}
+
+// NewUint64 builds a Uint64 Value.
+func NewUint64(v uint64) Value {
+    return MakeTrusted(Uint64, strconv.AppendUint(nil, v, 10))
+}
+
+// NewFloat64 builds a Float64 Value.
+func NewFloat64(v float64) Value {
+    return MakeTrusted(Float64, strconv.AppendFloat(nil, v, 'g', -1, 64))
+}
+
+// NewVarChar builds a VarChar Value.
+func NewVarChar(v string) Value {
+    return MakeTrusted(VarChar, []byte(v))
+}
+
+// NewVarBinary builds a VarBinary Value.
+// The input is a string because it's the most common use case.
+func NewVarBinary(v string) Value {
+    return MakeTrusted(VarBinary, []byte(v))
+}
+
+// NewIntegral builds an integral type from a string representation.
+// The type will be Int64 or Uint64. Int64 will be preferred where possible.
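For instance, a string that fits in int64 comes back typed Int64, an overflowing one falls through to Uint64, and garbage reports an error; a fragment of mine (assumes the fmt and sqltypes imports from the sketch above):

    v1, _ := sqltypes.NewIntegral("42")
    fmt.Println(v1.Type() == sqltypes.Int64) // true

    v2, _ := sqltypes.NewIntegral("18446744073709551615") // exceeds MaxInt64
    fmt.Println(v2.Type() == sqltypes.Uint64) // true

    _, err := sqltypes.NewIntegral("abc")
    fmt.Println(err != nil) // true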
+func NewIntegral(val string) (n Value, err error) {
+    signed, err := strconv.ParseInt(val, 0, 64)
+    if err == nil {
+        return MakeTrusted(Int64, strconv.AppendInt(nil, signed, 10)), nil
+    }
+    unsigned, err := strconv.ParseUint(val, 0, 64)
+    if err != nil {
+        return Value{}, err
+    }
+    return MakeTrusted(Uint64, strconv.AppendUint(nil, unsigned, 10)), nil
+}
+
+// InterfaceToValue builds a value from a Go type.
+// Supported types are nil, int64, uint64, float64,
+// string and []byte.
+// This function is deprecated. Use the type-specific
+// functions instead.
+func InterfaceToValue(goval interface{}) (Value, error) {
+    switch goval := goval.(type) {
+    case nil:
+        return NULL, nil
+    case []byte:
+        return MakeTrusted(VarBinary, goval), nil
+    case int64:
+        return NewInt64(goval), nil
+    case uint64:
+        return NewUint64(goval), nil
+    case float64:
+        return NewFloat64(goval), nil
+    case string:
+        return NewVarChar(goval), nil
+    default:
+        return NULL, fmt.Errorf("unexpected type %T: %v", goval, goval)
+    }
+}
+
+// Type returns the type of Value.
+func (v Value) Type() querypb.Type {
+    return v.typ
+}
+
+// Raw returns the internal representation of the value. For newer types,
+// this may not match MySQL's representation.
+func (v Value) Raw() []byte {
+    return v.val
+}
+
+// ToBytes returns the value as MySQL would return it as []byte.
+// In contrast, Raw returns the internal representation of the Value, which may not
+// match MySQL's representation for newer types.
+// If the value is not convertible like in the case of Expression, it returns nil.
+func (v Value) ToBytes() []byte {
+    if v.typ == Expression {
+        return nil
+    }
+    return v.val
+}
+
+// Len returns the length.
+func (v Value) Len() int {
+    return len(v.val)
+}
+
+// ToString returns the value as MySQL would return it as string.
+// If the value is not convertible like in the case of Expression,
+// it returns an empty string.
+func (v Value) ToString() string {
+    if v.typ == Expression {
+        return ""
+    }
+    return hack.String(v.val)
+}
+
+// String returns a printable version of the value.
+func (v Value) String() string {
+    if v.typ == Null {
+        return "NULL"
+    }
+    if v.IsQuoted() {
+        return fmt.Sprintf("%v(%q)", v.typ, v.val)
+    }
+    return fmt.Sprintf("%v(%s)", v.typ, v.val)
+}
+
+// EncodeSQL encodes the value into an SQL statement. Can be binary.
+func (v Value) EncodeSQL(b BinWriter) {
+    switch {
+    case v.typ == Null:
+        b.Write(nullstr)
+    case v.IsQuoted():
+        encodeBytesSQL(v.val, b)
+    default:
+        b.Write(v.val)
+    }
+}
+
+// EncodeASCII encodes the value using 7-bit clean ascii bytes.
+func (v Value) EncodeASCII(b BinWriter) {
+    switch {
+    case v.typ == Null:
+        b.Write(nullstr)
+    case v.IsQuoted():
+        encodeBytesASCII(v.val, b)
+    default:
+        b.Write(v.val)
+    }
+}
+
+// IsNull returns true if Value is null.
+func (v Value) IsNull() bool {
+    return v.typ == Null
+}
+
+// IsIntegral returns true if Value is an integral.
+func (v Value) IsIntegral() bool {
+    return IsIntegral(v.typ)
+}
+
+// IsSigned returns true if Value is a signed integral.
+func (v Value) IsSigned() bool {
+    return IsSigned(v.typ)
+}
+
+// IsUnsigned returns true if Value is an unsigned integral.
+func (v Value) IsUnsigned() bool {
+    return IsUnsigned(v.typ)
+}
+
+// IsFloat returns true if Value is a float.
+func (v Value) IsFloat() bool {
+    return IsFloat(v.typ)
+}
+
+// IsQuoted returns true if Value must be SQL-quoted.
+func (v Value) IsQuoted() bool {
+    return IsQuoted(v.typ)
+}
+
+// IsText returns true if Value is a collatable text.
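To see how a quoted Value renders through these methods, here is a small fragment of mine (assumes "bytes", "fmt", and the sqltypes import from the earlier sketch):

    v := sqltypes.NewVarChar("a'b")
    fmt.Println(v.ToString()) // a'b
    fmt.Println(v.String())   // VARCHAR("a'b")

    var buf bytes.Buffer
    v.EncodeSQL(&buf)         // bytes.Buffer satisfies BinWriter
    fmt.Println(buf.String()) // 'a\'b'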
+func (v Value) IsText() bool { + return IsText(v.typ) +} + +// IsBinary returns true if Value is binary. +func (v Value) IsBinary() bool { + return IsBinary(v.typ) +} + +// MarshalJSON should only be used for testing. +// It's not a complete implementation. +func (v Value) MarshalJSON() ([]byte, error) { + switch { + case v.IsQuoted(): + return json.Marshal(v.ToString()) + case v.typ == Null: + return nullstr, nil + } + return v.val, nil +} + +// UnmarshalJSON should only be used for testing. +// It's not a complete implementation. +func (v *Value) UnmarshalJSON(b []byte) error { + if len(b) == 0 { + return fmt.Errorf("error unmarshaling empty bytes") + } + var val interface{} + var err error + switch b[0] { + case '-': + var ival int64 + err = json.Unmarshal(b, &ival) + val = ival + case '"': + var bval []byte + err = json.Unmarshal(b, &bval) + val = bval + case 'n': // null + err = json.Unmarshal(b, &val) + default: + var uval uint64 + err = json.Unmarshal(b, &uval) + val = uval + } + if err != nil { + return err + } + *v, err = InterfaceToValue(val) + return err +} + +func encodeBytesSQL(val []byte, b BinWriter) { + buf := &bytes2.Buffer{} + buf.WriteByte('\'') + for _, ch := range val { + if encodedChar := SQLEncodeMap[ch]; encodedChar == DontEscape { + buf.WriteByte(ch) + } else { + buf.WriteByte('\\') + buf.WriteByte(encodedChar) + } + } + buf.WriteByte('\'') + b.Write(buf.Bytes()) +} + +func encodeBytesASCII(val []byte, b BinWriter) { + buf := &bytes2.Buffer{} + buf.WriteByte('\'') + encoder := base64.NewEncoder(base64.StdEncoding, buf) + encoder.Write(val) + encoder.Close() + buf.WriteByte('\'') + b.Write(buf.Bytes()) +} + +// SQLEncodeMap specifies how to escape binary data with '\'. +// Complies to http://dev.mysql.com/doc/refman/5.1/en/string-syntax.html +var SQLEncodeMap [256]byte + +// SQLDecodeMap is the reverse of SQLEncodeMap +var SQLDecodeMap [256]byte + +var encodeRef = map[byte]byte{ + '\x00': '0', + '\'': '\'', + '"': '"', + '\b': 'b', + '\n': 'n', + '\r': 'r', + '\t': 't', + 26: 'Z', // ctl-Z + '\\': '\\', +} + +func init() { + for i := range SQLEncodeMap { + SQLEncodeMap[i] = DontEscape + SQLDecodeMap[i] = DontEscape + } + for i := range SQLEncodeMap { + if to, ok := encodeRef[byte(i)]; ok { + SQLEncodeMap[byte(i)] = to + SQLDecodeMap[to] = byte(i) + } + } +} diff --git a/vendor/github.com/xwb1989/sqlparser/encodable.go b/vendor/github.com/xwb1989/sqlparser/encodable.go new file mode 100644 index 000000000..badfa42dd --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/encodable.go @@ -0,0 +1,99 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "bytes" + + "github.com/xwb1989/sqlparser/dependency/sqltypes" +) + +// This file contains types that are 'Encodable'. + +// Encodable defines the interface for types that can +// be custom-encoded into SQL. +type Encodable interface { + EncodeSQL(buf *bytes.Buffer) +} + +// InsertValues is a custom SQL encoder for the values of +// an insert statement. 
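As an illustration of the encoder defined next, my fragment below (same imports, plus the vendored sqlparser package) shows the exact text it emits:

    iv := sqlparser.InsertValues{
        {sqltypes.NewInt64(1), sqltypes.NewVarChar("a")},
        {sqltypes.NewInt64(2), sqltypes.NewVarChar("b")},
    }
    var buf bytes.Buffer
    iv.EncodeSQL(&buf)
    fmt.Println(buf.String()) // (1, 'a'), (2, 'b')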
+type InsertValues [][]sqltypes.Value
+
+// EncodeSQL performs the SQL encoding for InsertValues.
+func (iv InsertValues) EncodeSQL(buf *bytes.Buffer) {
+    for i, rows := range iv {
+        if i != 0 {
+            buf.WriteString(", ")
+        }
+        buf.WriteByte('(')
+        for j, bv := range rows {
+            if j != 0 {
+                buf.WriteString(", ")
+            }
+            bv.EncodeSQL(buf)
+        }
+        buf.WriteByte(')')
+    }
+}
+
+// TupleEqualityList is for generating equality constraints
+// for tables that have composite primary keys.
+type TupleEqualityList struct {
+    Columns []ColIdent
+    Rows    [][]sqltypes.Value
+}
+
+// EncodeSQL generates the where clause constraints for the tuple
+// equality.
+func (tpl *TupleEqualityList) EncodeSQL(buf *bytes.Buffer) {
+    if len(tpl.Columns) == 1 {
+        tpl.encodeAsIn(buf)
+        return
+    }
+    tpl.encodeAsEquality(buf)
+}
+
+func (tpl *TupleEqualityList) encodeAsIn(buf *bytes.Buffer) {
+    Append(buf, tpl.Columns[0])
+    buf.WriteString(" in (")
+    for i, r := range tpl.Rows {
+        if i != 0 {
+            buf.WriteString(", ")
+        }
+        r[0].EncodeSQL(buf)
+    }
+    buf.WriteByte(')')
+}
+
+func (tpl *TupleEqualityList) encodeAsEquality(buf *bytes.Buffer) {
+    for i, r := range tpl.Rows {
+        if i != 0 {
+            buf.WriteString(" or ")
+        }
+        buf.WriteString("(")
+        for j, c := range tpl.Columns {
+            if j != 0 {
+                buf.WriteString(" and ")
+            }
+            Append(buf, c)
+            buf.WriteString(" = ")
+            r[j].EncodeSQL(buf)
+        }
+        buf.WriteByte(')')
+    }
+}
diff --git a/vendor/github.com/xwb1989/sqlparser/impossible_query.go b/vendor/github.com/xwb1989/sqlparser/impossible_query.go
new file mode 100644
index 000000000..1179b6112
--- /dev/null
+++ b/vendor/github.com/xwb1989/sqlparser/impossible_query.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sqlparser
+
+// FormatImpossibleQuery creates an impossible query in a TrackedBuffer.
+// An impossible query is a modified version of a query where all selects have where clauses that are
+// impossible for mysql to resolve. This is used in the vtgate and vttablet:
+//
+// - In the vtgate it's used for joins: if the first query returns no result, then vtgate uses the impossible
+// query just to fetch field info from vttablet
+// - In the vttablet, it's just an optimization: the field info is fetched once from MySQL, cached and reused
+// for subsequent queries
+func FormatImpossibleQuery(buf *TrackedBuffer, node SQLNode) {
+    switch node := node.(type) {
+    case *Select:
+        buf.Myprintf("select %v from %v where 1 != 1", node.SelectExprs, node.From)
+        if node.GroupBy != nil {
+            node.GroupBy.Format(buf)
+        }
+    case *Union:
+        buf.Myprintf("%v %s %v", node.Left, node.Type, node.Right)
+    default:
+        node.Format(buf)
+    }
+}
diff --git a/vendor/github.com/xwb1989/sqlparser/normalizer.go b/vendor/github.com/xwb1989/sqlparser/normalizer.go
new file mode 100644
index 000000000..f6520316a
--- /dev/null
+++ b/vendor/github.com/xwb1989/sqlparser/normalizer.go
@@ -0,0 +1,224 @@
+/*
+Copyright 2017 Google Inc.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "fmt" + + "github.com/xwb1989/sqlparser/dependency/sqltypes" + + "github.com/xwb1989/sqlparser/dependency/querypb" +) + +// Normalize changes the statement to use bind values, and +// updates the bind vars to those values. The supplied prefix +// is used to generate the bind var names. The function ensures +// that there are no collisions with existing bind vars. +// Within Select constructs, bind vars are deduped. This allows +// us to identify vindex equality. Otherwise, every value is +// treated as distinct. +func Normalize(stmt Statement, bindVars map[string]*querypb.BindVariable, prefix string) { + nz := newNormalizer(stmt, bindVars, prefix) + _ = Walk(nz.WalkStatement, stmt) +} + +type normalizer struct { + stmt Statement + bindVars map[string]*querypb.BindVariable + prefix string + reserved map[string]struct{} + counter int + vals map[string]string +} + +func newNormalizer(stmt Statement, bindVars map[string]*querypb.BindVariable, prefix string) *normalizer { + return &normalizer{ + stmt: stmt, + bindVars: bindVars, + prefix: prefix, + reserved: GetBindvars(stmt), + counter: 1, + vals: make(map[string]string), + } +} + +// WalkStatement is the top level walk function. +// If it encounters a Select, it switches to a mode +// where variables are deduped. +func (nz *normalizer) WalkStatement(node SQLNode) (bool, error) { + switch node := node.(type) { + case *Select: + _ = Walk(nz.WalkSelect, node) + // Don't continue + return false, nil + case *SQLVal: + nz.convertSQLVal(node) + case *ComparisonExpr: + nz.convertComparison(node) + } + return true, nil +} + +// WalkSelect normalizes the AST in Select mode. +func (nz *normalizer) WalkSelect(node SQLNode) (bool, error) { + switch node := node.(type) { + case *SQLVal: + nz.convertSQLValDedup(node) + case *ComparisonExpr: + nz.convertComparison(node) + } + return true, nil +} + +func (nz *normalizer) convertSQLValDedup(node *SQLVal) { + // If value is too long, don't dedup. + // Such values are most likely not for vindexes. + // We save a lot of CPU because we avoid building + // the key for them. + if len(node.Val) > 256 { + nz.convertSQLVal(node) + return + } + + // Make the bindvar + bval := nz.sqlToBindvar(node) + if bval == nil { + return + } + + // Check if there's a bindvar for that value already. + var key string + if bval.Type == sqltypes.VarBinary { + // Prefixing strings with "'" ensures that a string + // and number that have the same representation don't + // collide. + key = "'" + string(node.Val) + } else { + key = string(node.Val) + } + bvname, ok := nz.vals[key] + if !ok { + // If there's no such bindvar, make a new one. + bvname = nz.newName() + nz.vals[key] = bvname + nz.bindVars[bvname] = bval + } + + // Modify the AST node to a bindvar. + node.Type = ValArg + node.Val = append([]byte(":"), bvname...) +} + +// convertSQLVal converts an SQLVal without the dedup. 
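Concretely, here is my sketch of the normalizer's effect, using the package's Parse and String helpers that this diff also relies on in RedactSQLQuery below; bind-var names follow newName's prefix+counter scheme, and literals are deduped inside a SELECT:

    package main

    import (
        "fmt"

        "github.com/xwb1989/sqlparser"
        "github.com/xwb1989/sqlparser/dependency/querypb"
    )

    func main() {
        stmt, err := sqlparser.Parse("select a from t where b = 1 and c = 1")
        if err != nil {
            panic(err)
        }
        bindVars := map[string]*querypb.BindVariable{}
        sqlparser.Normalize(stmt, bindVars, "bv")
        fmt.Println(sqlparser.String(stmt)) // select a from t where b = :bv1 and c = :bv1
        fmt.Println(len(bindVars))          // 1 (the two literal 1s share one bind var)
    }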
+func (nz *normalizer) convertSQLVal(node *SQLVal) { + bval := nz.sqlToBindvar(node) + if bval == nil { + return + } + + bvname := nz.newName() + nz.bindVars[bvname] = bval + + node.Type = ValArg + node.Val = append([]byte(":"), bvname...) +} + +// convertComparison attempts to convert IN clauses to +// use the list bind var construct. If it fails, it returns +// with no change made. The walk function will then continue +// and iterate on converting each individual value into separate +// bind vars. +func (nz *normalizer) convertComparison(node *ComparisonExpr) { + if node.Operator != InStr && node.Operator != NotInStr { + return + } + tupleVals, ok := node.Right.(ValTuple) + if !ok { + return + } + // The RHS is a tuple of values. + // Make a list bindvar. + bvals := &querypb.BindVariable{ + Type: querypb.Type_TUPLE, + } + for _, val := range tupleVals { + bval := nz.sqlToBindvar(val) + if bval == nil { + return + } + bvals.Values = append(bvals.Values, &querypb.Value{ + Type: bval.Type, + Value: bval.Value, + }) + } + bvname := nz.newName() + nz.bindVars[bvname] = bvals + // Modify RHS to be a list bindvar. + node.Right = ListArg(append([]byte("::"), bvname...)) +} + +func (nz *normalizer) sqlToBindvar(node SQLNode) *querypb.BindVariable { + if node, ok := node.(*SQLVal); ok { + var v sqltypes.Value + var err error + switch node.Type { + case StrVal: + v, err = sqltypes.NewValue(sqltypes.VarBinary, node.Val) + case IntVal: + v, err = sqltypes.NewValue(sqltypes.Int64, node.Val) + case FloatVal: + v, err = sqltypes.NewValue(sqltypes.Float64, node.Val) + default: + return nil + } + if err != nil { + return nil + } + return sqltypes.ValueBindVariable(v) + } + return nil +} + +func (nz *normalizer) newName() string { + for { + newName := fmt.Sprintf("%s%d", nz.prefix, nz.counter) + if _, ok := nz.reserved[newName]; !ok { + nz.reserved[newName] = struct{}{} + return newName + } + nz.counter++ + } +} + +// GetBindvars returns a map of the bind vars referenced in the statement. +// TODO(sougou); This function gets called again from vtgate/planbuilder. +// Ideally, this should be done only once. +func GetBindvars(stmt Statement) map[string]struct{} { + bindvars := make(map[string]struct{}) + _ = Walk(func(node SQLNode) (kontinue bool, err error) { + switch node := node.(type) { + case *SQLVal: + if node.Type == ValArg { + bindvars[string(node.Val[1:])] = struct{}{} + } + case ListArg: + bindvars[string(node[2:])] = struct{}{} + } + return true, nil + }, stmt) + return bindvars +} diff --git a/vendor/github.com/xwb1989/sqlparser/parsed_query.go b/vendor/github.com/xwb1989/sqlparser/parsed_query.go new file mode 100644 index 000000000..cf4ef03b6 --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/parsed_query.go @@ -0,0 +1,119 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package sqlparser
+
+import (
+    "bytes"
+    "fmt"
+
+    "github.com/xwb1989/sqlparser/dependency/querypb"
+    "github.com/xwb1989/sqlparser/dependency/sqltypes"
+)
+
+// ParsedQuery represents a parsed query where
+// bind locations are precomputed for fast substitutions.
+type ParsedQuery struct {
+    Query         string
+    bindLocations []bindLocation
+}
+
+type bindLocation struct {
+    offset, length int
+}
+
+// NewParsedQuery returns a ParsedQuery of the ast.
+func NewParsedQuery(node SQLNode) *ParsedQuery {
+    buf := NewTrackedBuffer(nil)
+    buf.Myprintf("%v", node)
+    return buf.ParsedQuery()
+}
+
+// GenerateQuery generates a query by substituting the specified
+// bindVariables. The extras parameter specifies special parameters
+// that can perform custom encoding.
+func (pq *ParsedQuery) GenerateQuery(bindVariables map[string]*querypb.BindVariable, extras map[string]Encodable) ([]byte, error) {
+    if len(pq.bindLocations) == 0 {
+        return []byte(pq.Query), nil
+    }
+    buf := bytes.NewBuffer(make([]byte, 0, len(pq.Query)))
+    current := 0
+    for _, loc := range pq.bindLocations {
+        buf.WriteString(pq.Query[current:loc.offset])
+        name := pq.Query[loc.offset : loc.offset+loc.length]
+        if encodable, ok := extras[name[1:]]; ok {
+            encodable.EncodeSQL(buf)
+        } else {
+            supplied, _, err := FetchBindVar(name, bindVariables)
+            if err != nil {
+                return nil, err
+            }
+            EncodeValue(buf, supplied)
+        }
+        current = loc.offset + loc.length
+    }
+    buf.WriteString(pq.Query[current:])
+    return buf.Bytes(), nil
+}
+
+// EncodeValue encodes one bind variable value into the query.
+func EncodeValue(buf *bytes.Buffer, value *querypb.BindVariable) {
+    if value.Type != querypb.Type_TUPLE {
+        // Since we already check for TUPLE, we don't expect an error.
+        v, _ := sqltypes.BindVariableToValue(value)
+        v.EncodeSQL(buf)
+        return
+    }
+
+    // It's a TUPLE.
+    buf.WriteByte('(')
+    for i, bv := range value.Values {
+        if i != 0 {
+            buf.WriteString(", ")
+        }
+        sqltypes.ProtoToValue(bv).EncodeSQL(buf)
+    }
+    buf.WriteByte(')')
+}
+
+// FetchBindVar resolves the bind variable by fetching it from bindVariables.
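For example, a ":name" reference resolves to a scalar, while "::name" insists on a TUPLE value; my fragment below uses sqltypes.ValueBindVariable, the same constructor the normalizer above calls:

    bv := map[string]*querypb.BindVariable{
        "id": sqltypes.ValueBindVariable(sqltypes.NewInt64(7)),
    }
    val, isList, err := sqlparser.FetchBindVar(":id", bv)
    fmt.Println(val.Type, isList, err) // INT64 false <nil>

    _, _, err = sqlparser.FetchBindVar("::id", bv)
    fmt.Println(err) // unexpected list arg type (INT64) for key id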
+func FetchBindVar(name string, bindVariables map[string]*querypb.BindVariable) (val *querypb.BindVariable, isList bool, err error) { + name = name[1:] + if name[0] == ':' { + name = name[1:] + isList = true + } + supplied, ok := bindVariables[name] + if !ok { + return nil, false, fmt.Errorf("missing bind var %s", name) + } + + if isList { + if supplied.Type != querypb.Type_TUPLE { + return nil, false, fmt.Errorf("unexpected list arg type (%v) for key %s", supplied.Type, name) + } + if len(supplied.Values) == 0 { + return nil, false, fmt.Errorf("empty list supplied for %s", name) + } + return supplied, true, nil + } + + if supplied.Type == querypb.Type_TUPLE { + return nil, false, fmt.Errorf("unexpected arg type (TUPLE) for non-list key %s", name) + } + + return supplied, false, nil +} diff --git a/vendor/github.com/xwb1989/sqlparser/redact_query.go b/vendor/github.com/xwb1989/sqlparser/redact_query.go new file mode 100644 index 000000000..a50b9bcd3 --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/redact_query.go @@ -0,0 +1,19 @@ +package sqlparser + +import querypb "github.com/xwb1989/sqlparser/dependency/querypb" + +// RedactSQLQuery returns a sql string with the params stripped out for display +func RedactSQLQuery(sql string) (string, error) { + bv := map[string]*querypb.BindVariable{} + sqlStripped, comments := SplitMarginComments(sql) + + stmt, err := Parse(sqlStripped) + if err != nil { + return "", err + } + + prefix := "redacted" + Normalize(stmt, bv, prefix) + + return comments.Leading + String(stmt) + comments.Trailing, nil +} diff --git a/vendor/github.com/xwb1989/sqlparser/sql.go b/vendor/github.com/xwb1989/sqlparser/sql.go new file mode 100644 index 000000000..4c8f2657f --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/sql.go @@ -0,0 +1,6136 @@ +//line sql.y:18 +package sqlparser + +import __yyfmt__ "fmt" + +//line sql.y:18 +func setParseTree(yylex interface{}, stmt Statement) { + yylex.(*Tokenizer).ParseTree = stmt +} + +func setAllowComments(yylex interface{}, allow bool) { + yylex.(*Tokenizer).AllowComments = allow +} + +func setDDL(yylex interface{}, ddl *DDL) { + yylex.(*Tokenizer).partialDDL = ddl +} + +func incNesting(yylex interface{}) bool { + yylex.(*Tokenizer).nesting++ + if yylex.(*Tokenizer).nesting == 200 { + return true + } + return false +} + +func decNesting(yylex interface{}) { + yylex.(*Tokenizer).nesting-- +} + +// forceEOF forces the lexer to end prematurely. Not all SQL statements +// are supported by the Parser, thus calling forceEOF will make the lexer +// return EOF early. 
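Stepping back to redact_query.go above: RedactSQLQuery simply runs the normalizer with a fixed "redacted" prefix, so literals disappear from the displayed string. A usage sketch of mine:

    out, err := sqlparser.RedactSQLQuery("select a from t where userID = 42")
    if err != nil {
        panic(err)
    }
    fmt.Println(out) // select a from t where userID = :redacted1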
+func forceEOF(yylex interface{}) { + yylex.(*Tokenizer).ForceEOF = true +} + +//line sql.y:53 +type yySymType struct { + yys int + empty struct{} + statement Statement + selStmt SelectStatement + ddl *DDL + ins *Insert + byt byte + bytes []byte + bytes2 [][]byte + str string + strs []string + selectExprs SelectExprs + selectExpr SelectExpr + columns Columns + partitions Partitions + colName *ColName + tableExprs TableExprs + tableExpr TableExpr + joinCondition JoinCondition + tableName TableName + tableNames TableNames + indexHints *IndexHints + expr Expr + exprs Exprs + boolVal BoolVal + colTuple ColTuple + values Values + valTuple ValTuple + subquery *Subquery + whens []*When + when *When + orderBy OrderBy + order *Order + limit *Limit + updateExprs UpdateExprs + setExprs SetExprs + updateExpr *UpdateExpr + setExpr *SetExpr + colIdent ColIdent + tableIdent TableIdent + convertType *ConvertType + aliasedTableName *AliasedTableExpr + TableSpec *TableSpec + columnType ColumnType + colKeyOpt ColumnKeyOption + optVal *SQLVal + LengthScaleOption LengthScaleOption + columnDefinition *ColumnDefinition + indexDefinition *IndexDefinition + indexInfo *IndexInfo + indexOption *IndexOption + indexOptions []*IndexOption + indexColumn *IndexColumn + indexColumns []*IndexColumn + partDefs []*PartitionDefinition + partDef *PartitionDefinition + partSpec *PartitionSpec + vindexParam VindexParam + vindexParams []VindexParam + showFilter *ShowFilter +} + +const LEX_ERROR = 57346 +const UNION = 57347 +const SELECT = 57348 +const STREAM = 57349 +const INSERT = 57350 +const UPDATE = 57351 +const DELETE = 57352 +const FROM = 57353 +const WHERE = 57354 +const GROUP = 57355 +const HAVING = 57356 +const ORDER = 57357 +const BY = 57358 +const LIMIT = 57359 +const OFFSET = 57360 +const FOR = 57361 +const ALL = 57362 +const DISTINCT = 57363 +const AS = 57364 +const EXISTS = 57365 +const ASC = 57366 +const DESC = 57367 +const INTO = 57368 +const DUPLICATE = 57369 +const KEY = 57370 +const DEFAULT = 57371 +const SET = 57372 +const LOCK = 57373 +const KEYS = 57374 +const VALUES = 57375 +const LAST_INSERT_ID = 57376 +const NEXT = 57377 +const VALUE = 57378 +const SHARE = 57379 +const MODE = 57380 +const SQL_NO_CACHE = 57381 +const SQL_CACHE = 57382 +const JOIN = 57383 +const STRAIGHT_JOIN = 57384 +const LEFT = 57385 +const RIGHT = 57386 +const INNER = 57387 +const OUTER = 57388 +const CROSS = 57389 +const NATURAL = 57390 +const USE = 57391 +const FORCE = 57392 +const ON = 57393 +const USING = 57394 +const ID = 57395 +const HEX = 57396 +const STRING = 57397 +const INTEGRAL = 57398 +const FLOAT = 57399 +const HEXNUM = 57400 +const VALUE_ARG = 57401 +const LIST_ARG = 57402 +const COMMENT = 57403 +const COMMENT_KEYWORD = 57404 +const BIT_LITERAL = 57405 +const NULL = 57406 +const TRUE = 57407 +const FALSE = 57408 +const OR = 57409 +const AND = 57410 +const NOT = 57411 +const BETWEEN = 57412 +const CASE = 57413 +const WHEN = 57414 +const THEN = 57415 +const ELSE = 57416 +const END = 57417 +const LE = 57418 +const GE = 57419 +const NE = 57420 +const NULL_SAFE_EQUAL = 57421 +const IS = 57422 +const LIKE = 57423 +const REGEXP = 57424 +const IN = 57425 +const SHIFT_LEFT = 57426 +const SHIFT_RIGHT = 57427 +const DIV = 57428 +const MOD = 57429 +const UNARY = 57430 +const COLLATE = 57431 +const BINARY = 57432 +const UNDERSCORE_BINARY = 57433 +const INTERVAL = 57434 +const JSON_EXTRACT_OP = 57435 +const JSON_UNQUOTE_EXTRACT_OP = 57436 +const CREATE = 57437 +const ALTER = 57438 +const DROP = 57439 +const RENAME = 57440 +const ANALYZE 
= 57441 +const ADD = 57442 +const SCHEMA = 57443 +const TABLE = 57444 +const INDEX = 57445 +const VIEW = 57446 +const TO = 57447 +const IGNORE = 57448 +const IF = 57449 +const UNIQUE = 57450 +const PRIMARY = 57451 +const COLUMN = 57452 +const CONSTRAINT = 57453 +const SPATIAL = 57454 +const FULLTEXT = 57455 +const FOREIGN = 57456 +const KEY_BLOCK_SIZE = 57457 +const SHOW = 57458 +const DESCRIBE = 57459 +const EXPLAIN = 57460 +const DATE = 57461 +const ESCAPE = 57462 +const REPAIR = 57463 +const OPTIMIZE = 57464 +const TRUNCATE = 57465 +const MAXVALUE = 57466 +const PARTITION = 57467 +const REORGANIZE = 57468 +const LESS = 57469 +const THAN = 57470 +const PROCEDURE = 57471 +const TRIGGER = 57472 +const VINDEX = 57473 +const VINDEXES = 57474 +const STATUS = 57475 +const VARIABLES = 57476 +const BEGIN = 57477 +const START = 57478 +const TRANSACTION = 57479 +const COMMIT = 57480 +const ROLLBACK = 57481 +const BIT = 57482 +const TINYINT = 57483 +const SMALLINT = 57484 +const MEDIUMINT = 57485 +const INT = 57486 +const INTEGER = 57487 +const BIGINT = 57488 +const INTNUM = 57489 +const REAL = 57490 +const DOUBLE = 57491 +const FLOAT_TYPE = 57492 +const DECIMAL = 57493 +const NUMERIC = 57494 +const TIME = 57495 +const TIMESTAMP = 57496 +const DATETIME = 57497 +const YEAR = 57498 +const CHAR = 57499 +const VARCHAR = 57500 +const BOOL = 57501 +const CHARACTER = 57502 +const VARBINARY = 57503 +const NCHAR = 57504 +const TEXT = 57505 +const TINYTEXT = 57506 +const MEDIUMTEXT = 57507 +const LONGTEXT = 57508 +const BLOB = 57509 +const TINYBLOB = 57510 +const MEDIUMBLOB = 57511 +const LONGBLOB = 57512 +const JSON = 57513 +const ENUM = 57514 +const GEOMETRY = 57515 +const POINT = 57516 +const LINESTRING = 57517 +const POLYGON = 57518 +const GEOMETRYCOLLECTION = 57519 +const MULTIPOINT = 57520 +const MULTILINESTRING = 57521 +const MULTIPOLYGON = 57522 +const NULLX = 57523 +const AUTO_INCREMENT = 57524 +const APPROXNUM = 57525 +const SIGNED = 57526 +const UNSIGNED = 57527 +const ZEROFILL = 57528 +const DATABASES = 57529 +const TABLES = 57530 +const VITESS_KEYSPACES = 57531 +const VITESS_SHARDS = 57532 +const VITESS_TABLETS = 57533 +const VSCHEMA_TABLES = 57534 +const EXTENDED = 57535 +const FULL = 57536 +const PROCESSLIST = 57537 +const NAMES = 57538 +const CHARSET = 57539 +const GLOBAL = 57540 +const SESSION = 57541 +const ISOLATION = 57542 +const LEVEL = 57543 +const READ = 57544 +const WRITE = 57545 +const ONLY = 57546 +const REPEATABLE = 57547 +const COMMITTED = 57548 +const UNCOMMITTED = 57549 +const SERIALIZABLE = 57550 +const CURRENT_TIMESTAMP = 57551 +const DATABASE = 57552 +const CURRENT_DATE = 57553 +const CURRENT_TIME = 57554 +const LOCALTIME = 57555 +const LOCALTIMESTAMP = 57556 +const UTC_DATE = 57557 +const UTC_TIME = 57558 +const UTC_TIMESTAMP = 57559 +const REPLACE = 57560 +const CONVERT = 57561 +const CAST = 57562 +const SUBSTR = 57563 +const SUBSTRING = 57564 +const GROUP_CONCAT = 57565 +const SEPARATOR = 57566 +const MATCH = 57567 +const AGAINST = 57568 +const BOOLEAN = 57569 +const LANGUAGE = 57570 +const WITH = 57571 +const QUERY = 57572 +const EXPANSION = 57573 +const UNUSED = 57574 + +var yyToknames = [...]string{ + "$end", + "error", + "$unk", + "LEX_ERROR", + "UNION", + "SELECT", + "STREAM", + "INSERT", + "UPDATE", + "DELETE", + "FROM", + "WHERE", + "GROUP", + "HAVING", + "ORDER", + "BY", + "LIMIT", + "OFFSET", + "FOR", + "ALL", + "DISTINCT", + "AS", + "EXISTS", + "ASC", + "DESC", + "INTO", + "DUPLICATE", + "KEY", + "DEFAULT", + "SET", + "LOCK", + "KEYS", + "VALUES", + 
"LAST_INSERT_ID", + "NEXT", + "VALUE", + "SHARE", + "MODE", + "SQL_NO_CACHE", + "SQL_CACHE", + "JOIN", + "STRAIGHT_JOIN", + "LEFT", + "RIGHT", + "INNER", + "OUTER", + "CROSS", + "NATURAL", + "USE", + "FORCE", + "ON", + "USING", + "'('", + "','", + "')'", + "ID", + "HEX", + "STRING", + "INTEGRAL", + "FLOAT", + "HEXNUM", + "VALUE_ARG", + "LIST_ARG", + "COMMENT", + "COMMENT_KEYWORD", + "BIT_LITERAL", + "NULL", + "TRUE", + "FALSE", + "OR", + "AND", + "NOT", + "'!'", + "BETWEEN", + "CASE", + "WHEN", + "THEN", + "ELSE", + "END", + "'='", + "'<'", + "'>'", + "LE", + "GE", + "NE", + "NULL_SAFE_EQUAL", + "IS", + "LIKE", + "REGEXP", + "IN", + "'|'", + "'&'", + "SHIFT_LEFT", + "SHIFT_RIGHT", + "'+'", + "'-'", + "'*'", + "'/'", + "DIV", + "'%'", + "MOD", + "'^'", + "'~'", + "UNARY", + "COLLATE", + "BINARY", + "UNDERSCORE_BINARY", + "INTERVAL", + "'.'", + "JSON_EXTRACT_OP", + "JSON_UNQUOTE_EXTRACT_OP", + "CREATE", + "ALTER", + "DROP", + "RENAME", + "ANALYZE", + "ADD", + "SCHEMA", + "TABLE", + "INDEX", + "VIEW", + "TO", + "IGNORE", + "IF", + "UNIQUE", + "PRIMARY", + "COLUMN", + "CONSTRAINT", + "SPATIAL", + "FULLTEXT", + "FOREIGN", + "KEY_BLOCK_SIZE", + "SHOW", + "DESCRIBE", + "EXPLAIN", + "DATE", + "ESCAPE", + "REPAIR", + "OPTIMIZE", + "TRUNCATE", + "MAXVALUE", + "PARTITION", + "REORGANIZE", + "LESS", + "THAN", + "PROCEDURE", + "TRIGGER", + "VINDEX", + "VINDEXES", + "STATUS", + "VARIABLES", + "BEGIN", + "START", + "TRANSACTION", + "COMMIT", + "ROLLBACK", + "BIT", + "TINYINT", + "SMALLINT", + "MEDIUMINT", + "INT", + "INTEGER", + "BIGINT", + "INTNUM", + "REAL", + "DOUBLE", + "FLOAT_TYPE", + "DECIMAL", + "NUMERIC", + "TIME", + "TIMESTAMP", + "DATETIME", + "YEAR", + "CHAR", + "VARCHAR", + "BOOL", + "CHARACTER", + "VARBINARY", + "NCHAR", + "TEXT", + "TINYTEXT", + "MEDIUMTEXT", + "LONGTEXT", + "BLOB", + "TINYBLOB", + "MEDIUMBLOB", + "LONGBLOB", + "JSON", + "ENUM", + "GEOMETRY", + "POINT", + "LINESTRING", + "POLYGON", + "GEOMETRYCOLLECTION", + "MULTIPOINT", + "MULTILINESTRING", + "MULTIPOLYGON", + "NULLX", + "AUTO_INCREMENT", + "APPROXNUM", + "SIGNED", + "UNSIGNED", + "ZEROFILL", + "DATABASES", + "TABLES", + "VITESS_KEYSPACES", + "VITESS_SHARDS", + "VITESS_TABLETS", + "VSCHEMA_TABLES", + "EXTENDED", + "FULL", + "PROCESSLIST", + "NAMES", + "CHARSET", + "GLOBAL", + "SESSION", + "ISOLATION", + "LEVEL", + "READ", + "WRITE", + "ONLY", + "REPEATABLE", + "COMMITTED", + "UNCOMMITTED", + "SERIALIZABLE", + "CURRENT_TIMESTAMP", + "DATABASE", + "CURRENT_DATE", + "CURRENT_TIME", + "LOCALTIME", + "LOCALTIMESTAMP", + "UTC_DATE", + "UTC_TIME", + "UTC_TIMESTAMP", + "REPLACE", + "CONVERT", + "CAST", + "SUBSTR", + "SUBSTRING", + "GROUP_CONCAT", + "SEPARATOR", + "MATCH", + "AGAINST", + "BOOLEAN", + "LANGUAGE", + "WITH", + "QUERY", + "EXPANSION", + "UNUSED", + "';'", +} +var yyStatenames = [...]string{} + +const yyEofCode = 1 +const yyErrCode = 2 +const yyInitialStackSize = 16 + +//line yacctab:1 +var yyExca = [...]int{ + -1, 1, + 1, -1, + -2, 0, + -1, 3, + 5, 27, + -2, 4, + -1, 36, + 150, 263, + 151, 263, + -2, 253, + -1, 238, + 109, 587, + -2, 583, + -1, 239, + 109, 588, + -2, 584, + -1, 308, + 80, 746, + -2, 58, + -1, 309, + 80, 707, + -2, 59, + -1, 314, + 80, 691, + -2, 549, + -1, 316, + 80, 728, + -2, 551, + -1, 575, + 52, 41, + 54, 41, + -2, 43, + -1, 708, + 109, 590, + -2, 586, + -1, 912, + 5, 28, + -2, 395, + -1, 937, + 5, 27, + -2, 524, + -1, 1161, + 5, 28, + -2, 525, + -1, 1205, + 5, 27, + -2, 527, + -1, 1267, + 5, 28, + -2, 528, +} + +const yyPrivate = 57344 + +const yyLast = 10949 + +var yyAct = [...]int{ + + 269, 47, 
770, 854, 1258, 522, 646, 1069, 1216, 1097, + 810, 1070, 268, 521, 3, 243, 1167, 834, 998, 788, + 806, 217, 569, 1066, 809, 53, 848, 771, 956, 1043, + 743, 567, 313, 904, 989, 945, 211, 410, 585, 710, + 455, 940, 733, 1001, 820, 740, 461, 584, 47, 844, + 307, 556, 571, 767, 294, 759, 222, 467, 475, 886, + 241, 216, 299, 226, 52, 304, 1287, 1277, 1285, 1265, + 302, 1283, 855, 442, 1276, 1061, 1155, 414, 435, 1225, + 212, 213, 214, 215, 295, 871, 1103, 1104, 1105, 1091, + 230, 1092, 1093, 293, 1108, 1106, 586, 1264, 587, 870, + 181, 177, 178, 179, 57, 536, 1240, 488, 487, 497, + 498, 490, 491, 492, 493, 494, 495, 496, 489, 964, + 801, 499, 963, 802, 803, 965, 875, 245, 450, 59, + 60, 61, 62, 63, 980, 869, 827, 310, 675, 1179, + 423, 437, 1194, 439, 835, 676, 1144, 1142, 210, 1284, + 232, 446, 447, 1282, 1259, 1022, 768, 424, 417, 1217, + 1044, 1223, 174, 175, 175, 822, 654, 239, 436, 438, + 420, 645, 1219, 822, 789, 791, 955, 954, 953, 412, + 189, 176, 1245, 866, 863, 864, 1164, 862, 1030, 298, + 1046, 511, 512, 974, 441, 441, 441, 441, 76, 441, + 920, 1019, 186, 898, 682, 186, 441, 1021, 479, 180, + 430, 1112, 873, 876, 489, 807, 822, 499, 499, 881, + 679, 1026, 1048, 47, 1052, 742, 1047, 472, 1045, 186, + 186, 76, 1009, 1050, 474, 186, 464, 76, 508, 1218, + 1250, 510, 1049, 474, 411, 1122, 463, 868, 790, 434, + 473, 472, 943, 588, 1107, 1051, 1053, 821, 1224, 1222, + 1007, 1113, 1241, 835, 828, 821, 1063, 474, 520, 867, + 524, 525, 526, 527, 528, 529, 530, 531, 532, 760, + 535, 537, 537, 537, 537, 537, 537, 537, 537, 545, + 546, 547, 548, 1263, 760, 649, 927, 1025, 882, 1020, + 568, 1018, 685, 686, 1253, 978, 872, 469, 821, 416, + 1009, 717, 236, 819, 817, 1269, 1185, 818, 1184, 874, + 426, 427, 428, 453, 1008, 715, 716, 714, 465, 1013, + 1010, 1003, 1004, 1011, 1006, 1005, 993, 50, 1007, 992, + 824, 916, 186, 915, 186, 825, 1012, 713, 473, 472, + 186, 981, 1015, 700, 702, 703, 917, 186, 701, 473, + 472, 76, 76, 76, 76, 474, 76, 173, 509, 458, + 462, 582, 576, 76, 1270, 1251, 474, 473, 472, 310, + 1201, 418, 419, 734, 1065, 735, 480, 538, 539, 540, + 541, 542, 543, 544, 474, 895, 896, 897, 1182, 1130, + 76, 990, 1008, 1248, 473, 472, 1100, 1013, 1010, 1003, + 1004, 1011, 1006, 1005, 441, 1273, 454, 1209, 1256, 454, + 523, 474, 441, 1099, 1012, 1209, 454, 298, 292, 534, + 1002, 1209, 1210, 441, 441, 441, 441, 441, 441, 441, + 441, 1176, 1175, 1088, 454, 1163, 454, 441, 441, 487, + 497, 498, 490, 491, 492, 493, 494, 495, 496, 489, + 186, 1229, 499, 663, 1119, 1118, 267, 186, 186, 186, + 1115, 1116, 1228, 76, 1115, 1114, 1151, 454, 1109, 76, + 910, 454, 50, 975, 966, 687, 857, 553, 454, 941, + 661, 745, 454, 21, 736, 711, 660, 74, 497, 498, + 490, 491, 492, 493, 494, 495, 496, 489, 659, 650, + 499, 47, 708, 488, 487, 497, 498, 490, 491, 492, + 493, 494, 495, 496, 489, 524, 648, 499, 643, 689, + 312, 595, 594, 942, 432, 704, 415, 425, 747, 706, + 411, 1067, 579, 681, 941, 752, 755, 54, 745, 221, + 23, 761, 1159, 707, 299, 299, 299, 299, 299, 513, + 514, 515, 516, 517, 518, 519, 23, 553, 772, 568, + 1121, 792, 737, 738, 935, 553, 942, 936, 299, 680, + 1117, 76, 747, 580, 967, 578, 910, 186, 186, 76, + 757, 186, 764, 1204, 186, 473, 472, 50, 186, 23, + 76, 76, 76, 76, 76, 76, 76, 76, 773, 796, + 712, 776, 474, 50, 76, 76, 785, 552, 941, 186, + 795, 1033, 578, 697, 698, 793, 836, 837, 838, 794, + 922, 799, 798, 814, 76, 800, 910, 919, 186, 774, + 775, 553, 777, 581, 76, 683, 50, 1189, 829, 441, + 849, 441, 1082, 223, 970, 310, 946, 947, 647, 441, + 312, 312, 312, 312, 910, 312, 845, 840, 811, 
850, + 839, 65, 312, 921, 852, 523, 1102, 952, 750, 751, + 918, 298, 298, 298, 298, 298, 1067, 76, 994, 846, + 847, 558, 561, 562, 563, 559, 298, 560, 564, 477, + 50, 946, 947, 949, 657, 298, 451, 695, 899, 492, + 493, 494, 495, 496, 489, 708, 951, 499, 186, 779, + 778, 186, 186, 186, 186, 186, 782, 711, 888, 780, + 887, 783, 883, 186, 781, 784, 186, 562, 563, 805, + 186, 227, 228, 1281, 1275, 186, 186, 1029, 1280, 76, + 468, 748, 749, 893, 900, 892, 707, 756, 456, 1157, + 985, 593, 76, 433, 466, 977, 1255, 1190, 938, 939, + 457, 763, 312, 765, 766, 1254, 1202, 971, 590, 859, + 656, 937, 258, 257, 260, 261, 262, 263, 688, 566, + 468, 259, 264, 218, 709, 1234, 299, 718, 719, 720, + 721, 722, 723, 724, 725, 726, 727, 728, 729, 730, + 731, 732, 950, 186, 926, 219, 76, 54, 76, 1233, + 968, 891, 186, 224, 225, 186, 76, 959, 1192, 890, + 942, 884, 885, 470, 462, 1242, 958, 1180, 960, 961, + 678, 56, 712, 58, 577, 744, 746, 51, 982, 983, + 1, 856, 997, 865, 441, 984, 1257, 986, 987, 988, + 1215, 762, 1096, 972, 973, 816, 808, 409, 830, 831, + 832, 833, 64, 1249, 815, 991, 1221, 1178, 823, 441, + 312, 979, 826, 811, 841, 842, 843, 1101, 312, 1252, + 976, 787, 600, 598, 599, 597, 911, 602, 1000, 312, + 312, 312, 312, 312, 312, 312, 312, 1014, 601, 596, + 197, 928, 305, 312, 312, 558, 561, 562, 563, 559, + 894, 560, 564, 298, 565, 589, 851, 471, 66, 999, + 1017, 1016, 861, 691, 1024, 1037, 674, 1072, 1068, 47, + 880, 1042, 449, 477, 199, 1036, 312, 1055, 507, 1071, + 1062, 772, 1073, 708, 1084, 1085, 1086, 772, 889, 1054, + 76, 962, 311, 186, 1074, 684, 1077, 909, 1076, 1078, + 460, 1232, 1035, 1191, 925, 533, 758, 76, 244, 699, + 1095, 256, 253, 924, 1089, 255, 739, 254, 690, 934, + 481, 242, 1094, 234, 1058, 297, 753, 753, 549, 557, + 555, 1090, 753, 1110, 1111, 490, 491, 492, 493, 494, + 495, 496, 489, 554, 948, 499, 944, 296, 1032, 753, + 76, 76, 1154, 76, 1239, 694, 25, 55, 901, 902, + 903, 229, 19, 18, 299, 17, 20, 16, 1123, 15, + 14, 811, 29, 811, 13, 12, 76, 11, 312, 186, + 186, 1125, 10, 9, 1128, 8, 7, 186, 6, 5, + 907, 312, 1153, 4, 908, 1133, 76, 220, 22, 2, + 0, 912, 913, 914, 1132, 1140, 0, 0, 1064, 0, + 923, 0, 0, 0, 0, 929, 0, 930, 931, 932, + 933, 0, 1158, 1079, 1080, 0, 0, 1081, 0, 1166, + 1083, 1169, 1170, 1171, 1035, 968, 76, 76, 0, 0, + 1172, 1174, 0, 0, 0, 312, 0, 312, 0, 0, + 0, 441, 0, 0, 0, 312, 0, 0, 0, 0, + 0, 0, 1181, 1188, 1183, 76, 0, 76, 76, 0, + 1187, 0, 0, 0, 0, 0, 0, 0, 0, 312, + 0, 0, 0, 0, 0, 0, 1193, 0, 0, 0, + 1072, 298, 186, 1206, 0, 0, 0, 0, 811, 0, + 76, 1203, 1071, 0, 0, 0, 1205, 1131, 0, 0, + 0, 0, 0, 76, 186, 0, 1220, 0, 0, 0, + 76, 1231, 1214, 0, 0, 999, 811, 0, 76, 0, + 0, 186, 1226, 0, 1227, 1072, 1230, 47, 0, 0, + 0, 0, 1243, 0, 0, 0, 1156, 1071, 1039, 1040, + 1244, 0, 0, 523, 1247, 0, 0, 0, 0, 0, + 0, 1056, 1057, 1041, 1059, 1060, 0, 0, 0, 459, + 1261, 0, 0, 0, 0, 1266, 0, 440, 0, 0, + 76, 0, 76, 76, 76, 186, 76, 0, 772, 957, + 0, 0, 76, 1271, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 184, 1278, 312, 209, 1279, 0, + 1087, 0, 0, 0, 0, 1286, 0, 0, 76, 76, + 76, 0, 0, 300, 0, 0, 0, 0, 0, 233, + 0, 184, 184, 0, 0, 0, 0, 184, 0, 1137, + 1138, 0, 1139, 0, 0, 1141, 0, 1143, 0, 995, + 312, 0, 312, 0, 0, 0, 0, 0, 183, 0, + 0, 76, 76, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 76, 312, 0, 0, 0, 0, + 0, 1135, 0, 0, 0, 0, 303, 76, 0, 0, + 1134, 413, 0, 1177, 0, 312, 0, 1136, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 76, 1145, 1146, + 1147, 0, 0, 1150, 1260, 523, 0, 312, 0, 0, + 0, 0, 0, 0, 0, 0, 1160, 1161, 1162, 195, + 1165, 0, 753, 0, 0, 1075, 957, 0, 753, 0, + 0, 0, 0, 76, 184, 0, 184, 0, 0, 
0, + 0, 0, 184, 205, 0, 0, 0, 76, 0, 184, + 0, 0, 0, 0, 312, 0, 312, 1098, 0, 0, + 0, 0, 443, 444, 445, 0, 448, 0, 0, 0, + 0, 0, 0, 452, 0, 0, 0, 0, 0, 0, + 0, 1195, 1196, 0, 1197, 1198, 1199, 0, 421, 1124, + 422, 0, 0, 190, 0, 0, 429, 0, 1200, 192, + 0, 0, 1126, 431, 0, 0, 198, 194, 0, 1129, + 0, 0, 0, 1211, 1212, 1213, 0, 312, 0, 0, + 488, 487, 497, 498, 490, 491, 492, 493, 494, 495, + 496, 489, 0, 196, 499, 0, 200, 0, 0, 1235, + 1236, 1237, 1238, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 184, 0, 0, 0, 0, 0, 0, 184, + 573, 184, 0, 0, 191, 0, 905, 0, 0, 1168, + 0, 1168, 1168, 1168, 0, 1173, 0, 0, 0, 0, + 0, 312, 0, 1262, 454, 0, 0, 0, 1267, 0, + 0, 193, 1152, 201, 202, 203, 204, 208, 1148, 454, + 0, 1272, 207, 206, 0, 0, 551, 312, 312, 312, + 0, 0, 0, 0, 0, 575, 0, 0, 0, 1288, + 488, 487, 497, 498, 490, 491, 492, 493, 494, 495, + 496, 489, 1290, 1291, 499, 488, 487, 497, 498, 490, + 491, 492, 493, 494, 495, 496, 489, 0, 0, 499, + 1207, 1208, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 1098, 488, 487, 497, 498, 490, 491, + 492, 493, 494, 495, 496, 489, 1168, 0, 499, 184, + 184, 644, 0, 184, 0, 0, 184, 0, 0, 653, + 662, 0, 0, 0, 0, 0, 1246, 1038, 0, 0, + 664, 665, 666, 667, 668, 669, 670, 671, 0, 0, + 0, 184, 0, 0, 672, 673, 0, 488, 487, 497, + 498, 490, 491, 492, 493, 494, 495, 496, 489, 753, + 184, 499, 1268, 651, 652, 0, 0, 655, 0, 662, + 658, 0, 0, 0, 0, 0, 1274, 0, 0, 23, + 24, 48, 26, 27, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 677, 0, 1149, 42, 0, + 0, 0, 0, 28, 0, 0, 0, 0, 0, 0, + 233, 0, 0, 0, 696, 233, 233, 0, 0, 754, + 754, 233, 37, 0, 0, 754, 50, 0, 0, 0, + 0, 0, 0, 0, 0, 233, 233, 233, 233, 0, + 184, 0, 754, 184, 184, 184, 184, 184, 0, 0, + 0, 0, 0, 0, 0, 786, 0, 0, 184, 0, + 0, 0, 573, 0, 0, 0, 0, 184, 184, 488, + 487, 497, 498, 490, 491, 492, 493, 494, 495, 496, + 489, 0, 0, 499, 0, 30, 31, 33, 32, 35, + 0, 0, 0, 0, 769, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 36, 43, 44, 0, + 0, 45, 46, 34, 0, 0, 0, 0, 0, 0, + 0, 0, 797, 0, 0, 38, 39, 0, 40, 41, + 0, 0, 0, 0, 0, 184, 617, 0, 0, 0, + 0, 483, 0, 486, 184, 0, 858, 184, 860, 500, + 501, 502, 503, 504, 505, 506, 879, 484, 485, 482, + 488, 487, 497, 498, 490, 491, 492, 493, 494, 495, + 496, 489, 662, 0, 499, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 233, 0, 0, 0, 0, 853, + 0, 0, 0, 906, 0, 0, 0, 0, 877, 0, + 0, 878, 0, 0, 0, 0, 0, 0, 49, 0, + 0, 0, 605, 488, 487, 497, 498, 490, 491, 492, + 493, 494, 495, 496, 489, 0, 0, 499, 0, 0, + 0, 233, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 618, 0, 0, 0, 0, 233, 488, 487, + 497, 498, 490, 491, 492, 493, 494, 495, 496, 489, + 0, 0, 499, 631, 632, 633, 634, 635, 636, 637, + 0, 638, 639, 640, 641, 642, 619, 620, 621, 622, + 603, 604, 0, 0, 606, 184, 607, 608, 609, 610, + 611, 612, 613, 614, 615, 616, 623, 624, 625, 626, + 627, 628, 629, 630, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 996, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 1027, 1028, 0, 0, 0, 1023, 0, 0, 184, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 233, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 233, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 662, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 754, 0, 0, 0, 0, + 0, 754, 0, 1031, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 184, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 184, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 184, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1120, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 1127, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 573, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 1186, 0, + 0, 398, 388, 0, 360, 400, 338, 352, 408, 353, + 354, 381, 324, 368, 125, 350, 0, 341, 319, 347, + 320, 339, 362, 93, 365, 337, 390, 371, 107, 406, + 109, 376, 0, 142, 118, 0, 0, 364, 392, 366, + 386, 359, 382, 329, 375, 401, 351, 379, 402, 0, + 0, 0, 75, 0, 812, 813, 0, 0, 0, 0, + 0, 86, 0, 378, 397, 349, 380, 318, 377, 0, + 322, 325, 407, 395, 344, 345, 969, 0, 0, 0, + 0, 0, 0, 363, 367, 383, 357, 0, 0, 0, + 0, 0, 0, 0, 0, 342, 0, 374, 0, 0, + 0, 326, 323, 0, 361, 0, 0, 0, 328, 0, + 343, 384, 754, 317, 387, 393, 358, 187, 396, 356, + 355, 399, 131, 0, 0, 145, 98, 97, 106, 391, + 340, 348, 89, 346, 137, 127, 157, 373, 128, 136, + 110, 149, 132, 156, 188, 164, 147, 163, 78, 146, + 155, 87, 139, 80, 153, 144, 116, 102, 103, 79, + 0, 135, 92, 96, 91, 124, 150, 151, 90, 171, + 83, 162, 82, 84, 161, 123, 148, 154, 117, 114, + 81, 152, 115, 113, 105, 94, 99, 129, 112, 130, + 100, 120, 119, 121, 0, 321, 0, 143, 159, 172, + 336, 394, 165, 166, 167, 168, 0, 0, 0, 122, + 85, 101, 140, 104, 111, 134, 170, 126, 138, 88, + 158, 141, 332, 335, 330, 331, 369, 370, 403, 404, + 405, 385, 327, 0, 333, 334, 0, 389, 372, 77, + 0, 108, 169, 133, 95, 160, 398, 388, 0, 360, + 400, 338, 352, 408, 353, 354, 381, 324, 368, 125, + 350, 0, 341, 319, 347, 320, 339, 362, 93, 365, + 337, 390, 371, 107, 406, 109, 376, 0, 142, 118, + 0, 0, 364, 392, 366, 386, 359, 382, 329, 375, + 401, 351, 379, 402, 0, 0, 0, 75, 0, 812, + 813, 0, 0, 0, 0, 0, 86, 0, 378, 397, + 349, 380, 318, 377, 0, 322, 325, 407, 395, 344, + 345, 0, 0, 0, 0, 0, 0, 0, 363, 367, + 383, 357, 0, 0, 0, 0, 0, 0, 0, 0, + 342, 0, 374, 0, 0, 0, 326, 323, 0, 361, + 0, 0, 0, 328, 0, 343, 384, 0, 317, 387, + 393, 358, 187, 396, 356, 355, 399, 131, 0, 0, + 145, 98, 97, 106, 391, 340, 348, 89, 346, 137, + 127, 157, 373, 128, 136, 110, 149, 132, 156, 188, + 164, 147, 163, 78, 146, 155, 87, 139, 80, 153, + 144, 116, 102, 103, 79, 0, 135, 92, 96, 91, + 124, 150, 151, 90, 171, 83, 162, 82, 84, 161, + 123, 148, 154, 117, 114, 81, 152, 115, 113, 105, + 94, 99, 129, 112, 130, 100, 120, 119, 121, 0, + 321, 0, 143, 159, 172, 336, 394, 165, 166, 167, + 168, 0, 0, 0, 122, 85, 101, 140, 104, 111, + 134, 170, 126, 138, 88, 158, 141, 332, 335, 330, + 331, 369, 370, 403, 404, 405, 385, 327, 0, 333, + 334, 0, 389, 372, 77, 0, 108, 169, 133, 95, + 160, 398, 388, 0, 360, 400, 338, 352, 408, 353, + 354, 381, 324, 368, 125, 350, 0, 341, 319, 347, + 320, 339, 362, 93, 365, 337, 390, 371, 107, 406, + 109, 376, 0, 142, 118, 0, 0, 364, 392, 366, + 386, 359, 382, 329, 375, 401, 351, 379, 402, 50, + 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, + 0, 86, 0, 378, 397, 349, 380, 318, 377, 0, + 322, 325, 407, 395, 344, 345, 0, 0, 0, 0, + 0, 0, 0, 363, 367, 383, 357, 0, 0, 0, + 0, 0, 0, 0, 0, 342, 0, 374, 0, 0, + 0, 326, 323, 0, 361, 0, 0, 0, 328, 0, + 343, 384, 0, 317, 387, 393, 358, 187, 396, 356, + 355, 399, 131, 0, 0, 145, 98, 97, 106, 391, + 340, 348, 89, 346, 137, 127, 157, 373, 128, 136, + 110, 149, 132, 156, 188, 164, 147, 163, 78, 146, + 155, 87, 139, 80, 153, 144, 116, 102, 103, 79, + 0, 135, 92, 96, 91, 124, 150, 151, 90, 171, + 83, 162, 82, 84, 161, 123, 148, 154, 117, 114, + 81, 152, 115, 113, 105, 94, 99, 129, 112, 130, + 100, 120, 119, 121, 0, 321, 0, 143, 159, 172, + 336, 394, 165, 166, 167, 168, 0, 0, 0, 122, + 85, 101, 140, 104, 111, 134, 170, 126, 138, 88, + 158, 
141, 332, 335, 330, 331, 369, 370, 403, 404, + 405, 385, 327, 0, 333, 334, 0, 389, 372, 77, + 0, 108, 169, 133, 95, 160, 398, 388, 0, 360, + 400, 338, 352, 408, 353, 354, 381, 324, 368, 125, + 350, 0, 341, 319, 347, 320, 339, 362, 93, 365, + 337, 390, 371, 107, 406, 109, 376, 0, 142, 118, + 0, 0, 364, 392, 366, 386, 359, 382, 329, 375, + 401, 351, 379, 402, 0, 0, 0, 75, 0, 0, + 0, 0, 0, 0, 0, 0, 86, 0, 378, 397, + 349, 380, 318, 377, 0, 322, 325, 407, 395, 344, + 345, 0, 0, 0, 0, 0, 0, 0, 363, 367, + 383, 357, 0, 0, 0, 0, 0, 0, 1034, 0, + 342, 0, 374, 0, 0, 0, 326, 323, 0, 361, + 0, 0, 0, 328, 0, 343, 384, 0, 317, 387, + 393, 358, 187, 396, 356, 355, 399, 131, 0, 0, + 145, 98, 97, 106, 391, 340, 348, 89, 346, 137, + 127, 157, 373, 128, 136, 110, 149, 132, 156, 188, + 164, 147, 163, 78, 146, 155, 87, 139, 80, 153, + 144, 116, 102, 103, 79, 0, 135, 92, 96, 91, + 124, 150, 151, 90, 171, 83, 162, 82, 84, 161, + 123, 148, 154, 117, 114, 81, 152, 115, 113, 105, + 94, 99, 129, 112, 130, 100, 120, 119, 121, 0, + 321, 0, 143, 159, 172, 336, 394, 165, 166, 167, + 168, 0, 0, 0, 122, 85, 101, 140, 104, 111, + 134, 170, 126, 138, 88, 158, 141, 332, 335, 330, + 331, 369, 370, 403, 404, 405, 385, 327, 0, 333, + 334, 0, 389, 372, 77, 0, 108, 169, 133, 95, + 160, 398, 388, 0, 360, 400, 338, 352, 408, 353, + 354, 381, 324, 368, 125, 350, 0, 341, 319, 347, + 320, 339, 362, 93, 365, 337, 390, 371, 107, 406, + 109, 376, 0, 142, 118, 0, 0, 364, 392, 366, + 386, 359, 382, 329, 375, 401, 351, 379, 402, 0, + 0, 0, 238, 0, 0, 0, 0, 0, 0, 0, + 0, 86, 0, 378, 397, 349, 380, 318, 377, 0, + 322, 325, 407, 395, 344, 345, 0, 0, 0, 0, + 0, 0, 0, 363, 367, 383, 357, 0, 0, 0, + 0, 0, 0, 705, 0, 342, 0, 374, 0, 0, + 0, 326, 323, 0, 361, 0, 0, 0, 328, 0, + 343, 384, 0, 317, 387, 393, 358, 187, 396, 356, + 355, 399, 131, 0, 0, 145, 98, 97, 106, 391, + 340, 348, 89, 346, 137, 127, 157, 373, 128, 136, + 110, 149, 132, 156, 188, 164, 147, 163, 78, 146, + 155, 87, 139, 80, 153, 144, 116, 102, 103, 79, + 0, 135, 92, 96, 91, 124, 150, 151, 90, 171, + 83, 162, 82, 84, 161, 123, 148, 154, 117, 114, + 81, 152, 115, 113, 105, 94, 99, 129, 112, 130, + 100, 120, 119, 121, 0, 321, 0, 143, 159, 172, + 336, 394, 165, 166, 167, 168, 0, 0, 0, 122, + 85, 101, 140, 104, 111, 134, 170, 126, 138, 88, + 158, 141, 332, 335, 330, 331, 369, 370, 403, 404, + 405, 385, 327, 0, 333, 334, 0, 389, 372, 77, + 0, 108, 169, 133, 95, 160, 398, 388, 0, 360, + 400, 338, 352, 408, 353, 354, 381, 324, 368, 125, + 350, 0, 341, 319, 347, 320, 339, 362, 93, 365, + 337, 390, 371, 107, 406, 109, 376, 0, 142, 118, + 0, 0, 364, 392, 366, 386, 359, 382, 329, 375, + 401, 351, 379, 402, 0, 0, 0, 75, 0, 0, + 0, 0, 0, 0, 0, 0, 86, 0, 378, 397, + 349, 380, 318, 377, 0, 322, 325, 407, 395, 344, + 345, 0, 0, 0, 0, 0, 0, 0, 363, 367, + 383, 357, 0, 0, 0, 0, 0, 0, 0, 0, + 342, 0, 374, 0, 0, 0, 326, 323, 0, 361, + 0, 0, 0, 328, 0, 343, 384, 0, 317, 387, + 393, 358, 187, 396, 356, 355, 399, 131, 0, 0, + 145, 98, 97, 106, 391, 340, 348, 89, 346, 137, + 127, 157, 373, 128, 136, 110, 149, 132, 156, 188, + 164, 147, 163, 78, 146, 155, 87, 139, 80, 153, + 144, 116, 102, 103, 79, 0, 135, 92, 96, 91, + 124, 150, 151, 90, 171, 83, 162, 82, 84, 161, + 123, 148, 154, 117, 114, 81, 152, 115, 113, 105, + 94, 99, 129, 112, 130, 100, 120, 119, 121, 0, + 321, 0, 143, 159, 172, 336, 394, 165, 166, 167, + 168, 0, 0, 0, 122, 85, 101, 140, 104, 111, + 134, 170, 126, 138, 88, 158, 141, 332, 335, 330, + 331, 369, 370, 403, 404, 405, 385, 327, 0, 333, + 334, 0, 389, 372, 77, 0, 108, 169, 133, 95, + 160, 
398, 388, 0, 360, 400, 338, 352, 408, 353, + 354, 381, 324, 368, 125, 350, 0, 341, 319, 347, + 320, 339, 362, 93, 365, 337, 390, 371, 107, 406, + 109, 376, 0, 142, 118, 0, 0, 364, 392, 366, + 386, 359, 382, 329, 375, 401, 351, 379, 402, 0, + 0, 0, 238, 0, 0, 0, 0, 0, 0, 0, + 0, 86, 0, 378, 397, 349, 380, 318, 377, 0, + 322, 325, 407, 395, 344, 345, 0, 0, 0, 0, + 0, 0, 0, 363, 367, 383, 357, 0, 0, 0, + 0, 0, 0, 0, 0, 342, 0, 374, 0, 0, + 0, 326, 323, 0, 361, 0, 0, 0, 328, 0, + 343, 384, 0, 317, 387, 393, 358, 187, 396, 356, + 355, 399, 131, 0, 0, 145, 98, 97, 106, 391, + 340, 348, 89, 346, 137, 127, 157, 373, 128, 136, + 110, 149, 132, 156, 188, 164, 147, 163, 78, 146, + 155, 87, 139, 80, 153, 144, 116, 102, 103, 79, + 0, 135, 92, 96, 91, 124, 150, 151, 90, 171, + 83, 162, 82, 84, 161, 123, 148, 154, 117, 114, + 81, 152, 115, 113, 105, 94, 99, 129, 112, 130, + 100, 120, 119, 121, 0, 321, 0, 143, 159, 172, + 336, 394, 165, 166, 167, 168, 0, 0, 0, 122, + 85, 101, 140, 104, 111, 134, 170, 126, 138, 88, + 158, 141, 332, 335, 330, 331, 369, 370, 403, 404, + 405, 385, 327, 0, 333, 334, 0, 389, 372, 77, + 0, 108, 169, 133, 95, 160, 398, 388, 0, 360, + 400, 338, 352, 408, 353, 354, 381, 324, 368, 125, + 350, 0, 341, 319, 347, 320, 339, 362, 93, 365, + 337, 390, 371, 107, 406, 109, 376, 0, 142, 118, + 0, 0, 364, 392, 366, 386, 359, 382, 329, 375, + 401, 351, 379, 402, 0, 0, 0, 75, 0, 0, + 0, 0, 0, 0, 0, 0, 86, 0, 378, 397, + 349, 380, 318, 377, 0, 322, 325, 407, 395, 344, + 345, 0, 0, 0, 0, 0, 0, 0, 363, 367, + 383, 357, 0, 0, 0, 0, 0, 0, 0, 0, + 342, 0, 374, 0, 0, 0, 326, 323, 0, 361, + 0, 0, 0, 328, 0, 343, 384, 0, 317, 387, + 393, 358, 187, 396, 356, 355, 399, 131, 0, 0, + 145, 98, 97, 106, 391, 340, 348, 89, 346, 137, + 127, 157, 373, 128, 136, 110, 149, 132, 156, 188, + 164, 147, 163, 78, 146, 155, 87, 139, 80, 153, + 144, 116, 102, 103, 79, 0, 135, 92, 96, 91, + 124, 150, 151, 90, 171, 83, 162, 82, 315, 161, + 123, 148, 154, 117, 114, 81, 152, 115, 113, 105, + 94, 99, 129, 112, 130, 100, 120, 119, 121, 0, + 321, 0, 143, 159, 172, 336, 394, 165, 166, 167, + 168, 0, 0, 0, 316, 314, 101, 140, 104, 111, + 134, 170, 126, 138, 88, 158, 141, 332, 335, 330, + 331, 369, 370, 403, 404, 405, 385, 327, 0, 333, + 334, 0, 389, 372, 77, 0, 108, 169, 133, 95, + 160, 398, 388, 0, 360, 400, 338, 352, 408, 353, + 354, 381, 324, 368, 125, 350, 0, 341, 319, 347, + 320, 339, 362, 93, 365, 337, 390, 371, 107, 406, + 109, 376, 0, 142, 118, 0, 0, 364, 392, 366, + 386, 359, 382, 329, 375, 401, 351, 379, 402, 0, + 0, 0, 185, 0, 0, 0, 0, 0, 0, 0, + 0, 86, 0, 378, 397, 349, 380, 318, 377, 0, + 322, 325, 407, 395, 344, 345, 0, 0, 0, 0, + 0, 0, 0, 363, 367, 383, 357, 0, 0, 0, + 0, 0, 0, 0, 0, 342, 0, 374, 0, 0, + 0, 326, 323, 0, 361, 0, 0, 0, 328, 0, + 343, 384, 0, 317, 387, 393, 358, 187, 396, 356, + 355, 399, 131, 0, 0, 145, 98, 97, 106, 391, + 340, 348, 89, 346, 137, 127, 157, 373, 128, 136, + 110, 149, 132, 156, 188, 164, 147, 163, 78, 146, + 155, 87, 139, 80, 153, 144, 116, 102, 103, 79, + 0, 135, 92, 96, 91, 124, 150, 151, 90, 171, + 83, 162, 82, 84, 161, 123, 148, 154, 117, 114, + 81, 152, 115, 113, 105, 94, 99, 129, 112, 130, + 100, 120, 119, 121, 0, 321, 0, 143, 159, 172, + 336, 394, 165, 166, 167, 168, 0, 0, 0, 122, + 85, 101, 140, 104, 111, 134, 170, 126, 138, 88, + 158, 141, 332, 335, 330, 331, 369, 370, 403, 404, + 405, 385, 327, 0, 333, 334, 0, 389, 372, 77, + 0, 108, 169, 133, 95, 160, 398, 388, 0, 360, + 400, 338, 352, 408, 353, 354, 381, 324, 368, 125, + 350, 0, 341, 319, 347, 320, 339, 362, 93, 365, + 337, 
390, 371, 107, 406, 109, 376, 0, 142, 118, + 0, 0, 364, 392, 366, 386, 359, 382, 329, 375, + 401, 351, 379, 402, 0, 0, 0, 75, 0, 0, + 0, 0, 0, 0, 0, 0, 86, 0, 378, 397, + 349, 380, 318, 377, 0, 322, 325, 407, 395, 344, + 345, 0, 0, 0, 0, 0, 0, 0, 363, 367, + 383, 357, 0, 0, 0, 0, 0, 0, 0, 0, + 342, 0, 374, 0, 0, 0, 326, 323, 0, 361, + 0, 0, 0, 328, 0, 343, 384, 0, 317, 387, + 393, 358, 187, 396, 356, 355, 399, 131, 0, 0, + 145, 98, 97, 106, 391, 340, 348, 89, 346, 137, + 127, 157, 373, 128, 136, 110, 149, 132, 156, 188, + 164, 147, 163, 78, 146, 583, 87, 139, 80, 153, + 144, 116, 102, 103, 79, 0, 135, 92, 96, 91, + 124, 150, 151, 90, 171, 83, 162, 82, 315, 161, + 123, 148, 154, 117, 114, 81, 152, 115, 113, 105, + 94, 99, 129, 112, 130, 100, 120, 119, 121, 0, + 321, 0, 143, 159, 172, 336, 394, 165, 166, 167, + 168, 0, 0, 0, 316, 314, 101, 140, 104, 111, + 134, 170, 126, 138, 88, 158, 141, 332, 335, 330, + 331, 369, 370, 403, 404, 405, 385, 327, 0, 333, + 334, 0, 389, 372, 77, 0, 108, 169, 133, 95, + 160, 398, 388, 0, 360, 400, 338, 352, 408, 353, + 354, 381, 324, 368, 125, 350, 0, 341, 319, 347, + 320, 339, 362, 93, 365, 337, 390, 371, 107, 406, + 109, 376, 0, 142, 118, 0, 0, 364, 392, 366, + 386, 359, 382, 329, 375, 401, 351, 379, 402, 0, + 0, 0, 75, 0, 0, 0, 0, 0, 0, 0, + 0, 86, 0, 378, 397, 349, 380, 318, 377, 0, + 322, 325, 407, 395, 344, 345, 0, 0, 0, 0, + 0, 0, 0, 363, 367, 383, 357, 0, 0, 0, + 0, 0, 0, 0, 0, 342, 0, 374, 0, 0, + 0, 326, 323, 0, 361, 0, 0, 0, 328, 0, + 343, 384, 0, 317, 387, 393, 358, 187, 396, 356, + 355, 399, 131, 0, 0, 145, 98, 97, 106, 391, + 340, 348, 89, 346, 137, 127, 157, 373, 128, 136, + 110, 149, 132, 156, 188, 164, 147, 163, 78, 146, + 306, 87, 139, 80, 153, 144, 116, 102, 103, 79, + 0, 135, 92, 96, 91, 124, 150, 151, 90, 171, + 83, 162, 82, 315, 161, 123, 148, 154, 117, 114, + 81, 152, 115, 113, 105, 94, 99, 129, 112, 130, + 100, 120, 119, 121, 0, 321, 0, 143, 159, 172, + 336, 394, 165, 166, 167, 168, 0, 0, 0, 316, + 314, 309, 308, 104, 111, 134, 170, 126, 138, 88, + 158, 141, 332, 335, 330, 331, 369, 370, 403, 404, + 405, 385, 327, 0, 333, 334, 0, 389, 372, 77, + 0, 108, 169, 133, 95, 160, 125, 0, 0, 741, + 0, 240, 0, 0, 0, 93, 0, 237, 0, 0, + 107, 279, 109, 0, 0, 142, 118, 0, 0, 0, + 0, 270, 271, 0, 0, 0, 0, 0, 0, 0, + 0, 50, 0, 0, 238, 258, 257, 260, 261, 262, + 263, 0, 0, 86, 259, 264, 265, 266, 0, 0, + 235, 251, 0, 278, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 248, 249, 231, 0, 0, 0, 290, + 0, 250, 0, 0, 246, 247, 252, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 187, + 0, 0, 288, 0, 131, 0, 0, 145, 98, 97, + 106, 0, 0, 0, 89, 0, 137, 127, 157, 0, + 128, 136, 110, 149, 132, 156, 188, 164, 147, 163, + 78, 146, 155, 87, 139, 80, 153, 144, 116, 102, + 103, 79, 0, 135, 92, 96, 91, 124, 150, 151, + 90, 171, 83, 162, 82, 84, 161, 123, 148, 154, + 117, 114, 81, 152, 115, 113, 105, 94, 99, 129, + 112, 130, 100, 120, 119, 121, 0, 0, 0, 143, + 159, 172, 0, 0, 165, 166, 167, 168, 0, 0, + 0, 122, 85, 101, 140, 104, 111, 134, 170, 126, + 138, 88, 158, 141, 280, 289, 286, 287, 284, 285, + 283, 282, 281, 291, 272, 273, 274, 275, 277, 0, + 276, 77, 0, 108, 169, 133, 95, 160, 125, 0, + 0, 0, 0, 240, 0, 0, 0, 93, 0, 237, + 0, 0, 107, 279, 109, 0, 0, 142, 118, 0, + 0, 0, 0, 270, 271, 0, 0, 0, 0, 0, + 0, 0, 0, 50, 0, 454, 238, 258, 257, 260, + 261, 262, 263, 0, 0, 86, 259, 264, 265, 266, + 0, 0, 235, 251, 0, 278, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 248, 249, 0, 0, 0, + 0, 290, 0, 250, 0, 0, 246, 247, 252, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, + 0, 187, 0, 0, 288, 0, 131, 0, 0, 145, + 98, 97, 106, 0, 0, 0, 89, 0, 137, 127, + 157, 0, 128, 136, 110, 149, 132, 156, 188, 164, + 147, 163, 78, 146, 155, 87, 139, 80, 153, 144, + 116, 102, 103, 79, 0, 135, 92, 96, 91, 124, + 150, 151, 90, 171, 83, 162, 82, 84, 161, 123, + 148, 154, 117, 114, 81, 152, 115, 113, 105, 94, + 99, 129, 112, 130, 100, 120, 119, 121, 0, 0, + 0, 143, 159, 172, 0, 0, 165, 166, 167, 168, + 0, 0, 0, 122, 85, 101, 140, 104, 111, 134, + 170, 126, 138, 88, 158, 141, 280, 289, 286, 287, + 284, 285, 283, 282, 281, 291, 272, 273, 274, 275, + 277, 0, 276, 77, 0, 108, 169, 133, 95, 160, + 125, 0, 0, 0, 0, 240, 0, 0, 0, 93, + 0, 237, 0, 0, 107, 279, 109, 0, 0, 142, + 118, 0, 0, 0, 0, 270, 271, 0, 0, 0, + 0, 0, 0, 0, 0, 50, 0, 0, 238, 258, + 257, 260, 261, 262, 263, 0, 0, 86, 259, 264, + 265, 266, 0, 0, 235, 251, 0, 278, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 248, 249, 231, + 0, 0, 0, 290, 0, 250, 0, 0, 246, 247, + 252, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 187, 0, 0, 288, 0, 131, 0, + 0, 145, 98, 97, 106, 0, 0, 0, 89, 0, + 137, 127, 157, 0, 128, 136, 110, 149, 132, 156, + 188, 164, 147, 163, 78, 146, 155, 87, 139, 80, + 153, 144, 116, 102, 103, 79, 0, 135, 92, 96, + 91, 124, 150, 151, 90, 171, 83, 162, 82, 84, + 161, 123, 148, 154, 117, 114, 81, 152, 115, 113, + 105, 94, 99, 129, 112, 130, 100, 120, 119, 121, + 0, 0, 0, 143, 159, 172, 0, 0, 165, 166, + 167, 168, 0, 0, 0, 122, 85, 101, 140, 104, + 111, 134, 170, 126, 138, 88, 158, 141, 280, 289, + 286, 287, 284, 285, 283, 282, 281, 291, 272, 273, + 274, 275, 277, 0, 276, 77, 0, 108, 169, 133, + 95, 160, 125, 0, 0, 0, 0, 240, 0, 0, + 0, 93, 0, 237, 0, 0, 107, 279, 109, 0, + 0, 142, 118, 0, 0, 0, 0, 270, 271, 0, + 0, 0, 0, 0, 0, 804, 0, 50, 0, 0, + 238, 258, 257, 260, 261, 262, 263, 0, 0, 86, + 259, 264, 265, 266, 0, 0, 235, 251, 0, 278, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 248, + 249, 0, 0, 0, 0, 290, 0, 250, 0, 0, + 246, 247, 252, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 187, 0, 0, 288, 0, + 131, 0, 0, 145, 98, 97, 106, 0, 0, 0, + 89, 0, 137, 127, 157, 0, 128, 136, 110, 149, + 132, 156, 188, 164, 147, 163, 78, 146, 155, 87, + 139, 80, 153, 144, 116, 102, 103, 79, 0, 135, + 92, 96, 91, 124, 150, 151, 90, 171, 83, 162, + 82, 84, 161, 123, 148, 154, 117, 114, 81, 152, + 115, 113, 105, 94, 99, 129, 112, 130, 100, 120, + 119, 121, 0, 0, 0, 143, 159, 172, 0, 0, + 165, 166, 167, 168, 0, 0, 0, 122, 85, 101, + 140, 104, 111, 134, 170, 126, 138, 88, 158, 141, + 280, 289, 286, 287, 284, 285, 283, 282, 281, 291, + 272, 273, 274, 275, 277, 23, 276, 77, 0, 108, + 169, 133, 95, 160, 0, 0, 0, 125, 0, 0, + 0, 0, 240, 0, 0, 0, 93, 0, 237, 0, + 0, 107, 279, 109, 0, 0, 142, 118, 0, 0, + 0, 0, 270, 271, 0, 0, 0, 0, 0, 0, + 0, 0, 50, 0, 0, 238, 258, 257, 260, 261, + 262, 263, 0, 0, 86, 259, 264, 265, 266, 0, + 0, 235, 251, 0, 278, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 248, 249, 0, 0, 0, 0, + 290, 0, 250, 0, 0, 246, 247, 252, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 187, 0, 0, 288, 0, 131, 0, 0, 145, 98, + 97, 106, 0, 0, 0, 89, 0, 137, 127, 157, + 0, 128, 136, 110, 149, 132, 156, 188, 164, 147, + 163, 78, 146, 155, 87, 139, 80, 153, 144, 116, + 102, 103, 79, 0, 135, 92, 96, 91, 124, 150, + 151, 90, 171, 83, 162, 82, 84, 161, 123, 148, + 154, 117, 114, 81, 152, 115, 113, 105, 94, 99, + 129, 112, 130, 100, 120, 119, 121, 0, 0, 0, + 143, 159, 172, 0, 0, 165, 166, 167, 168, 0, + 0, 0, 122, 85, 101, 140, 104, 111, 134, 170, + 126, 138, 88, 158, 141, 280, 289, 286, 287, 284, + 285, 
283, 282, 281, 291, 272, 273, 274, 275, 277, + 0, 276, 77, 0, 108, 169, 133, 95, 160, 125, + 0, 0, 0, 0, 240, 0, 0, 0, 93, 0, + 237, 0, 0, 107, 279, 109, 0, 0, 142, 118, + 0, 0, 0, 0, 270, 271, 0, 0, 0, 0, + 0, 0, 0, 0, 50, 0, 0, 238, 258, 257, + 260, 261, 262, 263, 0, 0, 86, 259, 264, 265, + 266, 0, 0, 235, 251, 0, 278, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 248, 249, 0, 0, + 0, 0, 290, 0, 250, 0, 0, 246, 247, 252, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 187, 0, 0, 288, 0, 131, 0, 0, + 145, 98, 97, 106, 0, 0, 0, 89, 0, 137, + 127, 157, 0, 128, 136, 110, 149, 132, 156, 188, + 164, 147, 163, 78, 146, 155, 87, 139, 80, 153, + 144, 116, 102, 103, 79, 0, 135, 92, 96, 91, + 124, 150, 151, 90, 171, 83, 162, 82, 84, 161, + 123, 148, 154, 117, 114, 81, 152, 115, 113, 105, + 94, 99, 129, 112, 130, 100, 120, 119, 121, 0, + 0, 0, 143, 159, 172, 0, 0, 165, 166, 167, + 168, 0, 0, 0, 122, 85, 101, 140, 104, 111, + 134, 170, 126, 138, 88, 158, 141, 280, 289, 286, + 287, 284, 285, 283, 282, 281, 291, 272, 273, 274, + 275, 277, 125, 276, 77, 0, 108, 169, 133, 95, + 160, 93, 0, 0, 0, 0, 107, 279, 109, 0, + 0, 142, 118, 0, 0, 0, 0, 270, 271, 0, + 0, 0, 0, 0, 0, 0, 0, 50, 0, 0, + 238, 258, 257, 260, 261, 262, 263, 0, 0, 86, + 259, 264, 265, 266, 0, 0, 0, 251, 0, 278, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 248, + 249, 0, 0, 0, 0, 290, 0, 250, 0, 0, + 246, 247, 252, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 187, 0, 0, 288, 0, + 131, 0, 0, 145, 98, 97, 106, 0, 0, 0, + 89, 0, 137, 127, 157, 1289, 128, 136, 110, 149, + 132, 156, 188, 164, 147, 163, 78, 146, 155, 87, + 139, 80, 153, 144, 116, 102, 103, 79, 0, 135, + 92, 96, 91, 124, 150, 151, 90, 171, 83, 162, + 82, 84, 161, 123, 148, 154, 117, 114, 81, 152, + 115, 113, 105, 94, 99, 129, 112, 130, 100, 120, + 119, 121, 0, 0, 0, 143, 159, 172, 0, 0, + 165, 166, 167, 168, 0, 0, 0, 122, 85, 101, + 140, 104, 111, 134, 170, 126, 138, 88, 158, 141, + 280, 289, 286, 287, 284, 285, 283, 282, 281, 291, + 272, 273, 274, 275, 277, 125, 276, 77, 0, 108, + 169, 133, 95, 160, 93, 0, 0, 0, 0, 107, + 279, 109, 0, 0, 142, 118, 0, 0, 0, 0, + 270, 271, 0, 0, 0, 0, 0, 0, 0, 0, + 50, 0, 0, 238, 258, 257, 260, 261, 262, 263, + 0, 0, 86, 259, 264, 265, 266, 0, 0, 0, + 251, 0, 278, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 248, 249, 0, 0, 0, 0, 290, 0, + 250, 0, 0, 246, 247, 252, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 187, 0, + 0, 288, 0, 131, 0, 0, 145, 98, 97, 106, + 0, 0, 0, 89, 0, 137, 127, 157, 0, 128, + 136, 110, 149, 132, 156, 188, 164, 147, 163, 78, + 146, 155, 87, 139, 80, 153, 144, 116, 102, 103, + 79, 0, 135, 92, 96, 91, 124, 150, 151, 90, + 171, 83, 162, 82, 84, 161, 123, 148, 154, 117, + 114, 81, 152, 115, 113, 105, 94, 99, 129, 112, + 130, 100, 120, 119, 121, 0, 0, 0, 143, 159, + 172, 0, 0, 165, 166, 167, 168, 0, 0, 0, + 122, 85, 101, 140, 104, 111, 134, 170, 126, 138, + 88, 158, 141, 280, 289, 286, 287, 284, 285, 283, + 282, 281, 291, 272, 273, 274, 275, 277, 125, 276, + 77, 0, 108, 169, 133, 95, 160, 93, 0, 0, + 0, 0, 107, 0, 109, 0, 0, 142, 118, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 75, 0, 0, 0, + 0, 0, 0, 0, 0, 86, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 488, 487, 497, 498, 490, 491, 492, 493, 494, + 495, 496, 489, 0, 0, 499, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 187, 0, 0, 0, 0, 131, 0, 0, 145, + 98, 97, 106, 0, 0, 0, 89, 0, 137, 127, + 157, 0, 128, 136, 110, 149, 132, 156, 188, 164, + 147, 163, 78, 146, 155, 87, 139, 80, 153, 144, + 116, 102, 
103, 79, 0, 135, 92, 96, 91, 124, + 150, 151, 90, 171, 83, 162, 82, 84, 161, 123, + 148, 154, 117, 114, 81, 152, 115, 113, 105, 94, + 99, 129, 112, 130, 100, 120, 119, 121, 0, 0, + 0, 143, 159, 172, 0, 0, 165, 166, 167, 168, + 0, 0, 0, 122, 85, 101, 140, 104, 111, 134, + 170, 126, 138, 88, 158, 141, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 77, 0, 108, 169, 133, 95, 160, + 125, 0, 0, 0, 476, 0, 0, 0, 0, 93, + 0, 0, 0, 0, 107, 0, 109, 0, 0, 142, + 118, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 75, 0, + 478, 0, 0, 0, 0, 0, 0, 86, 0, 0, + 0, 0, 473, 472, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 474, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 187, 0, 0, 0, 0, 131, 0, + 0, 145, 98, 97, 106, 0, 0, 0, 89, 0, + 137, 127, 157, 0, 128, 136, 110, 149, 132, 156, + 188, 164, 147, 163, 78, 146, 155, 87, 139, 80, + 153, 144, 116, 102, 103, 79, 0, 135, 92, 96, + 91, 124, 150, 151, 90, 171, 83, 162, 82, 84, + 161, 123, 148, 154, 117, 114, 81, 152, 115, 113, + 105, 94, 99, 129, 112, 130, 100, 120, 119, 121, + 0, 0, 0, 143, 159, 172, 0, 0, 165, 166, + 167, 168, 0, 0, 0, 122, 85, 101, 140, 104, + 111, 134, 170, 126, 138, 88, 158, 141, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 125, 0, 77, 0, 108, 169, 133, + 95, 160, 93, 0, 0, 0, 0, 107, 0, 109, + 0, 0, 142, 118, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 75, 0, 0, 0, 0, 0, 0, 0, 0, + 86, 0, 0, 0, 0, 68, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 71, 72, 0, 67, 0, 0, 0, + 73, 131, 0, 0, 145, 98, 97, 106, 0, 0, + 0, 89, 0, 137, 127, 157, 0, 128, 136, 110, + 149, 132, 156, 69, 164, 147, 163, 78, 146, 155, + 87, 139, 80, 153, 144, 116, 102, 103, 79, 0, + 135, 92, 96, 91, 124, 150, 151, 90, 171, 83, + 162, 82, 84, 161, 123, 148, 154, 117, 114, 81, + 152, 115, 113, 105, 94, 99, 129, 112, 130, 100, + 120, 119, 121, 0, 0, 0, 143, 159, 172, 0, + 0, 165, 166, 167, 168, 0, 0, 0, 122, 85, + 101, 140, 104, 111, 134, 170, 126, 138, 88, 158, + 141, 0, 70, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 77, 0, + 108, 169, 133, 95, 160, 125, 0, 0, 0, 572, + 0, 0, 0, 0, 93, 0, 0, 0, 0, 107, + 0, 109, 0, 0, 142, 118, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 185, 0, 574, 0, 0, 0, 0, + 0, 0, 86, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 187, 0, + 0, 0, 0, 131, 0, 0, 145, 98, 97, 106, + 0, 0, 0, 89, 0, 137, 127, 157, 0, 128, + 136, 110, 149, 132, 156, 188, 164, 147, 163, 78, + 146, 155, 87, 139, 80, 153, 144, 116, 102, 103, + 79, 0, 135, 92, 96, 91, 124, 150, 151, 90, + 171, 83, 162, 82, 84, 161, 123, 148, 154, 117, + 114, 81, 152, 115, 113, 105, 94, 99, 129, 112, + 130, 100, 120, 119, 121, 0, 0, 0, 143, 159, + 172, 0, 0, 165, 166, 167, 168, 0, 0, 0, + 122, 85, 101, 140, 104, 111, 134, 170, 126, 138, + 88, 158, 141, 0, 0, 0, 23, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 125, 0, + 77, 0, 108, 169, 133, 95, 160, 93, 0, 0, + 0, 0, 107, 0, 109, 0, 0, 142, 118, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 50, 0, 0, 75, 0, 0, 0, + 0, 0, 0, 0, 0, 86, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 187, 0, 0, 0, 0, 131, 0, 0, 145, + 98, 97, 106, 0, 0, 0, 89, 0, 137, 127, + 157, 0, 128, 136, 110, 149, 132, 
156, 188, 164, + 147, 163, 78, 146, 155, 87, 139, 80, 153, 144, + 116, 102, 103, 79, 0, 135, 92, 96, 91, 124, + 150, 151, 90, 171, 83, 162, 82, 84, 161, 123, + 148, 154, 117, 114, 81, 152, 115, 113, 105, 94, + 99, 129, 112, 130, 100, 120, 119, 121, 0, 0, + 0, 143, 159, 172, 0, 0, 165, 166, 167, 168, + 0, 0, 0, 122, 85, 101, 140, 104, 111, 134, + 170, 126, 138, 88, 158, 141, 0, 0, 0, 23, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 125, 0, 77, 0, 108, 169, 133, 95, 160, + 93, 0, 0, 0, 0, 107, 0, 109, 0, 0, + 142, 118, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 50, 0, 0, 185, + 0, 0, 0, 0, 0, 0, 0, 0, 86, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 187, 0, 0, 0, 0, 131, + 0, 0, 145, 98, 97, 106, 0, 0, 0, 89, + 0, 137, 127, 157, 0, 128, 136, 110, 149, 132, + 156, 188, 164, 147, 163, 78, 146, 155, 87, 139, + 80, 153, 144, 116, 102, 103, 79, 0, 135, 92, + 96, 91, 124, 150, 151, 90, 171, 83, 162, 82, + 84, 161, 123, 148, 154, 117, 114, 81, 152, 115, + 113, 105, 94, 99, 129, 112, 130, 100, 120, 119, + 121, 0, 0, 0, 143, 159, 172, 0, 0, 165, + 166, 167, 168, 0, 0, 0, 122, 85, 101, 140, + 104, 111, 134, 170, 126, 138, 88, 158, 141, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 125, 0, 77, 0, 108, 169, + 133, 95, 160, 93, 0, 0, 0, 0, 107, 0, + 109, 0, 0, 142, 118, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 75, 0, 0, 692, 0, 0, 693, 0, + 0, 86, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 187, 0, 0, + 0, 0, 131, 0, 0, 145, 98, 97, 106, 0, + 0, 0, 89, 0, 137, 127, 157, 0, 128, 136, + 110, 149, 132, 156, 188, 164, 147, 163, 78, 146, + 155, 87, 139, 80, 153, 144, 116, 102, 103, 79, + 0, 135, 92, 96, 91, 124, 150, 151, 90, 171, + 83, 162, 82, 84, 161, 123, 148, 154, 117, 114, + 81, 152, 115, 113, 105, 94, 99, 129, 112, 130, + 100, 120, 119, 121, 0, 0, 0, 143, 159, 172, + 0, 0, 165, 166, 167, 168, 0, 0, 0, 122, + 85, 101, 140, 104, 111, 134, 170, 126, 138, 88, + 158, 141, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 125, 0, 77, + 0, 108, 169, 133, 95, 160, 93, 0, 592, 0, + 0, 107, 0, 109, 0, 0, 142, 118, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 75, 0, 591, 0, 0, + 0, 0, 0, 0, 86, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 187, 0, 0, 0, 0, 131, 0, 0, 145, 98, + 97, 106, 0, 0, 0, 89, 0, 137, 127, 157, + 0, 128, 136, 110, 149, 132, 156, 188, 164, 147, + 163, 78, 146, 155, 87, 139, 80, 153, 144, 116, + 102, 103, 79, 0, 135, 92, 96, 91, 124, 150, + 151, 90, 171, 83, 162, 82, 84, 161, 123, 148, + 154, 117, 114, 81, 152, 115, 113, 105, 94, 99, + 129, 112, 130, 100, 120, 119, 121, 0, 0, 0, + 143, 159, 172, 0, 0, 165, 166, 167, 168, 0, + 0, 0, 122, 85, 101, 140, 104, 111, 134, 170, + 126, 138, 88, 158, 141, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 77, 0, 108, 169, 133, 95, 160, 125, + 0, 0, 0, 572, 0, 0, 0, 0, 93, 0, + 0, 0, 0, 107, 0, 109, 0, 0, 142, 118, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 185, 0, 574, + 0, 0, 0, 0, 0, 0, 86, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 187, 0, 0, 0, 0, 131, 0, 0, + 145, 98, 97, 106, 0, 0, 0, 89, 0, 137, 
+ 127, 157, 0, 570, 136, 110, 149, 132, 156, 188, + 164, 147, 163, 78, 146, 155, 87, 139, 80, 153, + 144, 116, 102, 103, 79, 0, 135, 92, 96, 91, + 124, 150, 151, 90, 171, 83, 162, 82, 84, 161, + 123, 148, 154, 117, 114, 81, 152, 115, 113, 105, + 94, 99, 129, 112, 130, 100, 120, 119, 121, 0, + 0, 0, 143, 159, 172, 0, 0, 165, 166, 167, + 168, 0, 0, 0, 122, 85, 101, 140, 104, 111, + 134, 170, 126, 138, 88, 158, 141, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 125, 0, 77, 0, 108, 169, 133, 95, + 160, 93, 0, 0, 0, 0, 107, 0, 109, 0, + 0, 142, 118, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 50, 0, 0, + 185, 0, 0, 0, 0, 0, 0, 0, 0, 86, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 187, 0, 0, 0, 0, + 131, 0, 0, 145, 98, 97, 106, 0, 0, 0, + 89, 0, 137, 127, 157, 0, 128, 136, 110, 149, + 132, 156, 188, 164, 147, 163, 78, 146, 155, 87, + 139, 80, 153, 144, 116, 102, 103, 79, 0, 135, + 92, 96, 91, 124, 150, 151, 90, 171, 83, 162, + 82, 84, 161, 123, 148, 154, 117, 114, 81, 152, + 115, 113, 105, 94, 99, 129, 112, 130, 100, 120, + 119, 121, 0, 0, 0, 143, 159, 172, 0, 0, + 165, 166, 167, 168, 0, 0, 0, 122, 85, 101, + 140, 104, 111, 134, 170, 126, 138, 88, 158, 141, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 125, 0, 77, 0, 108, + 169, 133, 95, 160, 93, 0, 0, 0, 0, 107, + 0, 109, 0, 0, 142, 118, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 185, 0, 574, 0, 0, 0, 0, + 0, 0, 86, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 187, 0, + 0, 0, 0, 131, 0, 0, 145, 98, 97, 106, + 0, 0, 0, 89, 0, 137, 127, 157, 0, 128, + 136, 110, 149, 132, 156, 188, 164, 147, 163, 78, + 146, 155, 87, 139, 80, 153, 144, 116, 102, 103, + 79, 0, 135, 92, 96, 91, 124, 150, 151, 90, + 171, 83, 162, 82, 84, 161, 123, 148, 154, 117, + 114, 81, 152, 115, 113, 105, 94, 99, 129, 112, + 130, 100, 120, 119, 121, 0, 0, 0, 143, 159, + 172, 0, 0, 165, 166, 167, 168, 0, 0, 0, + 122, 85, 101, 140, 104, 111, 134, 170, 126, 138, + 88, 158, 141, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 125, 0, + 77, 0, 108, 169, 133, 95, 160, 93, 0, 0, + 0, 0, 107, 0, 109, 0, 0, 142, 118, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 75, 0, 478, 0, + 0, 0, 0, 0, 0, 86, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 187, 0, 0, 0, 0, 131, 0, 0, 145, + 98, 97, 106, 0, 0, 0, 89, 0, 137, 127, + 157, 0, 128, 136, 110, 149, 132, 156, 188, 164, + 147, 163, 78, 146, 155, 87, 139, 80, 153, 144, + 116, 102, 103, 79, 0, 135, 92, 96, 91, 124, + 150, 151, 90, 171, 83, 162, 82, 84, 161, 123, + 148, 154, 117, 114, 81, 152, 115, 113, 105, 94, + 99, 129, 112, 130, 100, 120, 119, 121, 0, 0, + 0, 143, 159, 172, 0, 0, 165, 166, 167, 168, + 0, 0, 0, 122, 85, 101, 140, 104, 111, 134, + 170, 126, 138, 88, 158, 141, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 125, 77, 0, 108, 169, 133, 95, 160, + 550, 93, 0, 0, 0, 0, 107, 0, 109, 0, + 0, 142, 118, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 185, 0, 0, 0, 0, 0, 0, 0, 0, 86, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 187, 0, 0, 0, 0, + 131, 0, 0, 145, 98, 97, 106, 0, 0, 0, + 
89, 0, 137, 127, 157, 0, 128, 136, 110, 149, + 132, 156, 188, 164, 147, 163, 78, 146, 155, 87, + 139, 80, 153, 144, 116, 102, 103, 79, 0, 135, + 92, 96, 91, 124, 150, 151, 90, 171, 83, 162, + 82, 84, 161, 123, 148, 154, 117, 114, 81, 152, + 115, 113, 105, 94, 99, 129, 112, 130, 100, 120, + 119, 121, 0, 0, 0, 143, 159, 172, 0, 0, + 165, 166, 167, 168, 0, 0, 0, 122, 85, 101, + 140, 104, 111, 134, 170, 126, 138, 88, 158, 141, + 0, 0, 0, 0, 0, 0, 0, 0, 301, 0, + 0, 0, 0, 0, 0, 125, 0, 77, 0, 108, + 169, 133, 95, 160, 93, 0, 0, 0, 0, 107, + 0, 109, 0, 0, 142, 118, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 185, 0, 0, 0, 0, 0, 0, + 0, 0, 86, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 187, 0, + 0, 0, 0, 131, 0, 0, 145, 98, 97, 106, + 0, 0, 0, 89, 0, 137, 127, 157, 0, 128, + 136, 110, 149, 132, 156, 188, 164, 147, 163, 78, + 146, 155, 87, 139, 80, 153, 144, 116, 102, 103, + 79, 0, 135, 92, 96, 91, 124, 150, 151, 90, + 171, 83, 162, 82, 84, 161, 123, 148, 154, 117, + 114, 81, 152, 115, 113, 105, 94, 99, 129, 112, + 130, 100, 120, 119, 121, 0, 0, 0, 143, 159, + 172, 0, 0, 165, 166, 167, 168, 0, 0, 0, + 122, 85, 101, 140, 104, 111, 134, 170, 126, 138, + 88, 158, 141, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 125, 0, + 77, 0, 108, 169, 133, 95, 160, 93, 0, 0, + 0, 0, 107, 0, 109, 0, 0, 142, 118, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 185, 0, 0, 0, + 0, 0, 0, 0, 0, 86, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 182, + 0, 187, 0, 0, 0, 0, 131, 0, 0, 145, + 98, 97, 106, 0, 0, 0, 89, 0, 137, 127, + 157, 0, 128, 136, 110, 149, 132, 156, 188, 164, + 147, 163, 78, 146, 155, 87, 139, 80, 153, 144, + 116, 102, 103, 79, 0, 135, 92, 96, 91, 124, + 150, 151, 90, 171, 83, 162, 82, 84, 161, 123, + 148, 154, 117, 114, 81, 152, 115, 113, 105, 94, + 99, 129, 112, 130, 100, 120, 119, 121, 0, 0, + 0, 143, 159, 172, 0, 0, 165, 166, 167, 168, + 0, 0, 0, 122, 85, 101, 140, 104, 111, 134, + 170, 126, 138, 88, 158, 141, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 125, 0, 77, 0, 108, 169, 133, 95, 160, + 93, 0, 0, 0, 0, 107, 0, 109, 0, 0, + 142, 118, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 75, + 0, 0, 0, 0, 0, 0, 0, 0, 86, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 187, 0, 0, 0, 0, 131, + 0, 0, 145, 98, 97, 106, 0, 0, 0, 89, + 0, 137, 127, 157, 0, 128, 136, 110, 149, 132, + 156, 188, 164, 147, 163, 78, 146, 155, 87, 139, + 80, 153, 144, 116, 102, 103, 79, 0, 135, 92, + 96, 91, 124, 150, 151, 90, 171, 83, 162, 82, + 84, 161, 123, 148, 154, 117, 114, 81, 152, 115, + 113, 105, 94, 99, 129, 112, 130, 100, 120, 119, + 121, 0, 0, 0, 143, 159, 172, 0, 0, 165, + 166, 167, 168, 0, 0, 0, 122, 85, 101, 140, + 104, 111, 134, 170, 126, 138, 88, 158, 141, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 125, 0, 77, 0, 108, 169, + 133, 95, 160, 93, 0, 0, 0, 0, 107, 0, + 109, 0, 0, 142, 118, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 238, 0, 0, 0, 0, 0, 0, 0, + 0, 86, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 187, 0, 0, + 0, 0, 131, 0, 0, 145, 98, 97, 106, 0, + 0, 
0, 89, 0, 137, 127, 157, 0, 128, 136, + 110, 149, 132, 156, 188, 164, 147, 163, 78, 146, + 155, 87, 139, 80, 153, 144, 116, 102, 103, 79, + 0, 135, 92, 96, 91, 124, 150, 151, 90, 171, + 83, 162, 82, 84, 161, 123, 148, 154, 117, 114, + 81, 152, 115, 113, 105, 94, 99, 129, 112, 130, + 100, 120, 119, 121, 0, 0, 0, 143, 159, 172, + 0, 0, 165, 166, 167, 168, 0, 0, 0, 122, + 85, 101, 140, 104, 111, 134, 170, 126, 138, 88, + 158, 141, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 125, 0, 77, + 0, 108, 169, 133, 95, 160, 93, 0, 0, 0, + 0, 107, 0, 109, 0, 0, 142, 118, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 185, 0, 0, 0, 0, + 0, 0, 0, 0, 86, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 187, 0, 0, 0, 0, 131, 0, 0, 145, 98, + 97, 106, 0, 0, 0, 89, 0, 137, 127, 157, + 0, 128, 136, 110, 149, 132, 156, 188, 164, 147, + 163, 78, 146, 155, 87, 139, 80, 153, 144, 116, + 102, 103, 79, 0, 135, 92, 96, 91, 124, 150, + 151, 90, 171, 83, 162, 82, 84, 161, 123, 148, + 154, 117, 114, 81, 152, 115, 113, 105, 94, 99, + 129, 112, 130, 100, 120, 119, 121, 0, 0, 0, + 143, 159, 172, 0, 0, 165, 166, 167, 168, 0, + 0, 0, 122, 85, 101, 140, 104, 111, 134, 170, + 126, 138, 88, 158, 141, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 77, 0, 108, 169, 133, 95, 160, +} +var yyPact = [...]int{ + + 1723, -1000, -186, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, 802, 836, -1000, -1000, -1000, -1000, -1000, -1000, 618, + 7335, 41, 62, -18, 10030, 61, 1367, 10699, -1000, -6, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 593, -1000, -1000, + -1000, -1000, -1000, 776, 799, 647, 803, 702, -1000, 5512, + 40, 8914, 9807, 4816, -1000, 484, 59, 10699, -158, 10253, + 34, 34, 34, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 51, 10699, -1000, 10699, 33, 481, 33, + 33, 33, 10699, -1000, 101, -1000, -1000, -1000, -1000, 10699, + 478, 733, 22, 2856, 2856, 2856, 2856, 1, 2856, -83, + 655, -1000, -1000, -1000, -1000, 2856, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 364, 739, 6211, 6211, + 802, -1000, 593, -1000, -1000, -1000, 729, -1000, -1000, 243, + 822, -1000, 7112, 99, -1000, 6211, 1819, 429, -1000, -1000, + 429, -1000, -1000, 81, -1000, -1000, 6657, 6657, 6657, 6657, + 6657, 6657, 6657, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, 429, -1000, 5979, + 429, 429, 429, 429, 429, 429, 429, 429, 6211, 429, + 429, 429, 429, 429, 429, 429, 429, 429, 429, 429, + 429, 429, 9584, 587, 874, -1000, -1000, -1000, 767, 8013, + 8691, 10699, 531, -1000, 589, 4571, -121, -1000, -1000, -1000, + 173, 8459, -1000, -1000, -1000, 731, -1000, -1000, -1000, -1000, + -1000, -1000, 
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 477, + -1000, 1856, 472, 2856, 50, 606, 470, 223, 453, 10699, + 10699, 2856, 44, 10699, 757, 653, 10699, 452, 440, -1000, + 4326, -1000, 2856, 2856, 2856, 2856, 2856, 2856, 2856, 2856, + -1000, -1000, -1000, -1000, -1000, -1000, 2856, 2856, -1000, -67, + -1000, 10699, -1000, -1000, -1000, -1000, 831, 130, 525, 95, + 591, -1000, 278, 776, 364, 702, 8236, 665, -1000, -1000, + 10699, -1000, 6211, 6211, 286, -1000, 9360, -1000, -1000, 3346, + 147, 6657, 284, 237, 6657, 6657, 6657, 6657, 6657, 6657, + 6657, 6657, 6657, 6657, 6657, 6657, 6657, 6657, 6657, 327, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 438, -1000, + 593, 725, 725, 113, 113, 113, 113, 113, 113, 6880, + 5048, 364, 437, 180, 5979, 5512, 5512, 6211, 6211, 10476, + 10476, 5512, 769, 203, 180, 10476, -1000, 364, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 5512, 5512, 5512, 5512, 14, + 10699, -1000, 10476, 8914, 8914, 8914, 8914, 8914, -1000, 679, + 678, -1000, 688, 685, 694, 10699, -1000, 433, 8013, 125, + 429, -1000, 9137, -1000, -1000, 14, 568, 8914, 10699, -1000, + -1000, 4081, 589, -121, 581, -1000, -98, -97, 5744, 110, + -1000, -1000, -1000, -1000, 2611, 188, 273, -66, -1000, -1000, + -1000, 595, -1000, 595, 595, 595, 595, -33, -33, -33, + -33, -1000, -1000, -1000, -1000, -1000, 617, 614, -1000, 595, + 595, 595, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 613, 613, + 613, 597, 597, 622, -1000, 10699, -174, 430, 2856, 756, + 2856, -1000, 70, -1000, 10699, -1000, -1000, 10699, 2856, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, 208, -1000, -1000, -1000, -1000, 695, + 6211, 6211, 3836, 6211, -1000, -1000, -1000, 739, -1000, 769, + 810, -1000, 722, 720, 5512, -1000, -1000, 147, 156, -1000, + -1000, 328, -1000, -1000, -1000, -1000, 94, 429, -1000, 1907, + -1000, -1000, -1000, -1000, 284, 6657, 6657, 6657, 1409, 1907, + 1872, 405, 357, 113, 612, 612, 112, 112, 112, 112, + 112, 910, 910, -1000, -1000, -1000, 364, -1000, -1000, -1000, + 364, 5512, 582, -1000, -1000, 6211, -1000, 364, 426, 426, + 289, 334, 626, -1000, 91, 619, 426, 5512, 218, -1000, + 6211, 364, -1000, 426, 364, 426, 426, 544, 429, -1000, + 564, -1000, 172, 874, 605, 652, 650, -1000, -1000, -1000, + -1000, 675, -1000, 636, -1000, -1000, -1000, -1000, -1000, 58, + 57, 56, 10253, -1000, 818, 8914, 521, -1000, -1000, 581, + -121, -100, -1000, -1000, -1000, 180, -1000, 428, 530, 2366, + -1000, -1000, -1000, -1000, -1000, -1000, 601, 749, 145, 137, + 427, -1000, -1000, 736, -1000, 238, -69, -1000, -1000, 292, + -33, -33, -1000, -1000, 110, 730, 110, 110, 110, 343, + 343, -1000, -1000, -1000, -1000, 280, -1000, -1000, -1000, 277, + -1000, 637, 10253, 2856, -1000, 3591, -1000, -1000, -1000, -1000, + -1000, -1000, 282, 204, 179, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 13, 
-1000, 2856, -1000, + 209, 10699, 10699, 709, 180, 180, 79, -1000, -1000, 10699, + -1000, -1000, -1000, -1000, 610, -1000, -1000, -1000, 3101, 5512, + -1000, 1409, 1907, 1606, -1000, 6657, 6657, -1000, -1000, 426, + 5512, 180, -1000, -1000, -1000, 54, 327, 54, 6657, 6657, + 3836, 6657, 6657, -168, 532, 187, -1000, 6211, 307, -1000, + -1000, -1000, -1000, -1000, 635, 10476, 429, -1000, 7790, 10253, + 802, 10476, 6211, 6211, -1000, -1000, 6211, 599, -1000, 6211, + -1000, -1000, -1000, 429, 429, 429, 389, -1000, 802, 521, + -1000, -1000, -1000, -130, -132, -1000, -1000, 2611, -1000, 2611, + 10253, -1000, 367, 350, -1000, -1000, 625, 28, -1000, -1000, + -1000, 423, 110, 110, -1000, 155, -1000, -1000, -1000, 420, + -1000, 416, 526, 410, 10699, -1000, -1000, 516, -1000, 165, + -1000, -1000, 10253, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, 10253, 10699, -1000, -1000, -1000, + -1000, -1000, 10253, -1000, -1000, 341, 6211, -1000, -1000, -1000, + 3591, -1000, 818, 8914, -1000, -1000, 364, -1000, 6657, 1907, + 1907, -1000, -1000, 364, 595, 595, -1000, 595, 597, -1000, + 595, -15, 595, -16, 364, 364, 1524, 1728, -1000, 422, + 1553, 429, -165, -1000, 180, 6211, -1000, 732, 490, 498, + -1000, -1000, 5280, 364, 391, 77, 389, 776, -1000, 180, + 180, 180, 10253, 180, 10253, 10253, 10253, 7567, 10253, 776, + -1000, -1000, -1000, -1000, 2366, -1000, 387, -1000, 595, -1000, + -1000, -60, 828, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -33, 340, -33, 259, -1000, 257, + 2856, 3591, 2611, -1000, 594, -1000, -1000, -1000, -1000, 741, + -1000, 180, 815, 513, -1000, 1907, -1000, -1000, 86, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 6657, 6657, + -1000, 6657, 6657, 6657, 364, 322, 180, 748, -1000, 429, + -1000, -1000, 560, 10253, 10253, -1000, -1000, 377, -1000, 371, + 371, 371, 125, -1000, -1000, 107, 10253, -1000, 133, -1000, + -147, 110, -1000, 110, 417, 406, -1000, -1000, -1000, 10253, + 429, 805, 779, -1000, -1000, 1509, 1509, 1509, 1509, 16, + -1000, -1000, 826, -1000, 429, -1000, 593, 73, -1000, 10253, + -1000, -1000, -1000, -1000, -1000, 107, -1000, 347, 160, 317, + -1000, 239, 747, -1000, 738, -1000, -1000, -1000, -1000, -1000, + 363, 12, -1000, 6211, 6211, -1000, -1000, -1000, -1000, 364, + 49, -178, 10476, 498, 364, 10253, -1000, -1000, -1000, 256, + -1000, -1000, -1000, 316, -1000, -1000, 606, 361, -1000, 10253, + 180, 494, -1000, 706, -171, -181, 435, -1000, -1000, -1000, + -1000, -174, -1000, 12, 715, -1000, 705, -1000, -1000, -1000, + 9, -175, 4, -179, 429, -182, 6434, -1000, 1509, 364, + -1000, -1000, +} +var yyPgo = [...]int{ + + 0, 1069, 13, 493, 1068, 1067, 1063, 1059, 1058, 1056, + 1055, 1053, 1052, 1047, 1045, 1044, 1042, 1040, 1039, 1037, + 1036, 1035, 1033, 1032, 104, 1031, 1027, 1026, 57, 1025, + 63, 1024, 1022, 33, 225, 45, 30, 150, 1018, 31, + 54, 84, 1017, 35, 1016, 1014, 70, 1013, 51, 1000, + 999, 1293, 998, 995, 19, 41, 993, 991, 990, 989, + 60, 312, 988, 987, 985, 982, 981, 979, 39, 5, + 7, 12, 11, 978, 127, 15, 976, 55, 975, 974, + 973, 971, 25, 970, 46, 965, 21, 40, 964, 16, + 53, 28, 23, 2, 65, 47, 962, 27, 50, 38, + 961, 958, 367, 948, 944, 942, 940, 936, 934, 140, + 309, 932, 931, 930, 928, 32, 167, 466, 73, 58, + 927, 926, 925, 1239, 59, 52, 22, 924, 36, 1247, + 42, 912, 910, 29, 909, 908, 897, 895, 894, 893, + 892, 264, 890, 889, 887, 17, 20, 882, 881, 49, + 26, 878, 877, 876, 34, 37, 874, 44, 873, 872, + 867, 866, 24, 10, 865, 9, 862, 8, 860, 856, + 4, 853, 18, 852, 3, 851, 6, 
43, 850, 847, + 0, 323, 844, 843, 105, +} +var yyR1 = [...]int{ + + 0, 178, 179, 179, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 2, 2, 2, 6, 3, 4, 4, 5, + 5, 7, 7, 27, 27, 8, 9, 9, 9, 182, + 182, 46, 46, 90, 90, 10, 10, 10, 10, 95, + 95, 99, 99, 99, 100, 100, 100, 100, 131, 131, + 11, 11, 11, 11, 11, 11, 11, 176, 176, 175, + 174, 174, 173, 173, 172, 16, 159, 160, 160, 160, + 155, 134, 134, 134, 134, 137, 137, 135, 135, 135, + 135, 135, 135, 135, 136, 136, 136, 136, 136, 138, + 138, 138, 138, 138, 139, 139, 139, 139, 139, 139, + 139, 139, 139, 139, 139, 139, 139, 139, 139, 140, + 140, 140, 140, 140, 140, 140, 140, 154, 154, 141, + 141, 149, 149, 150, 150, 150, 147, 147, 148, 148, + 151, 151, 151, 142, 142, 142, 142, 142, 142, 142, + 144, 144, 152, 152, 145, 145, 145, 146, 146, 153, + 153, 153, 153, 153, 143, 143, 156, 156, 168, 168, + 167, 167, 167, 158, 158, 164, 164, 164, 164, 164, + 157, 157, 166, 166, 165, 161, 161, 161, 162, 162, + 162, 163, 163, 163, 12, 12, 12, 12, 12, 12, + 12, 12, 12, 177, 177, 177, 177, 177, 177, 177, + 177, 177, 177, 177, 171, 169, 169, 170, 170, 13, + 14, 14, 14, 14, 14, 15, 15, 17, 18, 18, + 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, + 18, 18, 18, 18, 18, 18, 18, 18, 18, 18, + 18, 107, 107, 104, 104, 105, 105, 106, 106, 106, + 108, 108, 108, 132, 132, 132, 19, 19, 21, 21, + 22, 23, 20, 20, 20, 20, 20, 183, 24, 25, + 25, 26, 26, 26, 30, 30, 30, 28, 28, 29, + 29, 35, 35, 34, 34, 36, 36, 36, 36, 120, + 120, 120, 119, 119, 38, 38, 39, 39, 40, 40, + 41, 41, 41, 53, 53, 89, 89, 91, 91, 42, + 42, 42, 42, 43, 43, 44, 44, 45, 45, 127, + 127, 126, 126, 126, 125, 125, 47, 47, 47, 49, + 48, 48, 48, 48, 50, 50, 52, 52, 51, 51, + 54, 54, 54, 54, 55, 55, 37, 37, 37, 37, + 37, 37, 37, 103, 103, 57, 57, 56, 56, 56, + 56, 56, 56, 56, 56, 56, 56, 67, 67, 67, + 67, 67, 67, 58, 58, 58, 58, 58, 58, 58, + 33, 33, 68, 68, 68, 74, 69, 69, 61, 61, + 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, + 61, 61, 61, 61, 61, 61, 61, 61, 61, 61, + 61, 61, 61, 61, 61, 61, 61, 61, 61, 65, + 65, 65, 63, 63, 63, 63, 63, 63, 63, 63, + 63, 63, 63, 63, 63, 63, 63, 64, 64, 64, + 64, 64, 64, 64, 64, 184, 184, 66, 66, 66, + 66, 31, 31, 31, 31, 31, 130, 130, 133, 133, + 133, 133, 133, 133, 133, 133, 133, 133, 133, 133, + 133, 78, 78, 32, 32, 76, 76, 77, 79, 79, + 75, 75, 75, 60, 60, 60, 60, 60, 60, 60, + 60, 62, 62, 62, 80, 80, 81, 81, 82, 82, + 83, 83, 84, 85, 85, 85, 86, 86, 86, 86, + 87, 87, 87, 59, 59, 59, 59, 59, 59, 88, + 88, 88, 88, 92, 92, 70, 70, 72, 72, 71, + 73, 93, 93, 97, 94, 94, 98, 98, 98, 96, + 96, 96, 122, 122, 122, 101, 101, 109, 109, 110, + 110, 102, 102, 111, 111, 111, 111, 111, 111, 111, + 111, 111, 111, 112, 112, 112, 113, 113, 114, 114, + 114, 121, 121, 117, 117, 118, 118, 123, 123, 124, + 124, 115, 115, 115, 115, 115, 115, 115, 115, 115, + 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, + 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, + 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, + 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, + 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, + 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, + 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, + 115, 115, 115, 115, 115, 115, 115, 115, 115, 115, + 115, 115, 115, 116, 116, 116, 116, 116, 116, 116, + 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, + 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, + 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, + 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, + 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, + 116, 116, 116, 116, 116, 
116, 116, 116, 116, 116, + 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, + 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, + 116, 116, 116, 116, 116, 116, 116, 116, 116, 116, + 116, 180, 181, 128, 129, 129, 129, +} +var yyR2 = [...]int{ + + 0, 2, 0, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 4, 6, 7, 5, 10, 1, 3, 1, + 3, 7, 8, 1, 1, 8, 8, 7, 6, 1, + 1, 1, 3, 0, 4, 3, 4, 5, 4, 1, + 3, 3, 2, 2, 2, 2, 2, 1, 1, 1, + 2, 8, 4, 6, 5, 5, 5, 0, 2, 1, + 0, 2, 1, 3, 3, 4, 4, 1, 3, 3, + 8, 3, 1, 1, 1, 2, 1, 1, 1, 1, + 1, 1, 1, 1, 2, 2, 2, 2, 2, 1, + 2, 2, 2, 1, 4, 4, 2, 2, 3, 3, + 3, 3, 1, 1, 1, 1, 1, 6, 6, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 3, 0, + 3, 0, 5, 0, 3, 5, 0, 1, 0, 1, + 0, 1, 2, 0, 2, 2, 2, 2, 2, 2, + 0, 3, 0, 1, 0, 3, 3, 0, 2, 0, + 2, 1, 2, 1, 0, 2, 5, 4, 1, 2, + 2, 3, 2, 0, 1, 2, 3, 3, 2, 2, + 1, 1, 1, 3, 2, 0, 1, 3, 1, 2, + 3, 1, 1, 1, 6, 7, 7, 12, 7, 7, + 7, 4, 5, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 7, 1, 3, 8, 8, 5, + 4, 6, 5, 4, 4, 3, 2, 3, 4, 4, + 4, 4, 4, 4, 4, 4, 3, 3, 3, 3, + 4, 3, 6, 4, 2, 4, 2, 2, 2, 2, + 3, 1, 1, 0, 1, 0, 1, 0, 2, 2, + 0, 2, 2, 0, 1, 1, 2, 1, 1, 2, + 1, 1, 2, 2, 2, 2, 2, 0, 2, 0, + 2, 1, 2, 2, 0, 1, 1, 0, 1, 0, + 1, 0, 1, 1, 3, 1, 2, 3, 5, 0, + 1, 2, 1, 1, 0, 2, 1, 3, 1, 1, + 1, 3, 3, 3, 7, 1, 3, 1, 3, 4, + 4, 4, 3, 2, 4, 0, 1, 0, 2, 0, + 1, 0, 1, 2, 1, 1, 1, 2, 2, 1, + 2, 3, 2, 3, 2, 2, 2, 1, 1, 3, + 0, 5, 5, 5, 0, 2, 1, 3, 3, 2, + 3, 1, 2, 0, 3, 1, 1, 3, 3, 4, + 4, 5, 3, 4, 5, 6, 2, 1, 2, 1, + 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, + 0, 2, 1, 1, 1, 3, 1, 3, 1, 1, + 1, 1, 1, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, + 2, 2, 2, 2, 3, 1, 1, 1, 1, 4, + 5, 6, 4, 4, 6, 6, 6, 6, 8, 8, + 6, 8, 8, 9, 7, 5, 4, 2, 2, 2, + 2, 2, 2, 2, 2, 0, 2, 4, 4, 4, + 4, 0, 3, 4, 7, 3, 1, 1, 2, 3, + 3, 1, 2, 2, 1, 2, 1, 2, 2, 1, + 2, 0, 1, 0, 2, 1, 2, 4, 0, 2, + 1, 3, 5, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 2, 2, 0, 3, 0, 2, 0, 3, + 1, 3, 2, 0, 1, 1, 0, 2, 4, 4, + 0, 2, 4, 2, 1, 3, 5, 4, 6, 1, + 3, 3, 5, 0, 5, 1, 3, 1, 2, 3, + 1, 1, 3, 3, 1, 3, 3, 3, 3, 1, + 2, 1, 1, 1, 1, 1, 1, 0, 2, 0, + 3, 0, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, + 1, 0, 2, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 0, 0, 1, 1, +} +var yyChk = [...]int{ + + -1000, -178, -1, -2, -6, -7, -8, -9, -10, -11, + -12, -13, -14, -15, -17, -18, -19, -21, -22, -23, + -20, -3, -4, 6, 7, -27, 9, 10, 30, -16, + 112, 113, 115, 114, 140, 116, 133, 49, 152, 153, + 155, 156, 25, 134, 135, 138, 139, -180, 8, 235, + 53, -179, 250, -82, 15, -26, 5, -24, -183, -24, + -24, -24, -24, -24, -159, 53, -114, 121, 70, 148, + 227, 118, 119, 125, -117, 56, -116, 243, 152, 163, + 157, 184, 176, 174, 177, 214, 65, 155, 223, 136, + 172, 168, 166, 27, 189, 248, 167, 131, 130, 190, + 194, 215, 161, 162, 217, 188, 132, 32, 245, 34, + 144, 218, 192, 187, 183, 186, 160, 182, 38, 196, + 195, 197, 213, 179, 169, 18, 221, 139, 142, 191, + 193, 126, 146, 247, 219, 165, 143, 138, 222, 156, + 216, 225, 37, 201, 159, 129, 153, 150, 180, 145, + 
170, 171, 185, 158, 181, 154, 147, 140, 224, 202, + 249, 178, 175, 151, 149, 206, 207, 208, 209, 246, + 220, 173, 203, -102, 121, 123, 119, 119, 120, 121, + 227, 118, 119, -51, -123, 56, -116, 121, 148, 119, + 106, 177, 112, 204, 120, 32, 146, -132, 119, -104, + 149, 206, 207, 208, 209, 56, 216, 215, 210, -123, + 154, -128, -128, -128, -128, -128, -2, -86, 17, 16, + -5, -3, -180, 6, 20, 21, -30, 39, 40, -25, + -36, 97, -37, -123, -56, 72, -61, 29, 56, -116, + 23, -60, -57, -75, -73, -74, 106, 107, 95, 96, + 103, 73, 108, -65, -63, -64, -66, 58, 57, 66, + 59, 60, 61, 62, 67, 68, 69, -117, -71, -180, + 43, 44, 236, 237, 238, 239, 242, 240, 75, 33, + 226, 234, 233, 232, 230, 231, 228, 229, 124, 227, + 101, 235, -102, -39, -40, -41, -42, -53, -74, -180, + -51, 11, -46, -51, -94, -131, 154, -98, 216, 215, + -118, -96, -117, -115, 214, 177, 213, 117, 71, 22, + 24, 199, 74, 106, 16, 75, 105, 236, 112, 47, + 228, 229, 226, 238, 239, 227, 204, 29, 10, 25, + 134, 21, 99, 114, 78, 79, 137, 23, 135, 69, + 19, 50, 11, 13, 14, 124, 123, 90, 120, 45, + 8, 108, 26, 87, 41, 28, 43, 88, 17, 230, + 231, 31, 242, 141, 101, 48, 35, 72, 67, 51, + 70, 15, 46, 89, 115, 235, 44, 118, 6, 241, + 30, 133, 42, 119, 205, 77, 122, 68, 5, 125, + 9, 49, 52, 232, 233, 234, 33, 76, 12, -160, + -155, 56, 120, -51, 235, -117, -110, 124, -110, -110, + 119, -51, -51, -109, 124, 56, -109, -109, -109, -51, + 109, -51, 56, 30, 227, 56, 146, 119, 147, 121, + -129, -180, -118, -129, -129, -129, 150, 151, -129, -105, + 211, 51, -129, -181, 55, -87, 19, 31, -37, -123, + -83, -84, -37, -82, -2, -24, 35, -28, 21, 64, + 11, -120, 71, 70, 87, -119, 22, -117, 58, 109, + -37, -58, 90, 72, 88, 89, 74, 92, 91, 102, + 95, 96, 97, 98, 99, 100, 101, 93, 94, 105, + 80, 81, 82, 83, 84, 85, 86, -103, -180, -74, + -180, 110, 111, -61, -61, -61, -61, -61, -61, -61, + -180, -2, -69, -37, -180, -180, -180, -180, -180, -180, + -180, -180, -180, -78, -37, -180, -184, -180, -184, -184, + -184, -184, -184, -184, -184, -180, -180, -180, -180, -52, + 26, -51, 30, 54, -47, -49, -48, -50, 41, 45, + 47, 42, 43, 44, 48, -127, 22, -39, -180, -126, + 142, -125, 22, -123, 58, -51, -46, -182, 54, 11, + 52, 54, -94, 154, -95, -99, 217, 219, 80, -122, + -117, 58, 29, 30, 55, 54, -134, -137, -139, -138, + -140, -135, -136, 174, 175, 106, 178, 180, 181, 182, + 183, 184, 185, 186, 187, 188, 189, 30, 136, 170, + 171, 172, 173, 190, 191, 192, 193, 194, 195, 196, + 197, 157, 158, 159, 160, 161, 162, 163, 165, 166, + 167, 168, 169, 56, -129, 121, -176, 52, 56, 72, + 56, -51, -51, -129, 122, -51, 23, 51, -51, 56, + 56, -124, -123, -115, -129, -129, -129, -129, -129, -129, + -129, -129, -129, -129, -107, 205, 212, -51, 9, 90, + 54, 18, 109, 54, -85, 24, 25, -86, -181, -30, + -62, -117, 59, 62, -29, 42, -51, -37, -37, -67, + 67, 72, 68, 69, -119, 97, -124, -118, -115, -61, + -68, -71, -74, 63, 90, 88, 89, 74, -61, -61, + -61, -61, -61, -61, -61, -61, -61, -61, -61, -61, + -61, -61, -61, -130, 56, 58, 56, -60, -60, -117, + -35, 21, -34, -36, -181, 54, -181, -2, -34, -34, + -37, -37, -75, -117, -123, -75, -34, -28, -76, -77, + 76, -75, -181, -34, -35, -34, -34, -90, 142, -51, + -93, -97, -75, -40, -41, -41, -40, -41, 41, 41, + 41, 46, 41, 46, 41, -48, -123, -181, -54, 49, + 123, 50, -180, -125, -90, 52, -39, -51, -98, -95, + 54, 218, 220, 221, 51, -37, -146, 105, -161, -162, + -163, -118, 58, 59, -155, -156, -164, 126, 129, 125, + -157, 120, 28, -151, 67, 72, -147, 202, -141, 53, + -141, -141, -141, -141, -145, 177, -145, -145, -145, 53, + 53, -141, -141, -141, -149, 53, -149, 
-149, -150, 53, + -150, -121, 52, -51, -174, 246, -175, 56, -129, 23, + -129, -111, 117, 114, 115, -171, 113, 199, 177, 65, + 29, 15, 236, 142, 249, 56, 143, -51, -51, -129, + -106, 11, 90, 37, -37, -37, -124, -84, -87, -101, + 19, 11, 33, 33, -34, 67, 68, 69, 109, -180, + -68, -61, -61, -61, -33, 137, 71, -181, -181, -34, + 54, -37, -181, -181, -181, 54, 52, 22, 54, 11, + 109, 54, 11, -181, -34, -79, -77, 78, -37, -181, + -181, -181, -181, -181, -59, 30, 33, -2, -180, -180, + -55, 54, 12, 80, -44, -43, 51, 52, -45, 51, + -43, 41, 41, 120, 120, 120, -91, -117, -55, -39, + -55, -99, -100, 222, 219, 225, 56, 54, -163, 80, + 53, 28, -157, -157, 56, 56, -142, 29, 67, -148, + 203, 59, -145, -145, -146, 30, -146, -146, -146, -154, + 58, -154, 59, 59, 51, -117, -129, -173, -172, -118, + -128, -177, 148, 127, 128, 131, 130, 56, 120, 28, + 126, 129, 142, 125, -177, 148, -112, -113, 122, 22, + 120, 28, 142, -129, -108, 88, 12, -123, -123, 38, + 109, -51, -38, 11, 97, -118, -35, -33, 71, -61, + -61, -181, -36, -133, 106, 174, 136, 172, 168, 188, + 179, 201, 170, 202, -130, -133, -61, -61, -118, -61, + -61, 243, -82, 79, -37, 77, -92, 51, -93, -70, + -72, -71, -180, -2, -88, -117, -91, -82, -97, -37, + -37, -37, 53, -37, -180, -180, -180, -181, 54, -82, + -55, 219, 223, 224, -162, -163, -166, -165, -117, 56, + 56, -144, 51, 58, 59, 60, 67, 226, 66, 55, + -146, -146, 56, 106, 55, 54, 55, 54, 55, 54, + -51, 54, 80, -128, -117, -128, -117, -51, -128, -117, + 58, -37, -55, -39, -181, -61, -181, -141, -141, -141, + -150, -141, 162, -141, 162, -181, -181, -181, 54, 19, + -181, 54, 19, -180, -32, 241, -37, 27, -92, 54, + -181, -181, -181, 54, 109, -181, -86, -89, -117, -89, + -89, -89, -126, -117, -86, 55, 54, -141, -152, 199, + 9, -145, 58, -145, 59, 59, -129, -172, -163, 53, + 26, -80, 13, -145, 56, -61, -61, -61, -61, -61, + -181, 58, 28, -72, 33, -2, -180, -117, -117, 54, + 55, -181, -181, -181, -54, -168, -167, 52, 132, 65, + -165, -153, 126, 28, 125, 226, -146, -146, 55, 55, + -89, -180, -81, 14, 16, -181, -181, -181, -181, -31, + 90, 246, 9, -70, -2, 109, -117, -167, 56, -158, + 80, 58, -143, 65, 28, 28, 55, -169, -170, 142, + -37, -69, -181, 244, 48, 247, -93, -181, -117, 59, + 58, -176, -181, 54, -117, 38, 245, 248, -174, -170, + 33, 38, 144, 246, 145, 247, -180, 248, -61, 141, + -181, -181, +} +var yyDef = [...]int{ + + 0, -2, 2, -2, 5, 6, 7, 8, 9, 10, + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 508, 0, 277, 277, 277, 277, 277, 277, 0, + 578, 561, 0, 0, 0, 0, -2, 267, 268, 0, + 270, 271, 783, 783, 783, 783, 783, 0, 33, 34, + 781, 1, 3, 516, 0, 0, 281, 284, 279, 0, + 561, 0, 0, 0, 60, 0, 0, 770, 0, 771, + 559, 559, 559, 579, 580, 583, 584, 683, 684, 685, + 686, 687, 688, 689, 690, 691, 692, 693, 694, 695, + 696, 697, 698, 699, 700, 701, 702, 703, 704, 705, + 706, 707, 708, 709, 710, 711, 712, 713, 714, 715, + 716, 717, 718, 719, 720, 721, 722, 723, 724, 725, + 726, 727, 728, 729, 730, 731, 732, 733, 734, 735, + 736, 737, 738, 739, 740, 741, 742, 743, 744, 745, + 746, 747, 748, 749, 750, 751, 752, 753, 754, 755, + 756, 757, 758, 759, 760, 761, 762, 763, 764, 765, + 766, 767, 768, 769, 772, 773, 774, 775, 776, 777, + 778, 779, 780, 0, 0, 562, 0, 557, 0, 557, + 557, 557, 0, 226, 348, 587, 588, 770, 771, 0, + 0, 0, 0, 784, 784, 784, 784, 0, 784, 255, + 244, 246, 247, 248, 249, 784, 264, 265, 254, 266, + 269, 272, 273, 274, 275, 276, 27, 520, 0, 0, + 508, 29, 0, 277, 282, 283, 287, 285, 286, 278, + 0, 295, 299, 0, 356, 0, 361, 363, -2, -2, + 0, 398, 399, 400, 401, 402, 0, 0, 0, 0, + 0, 0, 0, 425, 
426, 427, 428, 493, 494, 495, + 496, 497, 498, 499, 500, 365, 366, 490, 540, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 481, 0, + 455, 455, 455, 455, 455, 455, 455, 455, 0, 0, + 0, 0, 0, 0, 306, 308, 309, 310, 329, 0, + 331, 0, 0, 41, 45, 0, 761, 544, -2, -2, + 0, 0, 585, 586, -2, 690, -2, 591, 592, 593, + 594, 595, 596, 597, 598, 599, 600, 601, 602, 603, + 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, + 614, 615, 616, 617, 618, 619, 620, 621, 622, 623, + 624, 625, 626, 627, 628, 629, 630, 631, 632, 633, + 634, 635, 636, 637, 638, 639, 640, 641, 642, 643, + 644, 645, 646, 647, 648, 649, 650, 651, 652, 653, + 654, 655, 656, 657, 658, 659, 660, 661, 662, 663, + 664, 665, 666, 667, 668, 669, 670, 671, 672, 673, + 674, 675, 676, 677, 678, 679, 680, 681, 682, 0, + 77, 0, 0, 784, 0, 67, 0, 0, 0, 0, + 0, 784, 0, 0, 0, 0, 0, 0, 0, 225, + 0, 227, 784, 784, 784, 784, 784, 784, 784, 784, + 236, 785, 786, 237, 238, 239, 784, 784, 241, 0, + 256, 0, 250, 28, 782, 22, 0, 0, 517, 0, + 509, 510, 513, 516, 27, 284, 0, 289, 288, 280, + 0, 296, 0, 0, 0, 300, 0, 302, 303, 0, + 359, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 383, 384, 385, 386, 387, 388, 389, 362, 0, 376, + 0, 0, 0, 418, 419, 420, 421, 422, 423, 0, + 291, 27, 0, 396, 0, 0, 0, 0, 0, 0, + 0, 0, 287, 0, 482, 0, 447, 0, 448, 449, + 450, 451, 452, 453, 454, 0, 291, 0, 0, 43, + 0, 347, 0, 0, 0, 0, 0, 0, 336, 0, + 0, 339, 0, 0, 0, 0, 330, 0, 0, 350, + 734, 332, 0, 334, 335, -2, 0, 0, 0, 39, + 40, 0, 46, 761, 48, 49, 0, 0, 0, 157, + 552, 553, 554, 550, 185, 0, 140, 136, 82, 83, + 84, 129, 86, 129, 129, 129, 129, 154, 154, 154, + 154, 112, 113, 114, 115, 116, 0, 0, 99, 129, + 129, 129, 103, 119, 120, 121, 122, 123, 124, 125, + 126, 87, 88, 89, 90, 91, 92, 93, 131, 131, + 131, 133, 133, 581, 62, 0, 70, 0, 784, 0, + 784, 75, 0, 201, 0, 220, 558, 0, 784, 223, + 224, 349, 589, 590, 228, 229, 230, 231, 232, 233, + 234, 235, 240, 243, 257, 251, 252, 245, 521, 0, + 0, 0, 0, 0, 512, 514, 515, 520, 30, 287, + 0, 501, 0, 0, 0, 290, 25, 357, 358, 360, + 377, 0, 379, 381, 301, 297, 0, 491, -2, 367, + 368, 392, 393, 394, 0, 0, 0, 0, 390, 372, + 0, 403, 404, 405, 406, 407, 408, 409, 410, 411, + 412, 413, 414, 417, 466, 467, 0, 415, 416, 424, + 0, 0, 292, 293, 395, 0, 539, 27, 0, 0, + 0, 0, 0, 490, 0, 0, 0, 0, 488, 485, + 0, 0, 456, 0, 0, 0, 0, 0, 0, 346, + 354, 541, 0, 307, 325, 327, 0, 322, 337, 338, + 340, 0, 342, 0, 344, 345, 311, 312, 313, 0, + 0, 0, 0, 333, 354, 0, 354, 42, 545, 47, + 0, 0, 52, 53, 546, 547, 548, 0, 76, 186, + 188, 191, 192, 193, 78, 79, 0, 0, 0, 0, + 0, 180, 181, 143, 141, 0, 138, 137, 85, 0, + 154, 154, 106, 107, 157, 0, 157, 157, 157, 0, + 0, 100, 101, 102, 94, 0, 95, 96, 97, 0, + 98, 0, 0, 784, 64, 0, 68, 69, 65, 560, + 66, 783, 0, 0, 573, 202, 563, 564, 565, 566, + 567, 568, 569, 570, 571, 572, 0, 219, 784, 222, + 260, 0, 0, 0, 518, 519, 0, 511, 23, 0, + 555, 556, 502, 503, 304, 378, 380, 382, 0, 291, + 369, 390, 373, 0, 370, 0, 0, 364, 429, 0, + 0, 397, -2, 432, 433, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 508, 0, 486, 0, 0, 446, + 457, 458, 459, 460, 533, 0, 0, -2, 0, 0, + 508, 0, 0, 0, 319, 326, 0, 0, 320, 0, + 321, 341, 343, 0, 0, 0, 0, 317, 508, 354, + 38, 50, 51, 0, 0, 57, 158, 0, 189, 0, + 0, 175, 0, 0, 178, 179, 150, 0, 142, 81, + 139, 0, 157, 157, 108, 0, 109, 110, 111, 0, + 127, 0, 0, 0, 0, 582, 63, 71, 72, 0, + 194, 783, 0, 203, 204, 205, 206, 207, 208, 209, + 210, 211, 212, 213, 783, 0, 0, 783, 574, 575, + 576, 577, 0, 221, 242, 0, 0, 258, 259, 522, + 0, 24, 354, 0, 298, 492, 0, 371, 0, 391, + 374, 430, 294, 0, 129, 129, 471, 129, 
133, 474, + 129, 476, 129, 479, 0, 0, 0, 0, 491, 0, + 0, 0, 483, 445, 489, 0, 31, 0, 533, 523, + 535, 537, 0, 27, 0, 529, 0, 516, 542, 355, + 543, 323, 0, 328, 0, 0, 0, 331, 0, 516, + 37, 54, 55, 56, 187, 190, 0, 182, 129, 176, + 177, 152, 0, 144, 145, 146, 147, 148, 149, 130, + 104, 105, 155, 156, 154, 0, 154, 0, 134, 0, + 784, 0, 0, 195, 0, 196, 198, 199, 200, 0, + 261, 262, 504, 305, 431, 375, 434, 468, 154, 472, + 473, 475, 477, 478, 480, 436, 435, 437, 0, 0, + 440, 0, 0, 0, 0, 0, 487, 0, 32, 0, + 538, -2, 0, 0, 0, 44, 35, 0, 315, 0, + 0, 0, 350, 318, 36, 167, 0, 184, 159, 153, + 0, 157, 128, 157, 0, 0, 61, 73, 74, 0, + 0, 506, 0, 469, 470, 0, 0, 0, 0, 461, + 444, 484, 0, 536, 0, -2, 0, 531, 530, 0, + 324, 351, 352, 353, 314, 166, 168, 0, 173, 0, + 183, 164, 0, 161, 163, 151, 117, 118, 132, 135, + 0, 0, 26, 0, 0, 438, 439, 441, 442, 0, + 0, 0, 0, 526, 27, 0, 316, 169, 170, 0, + 174, 172, 80, 0, 160, 162, 67, 0, 215, 0, + 507, 505, 443, 0, 0, 0, 534, -2, 532, 171, + 165, 70, 214, 0, 0, 462, 0, 465, 197, 216, + 0, 463, 0, 0, 0, 0, 0, 464, 0, 0, + 217, 218, +} +var yyTok1 = [...]int{ + + 1, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 73, 3, 3, 3, 100, 92, 3, + 53, 55, 97, 95, 54, 96, 109, 98, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 250, + 81, 80, 82, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 102, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + 3, 3, 3, 3, 91, 3, 103, +} +var yyTok2 = [...]int{ + + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 56, 57, 58, 59, 60, 61, 62, 63, 64, + 65, 66, 67, 68, 69, 70, 71, 72, 74, 75, + 76, 77, 78, 79, 83, 84, 85, 86, 87, 88, + 89, 90, 93, 94, 99, 101, 104, 105, 106, 107, + 108, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, + 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, + 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, + 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, + 199, 200, 201, 202, 203, 204, 205, 206, 207, 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, + 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, + 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, + 239, 240, 241, 242, 243, 244, 245, 246, 247, 248, + 249, +} +var yyTok3 = [...]int{ + 0, +} + +var yyErrorMessages = [...]struct { + state int + token int + msg string +}{} + +//line yaccpar:1 + +/* parser for yacc output */ + +var ( + yyDebug = 0 + yyErrorVerbose = false +) + +type yyLexer interface { + Lex(lval *yySymType) int + Error(s string) +} + +type yyParser interface { + Parse(yyLexer) int + Lookahead() int +} + +type yyParserImpl struct { + lval yySymType + stack [yyInitialStackSize]yySymType + char int +} + +func (p *yyParserImpl) Lookahead() int { + return p.char +} + +func yyNewParser() yyParser { + return &yyParserImpl{} +} + +const yyFlag = -1000 + +func yyTokname(c int) string { + if c >= 1 && c-1 < len(yyToknames) { + if yyToknames[c-1] != "" { + return yyToknames[c-1] + } + } + return __yyfmt__.Sprintf("tok-%v", c) +} + +func yyStatname(s int) string { + if s >= 0 && s < len(yyStatenames) { + if 
yyStatenames[s] != "" { + return yyStatenames[s] + } + } + return __yyfmt__.Sprintf("state-%v", s) +} + +func yyErrorMessage(state, lookAhead int) string { + const TOKSTART = 4 + + if !yyErrorVerbose { + return "syntax error" + } + + for _, e := range yyErrorMessages { + if e.state == state && e.token == lookAhead { + return "syntax error: " + e.msg + } + } + + res := "syntax error: unexpected " + yyTokname(lookAhead) + + // To match Bison, suggest at most four expected tokens. + expected := make([]int, 0, 4) + + // Look for shiftable tokens. + base := yyPact[state] + for tok := TOKSTART; tok-1 < len(yyToknames); tok++ { + if n := base + tok; n >= 0 && n < yyLast && yyChk[yyAct[n]] == tok { + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + } + + if yyDef[state] == -2 { + i := 0 + for yyExca[i] != -1 || yyExca[i+1] != state { + i += 2 + } + + // Look for tokens that we accept or reduce. + for i += 2; yyExca[i] >= 0; i += 2 { + tok := yyExca[i] + if tok < TOKSTART || yyExca[i+1] == 0 { + continue + } + if len(expected) == cap(expected) { + return res + } + expected = append(expected, tok) + } + + // If the default action is to accept or reduce, give up. + if yyExca[i+1] != 0 { + return res + } + } + + for i, tok := range expected { + if i == 0 { + res += ", expecting " + } else { + res += " or " + } + res += yyTokname(tok) + } + return res +} + +func yylex1(lex yyLexer, lval *yySymType) (char, token int) { + token = 0 + char = lex.Lex(lval) + if char <= 0 { + token = yyTok1[0] + goto out + } + if char < len(yyTok1) { + token = yyTok1[char] + goto out + } + if char >= yyPrivate { + if char < yyPrivate+len(yyTok2) { + token = yyTok2[char-yyPrivate] + goto out + } + } + for i := 0; i < len(yyTok3); i += 2 { + token = yyTok3[i+0] + if token == char { + token = yyTok3[i+1] + goto out + } + } + +out: + if token == 0 { + token = yyTok2[1] /* unknown char */ + } + if yyDebug >= 3 { + __yyfmt__.Printf("lex %s(%d)\n", yyTokname(token), uint(char)) + } + return char, token +} + +func yyParse(yylex yyLexer) int { + return yyNewParser().Parse(yylex) +} + +func (yyrcvr *yyParserImpl) Parse(yylex yyLexer) int { + var yyn int + var yyVAL yySymType + var yyDollar []yySymType + _ = yyDollar // silence set and not used + yyS := yyrcvr.stack[:] + + Nerrs := 0 /* number of errors */ + Errflag := 0 /* error recovery flag */ + yystate := 0 + yyrcvr.char = -1 + yytoken := -1 // yyrcvr.char translated into internal numbering + defer func() { + // Make sure we report no lookahead when not parsing. 
+ yystate = -1 + yyrcvr.char = -1 + yytoken = -1 + }() + yyp := -1 + goto yystack + +ret0: + return 0 + +ret1: + return 1 + +yystack: + /* put a state and value onto the stack */ + if yyDebug >= 4 { + __yyfmt__.Printf("char %v in %v\n", yyTokname(yytoken), yyStatname(yystate)) + } + + yyp++ + if yyp >= len(yyS) { + nyys := make([]yySymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + } + yyS[yyp] = yyVAL + yyS[yyp].yys = yystate + +yynewstate: + yyn = yyPact[yystate] + if yyn <= yyFlag { + goto yydefault /* simple state */ + } + if yyrcvr.char < 0 { + yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval) + } + yyn += yytoken + if yyn < 0 || yyn >= yyLast { + goto yydefault + } + yyn = yyAct[yyn] + if yyChk[yyn] == yytoken { /* valid shift */ + yyrcvr.char = -1 + yytoken = -1 + yyVAL = yyrcvr.lval + yystate = yyn + if Errflag > 0 { + Errflag-- + } + goto yystack + } + +yydefault: + /* default state action */ + yyn = yyDef[yystate] + if yyn == -2 { + if yyrcvr.char < 0 { + yyrcvr.char, yytoken = yylex1(yylex, &yyrcvr.lval) + } + + /* look through exception table */ + xi := 0 + for { + if yyExca[xi+0] == -1 && yyExca[xi+1] == yystate { + break + } + xi += 2 + } + for xi += 2; ; xi += 2 { + yyn = yyExca[xi+0] + if yyn < 0 || yyn == yytoken { + break + } + } + yyn = yyExca[xi+1] + if yyn < 0 { + goto ret0 + } + } + if yyn == 0 { + /* error ... attempt to resume parsing */ + switch Errflag { + case 0: /* brand new error */ + yylex.Error(yyErrorMessage(yystate, yytoken)) + Nerrs++ + if yyDebug >= 1 { + __yyfmt__.Printf("%s", yyStatname(yystate)) + __yyfmt__.Printf(" saw %s\n", yyTokname(yytoken)) + } + fallthrough + + case 1, 2: /* incompletely recovered error ... try again */ + Errflag = 3 + + /* find a state where "error" is a legal shift action */ + for yyp >= 0 { + yyn = yyPact[yyS[yyp].yys] + yyErrCode + if yyn >= 0 && yyn < yyLast { + yystate = yyAct[yyn] /* simulate a shift of "error" */ + if yyChk[yystate] == yyErrCode { + goto yystack + } + } + + /* the current p has no shift on "error", pop stack */ + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery pops state %d\n", yyS[yyp].yys) + } + yyp-- + } + /* there is no state on the stack with an error shift ... abort */ + goto ret1 + + case 3: /* no shift yet; clobber input char */ + if yyDebug >= 2 { + __yyfmt__.Printf("error recovery discards %s\n", yyTokname(yytoken)) + } + if yytoken == yyEofCode { + goto ret1 + } + yyrcvr.char = -1 + yytoken = -1 + goto yynewstate /* try again in the same state */ + } + } + + /* reduction by production yyn */ + if yyDebug >= 2 { + __yyfmt__.Printf("reduce %v in:\n\t%v\n", yyn, yyStatname(yystate)) + } + + yynt := yyn + yypt := yyp + _ = yypt // guard against "declared and not used" + + yyp -= yyR2[yyn] + // yyp is now the index of $0. Perform the default action. Iff the + // reduced production is ε, $1 is possibly out of range. 
+ if yyp+1 >= len(yyS) { + nyys := make([]yySymType, len(yyS)*2) + copy(nyys, yyS) + yyS = nyys + } + yyVAL = yyS[yyp+1] + + /* consult goto table to find next state */ + yyn = yyR1[yyn] + yyg := yyPgo[yyn] + yyj := yyg + yyS[yyp].yys + 1 + + if yyj >= yyLast { + yystate = yyAct[yyg] + } else { + yystate = yyAct[yyj] + if yyChk[yystate] != -yyn { + yystate = yyAct[yyg] + } + } + // dummy call; replaced with literal code + switch yynt { + + case 1: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:302 + { + setParseTree(yylex, yyDollar[1].statement) + } + case 2: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:307 + { + } + case 3: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:308 + { + } + case 4: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:312 + { + yyVAL.statement = yyDollar[1].selStmt + } + case 22: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:335 + { + sel := yyDollar[1].selStmt.(*Select) + sel.OrderBy = yyDollar[2].orderBy + sel.Limit = yyDollar[3].limit + sel.Lock = yyDollar[4].str + yyVAL.selStmt = sel + } + case 23: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:343 + { + yyVAL.selStmt = &Union{Type: yyDollar[2].str, Left: yyDollar[1].selStmt, Right: yyDollar[3].selStmt, OrderBy: yyDollar[4].orderBy, Limit: yyDollar[5].limit, Lock: yyDollar[6].str} + } + case 24: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:347 + { + yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), Cache: yyDollar[3].str, SelectExprs: SelectExprs{Nextval{Expr: yyDollar[5].expr}}, From: TableExprs{&AliasedTableExpr{Expr: yyDollar[7].tableName}}} + } + case 25: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:353 + { + yyVAL.statement = &Stream{Comments: Comments(yyDollar[2].bytes2), SelectExpr: yyDollar[3].selectExpr, Table: yyDollar[5].tableName} + } + case 26: + yyDollar = yyS[yypt-10 : yypt+1] + //line sql.y:360 + { + yyVAL.selStmt = &Select{Comments: Comments(yyDollar[2].bytes2), Cache: yyDollar[3].str, Distinct: yyDollar[4].str, Hints: yyDollar[5].str, SelectExprs: yyDollar[6].selectExprs, From: yyDollar[7].tableExprs, Where: NewWhere(WhereStr, yyDollar[8].expr), GroupBy: GroupBy(yyDollar[9].exprs), Having: NewWhere(HavingStr, yyDollar[10].expr)} + } + case 27: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:366 + { + yyVAL.selStmt = yyDollar[1].selStmt + } + case 28: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:370 + { + yyVAL.selStmt = &ParenSelect{Select: yyDollar[2].selStmt} + } + case 29: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:376 + { + yyVAL.selStmt = yyDollar[1].selStmt + } + case 30: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:380 + { + yyVAL.selStmt = &ParenSelect{Select: yyDollar[2].selStmt} + } + case 31: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:387 + { + // insert_data returns a *Insert pre-filled with Columns & Values + ins := yyDollar[6].ins + ins.Action = yyDollar[1].str + ins.Comments = yyDollar[2].bytes2 + ins.Ignore = yyDollar[3].str + ins.Table = yyDollar[4].tableName + ins.Partitions = yyDollar[5].partitions + ins.OnDup = OnDup(yyDollar[7].updateExprs) + yyVAL.statement = ins + } + case 32: + yyDollar = yyS[yypt-8 : yypt+1] + //line sql.y:399 + { + cols := make(Columns, 0, len(yyDollar[7].updateExprs)) + vals := make(ValTuple, 0, len(yyDollar[8].updateExprs)) + for _, updateList := range yyDollar[7].updateExprs { + cols = append(cols, updateList.Name.Name) + vals = append(vals, updateList.Expr) + } + yyVAL.statement = &Insert{Action: yyDollar[1].str, Comments: Comments(yyDollar[2].bytes2), Ignore: yyDollar[3].str, Table: 
yyDollar[4].tableName, Partitions: yyDollar[5].partitions, Columns: cols, Rows: Values{vals}, OnDup: OnDup(yyDollar[8].updateExprs)} + } + case 33: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:411 + { + yyVAL.str = InsertStr + } + case 34: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:415 + { + yyVAL.str = ReplaceStr + } + case 35: + yyDollar = yyS[yypt-8 : yypt+1] + //line sql.y:421 + { + yyVAL.statement = &Update{Comments: Comments(yyDollar[2].bytes2), TableExprs: yyDollar[3].tableExprs, Exprs: yyDollar[5].updateExprs, Where: NewWhere(WhereStr, yyDollar[6].expr), OrderBy: yyDollar[7].orderBy, Limit: yyDollar[8].limit} + } + case 36: + yyDollar = yyS[yypt-8 : yypt+1] + //line sql.y:427 + { + yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), TableExprs: TableExprs{&AliasedTableExpr{Expr: yyDollar[4].tableName}}, Partitions: yyDollar[5].partitions, Where: NewWhere(WhereStr, yyDollar[6].expr), OrderBy: yyDollar[7].orderBy, Limit: yyDollar[8].limit} + } + case 37: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:431 + { + yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Targets: yyDollar[4].tableNames, TableExprs: yyDollar[6].tableExprs, Where: NewWhere(WhereStr, yyDollar[7].expr)} + } + case 38: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:435 + { + yyVAL.statement = &Delete{Comments: Comments(yyDollar[2].bytes2), Targets: yyDollar[3].tableNames, TableExprs: yyDollar[5].tableExprs, Where: NewWhere(WhereStr, yyDollar[6].expr)} + } + case 39: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:440 + { + } + case 40: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:441 + { + } + case 41: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:445 + { + yyVAL.tableNames = TableNames{yyDollar[1].tableName} + } + case 42: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:449 + { + yyVAL.tableNames = append(yyVAL.tableNames, yyDollar[3].tableName) + } + case 43: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:454 + { + yyVAL.partitions = nil + } + case 44: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:458 + { + yyVAL.partitions = yyDollar[3].partitions + } + case 45: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:464 + { + yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Exprs: yyDollar[3].setExprs} + } + case 46: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:468 + { + yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Scope: yyDollar[3].str, Exprs: yyDollar[4].setExprs} + } + case 47: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:472 + { + yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Scope: yyDollar[3].str, Exprs: yyDollar[5].setExprs} + } + case 48: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:476 + { + yyVAL.statement = &Set{Comments: Comments(yyDollar[2].bytes2), Exprs: yyDollar[4].setExprs} + } + case 49: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:482 + { + yyVAL.setExprs = SetExprs{yyDollar[1].setExpr} + } + case 50: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:486 + { + yyVAL.setExprs = append(yyVAL.setExprs, yyDollar[3].setExpr) + } + case 51: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:492 + { + yyVAL.setExpr = yyDollar[3].setExpr + } + case 52: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:496 + { + yyVAL.setExpr = &SetExpr{Name: NewColIdent("tx_read_only"), Expr: NewIntVal([]byte("0"))} + } + case 53: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:500 + { + yyVAL.setExpr = &SetExpr{Name: NewColIdent("tx_read_only"), Expr: NewIntVal([]byte("1"))} + } + case 54: + yyDollar = 
yyS[yypt-2 : yypt+1] + //line sql.y:506 + { + yyVAL.setExpr = &SetExpr{Name: NewColIdent("tx_isolation"), Expr: NewStrVal([]byte("repeatable read"))} + } + case 55: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:510 + { + yyVAL.setExpr = &SetExpr{Name: NewColIdent("tx_isolation"), Expr: NewStrVal([]byte("read committed"))} + } + case 56: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:514 + { + yyVAL.setExpr = &SetExpr{Name: NewColIdent("tx_isolation"), Expr: NewStrVal([]byte("read uncommitted"))} + } + case 57: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:518 + { + yyVAL.setExpr = &SetExpr{Name: NewColIdent("tx_isolation"), Expr: NewStrVal([]byte("serializable"))} + } + case 58: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:524 + { + yyVAL.str = SessionStr + } + case 59: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:528 + { + yyVAL.str = GlobalStr + } + case 60: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:534 + { + yyDollar[1].ddl.TableSpec = yyDollar[2].TableSpec + yyVAL.statement = yyDollar[1].ddl + } + case 61: + yyDollar = yyS[yypt-8 : yypt+1] + //line sql.y:539 + { + // Change this to an alter statement + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[7].tableName, NewName: yyDollar[7].tableName} + } + case 62: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:544 + { + yyVAL.statement = &DDL{Action: CreateStr, NewName: yyDollar[3].tableName.ToViewName()} + } + case 63: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:548 + { + yyVAL.statement = &DDL{Action: CreateStr, NewName: yyDollar[5].tableName.ToViewName()} + } + case 64: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:552 + { + yyVAL.statement = &DDL{Action: CreateVindexStr, VindexSpec: &VindexSpec{ + Name: yyDollar[3].colIdent, + Type: yyDollar[4].colIdent, + Params: yyDollar[5].vindexParams, + }} + } + case 65: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:560 + { + yyVAL.statement = &DBDDL{Action: CreateStr, DBName: string(yyDollar[4].bytes)} + } + case 66: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:564 + { + yyVAL.statement = &DBDDL{Action: CreateStr, DBName: string(yyDollar[4].bytes)} + } + case 67: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:569 + { + yyVAL.colIdent = NewColIdent("") + } + case 68: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:573 + { + yyVAL.colIdent = yyDollar[2].colIdent + } + case 69: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:579 + { + yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + } + case 70: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:584 + { + var v []VindexParam + yyVAL.vindexParams = v + } + case 71: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:589 + { + yyVAL.vindexParams = yyDollar[2].vindexParams + } + case 72: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:595 + { + yyVAL.vindexParams = make([]VindexParam, 0, 4) + yyVAL.vindexParams = append(yyVAL.vindexParams, yyDollar[1].vindexParam) + } + case 73: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:600 + { + yyVAL.vindexParams = append(yyVAL.vindexParams, yyDollar[3].vindexParam) + } + case 74: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:606 + { + yyVAL.vindexParam = VindexParam{Key: yyDollar[1].colIdent, Val: yyDollar[3].str} + } + case 75: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:612 + { + yyVAL.ddl = &DDL{Action: CreateStr, NewName: yyDollar[4].tableName} + setDDL(yylex, yyVAL.ddl) + } + case 76: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:619 + { + yyVAL.TableSpec = yyDollar[2].TableSpec + yyVAL.TableSpec.Options = yyDollar[4].str + } + case 77: 
+ yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:626 + { + yyVAL.TableSpec = &TableSpec{} + yyVAL.TableSpec.AddColumn(yyDollar[1].columnDefinition) + } + case 78: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:631 + { + yyVAL.TableSpec.AddColumn(yyDollar[3].columnDefinition) + } + case 79: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:635 + { + yyVAL.TableSpec.AddIndex(yyDollar[3].indexDefinition) + } + case 80: + yyDollar = yyS[yypt-8 : yypt+1] + //line sql.y:641 + { + yyDollar[2].columnType.NotNull = yyDollar[3].boolVal + yyDollar[2].columnType.Default = yyDollar[4].optVal + yyDollar[2].columnType.OnUpdate = yyDollar[5].optVal + yyDollar[2].columnType.Autoincrement = yyDollar[6].boolVal + yyDollar[2].columnType.KeyOpt = yyDollar[7].colKeyOpt + yyDollar[2].columnType.Comment = yyDollar[8].optVal + yyVAL.columnDefinition = &ColumnDefinition{Name: NewColIdent(string(yyDollar[1].bytes)), Type: yyDollar[2].columnType} + } + case 81: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:652 + { + yyVAL.columnType = yyDollar[1].columnType + yyVAL.columnType.Unsigned = yyDollar[2].boolVal + yyVAL.columnType.Zerofill = yyDollar[3].boolVal + } + case 85: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:663 + { + yyVAL.columnType = yyDollar[1].columnType + yyVAL.columnType.Length = yyDollar[2].optVal + } + case 86: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:668 + { + yyVAL.columnType = yyDollar[1].columnType + } + case 87: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:674 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 88: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:678 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 89: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:682 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 90: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:686 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 91: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:690 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 92: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:694 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 93: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:698 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 94: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:704 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 95: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:710 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 96: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:716 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 97: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:722 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 98: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:728 + { + yyVAL.columnType = 
ColumnType{Type: string(yyDollar[1].bytes)} + yyVAL.columnType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.columnType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 99: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:736 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 100: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:740 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 101: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:744 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 102: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:748 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 103: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:752 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 104: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:758 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} + } + case 105: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:762 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: yyDollar[3].str, Collate: yyDollar[4].str} + } + case 106: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:766 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 107: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:770 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 108: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:774 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + } + case 109: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:778 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + } + case 110: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:782 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + } + case 111: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:786 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), Charset: yyDollar[2].str, Collate: yyDollar[3].str} + } + case 112: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:790 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 113: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:794 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 114: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:798 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 115: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:802 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 116: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:806 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 117: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:810 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), EnumValues: yyDollar[3].strs, Charset: yyDollar[5].str, Collate: yyDollar[6].str} + } + case 118: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:815 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes), EnumValues: yyDollar[3].strs, 
Charset: yyDollar[5].str, Collate: yyDollar[6].str} + } + case 119: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:821 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 120: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:825 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 121: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:829 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 122: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:833 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 123: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:837 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 124: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:841 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 125: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:845 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 126: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:849 + { + yyVAL.columnType = ColumnType{Type: string(yyDollar[1].bytes)} + } + case 127: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:855 + { + yyVAL.strs = make([]string, 0, 4) + yyVAL.strs = append(yyVAL.strs, "'"+string(yyDollar[1].bytes)+"'") + } + case 128: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:860 + { + yyVAL.strs = append(yyDollar[1].strs, "'"+string(yyDollar[3].bytes)+"'") + } + case 129: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:865 + { + yyVAL.optVal = nil + } + case 130: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:869 + { + yyVAL.optVal = NewIntVal(yyDollar[2].bytes) + } + case 131: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:874 + { + yyVAL.LengthScaleOption = LengthScaleOption{} + } + case 132: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:878 + { + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntVal(yyDollar[2].bytes), + Scale: NewIntVal(yyDollar[4].bytes), + } + } + case 133: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:886 + { + yyVAL.LengthScaleOption = LengthScaleOption{} + } + case 134: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:890 + { + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntVal(yyDollar[2].bytes), + } + } + case 135: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:896 + { + yyVAL.LengthScaleOption = LengthScaleOption{ + Length: NewIntVal(yyDollar[2].bytes), + Scale: NewIntVal(yyDollar[4].bytes), + } + } + case 136: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:904 + { + yyVAL.boolVal = BoolVal(false) + } + case 137: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:908 + { + yyVAL.boolVal = BoolVal(true) + } + case 138: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:913 + { + yyVAL.boolVal = BoolVal(false) + } + case 139: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:917 + { + yyVAL.boolVal = BoolVal(true) + } + case 140: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:923 + { + yyVAL.boolVal = BoolVal(false) + } + case 141: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:927 + { + yyVAL.boolVal = BoolVal(false) + } + case 142: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:931 + { + yyVAL.boolVal = BoolVal(true) + } + case 143: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:936 + { + yyVAL.optVal = nil + } + case 144: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:940 + { + yyVAL.optVal = NewStrVal(yyDollar[2].bytes) + } + case 145: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:944 + { + yyVAL.optVal = 
NewIntVal(yyDollar[2].bytes) + } + case 146: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:948 + { + yyVAL.optVal = NewFloatVal(yyDollar[2].bytes) + } + case 147: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:952 + { + yyVAL.optVal = NewValArg(yyDollar[2].bytes) + } + case 148: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:956 + { + yyVAL.optVal = NewValArg(yyDollar[2].bytes) + } + case 149: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:960 + { + yyVAL.optVal = NewBitVal(yyDollar[2].bytes) + } + case 150: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:965 + { + yyVAL.optVal = nil + } + case 151: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:969 + { + yyVAL.optVal = NewValArg(yyDollar[3].bytes) + } + case 152: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:974 + { + yyVAL.boolVal = BoolVal(false) + } + case 153: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:978 + { + yyVAL.boolVal = BoolVal(true) + } + case 154: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:983 + { + yyVAL.str = "" + } + case 155: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:987 + { + yyVAL.str = string(yyDollar[3].bytes) + } + case 156: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:991 + { + yyVAL.str = string(yyDollar[3].bytes) + } + case 157: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:996 + { + yyVAL.str = "" + } + case 158: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1000 + { + yyVAL.str = string(yyDollar[2].bytes) + } + case 159: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1005 + { + yyVAL.colKeyOpt = colKeyNone + } + case 160: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1009 + { + yyVAL.colKeyOpt = colKeyPrimary + } + case 161: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1013 + { + yyVAL.colKeyOpt = colKey + } + case 162: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1017 + { + yyVAL.colKeyOpt = colKeyUniqueKey + } + case 163: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1021 + { + yyVAL.colKeyOpt = colKeyUnique + } + case 164: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1026 + { + yyVAL.optVal = nil + } + case 165: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1030 + { + yyVAL.optVal = NewStrVal(yyDollar[2].bytes) + } + case 166: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1036 + { + yyVAL.indexDefinition = &IndexDefinition{Info: yyDollar[1].indexInfo, Columns: yyDollar[3].indexColumns, Options: yyDollar[5].indexOptions} + } + case 167: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1040 + { + yyVAL.indexDefinition = &IndexDefinition{Info: yyDollar[1].indexInfo, Columns: yyDollar[3].indexColumns} + } + case 168: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1046 + { + yyVAL.indexOptions = []*IndexOption{yyDollar[1].indexOption} + } + case 169: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1050 + { + yyVAL.indexOptions = append(yyVAL.indexOptions, yyDollar[2].indexOption) + } + case 170: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1056 + { + yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Using: string(yyDollar[2].bytes)} + } + case 171: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1060 + { + // should not be string + yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Value: NewIntVal(yyDollar[3].bytes)} + } + case 172: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1065 + { + yyVAL.indexOption = &IndexOption{Name: string(yyDollar[1].bytes), Value: NewStrVal(yyDollar[2].bytes)} + } + case 173: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1071 + { + yyVAL.str = "" + } + case 174: + 
yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1075 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 175: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1081 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].bytes), Name: NewColIdent("PRIMARY"), Primary: true, Unique: true} + } + case 176: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1085 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].str), Name: NewColIdent(string(yyDollar[3].bytes)), Spatial: true, Unique: false} + } + case 177: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1089 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes) + " " + string(yyDollar[2].str), Name: NewColIdent(string(yyDollar[3].bytes)), Unique: true} + } + case 178: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1093 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].bytes), Name: NewColIdent(string(yyDollar[2].bytes)), Unique: true} + } + case 179: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1097 + { + yyVAL.indexInfo = &IndexInfo{Type: string(yyDollar[1].str), Name: NewColIdent(string(yyDollar[2].bytes)), Unique: false} + } + case 180: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1103 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 181: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1107 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 182: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1113 + { + yyVAL.indexColumns = []*IndexColumn{yyDollar[1].indexColumn} + } + case 183: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1117 + { + yyVAL.indexColumns = append(yyVAL.indexColumns, yyDollar[3].indexColumn) + } + case 184: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1123 + { + yyVAL.indexColumn = &IndexColumn{Column: yyDollar[1].colIdent, Length: yyDollar[2].optVal} + } + case 185: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1128 + { + yyVAL.str = "" + } + case 186: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1132 + { + yyVAL.str = " " + string(yyDollar[1].str) + } + case 187: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1136 + { + yyVAL.str = string(yyDollar[1].str) + ", " + string(yyDollar[3].str) + } + case 188: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1144 + { + yyVAL.str = yyDollar[1].str + } + case 189: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1148 + { + yyVAL.str = yyDollar[1].str + " " + yyDollar[2].str + } + case 190: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1152 + { + yyVAL.str = yyDollar[1].str + "=" + yyDollar[3].str + } + case 191: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1158 + { + yyVAL.str = yyDollar[1].colIdent.String() + } + case 192: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1162 + { + yyVAL.str = "'" + string(yyDollar[1].bytes) + "'" + } + case 193: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1166 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 194: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:1172 + { + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName} + } + case 195: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:1176 + { + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName} + } + case 196: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:1180 + { + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName} + } + case 197: + yyDollar = yyS[yypt-12 : yypt+1] + //line 
sql.y:1184 + { + yyVAL.statement = &DDL{ + Action: AddColVindexStr, + Table: yyDollar[4].tableName, + VindexSpec: &VindexSpec{ + Name: yyDollar[7].colIdent, + Type: yyDollar[11].colIdent, + Params: yyDollar[12].vindexParams, + }, + VindexCols: yyDollar[9].columns, + } + } + case 198: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:1197 + { + yyVAL.statement = &DDL{ + Action: DropColVindexStr, + Table: yyDollar[4].tableName, + VindexSpec: &VindexSpec{ + Name: yyDollar[7].colIdent, + }, + } + } + case 199: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:1207 + { + // Change this to a rename statement + yyVAL.statement = &DDL{Action: RenameStr, Table: yyDollar[4].tableName, NewName: yyDollar[7].tableName} + } + case 200: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:1212 + { + // Rename an index can just be an alter + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, NewName: yyDollar[4].tableName} + } + case 201: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1217 + { + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName.ToViewName(), NewName: yyDollar[3].tableName.ToViewName()} + } + case 202: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1221 + { + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[4].tableName, PartitionSpec: yyDollar[5].partSpec} + } + case 214: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:1240 + { + yyVAL.partSpec = &PartitionSpec{Action: ReorganizeStr, Name: yyDollar[3].colIdent, Definitions: yyDollar[6].partDefs} + } + case 215: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1246 + { + yyVAL.partDefs = []*PartitionDefinition{yyDollar[1].partDef} + } + case 216: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1250 + { + yyVAL.partDefs = append(yyDollar[1].partDefs, yyDollar[3].partDef) + } + case 217: + yyDollar = yyS[yypt-8 : yypt+1] + //line sql.y:1256 + { + yyVAL.partDef = &PartitionDefinition{Name: yyDollar[2].colIdent, Limit: yyDollar[7].expr} + } + case 218: + yyDollar = yyS[yypt-8 : yypt+1] + //line sql.y:1260 + { + yyVAL.partDef = &PartitionDefinition{Name: yyDollar[2].colIdent, Maxvalue: true} + } + case 219: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1266 + { + yyVAL.statement = &DDL{Action: RenameStr, Table: yyDollar[3].tableName, NewName: yyDollar[5].tableName} + } + case 220: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1272 + { + var exists bool + if yyDollar[3].byt != 0 { + exists = true + } + yyVAL.statement = &DDL{Action: DropStr, Table: yyDollar[4].tableName, IfExists: exists} + } + case 221: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:1280 + { + // Change this to an alter statement + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[5].tableName, NewName: yyDollar[5].tableName} + } + case 222: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1285 + { + var exists bool + if yyDollar[3].byt != 0 { + exists = true + } + yyVAL.statement = &DDL{Action: DropStr, Table: yyDollar[4].tableName.ToViewName(), IfExists: exists} + } + case 223: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1293 + { + yyVAL.statement = &DBDDL{Action: DropStr, DBName: string(yyDollar[4].bytes)} + } + case 224: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1297 + { + yyVAL.statement = &DBDDL{Action: DropStr, DBName: string(yyDollar[4].bytes)} + } + case 225: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1303 + { + yyVAL.statement = &DDL{Action: TruncateStr, Table: yyDollar[3].tableName} + } + case 226: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1307 + { + yyVAL.statement = &DDL{Action: 
TruncateStr, Table: yyDollar[2].tableName} + } + case 227: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1312 + { + yyVAL.statement = &DDL{Action: AlterStr, Table: yyDollar[3].tableName, NewName: yyDollar[3].tableName} + } + case 228: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1318 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 229: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1322 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 230: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1326 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 231: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1331 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 232: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1335 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 233: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1339 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 234: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1343 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 235: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1347 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes) + " " + string(yyDollar[3].bytes)} + } + case 236: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1351 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 237: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1355 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 238: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1359 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 239: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1363 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 240: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1367 + { + yyVAL.statement = &Show{Scope: yyDollar[2].str, Type: string(yyDollar[3].bytes)} + } + case 241: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1371 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 242: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:1375 + { + // this is ugly, but I couldn't find a better way for now + if yyDollar[4].str == "processlist" { + yyVAL.statement = &Show{Type: yyDollar[4].str} + } else { + showTablesOpt := &ShowTablesOpt{Extended: yyDollar[2].str, Full: yyDollar[3].str, DbName: yyDollar[5].str, Filter: yyDollar[6].showFilter} + yyVAL.statement = &Show{Type: yyDollar[4].str, ShowTablesOpt: showTablesOpt} + } + } + case 243: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1385 + { + yyVAL.statement = &Show{Scope: yyDollar[2].str, Type: string(yyDollar[3].bytes)} + } + case 244: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1389 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 245: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1393 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes), OnTable: yyDollar[4].tableName} + } + case 246: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1397 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 247: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1401 + { + yyVAL.statement = &Show{Type: 
string(yyDollar[2].bytes)} + } + case 248: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1405 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 249: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1409 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 250: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1419 + { + yyVAL.statement = &Show{Type: string(yyDollar[2].bytes)} + } + case 251: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1425 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 252: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1429 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 253: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1435 + { + yyVAL.str = "" + } + case 254: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1439 + { + yyVAL.str = "extended " + } + case 255: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1445 + { + yyVAL.str = "" + } + case 256: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1449 + { + yyVAL.str = "full " + } + case 257: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1455 + { + yyVAL.str = "" + } + case 258: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1459 + { + yyVAL.str = yyDollar[2].tableIdent.v + } + case 259: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1463 + { + yyVAL.str = yyDollar[2].tableIdent.v + } + case 260: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1469 + { + yyVAL.showFilter = nil + } + case 261: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1473 + { + yyVAL.showFilter = &ShowFilter{Like: string(yyDollar[2].bytes)} + } + case 262: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1477 + { + yyVAL.showFilter = &ShowFilter{Filter: yyDollar[2].expr} + } + case 263: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1483 + { + yyVAL.str = "" + } + case 264: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1487 + { + yyVAL.str = SessionStr + } + case 265: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1491 + { + yyVAL.str = GlobalStr + } + case 266: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1497 + { + yyVAL.statement = &Use{DBName: yyDollar[2].tableIdent} + } + case 267: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1501 + { + yyVAL.statement = &Use{DBName: TableIdent{v: ""}} + } + case 268: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1507 + { + yyVAL.statement = &Begin{} + } + case 269: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1511 + { + yyVAL.statement = &Begin{} + } + case 270: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1517 + { + yyVAL.statement = &Commit{} + } + case 271: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1523 + { + yyVAL.statement = &Rollback{} + } + case 272: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1529 + { + yyVAL.statement = &OtherRead{} + } + case 273: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1533 + { + yyVAL.statement = &OtherRead{} + } + case 274: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1537 + { + yyVAL.statement = &OtherRead{} + } + case 275: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1541 + { + yyVAL.statement = &OtherAdmin{} + } + case 276: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1545 + { + yyVAL.statement = &OtherAdmin{} + } + case 277: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1550 + { + setAllowComments(yylex, true) + } + case 278: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1554 + { + yyVAL.bytes2 = yyDollar[2].bytes2 + setAllowComments(yylex, false) + } + case 279: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1560 + { + 
yyVAL.bytes2 = nil + } + case 280: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1564 + { + yyVAL.bytes2 = append(yyDollar[1].bytes2, yyDollar[2].bytes) + } + case 281: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1570 + { + yyVAL.str = UnionStr + } + case 282: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1574 + { + yyVAL.str = UnionAllStr + } + case 283: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1578 + { + yyVAL.str = UnionDistinctStr + } + case 284: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1583 + { + yyVAL.str = "" + } + case 285: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1587 + { + yyVAL.str = SQLNoCacheStr + } + case 286: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1591 + { + yyVAL.str = SQLCacheStr + } + case 287: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1596 + { + yyVAL.str = "" + } + case 288: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1600 + { + yyVAL.str = DistinctStr + } + case 289: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1605 + { + yyVAL.str = "" + } + case 290: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1609 + { + yyVAL.str = StraightJoinHint + } + case 291: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1614 + { + yyVAL.selectExprs = nil + } + case 292: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1618 + { + yyVAL.selectExprs = yyDollar[1].selectExprs + } + case 293: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1624 + { + yyVAL.selectExprs = SelectExprs{yyDollar[1].selectExpr} + } + case 294: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1628 + { + yyVAL.selectExprs = append(yyVAL.selectExprs, yyDollar[3].selectExpr) + } + case 295: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1634 + { + yyVAL.selectExpr = &StarExpr{} + } + case 296: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1638 + { + yyVAL.selectExpr = &AliasedExpr{Expr: yyDollar[1].expr, As: yyDollar[2].colIdent} + } + case 297: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1642 + { + yyVAL.selectExpr = &StarExpr{TableName: TableName{Name: yyDollar[1].tableIdent}} + } + case 298: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1646 + { + yyVAL.selectExpr = &StarExpr{TableName: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}} + } + case 299: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1651 + { + yyVAL.colIdent = ColIdent{} + } + case 300: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1655 + { + yyVAL.colIdent = yyDollar[1].colIdent + } + case 301: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1659 + { + yyVAL.colIdent = yyDollar[2].colIdent + } + case 303: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1666 + { + yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes)) + } + case 304: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1671 + { + yyVAL.tableExprs = TableExprs{&AliasedTableExpr{Expr: TableName{Name: NewTableIdent("dual")}}} + } + case 305: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1675 + { + yyVAL.tableExprs = yyDollar[2].tableExprs + } + case 306: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1681 + { + yyVAL.tableExprs = TableExprs{yyDollar[1].tableExpr} + } + case 307: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1685 + { + yyVAL.tableExprs = append(yyVAL.tableExprs, yyDollar[3].tableExpr) + } + case 310: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1695 + { + yyVAL.tableExpr = yyDollar[1].aliasedTableName + } + case 311: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1699 + { + yyVAL.tableExpr = &AliasedTableExpr{Expr: yyDollar[1].subquery, As: 
yyDollar[3].tableIdent} + } + case 312: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1703 + { + yyVAL.tableExpr = &ParenTableExpr{Exprs: yyDollar[2].tableExprs} + } + case 313: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1709 + { + yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, As: yyDollar[2].tableIdent, Hints: yyDollar[3].indexHints} + } + case 314: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:1713 + { + yyVAL.aliasedTableName = &AliasedTableExpr{Expr: yyDollar[1].tableName, Partitions: yyDollar[4].partitions, As: yyDollar[6].tableIdent, Hints: yyDollar[7].indexHints} + } + case 315: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1719 + { + yyVAL.columns = Columns{yyDollar[1].colIdent} + } + case 316: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1723 + { + yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) + } + case 317: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1729 + { + yyVAL.partitions = Partitions{yyDollar[1].colIdent} + } + case 318: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1733 + { + yyVAL.partitions = append(yyVAL.partitions, yyDollar[3].colIdent) + } + case 319: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1746 + { + yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} + } + case 320: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1750 + { + yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} + } + case 321: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1754 + { + yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr, Condition: yyDollar[4].joinCondition} + } + case 322: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1758 + { + yyVAL.tableExpr = &JoinTableExpr{LeftExpr: yyDollar[1].tableExpr, Join: yyDollar[2].str, RightExpr: yyDollar[3].tableExpr} + } + case 323: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1764 + { + yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} + } + case 324: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1766 + { + yyVAL.joinCondition = JoinCondition{Using: yyDollar[3].columns} + } + case 325: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1770 + { + yyVAL.joinCondition = JoinCondition{} + } + case 326: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1772 + { + yyVAL.joinCondition = yyDollar[1].joinCondition + } + case 327: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1776 + { + yyVAL.joinCondition = JoinCondition{} + } + case 328: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1778 + { + yyVAL.joinCondition = JoinCondition{On: yyDollar[2].expr} + } + case 329: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1781 + { + yyVAL.empty = struct{}{} + } + case 330: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1783 + { + yyVAL.empty = struct{}{} + } + case 331: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1786 + { + yyVAL.tableIdent = NewTableIdent("") + } + case 332: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1790 + { + yyVAL.tableIdent = yyDollar[1].tableIdent + } + case 333: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1794 + { + yyVAL.tableIdent = yyDollar[2].tableIdent + } + case 335: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1801 + { + yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes)) + } + case 336: + yyDollar = yyS[yypt-1 : yypt+1] + 
//line sql.y:1807 + { + yyVAL.str = JoinStr + } + case 337: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1811 + { + yyVAL.str = JoinStr + } + case 338: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1815 + { + yyVAL.str = JoinStr + } + case 339: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1821 + { + yyVAL.str = StraightJoinStr + } + case 340: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1827 + { + yyVAL.str = LeftJoinStr + } + case 341: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1831 + { + yyVAL.str = LeftJoinStr + } + case 342: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1835 + { + yyVAL.str = RightJoinStr + } + case 343: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1839 + { + yyVAL.str = RightJoinStr + } + case 344: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1845 + { + yyVAL.str = NaturalJoinStr + } + case 345: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1849 + { + if yyDollar[2].str == LeftJoinStr { + yyVAL.str = NaturalLeftJoinStr + } else { + yyVAL.str = NaturalRightJoinStr + } + } + case 346: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1859 + { + yyVAL.tableName = yyDollar[2].tableName + } + case 347: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1863 + { + yyVAL.tableName = yyDollar[1].tableName + } + case 348: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1869 + { + yyVAL.tableName = TableName{Name: yyDollar[1].tableIdent} + } + case 349: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1873 + { + yyVAL.tableName = TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent} + } + case 350: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1878 + { + yyVAL.indexHints = nil + } + case 351: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1882 + { + yyVAL.indexHints = &IndexHints{Type: UseStr, Indexes: yyDollar[4].columns} + } + case 352: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1886 + { + yyVAL.indexHints = &IndexHints{Type: IgnoreStr, Indexes: yyDollar[4].columns} + } + case 353: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1890 + { + yyVAL.indexHints = &IndexHints{Type: ForceStr, Indexes: yyDollar[4].columns} + } + case 354: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1895 + { + yyVAL.expr = nil + } + case 355: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1899 + { + yyVAL.expr = yyDollar[2].expr + } + case 356: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1905 + { + yyVAL.expr = yyDollar[1].expr + } + case 357: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1909 + { + yyVAL.expr = &AndExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} + } + case 358: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1913 + { + yyVAL.expr = &OrExpr{Left: yyDollar[1].expr, Right: yyDollar[3].expr} + } + case 359: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1917 + { + yyVAL.expr = &NotExpr{Expr: yyDollar[2].expr} + } + case 360: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1921 + { + yyVAL.expr = &IsExpr{Operator: yyDollar[3].str, Expr: yyDollar[1].expr} + } + case 361: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1925 + { + yyVAL.expr = yyDollar[1].expr + } + case 362: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1929 + { + yyVAL.expr = &Default{ColName: yyDollar[2].str} + } + case 363: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:1935 + { + yyVAL.str = "" + } + case 364: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1939 + { + yyVAL.str = string(yyDollar[2].bytes) + } + case 365: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1945 + { + yyVAL.boolVal = BoolVal(true) + } + case 
366: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1949 + { + yyVAL.boolVal = BoolVal(false) + } + case 367: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1955 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: yyDollar[2].str, Right: yyDollar[3].expr} + } + case 368: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1959 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: InStr, Right: yyDollar[3].colTuple} + } + case 369: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1963 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotInStr, Right: yyDollar[4].colTuple} + } + case 370: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1967 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: LikeStr, Right: yyDollar[3].expr, Escape: yyDollar[4].expr} + } + case 371: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1971 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotLikeStr, Right: yyDollar[4].expr, Escape: yyDollar[5].expr} + } + case 372: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:1975 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: RegexpStr, Right: yyDollar[3].expr} + } + case 373: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:1979 + { + yyVAL.expr = &ComparisonExpr{Left: yyDollar[1].expr, Operator: NotRegexpStr, Right: yyDollar[4].expr} + } + case 374: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:1983 + { + yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: BetweenStr, From: yyDollar[3].expr, To: yyDollar[5].expr} + } + case 375: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:1987 + { + yyVAL.expr = &RangeCond{Left: yyDollar[1].expr, Operator: NotBetweenStr, From: yyDollar[4].expr, To: yyDollar[6].expr} + } + case 376: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:1991 + { + yyVAL.expr = &ExistsExpr{Subquery: yyDollar[2].subquery} + } + case 377: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:1997 + { + yyVAL.str = IsNullStr + } + case 378: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2001 + { + yyVAL.str = IsNotNullStr + } + case 379: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2005 + { + yyVAL.str = IsTrueStr + } + case 380: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2009 + { + yyVAL.str = IsNotTrueStr + } + case 381: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2013 + { + yyVAL.str = IsFalseStr + } + case 382: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2017 + { + yyVAL.str = IsNotFalseStr + } + case 383: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2023 + { + yyVAL.str = EqualStr + } + case 384: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2027 + { + yyVAL.str = LessThanStr + } + case 385: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2031 + { + yyVAL.str = GreaterThanStr + } + case 386: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2035 + { + yyVAL.str = LessEqualStr + } + case 387: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2039 + { + yyVAL.str = GreaterEqualStr + } + case 388: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2043 + { + yyVAL.str = NotEqualStr + } + case 389: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2047 + { + yyVAL.str = NullSafeEqualStr + } + case 390: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2052 + { + yyVAL.expr = nil + } + case 391: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2056 + { + yyVAL.expr = yyDollar[2].expr + } + case 392: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2062 + { + yyVAL.colTuple = yyDollar[1].valTuple + } + case 393: + yyDollar = 
yyS[yypt-1 : yypt+1] + //line sql.y:2066 + { + yyVAL.colTuple = yyDollar[1].subquery + } + case 394: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2070 + { + yyVAL.colTuple = ListArg(yyDollar[1].bytes) + } + case 395: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2076 + { + yyVAL.subquery = &Subquery{yyDollar[2].selStmt} + } + case 396: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2082 + { + yyVAL.exprs = Exprs{yyDollar[1].expr} + } + case 397: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2086 + { + yyVAL.exprs = append(yyDollar[1].exprs, yyDollar[3].expr) + } + case 398: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2092 + { + yyVAL.expr = yyDollar[1].expr + } + case 399: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2096 + { + yyVAL.expr = yyDollar[1].boolVal + } + case 400: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2100 + { + yyVAL.expr = yyDollar[1].colName + } + case 401: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2104 + { + yyVAL.expr = yyDollar[1].expr + } + case 402: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2108 + { + yyVAL.expr = yyDollar[1].subquery + } + case 403: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2112 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitAndStr, Right: yyDollar[3].expr} + } + case 404: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2116 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitOrStr, Right: yyDollar[3].expr} + } + case 405: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2120 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: BitXorStr, Right: yyDollar[3].expr} + } + case 406: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2124 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: PlusStr, Right: yyDollar[3].expr} + } + case 407: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2128 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MinusStr, Right: yyDollar[3].expr} + } + case 408: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2132 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: MultStr, Right: yyDollar[3].expr} + } + case 409: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2136 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: DivStr, Right: yyDollar[3].expr} + } + case 410: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2140 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: IntDivStr, Right: yyDollar[3].expr} + } + case 411: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2144 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} + } + case 412: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2148 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ModStr, Right: yyDollar[3].expr} + } + case 413: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2152 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftLeftStr, Right: yyDollar[3].expr} + } + case 414: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2156 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].expr, Operator: ShiftRightStr, Right: yyDollar[3].expr} + } + case 415: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2160 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONExtractOp, Right: yyDollar[3].expr} + } + case 416: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2164 + { + yyVAL.expr = &BinaryExpr{Left: yyDollar[1].colName, Operator: JSONUnquoteExtractOp, Right: yyDollar[3].expr} + } + case 417: + yyDollar = 
yyS[yypt-3 : yypt+1] + //line sql.y:2168 + { + yyVAL.expr = &CollateExpr{Expr: yyDollar[1].expr, Charset: yyDollar[3].str} + } + case 418: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2172 + { + yyVAL.expr = &UnaryExpr{Operator: BinaryStr, Expr: yyDollar[2].expr} + } + case 419: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2176 + { + yyVAL.expr = &UnaryExpr{Operator: UBinaryStr, Expr: yyDollar[2].expr} + } + case 420: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2180 + { + if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal { + yyVAL.expr = num + } else { + yyVAL.expr = &UnaryExpr{Operator: UPlusStr, Expr: yyDollar[2].expr} + } + } + case 421: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2188 + { + if num, ok := yyDollar[2].expr.(*SQLVal); ok && num.Type == IntVal { + // Handle double negative + if num.Val[0] == '-' { + num.Val = num.Val[1:] + yyVAL.expr = num + } else { + yyVAL.expr = NewIntVal(append([]byte("-"), num.Val...)) + } + } else { + yyVAL.expr = &UnaryExpr{Operator: UMinusStr, Expr: yyDollar[2].expr} + } + } + case 422: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2202 + { + yyVAL.expr = &UnaryExpr{Operator: TildaStr, Expr: yyDollar[2].expr} + } + case 423: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2206 + { + yyVAL.expr = &UnaryExpr{Operator: BangStr, Expr: yyDollar[2].expr} + } + case 424: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2210 + { + // This rule prevents the usage of INTERVAL + // as a function. If support is needed for that, + // we'll need to revisit this. The solution + // will be non-trivial because of grammar conflicts. + yyVAL.expr = &IntervalExpr{Expr: yyDollar[2].expr, Unit: yyDollar[3].colIdent.String()} + } + case 429: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2228 + { + yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Exprs: yyDollar[3].selectExprs} + } + case 430: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:2232 + { + yyVAL.expr = &FuncExpr{Name: yyDollar[1].colIdent, Distinct: true, Exprs: yyDollar[4].selectExprs} + } + case 431: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:2236 + { + yyVAL.expr = &FuncExpr{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].colIdent, Exprs: yyDollar[5].selectExprs} + } + case 432: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2246 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("left"), Exprs: yyDollar[3].selectExprs} + } + case 433: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2250 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("right"), Exprs: yyDollar[3].selectExprs} + } + case 434: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:2254 + { + yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} + } + case 435: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:2258 + { + yyVAL.expr = &ConvertExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].convertType} + } + case 436: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:2262 + { + yyVAL.expr = &ConvertUsingExpr{Expr: yyDollar[3].expr, Type: yyDollar[5].str} + } + case 437: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:2266 + { + yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: nil} + } + case 438: + yyDollar = yyS[yypt-8 : yypt+1] + //line sql.y:2270 + { + yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + } + case 439: + yyDollar = yyS[yypt-8 : yypt+1] + //line sql.y:2274 + { + yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + } + case 440: + 
yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:2278 + { + yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: nil} + } + case 441: + yyDollar = yyS[yypt-8 : yypt+1] + //line sql.y:2282 + { + yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + } + case 442: + yyDollar = yyS[yypt-8 : yypt+1] + //line sql.y:2286 + { + yyVAL.expr = &SubstrExpr{Name: yyDollar[3].colName, From: yyDollar[5].expr, To: yyDollar[7].expr} + } + case 443: + yyDollar = yyS[yypt-9 : yypt+1] + //line sql.y:2290 + { + yyVAL.expr = &MatchExpr{Columns: yyDollar[3].selectExprs, Expr: yyDollar[7].expr, Option: yyDollar[8].str} + } + case 444: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:2294 + { + yyVAL.expr = &GroupConcatExpr{Distinct: yyDollar[3].str, Exprs: yyDollar[4].selectExprs, OrderBy: yyDollar[5].orderBy, Separator: yyDollar[6].str} + } + case 445: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:2298 + { + yyVAL.expr = &CaseExpr{Expr: yyDollar[2].expr, Whens: yyDollar[3].whens, Else: yyDollar[4].expr} + } + case 446: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2302 + { + yyVAL.expr = &ValuesFuncExpr{Name: yyDollar[3].colName} + } + case 447: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2312 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("current_timestamp")} + } + case 448: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2316 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_timestamp")} + } + case 449: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2320 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_time")} + } + case 450: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2324 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("utc_date")} + } + case 451: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2329 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("localtime")} + } + case 452: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2334 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("localtimestamp")} + } + case 453: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2339 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("current_date")} + } + case 454: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2344 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("current_time")} + } + case 457: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2358 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("if"), Exprs: yyDollar[3].selectExprs} + } + case 458: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2362 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("database"), Exprs: yyDollar[3].selectExprs} + } + case 459: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2366 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("mod"), Exprs: yyDollar[3].selectExprs} + } + case 460: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2370 + { + yyVAL.expr = &FuncExpr{Name: NewColIdent("replace"), Exprs: yyDollar[3].selectExprs} + } + case 461: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2376 + { + yyVAL.str = "" + } + case 462: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2380 + { + yyVAL.str = BooleanModeStr + } + case 463: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2384 + { + yyVAL.str = NaturalLanguageModeStr + } + case 464: + yyDollar = yyS[yypt-7 : yypt+1] + //line sql.y:2388 + { + yyVAL.str = NaturalLanguageModeWithQueryExpansionStr + } + case 465: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2392 + { + yyVAL.str = QueryExpansionStr + } + case 466: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2398 + { + yyVAL.str = 
string(yyDollar[1].bytes) + } + case 467: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2402 + { + yyVAL.str = string(yyDollar[1].bytes) + } + case 468: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2408 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 469: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2412 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: yyDollar[3].str, Operator: CharacterSetStr} + } + case 470: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2416 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal, Charset: string(yyDollar[3].bytes)} + } + case 471: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2420 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 472: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2424 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 473: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2428 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + yyVAL.convertType.Length = yyDollar[2].LengthScaleOption.Length + yyVAL.convertType.Scale = yyDollar[2].LengthScaleOption.Scale + } + case 474: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2434 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 475: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2438 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 476: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2442 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 477: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2446 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 478: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2450 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes), Length: yyDollar[2].optVal} + } + case 479: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2454 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 480: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2458 + { + yyVAL.convertType = &ConvertType{Type: string(yyDollar[1].bytes)} + } + case 481: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2463 + { + yyVAL.expr = nil + } + case 482: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2467 + { + yyVAL.expr = yyDollar[1].expr + } + case 483: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2472 + { + yyVAL.str = string("") + } + case 484: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2476 + { + yyVAL.str = " separator '" + string(yyDollar[2].bytes) + "'" + } + case 485: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2482 + { + yyVAL.whens = []*When{yyDollar[1].when} + } + case 486: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2486 + { + yyVAL.whens = append(yyDollar[1].whens, yyDollar[2].when) + } + case 487: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2492 + { + yyVAL.when = &When{Cond: yyDollar[2].expr, Val: yyDollar[4].expr} + } + case 488: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2497 + { + yyVAL.expr = nil + } + case 489: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2501 + { + yyVAL.expr = yyDollar[2].expr + } + case 490: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2507 + { + yyVAL.colName = &ColName{Name: yyDollar[1].colIdent} + } + case 491: + yyDollar 
= yyS[yypt-3 : yypt+1] + //line sql.y:2511 + { + yyVAL.colName = &ColName{Qualifier: TableName{Name: yyDollar[1].tableIdent}, Name: yyDollar[3].colIdent} + } + case 492: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:2515 + { + yyVAL.colName = &ColName{Qualifier: TableName{Qualifier: yyDollar[1].tableIdent, Name: yyDollar[3].tableIdent}, Name: yyDollar[5].colIdent} + } + case 493: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2521 + { + yyVAL.expr = NewStrVal(yyDollar[1].bytes) + } + case 494: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2525 + { + yyVAL.expr = NewHexVal(yyDollar[1].bytes) + } + case 495: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2529 + { + yyVAL.expr = NewBitVal(yyDollar[1].bytes) + } + case 496: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2533 + { + yyVAL.expr = NewIntVal(yyDollar[1].bytes) + } + case 497: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2537 + { + yyVAL.expr = NewFloatVal(yyDollar[1].bytes) + } + case 498: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2541 + { + yyVAL.expr = NewHexNum(yyDollar[1].bytes) + } + case 499: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2545 + { + yyVAL.expr = NewValArg(yyDollar[1].bytes) + } + case 500: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2549 + { + yyVAL.expr = &NullVal{} + } + case 501: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2555 + { + // TODO(sougou): Deprecate this construct. + if yyDollar[1].colIdent.Lowered() != "value" { + yylex.Error("expecting value after next") + return 1 + } + yyVAL.expr = NewIntVal([]byte("1")) + } + case 502: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2564 + { + yyVAL.expr = NewIntVal(yyDollar[1].bytes) + } + case 503: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2568 + { + yyVAL.expr = NewValArg(yyDollar[1].bytes) + } + case 504: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2573 + { + yyVAL.exprs = nil + } + case 505: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2577 + { + yyVAL.exprs = yyDollar[3].exprs + } + case 506: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2582 + { + yyVAL.expr = nil + } + case 507: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2586 + { + yyVAL.expr = yyDollar[2].expr + } + case 508: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2591 + { + yyVAL.orderBy = nil + } + case 509: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2595 + { + yyVAL.orderBy = yyDollar[3].orderBy + } + case 510: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2601 + { + yyVAL.orderBy = OrderBy{yyDollar[1].order} + } + case 511: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2605 + { + yyVAL.orderBy = append(yyDollar[1].orderBy, yyDollar[3].order) + } + case 512: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2611 + { + yyVAL.order = &Order{Expr: yyDollar[1].expr, Direction: yyDollar[2].str} + } + case 513: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2616 + { + yyVAL.str = AscScr + } + case 514: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2620 + { + yyVAL.str = AscScr + } + case 515: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2624 + { + yyVAL.str = DescScr + } + case 516: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2629 + { + yyVAL.limit = nil + } + case 517: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2633 + { + yyVAL.limit = &Limit{Rowcount: yyDollar[2].expr} + } + case 518: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2637 + { + yyVAL.limit = &Limit{Offset: yyDollar[2].expr, Rowcount: yyDollar[4].expr} + } + case 519: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2641 + { + 
yyVAL.limit = &Limit{Offset: yyDollar[4].expr, Rowcount: yyDollar[2].expr} + } + case 520: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2646 + { + yyVAL.str = "" + } + case 521: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2650 + { + yyVAL.str = ForUpdateStr + } + case 522: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2654 + { + yyVAL.str = ShareModeStr + } + case 523: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2667 + { + yyVAL.ins = &Insert{Rows: yyDollar[2].values} + } + case 524: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2671 + { + yyVAL.ins = &Insert{Rows: yyDollar[1].selStmt} + } + case 525: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2675 + { + // Drop the redundant parenthesis. + yyVAL.ins = &Insert{Rows: yyDollar[2].selStmt} + } + case 526: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:2680 + { + yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].values} + } + case 527: + yyDollar = yyS[yypt-4 : yypt+1] + //line sql.y:2684 + { + yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[4].selStmt} + } + case 528: + yyDollar = yyS[yypt-6 : yypt+1] + //line sql.y:2688 + { + // Drop the redundant parenthesis. + yyVAL.ins = &Insert{Columns: yyDollar[2].columns, Rows: yyDollar[5].selStmt} + } + case 529: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2695 + { + yyVAL.columns = Columns{yyDollar[1].colIdent} + } + case 530: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2699 + { + yyVAL.columns = Columns{yyDollar[3].colIdent} + } + case 531: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2703 + { + yyVAL.columns = append(yyVAL.columns, yyDollar[3].colIdent) + } + case 532: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:2707 + { + yyVAL.columns = append(yyVAL.columns, yyDollar[5].colIdent) + } + case 533: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2712 + { + yyVAL.updateExprs = nil + } + case 534: + yyDollar = yyS[yypt-5 : yypt+1] + //line sql.y:2716 + { + yyVAL.updateExprs = yyDollar[5].updateExprs + } + case 535: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2722 + { + yyVAL.values = Values{yyDollar[1].valTuple} + } + case 536: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2726 + { + yyVAL.values = append(yyDollar[1].values, yyDollar[3].valTuple) + } + case 537: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2732 + { + yyVAL.valTuple = yyDollar[1].valTuple + } + case 538: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2736 + { + yyVAL.valTuple = ValTuple{} + } + case 539: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2742 + { + yyVAL.valTuple = ValTuple(yyDollar[2].exprs) + } + case 540: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2748 + { + if len(yyDollar[1].valTuple) == 1 { + yyVAL.expr = &ParenExpr{yyDollar[1].valTuple[0]} + } else { + yyVAL.expr = yyDollar[1].valTuple + } + } + case 541: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2758 + { + yyVAL.updateExprs = UpdateExprs{yyDollar[1].updateExpr} + } + case 542: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2762 + { + yyVAL.updateExprs = append(yyDollar[1].updateExprs, yyDollar[3].updateExpr) + } + case 543: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2768 + { + yyVAL.updateExpr = &UpdateExpr{Name: yyDollar[1].colName, Expr: yyDollar[3].expr} + } + case 544: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2774 + { + yyVAL.setExprs = SetExprs{yyDollar[1].setExpr} + } + case 545: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2778 + { + yyVAL.setExprs = append(yyDollar[1].setExprs, yyDollar[3].setExpr) + } + case 546: + yyDollar = 
yyS[yypt-3 : yypt+1] + //line sql.y:2784 + { + yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: NewStrVal([]byte("on"))} + } + case 547: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2788 + { + yyVAL.setExpr = &SetExpr{Name: yyDollar[1].colIdent, Expr: yyDollar[3].expr} + } + case 548: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2792 + { + yyVAL.setExpr = &SetExpr{Name: NewColIdent(string(yyDollar[1].bytes)), Expr: yyDollar[2].expr} + } + case 550: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2799 + { + yyVAL.bytes = []byte("charset") + } + case 552: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2806 + { + yyVAL.expr = NewStrVal([]byte(yyDollar[1].colIdent.String())) + } + case 553: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2810 + { + yyVAL.expr = NewStrVal(yyDollar[1].bytes) + } + case 554: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2814 + { + yyVAL.expr = &Default{} + } + case 557: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2823 + { + yyVAL.byt = 0 + } + case 558: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2825 + { + yyVAL.byt = 1 + } + case 559: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2828 + { + yyVAL.empty = struct{}{} + } + case 560: + yyDollar = yyS[yypt-3 : yypt+1] + //line sql.y:2830 + { + yyVAL.empty = struct{}{} + } + case 561: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2833 + { + yyVAL.str = "" + } + case 562: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2835 + { + yyVAL.str = IgnoreStr + } + case 563: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2839 + { + yyVAL.empty = struct{}{} + } + case 564: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2841 + { + yyVAL.empty = struct{}{} + } + case 565: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2843 + { + yyVAL.empty = struct{}{} + } + case 566: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2845 + { + yyVAL.empty = struct{}{} + } + case 567: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2847 + { + yyVAL.empty = struct{}{} + } + case 568: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2849 + { + yyVAL.empty = struct{}{} + } + case 569: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2851 + { + yyVAL.empty = struct{}{} + } + case 570: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2853 + { + yyVAL.empty = struct{}{} + } + case 571: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2855 + { + yyVAL.empty = struct{}{} + } + case 572: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2857 + { + yyVAL.empty = struct{}{} + } + case 573: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2860 + { + yyVAL.empty = struct{}{} + } + case 574: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2862 + { + yyVAL.empty = struct{}{} + } + case 575: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2864 + { + yyVAL.empty = struct{}{} + } + case 576: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2868 + { + yyVAL.empty = struct{}{} + } + case 577: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2870 + { + yyVAL.empty = struct{}{} + } + case 578: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2873 + { + yyVAL.empty = struct{}{} + } + case 579: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2875 + { + yyVAL.empty = struct{}{} + } + case 580: + yyDollar = yyS[yypt-1 : yypt+1] + //line sql.y:2877 + { + yyVAL.empty = struct{}{} + } + case 581: + yyDollar = yyS[yypt-0 : yypt+1] + //line sql.y:2880 + { + yyVAL.colIdent = ColIdent{} + } + case 582: + yyDollar = yyS[yypt-2 : yypt+1] + //line sql.y:2882 + { + yyVAL.colIdent = yyDollar[2].colIdent + } + case 583: + yyDollar = 
yyS[yypt-1 : yypt+1]
+		//line sql.y:2886
+		{
+			yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes))
+		}
+	case 584:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line sql.y:2890
+		{
+			yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes))
+		}
+	case 586:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line sql.y:2897
+		{
+			yyVAL.colIdent = NewColIdent(string(yyDollar[1].bytes))
+		}
+	case 587:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line sql.y:2903
+		{
+			yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes))
+		}
+	case 588:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line sql.y:2907
+		{
+			yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes))
+		}
+	case 590:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line sql.y:2914
+		{
+			yyVAL.tableIdent = NewTableIdent(string(yyDollar[1].bytes))
+		}
+	case 781:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line sql.y:3130
+		{
+			if incNesting(yylex) {
+				yylex.Error("max nesting level reached")
+				return 1
+			}
+		}
+	case 782:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line sql.y:3139
+		{
+			decNesting(yylex)
+		}
+	case 783:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line sql.y:3144
+		{
+			forceEOF(yylex)
+		}
+	case 784:
+		yyDollar = yyS[yypt-0 : yypt+1]
+		//line sql.y:3149
+		{
+			forceEOF(yylex)
+		}
+	case 785:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line sql.y:3153
+		{
+			forceEOF(yylex)
+		}
+	case 786:
+		yyDollar = yyS[yypt-1 : yypt+1]
+		//line sql.y:3157
+		{
+			forceEOF(yylex)
+		}
+	}
+	goto yystack /* stack new state and value */
+}
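// Annotation (illustrative only, not part of the diff): the switch above is
// goyacc output; each numbered case is the Go action attached to the
// correspondingly numbered production in sql.y below. A minimal sketch of
// driving the vendored parser, assuming the fork keeps vitess' usual public
// Parse/String API and AST types:
//
//	package main
//
//	import (
//		"fmt"
//		"log"
//
//		"github.com/xwb1989/sqlparser"
//	)
//
//	func main() {
//		// LIKE ... ESCAPE exercises the ComparisonExpr/LikeStr action
//		// (case 370 in the generated code above).
//		stmt, err := sqlparser.Parse(`SELECT a FROM t WHERE a LIKE 'x%' ESCAPE '!'`)
//		if err != nil {
//			log.Fatal(err)
//		}
//		sel := stmt.(*sqlparser.Select)
//		// Where.Expr is the ComparisonExpr built by the LIKE production.
//		fmt.Println(sqlparser.String(sel.Where.Expr))
//	}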
diff --git a/vendor/github.com/xwb1989/sqlparser/sql.y b/vendor/github.com/xwb1989/sqlparser/sql.y
new file mode 100644
index 000000000..efbb794ca
--- /dev/null
+++ b/vendor/github.com/xwb1989/sqlparser/sql.y
@@ -0,0 +1,3159 @@
+/*
+Copyright 2017 Google Inc.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+%{
+package sqlparser
+
+func setParseTree(yylex interface{}, stmt Statement) {
+	yylex.(*Tokenizer).ParseTree = stmt
+}
+
+func setAllowComments(yylex interface{}, allow bool) {
+	yylex.(*Tokenizer).AllowComments = allow
+}
+
+func setDDL(yylex interface{}, ddl *DDL) {
+	yylex.(*Tokenizer).partialDDL = ddl
+}
+
+func incNesting(yylex interface{}) bool {
+	yylex.(*Tokenizer).nesting++
+	if yylex.(*Tokenizer).nesting == 200 {
+		return true
+	}
+	return false
+}
+
+func decNesting(yylex interface{}) {
+	yylex.(*Tokenizer).nesting--
+}
+
+// forceEOF forces the lexer to end prematurely. Not all SQL statements
+// are supported by the Parser, thus calling forceEOF will make the lexer
+// return EOF early.
+func forceEOF(yylex interface{}) {
+	yylex.(*Tokenizer).ForceEOF = true
+}
+
+%}
+
+%union {
+  empty         struct{}
+  statement     Statement
+  selStmt       SelectStatement
+  ddl           *DDL
+  ins           *Insert
+  byt           byte
+  bytes         []byte
+  bytes2        [][]byte
+  str           string
+  strs          []string
+  selectExprs   SelectExprs
+  selectExpr    SelectExpr
+  columns       Columns
+  partitions    Partitions
+  colName       *ColName
+  tableExprs    TableExprs
+  tableExpr     TableExpr
+  joinCondition JoinCondition
+  tableName     TableName
+  tableNames    TableNames
+  indexHints    *IndexHints
+  expr          Expr
+  exprs         Exprs
+  boolVal       BoolVal
+  colTuple      ColTuple
+  values        Values
+  valTuple      ValTuple
+  subquery      *Subquery
+  whens         []*When
+  when          *When
+  orderBy       OrderBy
+  order         *Order
+  limit         *Limit
+  updateExprs   UpdateExprs
+  setExprs      SetExprs
+  updateExpr    *UpdateExpr
+  setExpr       *SetExpr
+  colIdent      ColIdent
+  tableIdent    TableIdent
+  convertType   *ConvertType
+  aliasedTableName *AliasedTableExpr
+  TableSpec     *TableSpec
+  columnType    ColumnType
+  colKeyOpt     ColumnKeyOption
+  optVal        *SQLVal
+  LengthScaleOption LengthScaleOption
+  columnDefinition *ColumnDefinition
+  indexDefinition *IndexDefinition
+  indexInfo     *IndexInfo
+  indexOption   *IndexOption
+  indexOptions  []*IndexOption
+  indexColumn   *IndexColumn
+  indexColumns  []*IndexColumn
+  partDefs      []*PartitionDefinition
+  partDef       *PartitionDefinition
+  partSpec      *PartitionSpec
+  vindexParam   VindexParam
+  vindexParams  []VindexParam
+  showFilter    *ShowFilter
+}
+
+%token LEX_ERROR
+%left <bytes> UNION
+%token <bytes> SELECT STREAM INSERT UPDATE DELETE FROM WHERE GROUP HAVING ORDER BY LIMIT OFFSET FOR
+%token <bytes> ALL DISTINCT AS EXISTS ASC DESC INTO DUPLICATE KEY DEFAULT SET LOCK KEYS
+%token <bytes> VALUES LAST_INSERT_ID
+%token <bytes> NEXT VALUE SHARE MODE
+%token <bytes> SQL_NO_CACHE SQL_CACHE
+%left <bytes> JOIN STRAIGHT_JOIN LEFT RIGHT INNER OUTER CROSS NATURAL USE FORCE
+%left <bytes> ON USING
+%token <empty> '(' ',' ')'
+%token <bytes> ID HEX STRING INTEGRAL FLOAT HEXNUM VALUE_ARG LIST_ARG COMMENT COMMENT_KEYWORD BIT_LITERAL
+%token <bytes> NULL TRUE FALSE
+
+// Precedence dictated by mysql. But the vitess grammar is simplified.
+// Some of these operators don't conflict in our situation. Nevertheless,
+// it's better to have these listed in the correct order. Also, we don't
+// support all operators yet.
+%left <bytes> OR
+%left <bytes> AND
+%right <bytes> NOT '!'
+%left <bytes> BETWEEN CASE WHEN THEN ELSE END
+%left <bytes> '=' '<' '>' LE GE NE NULL_SAFE_EQUAL IS LIKE REGEXP IN
+%left <bytes> '|'
+%left <bytes> '&'
+%left <bytes> SHIFT_LEFT SHIFT_RIGHT
+%left <bytes> '+' '-'
+%left <bytes> '*' '/' DIV '%' MOD
+%left <bytes> '^'
+%right <bytes> '~' UNARY
+%left <empty> COLLATE
+%right <bytes> BINARY UNDERSCORE_BINARY
+%right <bytes> INTERVAL
+%nonassoc <bytes> '.'
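// Annotation (illustrative only, not part of the diff): in yacc, %left/%right
// lines declared later bind tighter, so '*' '/' DIV '%' MOD outrank '+' '-',
// and %left groups equal-precedence operators to the left. A quick check
// against the resulting AST, reusing the hypothetical driver from the
// previous note:
//
//	stmt, _ := sqlparser.Parse("SELECT 2 - 3 - 4 FROM t")
//	expr := stmt.(*sqlparser.Select).SelectExprs[0].(*sqlparser.AliasedExpr).Expr
//	outer := expr.(*sqlparser.BinaryExpr)
//	// %left: parsed as (2 - 3) - 4, so Left is itself a BinaryExpr.
//	_, leftIsBinary := outer.Left.(*sqlparser.BinaryExpr)
//	fmt.Println(leftIsBinary) // true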
+// There is no need to define precedence for the JSON
+// operators because the syntax is restricted enough that
+// they don't cause conflicts.
+%token <empty> JSON_EXTRACT_OP JSON_UNQUOTE_EXTRACT_OP
+
+// DDL Tokens
+%token <bytes> CREATE ALTER DROP RENAME ANALYZE ADD
+%token <bytes> SCHEMA TABLE INDEX VIEW TO IGNORE IF UNIQUE PRIMARY COLUMN CONSTRAINT SPATIAL FULLTEXT FOREIGN KEY_BLOCK_SIZE
+%token <bytes> SHOW DESCRIBE EXPLAIN DATE ESCAPE REPAIR OPTIMIZE TRUNCATE
+%token <bytes> MAXVALUE PARTITION REORGANIZE LESS THAN PROCEDURE TRIGGER
+%token <bytes> VINDEX VINDEXES
+%token <bytes> STATUS VARIABLES
+
+// Transaction Tokens
+%token <bytes> BEGIN START TRANSACTION COMMIT ROLLBACK
+
+// Type Tokens
+%token <bytes> BIT TINYINT SMALLINT MEDIUMINT INT INTEGER BIGINT INTNUM
+%token <bytes> REAL DOUBLE FLOAT_TYPE DECIMAL NUMERIC
+%token <bytes> TIME TIMESTAMP DATETIME YEAR
+%token <bytes> CHAR VARCHAR BOOL CHARACTER VARBINARY NCHAR
+%token <bytes> TEXT TINYTEXT MEDIUMTEXT LONGTEXT
+%token <bytes> BLOB TINYBLOB MEDIUMBLOB LONGBLOB JSON ENUM
+%token <bytes> GEOMETRY POINT LINESTRING POLYGON GEOMETRYCOLLECTION MULTIPOINT MULTILINESTRING MULTIPOLYGON
+
+// Type Modifiers
+%token <bytes> NULLX AUTO_INCREMENT APPROXNUM SIGNED UNSIGNED ZEROFILL
+
+// Supported SHOW tokens
+%token <bytes> DATABASES TABLES VITESS_KEYSPACES VITESS_SHARDS VITESS_TABLETS VSCHEMA_TABLES EXTENDED FULL PROCESSLIST
+
+// SET tokens
+%token <bytes> NAMES CHARSET GLOBAL SESSION ISOLATION LEVEL READ WRITE ONLY REPEATABLE COMMITTED UNCOMMITTED SERIALIZABLE
+
+// Functions
+%token <bytes> CURRENT_TIMESTAMP DATABASE CURRENT_DATE
+%token <bytes> CURRENT_TIME LOCALTIME LOCALTIMESTAMP
+%token <bytes> UTC_DATE UTC_TIME UTC_TIMESTAMP
+%token <bytes> REPLACE
+%token <bytes> CONVERT CAST
+%token <bytes> SUBSTR SUBSTRING
+%token <bytes> GROUP_CONCAT SEPARATOR
+
+// Match
+%token <bytes> MATCH AGAINST BOOLEAN LANGUAGE WITH QUERY EXPANSION
+
+// MySQL reserved words that are unused by this grammar will map to this token.
+%token <bytes> UNUSED
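// Annotation (illustrative only, not part of the diff): each <...> tag on a
// %token/%type line names a field of the %union block above; goyacc uses it
// to type the yyVAL/yyDollar accesses in the generated sql.go, e.g.
//
//	%type <selStmt> select_statement   =>  yyVAL.selStmt, a SelectStatement
//	%type <expr> expression            =>  yyVAL.expr, an Expr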
+%type <statement> command
+%type <selStmt> select_statement base_select union_lhs union_rhs
+%type <statement> stream_statement insert_statement update_statement delete_statement set_statement
+%type <statement> create_statement alter_statement rename_statement drop_statement truncate_statement
+%type <ddl> create_table_prefix
+%type <statement> analyze_statement show_statement use_statement other_statement
+%type <statement> begin_statement commit_statement rollback_statement
+%type <bytes2> comment_opt comment_list
+%type <str> union_op insert_or_replace
+%type <str> distinct_opt straight_join_opt cache_opt match_option separator_opt
+%type <expr> like_escape_opt
+%type <selectExprs> select_expression_list select_expression_list_opt
+%type <selectExpr> select_expression
+%type <expr> expression
+%type <tableExprs> from_opt table_references
+%type <tableExpr> table_reference table_factor join_table
+%type <joinCondition> join_condition join_condition_opt on_expression_opt
+%type <tableNames> table_name_list
+%type <str> inner_join outer_join straight_join natural_join
+%type <tableName> table_name into_table_name
+%type <aliasedTableName> aliased_table_name
+%type <indexHints> index_hint_list
+%type <expr> where_expression_opt
+%type <expr> condition
+%type <boolVal> boolean_value
+%type <str> compare
+%type <ins> insert_data
+%type <expr> value value_expression num_val
+%type <expr> function_call_keyword function_call_nonkeyword function_call_generic function_call_conflict
+%type <str> is_suffix
+%type <colTuple> col_tuple
+%type <exprs> expression_list
+%type <values> tuple_list
+%type <valTuple> row_tuple tuple_or_empty
+%type <expr> tuple_expression
+%type <subquery> subquery
+%type <colName> column_name
+%type <whens> when_expression_list
+%type <when> when_expression
+%type <expr> expression_opt else_expression_opt
+%type <exprs> group_by_opt
+%type <expr> having_opt
+%type <orderBy> order_by_opt order_list
+%type <order> order
+%type <str> asc_desc_opt
+%type <limit> limit_opt
+%type <str> lock_opt
+%type <columns> ins_column_list column_list
+%type <partitions> opt_partition_clause partition_list
+%type <updateExprs> on_dup_opt
+%type <updateExprs> update_list
+%type <setExprs> set_list transaction_chars
+%type <bytes> charset_or_character_set
+%type <updateExpr> update_expression
+%type <setExpr> set_expression transaction_char isolation_level
+%type <str> for_from
+%type <str> ignore_opt default_opt
+%type <str> extended_opt full_opt from_database_opt tables_or_processlist
+%type <showFilter> like_or_where_opt
+%type <byt> exists_opt
+%type <empty> not_exists_opt non_add_drop_or_rename_operation to_opt index_opt constraint_opt
+%type <bytes> reserved_keyword non_reserved_keyword
+%type <colIdent> sql_id reserved_sql_id col_alias as_ci_opt using_opt
+%type <expr> charset_value
+%type <tableIdent> table_id reserved_table_id table_alias as_opt_id
+%type <empty> as_opt
+%type <empty> force_eof ddl_force_eof
+%type <str> charset
+%type <str> set_session_or_global show_session_or_global
+%type <convertType> convert_type
+%type <columnType> column_type
+%type <columnType> int_type decimal_type numeric_type time_type char_type spatial_type
+%type <optVal> length_opt column_default_opt column_comment_opt on_update_opt
+%type <str> charset_opt collate_opt
+%type <boolVal> unsigned_opt zero_fill_opt
+%type <LengthScaleOption> float_length_opt decimal_length_opt
+%type <boolVal> null_opt auto_increment_opt
+%type <colKeyOpt> column_key_opt
+%type <strs> enum_values
+%type <columnDefinition> column_definition
+%type <indexDefinition> index_definition
+%type <str> index_or_key
+%type <str> equal_opt
+%type <TableSpec> table_spec table_column_list
+%type <str> table_option_list table_option table_opt_value
+%type <indexInfo> index_info
+%type <indexColumn> index_column
+%type <indexColumns> index_column_list
+%type <indexOption> index_option
+%type <indexOptions> index_option_list
+%type <partDefs> partition_definitions
+%type <partDef> partition_definition
+%type <partSpec> partition_operation
+%type <vindexParam> vindex_param
+%type <vindexParams> vindex_param_list vindex_params_opt
+%type <colIdent> vindex_type vindex_type_opt
+%type <empty> alter_object_type
+
+%start any_command
+
+%%
+
+any_command:
+  command semicolon_opt
+  {
+    setParseTree(yylex, $1)
+  }
+
+semicolon_opt:
+/*empty*/ {}
+| ';' {}
+
+command:
+  select_statement
+  {
+    $$ = $1
+  }
+| stream_statement
+| insert_statement
+| update_statement
+| delete_statement
+| set_statement
+| create_statement
+| alter_statement
+| rename_statement
+| drop_statement
+| truncate_statement
+| analyze_statement
+| show_statement
+| use_statement
+| begin_statement
+| commit_statement
+| rollback_statement
+| other_statement
+
+select_statement:
+  base_select order_by_opt limit_opt lock_opt
+  {
+    sel := $1.(*Select)
+    sel.OrderBy = $2
+    sel.Limit = $3
+    sel.Lock = $4
+    $$ = sel
+  }
+| union_lhs union_op union_rhs order_by_opt limit_opt lock_opt
+  {
+    $$ = &Union{Type: $2, Left: $1, Right: $3, OrderBy: $4, Limit: $5, Lock: $6}
+  }
+| SELECT comment_opt cache_opt NEXT num_val for_from table_name
+  {
+    $$ = &Select{Comments: Comments($2), Cache: $3, SelectExprs: SelectExprs{Nextval{Expr: $5}}, From: TableExprs{&AliasedTableExpr{Expr: $7}}}
+  }
+
+stream_statement:
+  STREAM comment_opt select_expression FROM table_name
+  {
+    $$ = &Stream{Comments: Comments($2), SelectExpr: $3, Table: $5}
+  }
+
+// base_select is an unparenthesized SELECT with no order by clause or beyond.
+base_select:
+  SELECT comment_opt cache_opt distinct_opt straight_join_opt select_expression_list from_opt where_expression_opt group_by_opt having_opt
+  {
+    $$ = &Select{Comments: Comments($2), Cache: $3, Distinct: $4, Hints: $5, SelectExprs: $6, From: $7, Where: NewWhere(WhereStr, $8), GroupBy: GroupBy($9), Having: NewWhere(HavingStr, $10)}
+  }
+
+union_lhs:
+  select_statement
+  {
+    $$ = $1
+  }
+| openb select_statement closeb
+  {
+    $$ = &ParenSelect{Select: $2}
+  }
+
+union_rhs:
+  base_select
+  {
+    $$ = $1
+  }
+| openb select_statement closeb
+  {
+    $$ = &ParenSelect{Select: $2}
+  }
+
+
+insert_statement:
+  insert_or_replace comment_opt ignore_opt into_table_name opt_partition_clause insert_data on_dup_opt
+  {
+    // insert_data returns a *Insert pre-filled with Columns & Values
+    ins := $6
+    ins.Action = $1
+    ins.Comments = $2
+    ins.Ignore = $3
+    ins.Table = $4
+    ins.Partitions = $5
+    ins.OnDup = OnDup($7)
+    $$ = ins
+  }
+| insert_or_replace comment_opt ignore_opt into_table_name opt_partition_clause SET update_list on_dup_opt
+  {
+    cols := make(Columns, 0, len($7))
+    vals := make(ValTuple, 0, len($7))
+    for _, updateList := range $7 {
+      cols = append(cols, updateList.Name.Name)
+      vals = append(vals, updateList.Expr)
+    }
+    $$ = &Insert{Action: $1, Comments: Comments($2), Ignore: $3, Table: $4, Partitions: $5, Columns: cols, Rows: Values{vals}, OnDup: OnDup($8)}
+  }
+
+insert_or_replace:
+  INSERT
+  {
+    $$ = InsertStr
+  }
+| REPLACE
+  {
+    $$ = ReplaceStr
+  }
+
+update_statement:
+  UPDATE comment_opt table_references SET update_list where_expression_opt order_by_opt limit_opt
+  {
+    $$ = &Update{Comments: Comments($2), TableExprs: $3, Exprs: $5, Where: NewWhere(WhereStr, $6), OrderBy: $7, Limit: $8}
+  }
+
+delete_statement:
+  DELETE comment_opt FROM table_name opt_partition_clause where_expression_opt order_by_opt limit_opt
+  {
+    $$ = &Delete{Comments: Comments($2), TableExprs: TableExprs{&AliasedTableExpr{Expr:$4}}, Partitions: $5, Where: NewWhere(WhereStr, $6), OrderBy: $7, Limit: $8}
+  }
+| DELETE comment_opt FROM table_name_list USING table_references where_expression_opt
+  {
+    $$ = &Delete{Comments: Comments($2), Targets: $4, TableExprs: $6, Where: NewWhere(WhereStr, $7)}
+  }
+| DELETE comment_opt table_name_list from_or_using table_references where_expression_opt
+  {
+    $$ = &Delete{Comments: Comments($2), Targets: $3, TableExprs: $5, Where: NewWhere(WhereStr, $6)}
+  }
+
+from_or_using:
+  FROM {}
+| USING {}
+
+table_name_list:
+  table_name
+  {
+    $$ = TableNames{$1}
+  }
+| table_name_list ',' table_name
+  {
+    $$ = append($$, $3)
+  }
+
+opt_partition_clause:
+  {
+    $$ = nil
+  }
+| PARTITION openb partition_list closeb
+  {
+    $$ = $3
+  }
+
+set_statement:
+  SET comment_opt set_list
+  {
+    $$ = &Set{Comments: Comments($2), Exprs: $3}
+  }
+| SET comment_opt set_session_or_global set_list
+  {
+    $$ = &Set{Comments: Comments($2), Scope: $3, Exprs: $4}
+  }
+| SET comment_opt set_session_or_global TRANSACTION transaction_chars
+  {
+    $$ = &Set{Comments: Comments($2), Scope: $3, Exprs: $5}
+  }
+| SET comment_opt TRANSACTION transaction_chars
+  {
+    $$ = &Set{Comments: Comments($2), Exprs: $4}
+  }
+
+transaction_chars:
+  transaction_char
+  {
+    $$ = SetExprs{$1}
+  }
+| transaction_chars ',' transaction_char
+  {
+    $$ = append($$, $3)
+  }
+
+transaction_char:
+  ISOLATION LEVEL isolation_level
+  {
+    $$ = $3
+  }
+| READ WRITE
+  {
+    $$ = &SetExpr{Name: NewColIdent("tx_read_only"), Expr: NewIntVal([]byte("0"))}
+  }
+| READ ONLY
+  {
+    $$ = &SetExpr{Name: NewColIdent("tx_read_only"), Expr:
NewIntVal([]byte("1"))} + } + +isolation_level: + REPEATABLE READ + { + $$ = &SetExpr{Name: NewColIdent("tx_isolation"), Expr: NewStrVal([]byte("repeatable read"))} + } +| READ COMMITTED + { + $$ = &SetExpr{Name: NewColIdent("tx_isolation"), Expr: NewStrVal([]byte("read committed"))} + } +| READ UNCOMMITTED + { + $$ = &SetExpr{Name: NewColIdent("tx_isolation"), Expr: NewStrVal([]byte("read uncommitted"))} + } +| SERIALIZABLE + { + $$ = &SetExpr{Name: NewColIdent("tx_isolation"), Expr: NewStrVal([]byte("serializable"))} + } + +set_session_or_global: + SESSION + { + $$ = SessionStr + } +| GLOBAL + { + $$ = GlobalStr + } + +create_statement: + create_table_prefix table_spec + { + $1.TableSpec = $2 + $$ = $1 + } +| CREATE constraint_opt INDEX ID using_opt ON table_name ddl_force_eof + { + // Change this to an alter statement + $$ = &DDL{Action: AlterStr, Table: $7, NewName:$7} + } +| CREATE VIEW table_name ddl_force_eof + { + $$ = &DDL{Action: CreateStr, NewName: $3.ToViewName()} + } +| CREATE OR REPLACE VIEW table_name ddl_force_eof + { + $$ = &DDL{Action: CreateStr, NewName: $5.ToViewName()} + } +| CREATE VINDEX sql_id vindex_type_opt vindex_params_opt + { + $$ = &DDL{Action: CreateVindexStr, VindexSpec: &VindexSpec{ + Name: $3, + Type: $4, + Params: $5, + }} + } +| CREATE DATABASE not_exists_opt ID ddl_force_eof + { + $$ = &DBDDL{Action: CreateStr, DBName: string($4)} + } +| CREATE SCHEMA not_exists_opt ID ddl_force_eof + { + $$ = &DBDDL{Action: CreateStr, DBName: string($4)} + } + +vindex_type_opt: + { + $$ = NewColIdent("") + } +| USING vindex_type + { + $$ = $2 + } + +vindex_type: + ID + { + $$ = NewColIdent(string($1)) + } + +vindex_params_opt: + { + var v []VindexParam + $$ = v + } +| WITH vindex_param_list + { + $$ = $2 + } + +vindex_param_list: + vindex_param + { + $$ = make([]VindexParam, 0, 4) + $$ = append($$, $1) + } +| vindex_param_list ',' vindex_param + { + $$ = append($$, $3) + } + +vindex_param: + reserved_sql_id '=' table_opt_value + { + $$ = VindexParam{Key: $1, Val: $3} + } + +create_table_prefix: + CREATE TABLE not_exists_opt table_name + { + $$ = &DDL{Action: CreateStr, NewName: $4} + setDDL(yylex, $$) + } + +table_spec: + '(' table_column_list ')' table_option_list + { + $$ = $2 + $$.Options = $4 + } + +table_column_list: + column_definition + { + $$ = &TableSpec{} + $$.AddColumn($1) + } +| table_column_list ',' column_definition + { + $$.AddColumn($3) + } +| table_column_list ',' index_definition + { + $$.AddIndex($3) + } + +column_definition: + ID column_type null_opt column_default_opt on_update_opt auto_increment_opt column_key_opt column_comment_opt + { + $2.NotNull = $3 + $2.Default = $4 + $2.OnUpdate = $5 + $2.Autoincrement = $6 + $2.KeyOpt = $7 + $2.Comment = $8 + $$ = &ColumnDefinition{Name: NewColIdent(string($1)), Type: $2} + } +column_type: + numeric_type unsigned_opt zero_fill_opt + { + $$ = $1 + $$.Unsigned = $2 + $$.Zerofill = $3 + } +| char_type +| time_type +| spatial_type + +numeric_type: + int_type length_opt + { + $$ = $1 + $$.Length = $2 + } +| decimal_type + { + $$ = $1 + } + +int_type: + BIT + { + $$ = ColumnType{Type: string($1)} + } +| TINYINT + { + $$ = ColumnType{Type: string($1)} + } +| SMALLINT + { + $$ = ColumnType{Type: string($1)} + } +| MEDIUMINT + { + $$ = ColumnType{Type: string($1)} + } +| INT + { + $$ = ColumnType{Type: string($1)} + } +| INTEGER + { + $$ = ColumnType{Type: string($1)} + } +| BIGINT + { + $$ = ColumnType{Type: string($1)} + } + +decimal_type: +REAL float_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length 
= $2.Length + $$.Scale = $2.Scale + } +| DOUBLE float_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| FLOAT_TYPE float_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| DECIMAL decimal_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| NUMERIC decimal_length_opt + { + $$ = ColumnType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } + +time_type: + DATE + { + $$ = ColumnType{Type: string($1)} + } +| TIME length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| TIMESTAMP length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| DATETIME length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| YEAR + { + $$ = ColumnType{Type: string($1)} + } + +char_type: + CHAR length_opt charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Length: $2, Charset: $3, Collate: $4} + } +| VARCHAR length_opt charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Length: $2, Charset: $3, Collate: $4} + } +| BINARY length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| VARBINARY length_opt + { + $$ = ColumnType{Type: string($1), Length: $2} + } +| TEXT charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Charset: $2, Collate: $3} + } +| TINYTEXT charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Charset: $2, Collate: $3} + } +| MEDIUMTEXT charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Charset: $2, Collate: $3} + } +| LONGTEXT charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), Charset: $2, Collate: $3} + } +| BLOB + { + $$ = ColumnType{Type: string($1)} + } +| TINYBLOB + { + $$ = ColumnType{Type: string($1)} + } +| MEDIUMBLOB + { + $$ = ColumnType{Type: string($1)} + } +| LONGBLOB + { + $$ = ColumnType{Type: string($1)} + } +| JSON + { + $$ = ColumnType{Type: string($1)} + } +| ENUM '(' enum_values ')' charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), EnumValues: $3, Charset: $5, Collate: $6} + } +// need set_values / SetValues ? 
+| SET '(' enum_values ')' charset_opt collate_opt + { + $$ = ColumnType{Type: string($1), EnumValues: $3, Charset: $5, Collate: $6} + } + +spatial_type: + GEOMETRY + { + $$ = ColumnType{Type: string($1)} + } +| POINT + { + $$ = ColumnType{Type: string($1)} + } +| LINESTRING + { + $$ = ColumnType{Type: string($1)} + } +| POLYGON + { + $$ = ColumnType{Type: string($1)} + } +| GEOMETRYCOLLECTION + { + $$ = ColumnType{Type: string($1)} + } +| MULTIPOINT + { + $$ = ColumnType{Type: string($1)} + } +| MULTILINESTRING + { + $$ = ColumnType{Type: string($1)} + } +| MULTIPOLYGON + { + $$ = ColumnType{Type: string($1)} + } + +enum_values: + STRING + { + $$ = make([]string, 0, 4) + $$ = append($$, "'" + string($1) + "'") + } +| enum_values ',' STRING + { + $$ = append($1, "'" + string($3) + "'") + } + +length_opt: + { + $$ = nil + } +| '(' INTEGRAL ')' + { + $$ = NewIntVal($2) + } + +float_length_opt: + { + $$ = LengthScaleOption{} + } +| '(' INTEGRAL ',' INTEGRAL ')' + { + $$ = LengthScaleOption{ + Length: NewIntVal($2), + Scale: NewIntVal($4), + } + } + +decimal_length_opt: + { + $$ = LengthScaleOption{} + } +| '(' INTEGRAL ')' + { + $$ = LengthScaleOption{ + Length: NewIntVal($2), + } + } +| '(' INTEGRAL ',' INTEGRAL ')' + { + $$ = LengthScaleOption{ + Length: NewIntVal($2), + Scale: NewIntVal($4), + } + } + +unsigned_opt: + { + $$ = BoolVal(false) + } +| UNSIGNED + { + $$ = BoolVal(true) + } + +zero_fill_opt: + { + $$ = BoolVal(false) + } +| ZEROFILL + { + $$ = BoolVal(true) + } + +// Null opt returns false to mean NULL (i.e. the default) and true for NOT NULL +null_opt: + { + $$ = BoolVal(false) + } +| NULL + { + $$ = BoolVal(false) + } +| NOT NULL + { + $$ = BoolVal(true) + } + +column_default_opt: + { + $$ = nil + } +| DEFAULT STRING + { + $$ = NewStrVal($2) + } +| DEFAULT INTEGRAL + { + $$ = NewIntVal($2) + } +| DEFAULT FLOAT + { + $$ = NewFloatVal($2) + } +| DEFAULT NULL + { + $$ = NewValArg($2) + } +| DEFAULT CURRENT_TIMESTAMP + { + $$ = NewValArg($2) + } +| DEFAULT BIT_LITERAL + { + $$ = NewBitVal($2) + } + +on_update_opt: + { + $$ = nil + } +| ON UPDATE CURRENT_TIMESTAMP +{ + $$ = NewValArg($3) +} + +auto_increment_opt: + { + $$ = BoolVal(false) + } +| AUTO_INCREMENT + { + $$ = BoolVal(true) + } + +charset_opt: + { + $$ = "" + } +| CHARACTER SET ID + { + $$ = string($3) + } +| CHARACTER SET BINARY + { + $$ = string($3) + } + +collate_opt: + { + $$ = "" + } +| COLLATE ID + { + $$ = string($2) + } + +column_key_opt: + { + $$ = colKeyNone + } +| PRIMARY KEY + { + $$ = colKeyPrimary + } +| KEY + { + $$ = colKey + } +| UNIQUE KEY + { + $$ = colKeyUniqueKey + } +| UNIQUE + { + $$ = colKeyUnique + } + +column_comment_opt: + { + $$ = nil + } +| COMMENT_KEYWORD STRING + { + $$ = NewStrVal($2) + } + +index_definition: + index_info '(' index_column_list ')' index_option_list + { + $$ = &IndexDefinition{Info: $1, Columns: $3, Options: $5} + } +| index_info '(' index_column_list ')' + { + $$ = &IndexDefinition{Info: $1, Columns: $3} + } + +index_option_list: + index_option + { + $$ = []*IndexOption{$1} + } +| index_option_list index_option + { + $$ = append($$, $2) + } + +index_option: + USING ID + { + $$ = &IndexOption{Name: string($1), Using: string($2)} + } +| KEY_BLOCK_SIZE equal_opt INTEGRAL + { + // should not be string + $$ = &IndexOption{Name: string($1), Value: NewIntVal($3)} + } +| COMMENT_KEYWORD STRING + { + $$ = &IndexOption{Name: string($1), Value: NewStrVal($2)} + } + +equal_opt: + /* empty */ + { + $$ = "" + } +| '=' + { + $$ = string($1) + } + +index_info: + PRIMARY KEY + { + $$ = 
&IndexInfo{Type: string($1) + " " + string($2), Name: NewColIdent("PRIMARY"), Primary: true, Unique: true} + } +| SPATIAL index_or_key ID + { + $$ = &IndexInfo{Type: string($1) + " " + string($2), Name: NewColIdent(string($3)), Spatial: true, Unique: false} + } +| UNIQUE index_or_key ID + { + $$ = &IndexInfo{Type: string($1) + " " + string($2), Name: NewColIdent(string($3)), Unique: true} + } +| UNIQUE ID + { + $$ = &IndexInfo{Type: string($1), Name: NewColIdent(string($2)), Unique: true} + } +| index_or_key ID + { + $$ = &IndexInfo{Type: string($1), Name: NewColIdent(string($2)), Unique: false} + } + +index_or_key: + INDEX + { + $$ = string($1) + } + | KEY + { + $$ = string($1) + } + +index_column_list: + index_column + { + $$ = []*IndexColumn{$1} + } +| index_column_list ',' index_column + { + $$ = append($$, $3) + } + +index_column: + sql_id length_opt + { + $$ = &IndexColumn{Column: $1, Length: $2} + } + +table_option_list: + { + $$ = "" + } +| table_option + { + $$ = " " + string($1) + } +| table_option_list ',' table_option + { + $$ = string($1) + ", " + string($3) + } + +// rather than explicitly parsing the various keywords for table options, +// just accept any number of keywords, IDs, strings, numbers, and '=' +table_option: + table_opt_value + { + $$ = $1 + } +| table_option table_opt_value + { + $$ = $1 + " " + $2 + } +| table_option '=' table_opt_value + { + $$ = $1 + "=" + $3 + } + +table_opt_value: + reserved_sql_id + { + $$ = $1.String() + } +| STRING + { + $$ = "'" + string($1) + "'" + } +| INTEGRAL + { + $$ = string($1) + } + +alter_statement: + ALTER ignore_opt TABLE table_name non_add_drop_or_rename_operation force_eof + { + $$ = &DDL{Action: AlterStr, Table: $4, NewName: $4} + } +| ALTER ignore_opt TABLE table_name ADD alter_object_type force_eof + { + $$ = &DDL{Action: AlterStr, Table: $4, NewName: $4} + } +| ALTER ignore_opt TABLE table_name DROP alter_object_type force_eof + { + $$ = &DDL{Action: AlterStr, Table: $4, NewName: $4} + } +| ALTER ignore_opt TABLE table_name ADD VINDEX sql_id '(' column_list ')' vindex_type_opt vindex_params_opt + { + $$ = &DDL{ + Action: AddColVindexStr, + Table: $4, + VindexSpec: &VindexSpec{ + Name: $7, + Type: $11, + Params: $12, + }, + VindexCols: $9, + } + } +| ALTER ignore_opt TABLE table_name DROP VINDEX sql_id + { + $$ = &DDL{ + Action: DropColVindexStr, + Table: $4, + VindexSpec: &VindexSpec{ + Name: $7, + }, + } + } +| ALTER ignore_opt TABLE table_name RENAME to_opt table_name + { + // Change this to a rename statement + $$ = &DDL{Action: RenameStr, Table: $4, NewName: $7} + } +| ALTER ignore_opt TABLE table_name RENAME index_opt force_eof + { + // Rename an index can just be an alter + $$ = &DDL{Action: AlterStr, Table: $4, NewName: $4} + } +| ALTER VIEW table_name ddl_force_eof + { + $$ = &DDL{Action: AlterStr, Table: $3.ToViewName(), NewName: $3.ToViewName()} + } +| ALTER ignore_opt TABLE table_name partition_operation + { + $$ = &DDL{Action: AlterStr, Table: $4, PartitionSpec: $5} + } + +alter_object_type: + COLUMN +| CONSTRAINT +| FOREIGN +| FULLTEXT +| ID +| INDEX +| KEY +| PRIMARY +| SPATIAL +| PARTITION +| UNIQUE + +partition_operation: + REORGANIZE PARTITION sql_id INTO openb partition_definitions closeb + { + $$ = &PartitionSpec{Action: ReorganizeStr, Name: $3, Definitions: $6} + } + +partition_definitions: + partition_definition + { + $$ = []*PartitionDefinition{$1} + } +| partition_definitions ',' partition_definition + { + $$ = append($1, $3) + } + +partition_definition: + PARTITION sql_id VALUES LESS THAN openb 
value_expression closeb + { + $$ = &PartitionDefinition{Name: $2, Limit: $7} + } +| PARTITION sql_id VALUES LESS THAN openb MAXVALUE closeb + { + $$ = &PartitionDefinition{Name: $2, Maxvalue: true} + } + +rename_statement: + RENAME TABLE table_name TO table_name + { + $$ = &DDL{Action: RenameStr, Table: $3, NewName: $5} + } + +drop_statement: + DROP TABLE exists_opt table_name + { + var exists bool + if $3 != 0 { + exists = true + } + $$ = &DDL{Action: DropStr, Table: $4, IfExists: exists} + } +| DROP INDEX ID ON table_name ddl_force_eof + { + // Change this to an alter statement + $$ = &DDL{Action: AlterStr, Table: $5, NewName: $5} + } +| DROP VIEW exists_opt table_name ddl_force_eof + { + var exists bool + if $3 != 0 { + exists = true + } + $$ = &DDL{Action: DropStr, Table: $4.ToViewName(), IfExists: exists} + } +| DROP DATABASE exists_opt ID + { + $$ = &DBDDL{Action: DropStr, DBName: string($4)} + } +| DROP SCHEMA exists_opt ID + { + $$ = &DBDDL{Action: DropStr, DBName: string($4)} + } + +truncate_statement: + TRUNCATE TABLE table_name + { + $$ = &DDL{Action: TruncateStr, Table: $3} + } +| TRUNCATE table_name + { + $$ = &DDL{Action: TruncateStr, Table: $2} + } +analyze_statement: + ANALYZE TABLE table_name + { + $$ = &DDL{Action: AlterStr, Table: $3, NewName: $3} + } + +show_statement: + SHOW BINARY ID ddl_force_eof /* SHOW BINARY LOGS */ + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +| SHOW CHARACTER SET ddl_force_eof + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +| SHOW CREATE DATABASE ddl_force_eof + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +/* Rule to handle SHOW CREATE EVENT, SHOW CREATE FUNCTION, etc. */ +| SHOW CREATE ID ddl_force_eof + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +| SHOW CREATE PROCEDURE ddl_force_eof + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +| SHOW CREATE TABLE ddl_force_eof + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +| SHOW CREATE TRIGGER ddl_force_eof + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +| SHOW CREATE VIEW ddl_force_eof + { + $$ = &Show{Type: string($2) + " " + string($3)} + } +| SHOW DATABASES ddl_force_eof + { + $$ = &Show{Type: string($2)} + } +| SHOW INDEX ddl_force_eof + { + $$ = &Show{Type: string($2)} + } +| SHOW KEYS ddl_force_eof + { + $$ = &Show{Type: string($2)} + } +| SHOW PROCEDURE ddl_force_eof + { + $$ = &Show{Type: string($2)} + } +| SHOW show_session_or_global STATUS ddl_force_eof + { + $$ = &Show{Scope: $2, Type: string($3)} + } +| SHOW TABLE ddl_force_eof + { + $$ = &Show{Type: string($2)} + } +| SHOW extended_opt full_opt tables_or_processlist from_database_opt like_or_where_opt + { + // this is ugly, but I couldn't find a better way for now + if $4 == "processlist" { + $$ = &Show{Type: $4} + } else { + showTablesOpt := &ShowTablesOpt{Extended: $2, Full:$3, DbName:$5, Filter:$6} + $$ = &Show{Type: $4, ShowTablesOpt: showTablesOpt} + } + } +| SHOW show_session_or_global VARIABLES ddl_force_eof + { + $$ = &Show{Scope: $2, Type: string($3)} + } +| SHOW VINDEXES + { + $$ = &Show{Type: string($2)} + } +| SHOW VINDEXES ON table_name + { + $$ = &Show{Type: string($2), OnTable: $4} + } +| SHOW VITESS_KEYSPACES + { + $$ = &Show{Type: string($2)} + } +| SHOW VITESS_SHARDS + { + $$ = &Show{Type: string($2)} + } +| SHOW VITESS_TABLETS + { + $$ = &Show{Type: string($2)} + } +| SHOW VSCHEMA_TABLES + { + $$ = &Show{Type: string($2)} + } +/* + * Catch-all for show statements without vitess keywords: + * + * SHOW BINARY LOGS + * SHOW INVALID + */ +| SHOW 
ID ddl_force_eof + { + $$ = &Show{Type: string($2)} + } + +tables_or_processlist: + TABLES + { + $$ = string($1) + } +| PROCESSLIST + { + $$ = string($1) + } + +extended_opt: + /* empty */ + { + $$ = "" + } +| EXTENDED + { + $$ = "extended " + } + +full_opt: + /* empty */ + { + $$ = "" + } +| FULL + { + $$ = "full " + } + +from_database_opt: + /* empty */ + { + $$ = "" + } +| FROM table_id + { + $$ = $2.v + } +| IN table_id + { + $$ = $2.v + } + +like_or_where_opt: + /* empty */ + { + $$ = nil + } +| LIKE STRING + { + $$ = &ShowFilter{Like:string($2)} + } +| WHERE expression + { + $$ = &ShowFilter{Filter:$2} + } + +show_session_or_global: + /* empty */ + { + $$ = "" + } +| SESSION + { + $$ = SessionStr + } +| GLOBAL + { + $$ = GlobalStr + } + +use_statement: + USE table_id + { + $$ = &Use{DBName: $2} + } +| USE + { + $$ = &Use{DBName:TableIdent{v:""}} + } + +begin_statement: + BEGIN + { + $$ = &Begin{} + } +| START TRANSACTION + { + $$ = &Begin{} + } + +commit_statement: + COMMIT + { + $$ = &Commit{} + } + +rollback_statement: + ROLLBACK + { + $$ = &Rollback{} + } + +other_statement: + DESC force_eof + { + $$ = &OtherRead{} + } +| DESCRIBE force_eof + { + $$ = &OtherRead{} + } +| EXPLAIN force_eof + { + $$ = &OtherRead{} + } +| REPAIR force_eof + { + $$ = &OtherAdmin{} + } +| OPTIMIZE force_eof + { + $$ = &OtherAdmin{} + } + +comment_opt: + { + setAllowComments(yylex, true) + } + comment_list + { + $$ = $2 + setAllowComments(yylex, false) + } + +comment_list: + { + $$ = nil + } +| comment_list COMMENT + { + $$ = append($1, $2) + } + +union_op: + UNION + { + $$ = UnionStr + } +| UNION ALL + { + $$ = UnionAllStr + } +| UNION DISTINCT + { + $$ = UnionDistinctStr + } + +cache_opt: +{ + $$ = "" +} +| SQL_NO_CACHE +{ + $$ = SQLNoCacheStr +} +| SQL_CACHE +{ + $$ = SQLCacheStr +} + +distinct_opt: + { + $$ = "" + } +| DISTINCT + { + $$ = DistinctStr + } + +straight_join_opt: + { + $$ = "" + } +| STRAIGHT_JOIN + { + $$ = StraightJoinHint + } + +select_expression_list_opt: + { + $$ = nil + } +| select_expression_list + { + $$ = $1 + } + +select_expression_list: + select_expression + { + $$ = SelectExprs{$1} + } +| select_expression_list ',' select_expression + { + $$ = append($$, $3) + } + +select_expression: + '*' + { + $$ = &StarExpr{} + } +| expression as_ci_opt + { + $$ = &AliasedExpr{Expr: $1, As: $2} + } +| table_id '.' '*' + { + $$ = &StarExpr{TableName: TableName{Name: $1}} + } +| table_id '.' reserved_table_id '.' 
'*' + { + $$ = &StarExpr{TableName: TableName{Qualifier: $1, Name: $3}} + } + +as_ci_opt: + { + $$ = ColIdent{} + } +| col_alias + { + $$ = $1 + } +| AS col_alias + { + $$ = $2 + } + +col_alias: + sql_id +| STRING + { + $$ = NewColIdent(string($1)) + } + +from_opt: + { + $$ = TableExprs{&AliasedTableExpr{Expr:TableName{Name: NewTableIdent("dual")}}} + } +| FROM table_references + { + $$ = $2 + } + +table_references: + table_reference + { + $$ = TableExprs{$1} + } +| table_references ',' table_reference + { + $$ = append($$, $3) + } + +table_reference: + table_factor +| join_table + +table_factor: + aliased_table_name + { + $$ = $1 + } +| subquery as_opt table_id + { + $$ = &AliasedTableExpr{Expr:$1, As: $3} + } +| openb table_references closeb + { + $$ = &ParenTableExpr{Exprs: $2} + } + +aliased_table_name: +table_name as_opt_id index_hint_list + { + $$ = &AliasedTableExpr{Expr:$1, As: $2, Hints: $3} + } +| table_name PARTITION openb partition_list closeb as_opt_id index_hint_list + { + $$ = &AliasedTableExpr{Expr:$1, Partitions: $4, As: $6, Hints: $7} + } + +column_list: + sql_id + { + $$ = Columns{$1} + } +| column_list ',' sql_id + { + $$ = append($$, $3) + } + +partition_list: + sql_id + { + $$ = Partitions{$1} + } +| partition_list ',' sql_id + { + $$ = append($$, $3) + } + +// There is a grammar conflict here: +// 1: INSERT INTO a SELECT * FROM b JOIN c ON b.i = c.i +// 2: INSERT INTO a SELECT * FROM b JOIN c ON DUPLICATE KEY UPDATE a.i = 1 +// When yacc encounters the ON clause, it cannot determine which way to +// resolve. The %prec override below makes the parser choose the +// first construct, which automatically makes the second construct a +// syntax error. This is the same behavior as MySQL. +join_table: + table_reference inner_join table_factor join_condition_opt + { + $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3, Condition: $4} + } +| table_reference straight_join table_factor on_expression_opt + { + $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3, Condition: $4} + } +| table_reference outer_join table_reference join_condition + { + $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3, Condition: $4} + } +| table_reference natural_join table_factor + { + $$ = &JoinTableExpr{LeftExpr: $1, Join: $2, RightExpr: $3} + } + +join_condition: + ON expression + { $$ = JoinCondition{On: $2} } +| USING '(' column_list ')' + { $$ = JoinCondition{Using: $3} } + +join_condition_opt: +%prec JOIN + { $$ = JoinCondition{} } +| join_condition + { $$ = $1 } + +on_expression_opt: +%prec JOIN + { $$ = JoinCondition{} } +| ON expression + { $$ = JoinCondition{On: $2} } + +as_opt: + { $$ = struct{}{} } +| AS + { $$ = struct{}{} } + +as_opt_id: + { + $$ = NewTableIdent("") + } +| table_alias + { + $$ = $1 + } +| AS table_alias + { + $$ = $2 + } + +table_alias: + table_id +| STRING + { + $$ = NewTableIdent(string($1)) + } + +inner_join: + JOIN + { + $$ = JoinStr + } +| INNER JOIN + { + $$ = JoinStr + } +| CROSS JOIN + { + $$ = JoinStr + } + +straight_join: + STRAIGHT_JOIN + { + $$ = StraightJoinStr + } + +outer_join: + LEFT JOIN + { + $$ = LeftJoinStr + } +| LEFT OUTER JOIN + { + $$ = LeftJoinStr + } +| RIGHT JOIN + { + $$ = RightJoinStr + } +| RIGHT OUTER JOIN + { + $$ = RightJoinStr + } + +natural_join: + NATURAL JOIN + { + $$ = NaturalJoinStr + } +| NATURAL outer_join + { + if $2 == LeftJoinStr { + $$ = NaturalLeftJoinStr + } else { + $$ = NaturalRightJoinStr + } + } + +into_table_name: + INTO table_name + { + $$ = $2 + } +| table_name + { + $$ = $1 + } + +table_name: 
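/* a table name is either bare (`t`) or database-qualified (`db.t`), matching the two alternatives below */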
+ table_id + { + $$ = TableName{Name: $1} + } +| table_id '.' reserved_table_id + { + $$ = TableName{Qualifier: $1, Name: $3} + } + +index_hint_list: + { + $$ = nil + } +| USE INDEX openb column_list closeb + { + $$ = &IndexHints{Type: UseStr, Indexes: $4} + } +| IGNORE INDEX openb column_list closeb + { + $$ = &IndexHints{Type: IgnoreStr, Indexes: $4} + } +| FORCE INDEX openb column_list closeb + { + $$ = &IndexHints{Type: ForceStr, Indexes: $4} + } + +where_expression_opt: + { + $$ = nil + } +| WHERE expression + { + $$ = $2 + } + +expression: + condition + { + $$ = $1 + } +| expression AND expression + { + $$ = &AndExpr{Left: $1, Right: $3} + } +| expression OR expression + { + $$ = &OrExpr{Left: $1, Right: $3} + } +| NOT expression + { + $$ = &NotExpr{Expr: $2} + } +| expression IS is_suffix + { + $$ = &IsExpr{Operator: $3, Expr: $1} + } +| value_expression + { + $$ = $1 + } +| DEFAULT default_opt + { + $$ = &Default{ColName: $2} + } + +default_opt: + /* empty */ + { + $$ = "" + } +| openb ID closeb + { + $$ = string($2) + } + +boolean_value: + TRUE + { + $$ = BoolVal(true) + } +| FALSE + { + $$ = BoolVal(false) + } + +condition: + value_expression compare value_expression + { + $$ = &ComparisonExpr{Left: $1, Operator: $2, Right: $3} + } +| value_expression IN col_tuple + { + $$ = &ComparisonExpr{Left: $1, Operator: InStr, Right: $3} + } +| value_expression NOT IN col_tuple + { + $$ = &ComparisonExpr{Left: $1, Operator: NotInStr, Right: $4} + } +| value_expression LIKE value_expression like_escape_opt + { + $$ = &ComparisonExpr{Left: $1, Operator: LikeStr, Right: $3, Escape: $4} + } +| value_expression NOT LIKE value_expression like_escape_opt + { + $$ = &ComparisonExpr{Left: $1, Operator: NotLikeStr, Right: $4, Escape: $5} + } +| value_expression REGEXP value_expression + { + $$ = &ComparisonExpr{Left: $1, Operator: RegexpStr, Right: $3} + } +| value_expression NOT REGEXP value_expression + { + $$ = &ComparisonExpr{Left: $1, Operator: NotRegexpStr, Right: $4} + } +| value_expression BETWEEN value_expression AND value_expression + { + $$ = &RangeCond{Left: $1, Operator: BetweenStr, From: $3, To: $5} + } +| value_expression NOT BETWEEN value_expression AND value_expression + { + $$ = &RangeCond{Left: $1, Operator: NotBetweenStr, From: $4, To: $6} + } +| EXISTS subquery + { + $$ = &ExistsExpr{Subquery: $2} + } + +is_suffix: + NULL + { + $$ = IsNullStr + } +| NOT NULL + { + $$ = IsNotNullStr + } +| TRUE + { + $$ = IsTrueStr + } +| NOT TRUE + { + $$ = IsNotTrueStr + } +| FALSE + { + $$ = IsFalseStr + } +| NOT FALSE + { + $$ = IsNotFalseStr + } + +compare: + '=' + { + $$ = EqualStr + } +| '<' + { + $$ = LessThanStr + } +| '>' + { + $$ = GreaterThanStr + } +| LE + { + $$ = LessEqualStr + } +| GE + { + $$ = GreaterEqualStr + } +| NE + { + $$ = NotEqualStr + } +| NULL_SAFE_EQUAL + { + $$ = NullSafeEqualStr + } + +like_escape_opt: + { + $$ = nil + } +| ESCAPE value_expression + { + $$ = $2 + } + +col_tuple: + row_tuple + { + $$ = $1 + } +| subquery + { + $$ = $1 + } +| LIST_ARG + { + $$ = ListArg($1) + } + +subquery: + openb select_statement closeb + { + $$ = &Subquery{$2} + } + +expression_list: + expression + { + $$ = Exprs{$1} + } +| expression_list ',' expression + { + $$ = append($1, $3) + } + +value_expression: + value + { + $$ = $1 + } +| boolean_value + { + $$ = $1 + } +| column_name + { + $$ = $1 + } +| tuple_expression + { + $$ = $1 + } +| subquery + { + $$ = $1 + } +| value_expression '&' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: BitAndStr, Right: $3} + } +| 
value_expression '|' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: BitOrStr, Right: $3} + } +| value_expression '^' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: BitXorStr, Right: $3} + } +| value_expression '+' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: PlusStr, Right: $3} + } +| value_expression '-' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: MinusStr, Right: $3} + } +| value_expression '*' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: MultStr, Right: $3} + } +| value_expression '/' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: DivStr, Right: $3} + } +| value_expression DIV value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: IntDivStr, Right: $3} + } +| value_expression '%' value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: ModStr, Right: $3} + } +| value_expression MOD value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: ModStr, Right: $3} + } +| value_expression SHIFT_LEFT value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: ShiftLeftStr, Right: $3} + } +| value_expression SHIFT_RIGHT value_expression + { + $$ = &BinaryExpr{Left: $1, Operator: ShiftRightStr, Right: $3} + } +| column_name JSON_EXTRACT_OP value + { + $$ = &BinaryExpr{Left: $1, Operator: JSONExtractOp, Right: $3} + } +| column_name JSON_UNQUOTE_EXTRACT_OP value + { + $$ = &BinaryExpr{Left: $1, Operator: JSONUnquoteExtractOp, Right: $3} + } +| value_expression COLLATE charset + { + $$ = &CollateExpr{Expr: $1, Charset: $3} + } +| BINARY value_expression %prec UNARY + { + $$ = &UnaryExpr{Operator: BinaryStr, Expr: $2} + } +| UNDERSCORE_BINARY value_expression %prec UNARY + { + $$ = &UnaryExpr{Operator: UBinaryStr, Expr: $2} + } +| '+' value_expression %prec UNARY + { + if num, ok := $2.(*SQLVal); ok && num.Type == IntVal { + $$ = num + } else { + $$ = &UnaryExpr{Operator: UPlusStr, Expr: $2} + } + } +| '-' value_expression %prec UNARY + { + if num, ok := $2.(*SQLVal); ok && num.Type == IntVal { + // Handle double negative + if num.Val[0] == '-' { + num.Val = num.Val[1:] + $$ = num + } else { + $$ = NewIntVal(append([]byte("-"), num.Val...)) + } + } else { + $$ = &UnaryExpr{Operator: UMinusStr, Expr: $2} + } + } +| '~' value_expression + { + $$ = &UnaryExpr{Operator: TildaStr, Expr: $2} + } +| '!' value_expression %prec UNARY + { + $$ = &UnaryExpr{Operator: BangStr, Expr: $2} + } +| INTERVAL value_expression sql_id + { + // This rule prevents the usage of INTERVAL + // as a function. If support is needed for that, + // we'll need to revisit this. The solution + // will be non-trivial because of grammar conflicts. + $$ = &IntervalExpr{Expr: $2, Unit: $3.String()} + } +| function_call_generic +| function_call_keyword +| function_call_nonkeyword +| function_call_conflict + +/* + Regular function calls without special token or syntax, guaranteed to not + introduce side effects due to being a simple identifier +*/ +function_call_generic: + sql_id openb select_expression_list_opt closeb + { + $$ = &FuncExpr{Name: $1, Exprs: $3} + } +| sql_id openb DISTINCT select_expression_list closeb + { + $$ = &FuncExpr{Name: $1, Distinct: true, Exprs: $4} + } +| table_id '.' 
reserved_sql_id openb select_expression_list_opt closeb + { + $$ = &FuncExpr{Qualifier: $1, Name: $3, Exprs: $5} + } + +/* + Function calls using reserved keywords, with dedicated grammar rules + as a result +*/ +function_call_keyword: + LEFT openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("left"), Exprs: $3} + } +| RIGHT openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("right"), Exprs: $3} + } +| CONVERT openb expression ',' convert_type closeb + { + $$ = &ConvertExpr{Expr: $3, Type: $5} + } +| CAST openb expression AS convert_type closeb + { + $$ = &ConvertExpr{Expr: $3, Type: $5} + } +| CONVERT openb expression USING charset closeb + { + $$ = &ConvertUsingExpr{Expr: $3, Type: $5} + } +| SUBSTR openb column_name ',' value_expression closeb + { + $$ = &SubstrExpr{Name: $3, From: $5, To: nil} + } +| SUBSTR openb column_name ',' value_expression ',' value_expression closeb + { + $$ = &SubstrExpr{Name: $3, From: $5, To: $7} + } +| SUBSTR openb column_name FROM value_expression FOR value_expression closeb + { + $$ = &SubstrExpr{Name: $3, From: $5, To: $7} + } +| SUBSTRING openb column_name ',' value_expression closeb + { + $$ = &SubstrExpr{Name: $3, From: $5, To: nil} + } +| SUBSTRING openb column_name ',' value_expression ',' value_expression closeb + { + $$ = &SubstrExpr{Name: $3, From: $5, To: $7} + } +| SUBSTRING openb column_name FROM value_expression FOR value_expression closeb + { + $$ = &SubstrExpr{Name: $3, From: $5, To: $7} + } +| MATCH openb select_expression_list closeb AGAINST openb value_expression match_option closeb + { + $$ = &MatchExpr{Columns: $3, Expr: $7, Option: $8} + } +| GROUP_CONCAT openb distinct_opt select_expression_list order_by_opt separator_opt closeb + { + $$ = &GroupConcatExpr{Distinct: $3, Exprs: $4, OrderBy: $5, Separator: $6} + } +| CASE expression_opt when_expression_list else_expression_opt END + { + $$ = &CaseExpr{Expr: $2, Whens: $3, Else: $4} + } +| VALUES openb column_name closeb + { + $$ = &ValuesFuncExpr{Name: $3} + } + +/* + Function calls using non reserved keywords but with special syntax forms. + Dedicated grammar rules are needed because of the special syntax +*/ +function_call_nonkeyword: + CURRENT_TIMESTAMP func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("current_timestamp")} + } +| UTC_TIMESTAMP func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("utc_timestamp")} + } +| UTC_TIME func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("utc_time")} + } +| UTC_DATE func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("utc_date")} + } + // now +| LOCALTIME func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("localtime")} + } + // now +| LOCALTIMESTAMP func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("localtimestamp")} + } + // curdate +| CURRENT_DATE func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("current_date")} + } + // curtime +| CURRENT_TIME func_datetime_precision_opt + { + $$ = &FuncExpr{Name:NewColIdent("current_time")} + } + +func_datetime_precision_opt: + /* empty */ +| openb closeb + +/* + Function calls using non reserved keywords with *normal* syntax forms. 
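 (the alternatives below are IF, DATABASE, MOD and REPLACE; each also plays
 another role in this grammar, e.g. MOD as an arithmetic operator and IF in
 IF EXISTS, so function_call_generic cannot be allowed to match them)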
Because + the names are non-reserved, they need a dedicated rule so as not to conflict +*/ +function_call_conflict: + IF openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("if"), Exprs: $3} + } +| DATABASE openb select_expression_list_opt closeb + { + $$ = &FuncExpr{Name: NewColIdent("database"), Exprs: $3} + } +| MOD openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("mod"), Exprs: $3} + } +| REPLACE openb select_expression_list closeb + { + $$ = &FuncExpr{Name: NewColIdent("replace"), Exprs: $3} + } + +match_option: +/*empty*/ + { + $$ = "" + } +| IN BOOLEAN MODE + { + $$ = BooleanModeStr + } +| IN NATURAL LANGUAGE MODE + { + $$ = NaturalLanguageModeStr + } +| IN NATURAL LANGUAGE MODE WITH QUERY EXPANSION + { + $$ = NaturalLanguageModeWithQueryExpansionStr + } +| WITH QUERY EXPANSION + { + $$ = QueryExpansionStr + } + +charset: + ID +{ + $$ = string($1) +} +| STRING +{ + $$ = string($1) +} + +convert_type: + BINARY length_opt + { + $$ = &ConvertType{Type: string($1), Length: $2} + } +| CHAR length_opt charset_opt + { + $$ = &ConvertType{Type: string($1), Length: $2, Charset: $3, Operator: CharacterSetStr} + } +| CHAR length_opt ID + { + $$ = &ConvertType{Type: string($1), Length: $2, Charset: string($3)} + } +| DATE + { + $$ = &ConvertType{Type: string($1)} + } +| DATETIME length_opt + { + $$ = &ConvertType{Type: string($1), Length: $2} + } +| DECIMAL decimal_length_opt + { + $$ = &ConvertType{Type: string($1)} + $$.Length = $2.Length + $$.Scale = $2.Scale + } +| JSON + { + $$ = &ConvertType{Type: string($1)} + } +| NCHAR length_opt + { + $$ = &ConvertType{Type: string($1), Length: $2} + } +| SIGNED + { + $$ = &ConvertType{Type: string($1)} + } +| SIGNED INTEGER + { + $$ = &ConvertType{Type: string($1)} + } +| TIME length_opt + { + $$ = &ConvertType{Type: string($1), Length: $2} + } +| UNSIGNED + { + $$ = &ConvertType{Type: string($1)} + } +| UNSIGNED INTEGER + { + $$ = &ConvertType{Type: string($1)} + } + +expression_opt: + { + $$ = nil + } +| expression + { + $$ = $1 + } + +separator_opt: + { + $$ = string("") + } +| SEPARATOR STRING + { + $$ = " separator '"+string($2)+"'" + } + +when_expression_list: + when_expression + { + $$ = []*When{$1} + } +| when_expression_list when_expression + { + $$ = append($1, $2) + } + +when_expression: + WHEN expression THEN expression + { + $$ = &When{Cond: $2, Val: $4} + } + +else_expression_opt: + { + $$ = nil + } +| ELSE expression + { + $$ = $2 + } + +column_name: + sql_id + { + $$ = &ColName{Name: $1} + } +| table_id '.' reserved_sql_id + { + $$ = &ColName{Qualifier: TableName{Name: $1}, Name: $3} + } +| table_id '.' reserved_table_id '.' reserved_sql_id + { + $$ = &ColName{Qualifier: TableName{Qualifier: $1, Name: $3}, Name: $5} + } + +value: + STRING + { + $$ = NewStrVal($1) + } +| HEX + { + $$ = NewHexVal($1) + } +| BIT_LITERAL + { + $$ = NewBitVal($1) + } +| INTEGRAL + { + $$ = NewIntVal($1) + } +| FLOAT + { + $$ = NewFloatVal($1) + } +| HEXNUM + { + $$ = NewHexNum($1) + } +| VALUE_ARG + { + $$ = NewValArg($1) + } +| NULL + { + $$ = &NullVal{} + } + +num_val: + sql_id + { + // TODO(sougou): Deprecate this construct. 
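 // This alternative accepts only the identifier `value`, i.e. the Vitess
 // sequence syntax `select next value from seq`; the bare `value` form is
 // normalized to a count of 1, and anything else after NEXT is rejected.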
+ if $1.Lowered() != "value" { + yylex.Error("expecting value after next") + return 1 + } + $$ = NewIntVal([]byte("1")) + } +| INTEGRAL VALUES + { + $$ = NewIntVal($1) + } +| VALUE_ARG VALUES + { + $$ = NewValArg($1) + } + +group_by_opt: + { + $$ = nil + } +| GROUP BY expression_list + { + $$ = $3 + } + +having_opt: + { + $$ = nil + } +| HAVING expression + { + $$ = $2 + } + +order_by_opt: + { + $$ = nil + } +| ORDER BY order_list + { + $$ = $3 + } + +order_list: + order + { + $$ = OrderBy{$1} + } +| order_list ',' order + { + $$ = append($1, $3) + } + +order: + expression asc_desc_opt + { + $$ = &Order{Expr: $1, Direction: $2} + } + +asc_desc_opt: + { + $$ = AscScr + } +| ASC + { + $$ = AscScr + } +| DESC + { + $$ = DescScr + } + +limit_opt: + { + $$ = nil + } +| LIMIT expression + { + $$ = &Limit{Rowcount: $2} + } +| LIMIT expression ',' expression + { + $$ = &Limit{Offset: $2, Rowcount: $4} + } +| LIMIT expression OFFSET expression + { + $$ = &Limit{Offset: $4, Rowcount: $2} + } + +lock_opt: + { + $$ = "" + } +| FOR UPDATE + { + $$ = ForUpdateStr + } +| LOCK IN SHARE MODE + { + $$ = ShareModeStr + } + +// insert_data expands all combinations into a single rule. +// This avoids a shift/reduce conflict while encountering the +// following two possible constructs: +// insert into t1(a, b) (select * from t2) +// insert into t1(select * from t2) +// Because the rules are together, the parser can keep shifting +// the tokens until it disambiguates a as sql_id and select as keyword. +insert_data: + VALUES tuple_list + { + $$ = &Insert{Rows: $2} + } +| select_statement + { + $$ = &Insert{Rows: $1} + } +| openb select_statement closeb + { + // Drop the redundant parenthesis. + $$ = &Insert{Rows: $2} + } +| openb ins_column_list closeb VALUES tuple_list + { + $$ = &Insert{Columns: $2, Rows: $5} + } +| openb ins_column_list closeb select_statement + { + $$ = &Insert{Columns: $2, Rows: $4} + } +| openb ins_column_list closeb openb select_statement closeb + { + // Drop the redundant parenthesis. + $$ = &Insert{Columns: $2, Rows: $5} + } + +ins_column_list: + sql_id + { + $$ = Columns{$1} + } +| sql_id '.' sql_id + { + $$ = Columns{$3} + } +| ins_column_list ',' sql_id + { + $$ = append($$, $3) + } +| ins_column_list ',' sql_id '.' 
sql_id + { + $$ = append($$, $5) + } + +on_dup_opt: + { + $$ = nil + } +| ON DUPLICATE KEY UPDATE update_list + { + $$ = $5 + } + +tuple_list: + tuple_or_empty + { + $$ = Values{$1} + } +| tuple_list ',' tuple_or_empty + { + $$ = append($1, $3) + } + +tuple_or_empty: + row_tuple + { + $$ = $1 + } +| openb closeb + { + $$ = ValTuple{} + } + +row_tuple: + openb expression_list closeb + { + $$ = ValTuple($2) + } + +tuple_expression: + row_tuple + { + if len($1) == 1 { + $$ = &ParenExpr{$1[0]} + } else { + $$ = $1 + } + } + +update_list: + update_expression + { + $$ = UpdateExprs{$1} + } +| update_list ',' update_expression + { + $$ = append($1, $3) + } + +update_expression: + column_name '=' expression + { + $$ = &UpdateExpr{Name: $1, Expr: $3} + } + +set_list: + set_expression + { + $$ = SetExprs{$1} + } +| set_list ',' set_expression + { + $$ = append($1, $3) + } + +set_expression: + reserved_sql_id '=' ON + { + $$ = &SetExpr{Name: $1, Expr: NewStrVal([]byte("on"))} + } +| reserved_sql_id '=' expression + { + $$ = &SetExpr{Name: $1, Expr: $3} + } +| charset_or_character_set charset_value collate_opt + { + $$ = &SetExpr{Name: NewColIdent(string($1)), Expr: $2} + } + +charset_or_character_set: + CHARSET +| CHARACTER SET + { + $$ = []byte("charset") + } +| NAMES + +charset_value: + sql_id + { + $$ = NewStrVal([]byte($1.String())) + } +| STRING + { + $$ = NewStrVal($1) + } +| DEFAULT + { + $$ = &Default{} + } + +for_from: + FOR +| FROM + +exists_opt: + { $$ = 0 } +| IF EXISTS + { $$ = 1 } + +not_exists_opt: + { $$ = struct{}{} } +| IF NOT EXISTS + { $$ = struct{}{} } + +ignore_opt: + { $$ = "" } +| IGNORE + { $$ = IgnoreStr } + +non_add_drop_or_rename_operation: + ALTER + { $$ = struct{}{} } +| AUTO_INCREMENT + { $$ = struct{}{} } +| CHARACTER + { $$ = struct{}{} } +| COMMENT_KEYWORD + { $$ = struct{}{} } +| DEFAULT + { $$ = struct{}{} } +| ORDER + { $$ = struct{}{} } +| CONVERT + { $$ = struct{}{} } +| PARTITION + { $$ = struct{}{} } +| UNUSED + { $$ = struct{}{} } +| ID + { $$ = struct{}{} } + +to_opt: + { $$ = struct{}{} } +| TO + { $$ = struct{}{} } +| AS + { $$ = struct{}{} } + +index_opt: + INDEX + { $$ = struct{}{} } +| KEY + { $$ = struct{}{} } + +constraint_opt: + { $$ = struct{}{} } +| UNIQUE + { $$ = struct{}{} } +| sql_id + { $$ = struct{}{} } + +using_opt: + { $$ = ColIdent{} } +| USING sql_id + { $$ = $2 } + +sql_id: + ID + { + $$ = NewColIdent(string($1)) + } +| non_reserved_keyword + { + $$ = NewColIdent(string($1)) + } + +reserved_sql_id: + sql_id +| reserved_keyword + { + $$ = NewColIdent(string($1)) + } + +table_id: + ID + { + $$ = NewTableIdent(string($1)) + } +| non_reserved_keyword + { + $$ = NewTableIdent(string($1)) + } + +reserved_table_id: + table_id +| reserved_keyword + { + $$ = NewTableIdent(string($1)) + } + +/* + These are not all necessarily reserved in MySQL, but some are. + + These are more importantly reserved because they may conflict with our grammar. + If you want to move one that is not reserved in MySQL (i.e. ESCAPE) to the + non_reserved_keywords, you'll need to deal with any conflicts. 
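 ESCAPE is a case in point: MySQL does not reserve it, but it is kept
 reserved here because it follows a value_expression in like_escape_opt,
 and demoting it to an identifier would likely conflict with alias
 positions such as as_ci_opt.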
+ + Sorted alphabetically +*/ +reserved_keyword: + ADD +| AND +| AS +| ASC +| AUTO_INCREMENT +| BETWEEN +| BINARY +| BY +| CASE +| COLLATE +| CONVERT +| CREATE +| CROSS +| CURRENT_DATE +| CURRENT_TIME +| CURRENT_TIMESTAMP +| SUBSTR +| SUBSTRING +| DATABASE +| DATABASES +| DEFAULT +| DELETE +| DESC +| DESCRIBE +| DISTINCT +| DIV +| DROP +| ELSE +| END +| ESCAPE +| EXISTS +| EXPLAIN +| FALSE +| FOR +| FORCE +| FROM +| GROUP +| HAVING +| IF +| IGNORE +| IN +| INDEX +| INNER +| INSERT +| INTERVAL +| INTO +| IS +| JOIN +| KEY +| LEFT +| LIKE +| LIMIT +| LOCALTIME +| LOCALTIMESTAMP +| LOCK +| MATCH +| MAXVALUE +| MOD +| NATURAL +| NEXT // next should be doable as non-reserved, but is not due to the special `select next num_val` query that vitess supports +| NOT +| NULL +| ON +| OR +| ORDER +| OUTER +| REGEXP +| RENAME +| REPLACE +| RIGHT +| SCHEMA +| SELECT +| SEPARATOR +| SET +| SHOW +| STRAIGHT_JOIN +| TABLE +| TABLES +| THEN +| TO +| TRUE +| UNION +| UNIQUE +| UPDATE +| USE +| USING +| UTC_DATE +| UTC_TIME +| UTC_TIMESTAMP +| VALUES +| WHEN +| WHERE + +/* + These are non-reserved Vitess, because they don't cause conflicts in the grammar. + Some of them may be reserved in MySQL. The good news is we backtick quote them + when we rewrite the query, so no issue should arise. + + Sorted alphabetically +*/ +non_reserved_keyword: + AGAINST +| BEGIN +| BIGINT +| BIT +| BLOB +| BOOL +| CHAR +| CHARACTER +| CHARSET +| COMMENT_KEYWORD +| COMMIT +| COMMITTED +| DATE +| DATETIME +| DECIMAL +| DOUBLE +| DUPLICATE +| ENUM +| EXPANSION +| FLOAT_TYPE +| FOREIGN +| FULLTEXT +| GEOMETRY +| GEOMETRYCOLLECTION +| GLOBAL +| INT +| INTEGER +| ISOLATION +| JSON +| KEY_BLOCK_SIZE +| KEYS +| LANGUAGE +| LAST_INSERT_ID +| LESS +| LEVEL +| LINESTRING +| LONGBLOB +| LONGTEXT +| MEDIUMBLOB +| MEDIUMINT +| MEDIUMTEXT +| MODE +| MULTILINESTRING +| MULTIPOINT +| MULTIPOLYGON +| NAMES +| NCHAR +| NUMERIC +| OFFSET +| ONLY +| OPTIMIZE +| PARTITION +| POINT +| POLYGON +| PRIMARY +| PROCEDURE +| QUERY +| READ +| REAL +| REORGANIZE +| REPAIR +| REPEATABLE +| ROLLBACK +| SESSION +| SERIALIZABLE +| SHARE +| SIGNED +| SMALLINT +| SPATIAL +| START +| STATUS +| TEXT +| THAN +| TIME +| TIMESTAMP +| TINYBLOB +| TINYINT +| TINYTEXT +| TRANSACTION +| TRIGGER +| TRUNCATE +| UNCOMMITTED +| UNSIGNED +| UNUSED +| VARBINARY +| VARCHAR +| VARIABLES +| VIEW +| VINDEX +| VINDEXES +| VITESS_KEYSPACES +| VITESS_SHARDS +| VITESS_TABLETS +| VSCHEMA_TABLES +| WITH +| WRITE +| YEAR +| ZEROFILL + +openb: + '(' + { + if incNesting(yylex) { + yylex.Error("max nesting level reached") + return 1 + } + } + +closeb: + ')' + { + decNesting(yylex) + } + +force_eof: +{ + forceEOF(yylex) +} + +ddl_force_eof: + { + forceEOF(yylex) + } +| openb + { + forceEOF(yylex) + } +| reserved_sql_id + { + forceEOF(yylex) + } diff --git a/vendor/github.com/xwb1989/sqlparser/token.go b/vendor/github.com/xwb1989/sqlparser/token.go new file mode 100644 index 000000000..23efa77cf --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/token.go @@ -0,0 +1,950 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "bytes" + "errors" + "fmt" + "io" + + "github.com/xwb1989/sqlparser/dependency/bytes2" + "github.com/xwb1989/sqlparser/dependency/sqltypes" +) + +const ( + defaultBufSize = 4096 + eofChar = 0x100 +) + +// Tokenizer is the struct used to generate SQL +// tokens for the parser. +type Tokenizer struct { + InStream io.Reader + AllowComments bool + ForceEOF bool + lastChar uint16 + Position int + lastToken []byte + LastError error + posVarIndex int + ParseTree Statement + partialDDL *DDL + nesting int + multi bool + specialComment *Tokenizer + + buf []byte + bufPos int + bufSize int +} + +// NewStringTokenizer creates a new Tokenizer for the +// sql string. +func NewStringTokenizer(sql string) *Tokenizer { + buf := []byte(sql) + return &Tokenizer{ + buf: buf, + bufSize: len(buf), + } +} + +// NewTokenizer creates a new Tokenizer reading a sql +// string from the io.Reader. +func NewTokenizer(r io.Reader) *Tokenizer { + return &Tokenizer{ + InStream: r, + buf: make([]byte, defaultBufSize), + } +} + +// keywords is a map of mysql keywords that fall into two categories: +// 1) keywords considered reserved by MySQL +// 2) keywords for us to handle specially in sql.y +// +// Those marked as UNUSED are likely reserved keywords. We add them here so that +// when rewriting queries we can properly backtick quote them so they don't cause issues +// +// NOTE: If you add new keywords, add them also to the reserved_keywords or +// non_reserved_keywords grammar in sql.y -- this will allow the keyword to be used +// in identifiers. See the docs for each grammar to determine which one to put it into. +var keywords = map[string]int{ + "accessible": UNUSED, + "add": ADD, + "against": AGAINST, + "all": ALL, + "alter": ALTER, + "analyze": ANALYZE, + "and": AND, + "as": AS, + "asc": ASC, + "asensitive": UNUSED, + "auto_increment": AUTO_INCREMENT, + "before": UNUSED, + "begin": BEGIN, + "between": BETWEEN, + "bigint": BIGINT, + "binary": BINARY, + "_binary": UNDERSCORE_BINARY, + "bit": BIT, + "blob": BLOB, + "bool": BOOL, + "boolean": BOOLEAN, + "both": UNUSED, + "by": BY, + "call": UNUSED, + "cascade": UNUSED, + "case": CASE, + "cast": CAST, + "change": UNUSED, + "char": CHAR, + "character": CHARACTER, + "charset": CHARSET, + "check": UNUSED, + "collate": COLLATE, + "column": COLUMN, + "comment": COMMENT_KEYWORD, + "committed": COMMITTED, + "commit": COMMIT, + "condition": UNUSED, + "constraint": CONSTRAINT, + "continue": UNUSED, + "convert": CONVERT, + "substr": SUBSTR, + "substring": SUBSTRING, + "create": CREATE, + "cross": CROSS, + "current_date": CURRENT_DATE, + "current_time": CURRENT_TIME, + "current_timestamp": CURRENT_TIMESTAMP, + "current_user": UNUSED, + "cursor": UNUSED, + "database": DATABASE, + "databases": DATABASES, + "day_hour": UNUSED, + "day_microsecond": UNUSED, + "day_minute": UNUSED, + "day_second": UNUSED, + "date": DATE, + "datetime": DATETIME, + "dec": UNUSED, + "decimal": DECIMAL, + "declare": UNUSED, + "default": DEFAULT, + "delayed": UNUSED, + "delete": DELETE, + "desc": DESC, + "describe": DESCRIBE, + "deterministic": UNUSED, + "distinct": DISTINCT, + "distinctrow": UNUSED, + "div": DIV, + "double": DOUBLE, + "drop": DROP, + "duplicate": DUPLICATE, + "each": UNUSED, + "else": ELSE, + "elseif": UNUSED, + "enclosed": UNUSED, + "end": END, + "enum": ENUM, + "escape": ESCAPE, + "escaped": UNUSED, + "exists": EXISTS, + "exit": UNUSED, + "explain": 
EXPLAIN, + "expansion": EXPANSION, + "extended": EXTENDED, + "false": FALSE, + "fetch": UNUSED, + "float": FLOAT_TYPE, + "float4": UNUSED, + "float8": UNUSED, + "for": FOR, + "force": FORCE, + "foreign": FOREIGN, + "from": FROM, + "full": FULL, + "fulltext": FULLTEXT, + "generated": UNUSED, + "geometry": GEOMETRY, + "geometrycollection": GEOMETRYCOLLECTION, + "get": UNUSED, + "global": GLOBAL, + "grant": UNUSED, + "group": GROUP, + "group_concat": GROUP_CONCAT, + "having": HAVING, + "high_priority": UNUSED, + "hour_microsecond": UNUSED, + "hour_minute": UNUSED, + "hour_second": UNUSED, + "if": IF, + "ignore": IGNORE, + "in": IN, + "index": INDEX, + "infile": UNUSED, + "inout": UNUSED, + "inner": INNER, + "insensitive": UNUSED, + "insert": INSERT, + "int": INT, + "int1": UNUSED, + "int2": UNUSED, + "int3": UNUSED, + "int4": UNUSED, + "int8": UNUSED, + "integer": INTEGER, + "interval": INTERVAL, + "into": INTO, + "io_after_gtids": UNUSED, + "is": IS, + "isolation": ISOLATION, + "iterate": UNUSED, + "join": JOIN, + "json": JSON, + "key": KEY, + "keys": KEYS, + "key_block_size": KEY_BLOCK_SIZE, + "kill": UNUSED, + "language": LANGUAGE, + "last_insert_id": LAST_INSERT_ID, + "leading": UNUSED, + "leave": UNUSED, + "left": LEFT, + "less": LESS, + "level": LEVEL, + "like": LIKE, + "limit": LIMIT, + "linear": UNUSED, + "lines": UNUSED, + "linestring": LINESTRING, + "load": UNUSED, + "localtime": LOCALTIME, + "localtimestamp": LOCALTIMESTAMP, + "lock": LOCK, + "long": UNUSED, + "longblob": LONGBLOB, + "longtext": LONGTEXT, + "loop": UNUSED, + "low_priority": UNUSED, + "master_bind": UNUSED, + "match": MATCH, + "maxvalue": MAXVALUE, + "mediumblob": MEDIUMBLOB, + "mediumint": MEDIUMINT, + "mediumtext": MEDIUMTEXT, + "middleint": UNUSED, + "minute_microsecond": UNUSED, + "minute_second": UNUSED, + "mod": MOD, + "mode": MODE, + "modifies": UNUSED, + "multilinestring": MULTILINESTRING, + "multipoint": MULTIPOINT, + "multipolygon": MULTIPOLYGON, + "names": NAMES, + "natural": NATURAL, + "nchar": NCHAR, + "next": NEXT, + "not": NOT, + "no_write_to_binlog": UNUSED, + "null": NULL, + "numeric": NUMERIC, + "offset": OFFSET, + "on": ON, + "only": ONLY, + "optimize": OPTIMIZE, + "optimizer_costs": UNUSED, + "option": UNUSED, + "optionally": UNUSED, + "or": OR, + "order": ORDER, + "out": UNUSED, + "outer": OUTER, + "outfile": UNUSED, + "partition": PARTITION, + "point": POINT, + "polygon": POLYGON, + "precision": UNUSED, + "primary": PRIMARY, + "processlist": PROCESSLIST, + "procedure": PROCEDURE, + "query": QUERY, + "range": UNUSED, + "read": READ, + "reads": UNUSED, + "read_write": UNUSED, + "real": REAL, + "references": UNUSED, + "regexp": REGEXP, + "release": UNUSED, + "rename": RENAME, + "reorganize": REORGANIZE, + "repair": REPAIR, + "repeat": UNUSED, + "repeatable": REPEATABLE, + "replace": REPLACE, + "require": UNUSED, + "resignal": UNUSED, + "restrict": UNUSED, + "return": UNUSED, + "revoke": UNUSED, + "right": RIGHT, + "rlike": REGEXP, + "rollback": ROLLBACK, + "schema": SCHEMA, + "schemas": UNUSED, + "second_microsecond": UNUSED, + "select": SELECT, + "sensitive": UNUSED, + "separator": SEPARATOR, + "serializable": SERIALIZABLE, + "session": SESSION, + "set": SET, + "share": SHARE, + "show": SHOW, + "signal": UNUSED, + "signed": SIGNED, + "smallint": SMALLINT, + "spatial": SPATIAL, + "specific": UNUSED, + "sql": UNUSED, + "sqlexception": UNUSED, + "sqlstate": UNUSED, + "sqlwarning": UNUSED, + "sql_big_result": UNUSED, + "sql_cache": SQL_CACHE, + "sql_calc_found_rows": UNUSED, + "sql_no_cache": 
SQL_NO_CACHE, + "sql_small_result": UNUSED, + "ssl": UNUSED, + "start": START, + "starting": UNUSED, + "status": STATUS, + "stored": UNUSED, + "straight_join": STRAIGHT_JOIN, + "stream": STREAM, + "table": TABLE, + "tables": TABLES, + "terminated": UNUSED, + "text": TEXT, + "than": THAN, + "then": THEN, + "time": TIME, + "timestamp": TIMESTAMP, + "tinyblob": TINYBLOB, + "tinyint": TINYINT, + "tinytext": TINYTEXT, + "to": TO, + "trailing": UNUSED, + "transaction": TRANSACTION, + "trigger": TRIGGER, + "true": TRUE, + "truncate": TRUNCATE, + "uncommitted": UNCOMMITTED, + "undo": UNUSED, + "union": UNION, + "unique": UNIQUE, + "unlock": UNUSED, + "unsigned": UNSIGNED, + "update": UPDATE, + "usage": UNUSED, + "use": USE, + "using": USING, + "utc_date": UTC_DATE, + "utc_time": UTC_TIME, + "utc_timestamp": UTC_TIMESTAMP, + "values": VALUES, + "variables": VARIABLES, + "varbinary": VARBINARY, + "varchar": VARCHAR, + "varcharacter": UNUSED, + "varying": UNUSED, + "virtual": UNUSED, + "vindex": VINDEX, + "vindexes": VINDEXES, + "view": VIEW, + "vitess_keyspaces": VITESS_KEYSPACES, + "vitess_shards": VITESS_SHARDS, + "vitess_tablets": VITESS_TABLETS, + "vschema_tables": VSCHEMA_TABLES, + "when": WHEN, + "where": WHERE, + "while": UNUSED, + "with": WITH, + "write": WRITE, + "xor": UNUSED, + "year": YEAR, + "year_month": UNUSED, + "zerofill": ZEROFILL, +} + +// keywordStrings contains the reverse mapping of token to keyword strings +var keywordStrings = map[int]string{} + +func init() { + for str, id := range keywords { + if id == UNUSED { + continue + } + keywordStrings[id] = str + } +} + +// KeywordString returns the string corresponding to the given keyword +func KeywordString(id int) string { + str, ok := keywordStrings[id] + if !ok { + return "" + } + return str +} + +// Lex returns the next token form the Tokenizer. +// This function is used by go yacc. +func (tkn *Tokenizer) Lex(lval *yySymType) int { + typ, val := tkn.Scan() + for typ == COMMENT { + if tkn.AllowComments { + break + } + typ, val = tkn.Scan() + } + lval.bytes = val + tkn.lastToken = val + return typ +} + +// Error is called by go yacc if there's a parsing error. +func (tkn *Tokenizer) Error(err string) { + buf := &bytes2.Buffer{} + if tkn.lastToken != nil { + fmt.Fprintf(buf, "%s at position %v near '%s'", err, tkn.Position, tkn.lastToken) + } else { + fmt.Fprintf(buf, "%s at position %v", err, tkn.Position) + } + tkn.LastError = errors.New(buf.String()) + + // Try and re-sync to the next statement + if tkn.lastChar != ';' { + tkn.skipStatement() + } +} + +// Scan scans the tokenizer for the next token and returns +// the token type and an optional value. +func (tkn *Tokenizer) Scan() (int, []byte) { + if tkn.specialComment != nil { + // Enter specialComment scan mode. + // for scanning such kind of comment: /*! MySQL-specific code */ + specialComment := tkn.specialComment + tok, val := specialComment.Scan() + if tok != 0 { + // return the specialComment scan result as the result + return tok, val + } + // leave specialComment scan mode after all stream consumed. 
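 // For example, given `select /*! straight_join */ 1`, the text between
 // `/*!` and `*/` is re-scanned as ordinary SQL by the nested tokenizer
 // above; only after it is exhausted does scanning of the outer stream
 // resume below.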
+ tkn.specialComment = nil + } + if tkn.lastChar == 0 { + tkn.next() + } + + if tkn.ForceEOF { + tkn.skipStatement() + return 0, nil + } + + tkn.skipBlank() + switch ch := tkn.lastChar; { + case isLetter(ch): + tkn.next() + if ch == 'X' || ch == 'x' { + if tkn.lastChar == '\'' { + tkn.next() + return tkn.scanHex() + } + } + if ch == 'B' || ch == 'b' { + if tkn.lastChar == '\'' { + tkn.next() + return tkn.scanBitLiteral() + } + } + isDbSystemVariable := false + if ch == '@' && tkn.lastChar == '@' { + isDbSystemVariable = true + } + return tkn.scanIdentifier(byte(ch), isDbSystemVariable) + case isDigit(ch): + return tkn.scanNumber(false) + case ch == ':': + return tkn.scanBindVar() + case ch == ';' && tkn.multi: + return 0, nil + default: + tkn.next() + switch ch { + case eofChar: + return 0, nil + case '=', ',', ';', '(', ')', '+', '*', '%', '^', '~': + return int(ch), nil + case '&': + if tkn.lastChar == '&' { + tkn.next() + return AND, nil + } + return int(ch), nil + case '|': + if tkn.lastChar == '|' { + tkn.next() + return OR, nil + } + return int(ch), nil + case '?': + tkn.posVarIndex++ + buf := new(bytes2.Buffer) + fmt.Fprintf(buf, ":v%d", tkn.posVarIndex) + return VALUE_ARG, buf.Bytes() + case '.': + if isDigit(tkn.lastChar) { + return tkn.scanNumber(true) + } + return int(ch), nil + case '/': + switch tkn.lastChar { + case '/': + tkn.next() + return tkn.scanCommentType1("//") + case '*': + tkn.next() + switch tkn.lastChar { + case '!': + return tkn.scanMySQLSpecificComment() + default: + return tkn.scanCommentType2() + } + default: + return int(ch), nil + } + case '#': + return tkn.scanCommentType1("#") + case '-': + switch tkn.lastChar { + case '-': + tkn.next() + return tkn.scanCommentType1("--") + case '>': + tkn.next() + if tkn.lastChar == '>' { + tkn.next() + return JSON_UNQUOTE_EXTRACT_OP, nil + } + return JSON_EXTRACT_OP, nil + } + return int(ch), nil + case '<': + switch tkn.lastChar { + case '>': + tkn.next() + return NE, nil + case '<': + tkn.next() + return SHIFT_LEFT, nil + case '=': + tkn.next() + switch tkn.lastChar { + case '>': + tkn.next() + return NULL_SAFE_EQUAL, nil + default: + return LE, nil + } + default: + return int(ch), nil + } + case '>': + switch tkn.lastChar { + case '=': + tkn.next() + return GE, nil + case '>': + tkn.next() + return SHIFT_RIGHT, nil + default: + return int(ch), nil + } + case '!': + if tkn.lastChar == '=' { + tkn.next() + return NE, nil + } + return int(ch), nil + case '\'', '"': + return tkn.scanString(ch, STRING) + case '`': + return tkn.scanLiteralIdentifier() + default: + return LEX_ERROR, []byte{byte(ch)} + } + } +} + +// skipStatement scans until the EOF, or end of statement is encountered. 
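// It is used by Error to re-synchronize with the next statement after a
// parse failure, and by Scan when ForceEOF has been set.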
+func (tkn *Tokenizer) skipStatement() { + ch := tkn.lastChar + for ch != ';' && ch != eofChar { + tkn.next() + ch = tkn.lastChar + } +} + +func (tkn *Tokenizer) skipBlank() { + ch := tkn.lastChar + for ch == ' ' || ch == '\n' || ch == '\r' || ch == '\t' { + tkn.next() + ch = tkn.lastChar + } +} + +func (tkn *Tokenizer) scanIdentifier(firstByte byte, isDbSystemVariable bool) (int, []byte) { + buffer := &bytes2.Buffer{} + buffer.WriteByte(firstByte) + for isLetter(tkn.lastChar) || isDigit(tkn.lastChar) || (isDbSystemVariable && isCarat(tkn.lastChar)) { + buffer.WriteByte(byte(tkn.lastChar)) + tkn.next() + } + lowered := bytes.ToLower(buffer.Bytes()) + loweredStr := string(lowered) + if keywordID, found := keywords[loweredStr]; found { + return keywordID, lowered + } + // dual must always be case-insensitive + if loweredStr == "dual" { + return ID, lowered + } + return ID, buffer.Bytes() +} + +func (tkn *Tokenizer) scanHex() (int, []byte) { + buffer := &bytes2.Buffer{} + tkn.scanMantissa(16, buffer) + if tkn.lastChar != '\'' { + return LEX_ERROR, buffer.Bytes() + } + tkn.next() + if buffer.Len()%2 != 0 { + return LEX_ERROR, buffer.Bytes() + } + return HEX, buffer.Bytes() +} + +func (tkn *Tokenizer) scanBitLiteral() (int, []byte) { + buffer := &bytes2.Buffer{} + tkn.scanMantissa(2, buffer) + if tkn.lastChar != '\'' { + return LEX_ERROR, buffer.Bytes() + } + tkn.next() + return BIT_LITERAL, buffer.Bytes() +} + +func (tkn *Tokenizer) scanLiteralIdentifier() (int, []byte) { + buffer := &bytes2.Buffer{} + backTickSeen := false + for { + if backTickSeen { + if tkn.lastChar != '`' { + break + } + backTickSeen = false + buffer.WriteByte('`') + tkn.next() + continue + } + // The previous char was not a backtick. + switch tkn.lastChar { + case '`': + backTickSeen = true + case eofChar: + // Premature EOF. + return LEX_ERROR, buffer.Bytes() + default: + buffer.WriteByte(byte(tkn.lastChar)) + } + tkn.next() + } + if buffer.Len() == 0 { + return LEX_ERROR, buffer.Bytes() + } + return ID, buffer.Bytes() +} + +func (tkn *Tokenizer) scanBindVar() (int, []byte) { + buffer := &bytes2.Buffer{} + buffer.WriteByte(byte(tkn.lastChar)) + token := VALUE_ARG + tkn.next() + if tkn.lastChar == ':' { + token = LIST_ARG + buffer.WriteByte(byte(tkn.lastChar)) + tkn.next() + } + if !isLetter(tkn.lastChar) { + return LEX_ERROR, buffer.Bytes() + } + for isLetter(tkn.lastChar) || isDigit(tkn.lastChar) || tkn.lastChar == '.' { + buffer.WriteByte(byte(tkn.lastChar)) + tkn.next() + } + return token, buffer.Bytes() +} + +func (tkn *Tokenizer) scanMantissa(base int, buffer *bytes2.Buffer) { + for digitVal(tkn.lastChar) < base { + tkn.consumeNext(buffer) + } +} + +func (tkn *Tokenizer) scanNumber(seenDecimalPoint bool) (int, []byte) { + token := INTEGRAL + buffer := &bytes2.Buffer{} + if seenDecimalPoint { + token = FLOAT + buffer.WriteByte('.') + tkn.scanMantissa(10, buffer) + goto exponent + } + + // 0x construct. + if tkn.lastChar == '0' { + tkn.consumeNext(buffer) + if tkn.lastChar == 'x' || tkn.lastChar == 'X' { + token = HEXNUM + tkn.consumeNext(buffer) + tkn.scanMantissa(16, buffer) + goto exit + } + } + + tkn.scanMantissa(10, buffer) + + if tkn.lastChar == '.' { + token = FLOAT + tkn.consumeNext(buffer) + tkn.scanMantissa(10, buffer) + } + +exponent: + if tkn.lastChar == 'e' || tkn.lastChar == 'E' { + token = FLOAT + tkn.consumeNext(buffer) + if tkn.lastChar == '+' || tkn.lastChar == '-' { + tkn.consumeNext(buffer) + } + tkn.scanMantissa(10, buffer) + } + +exit: + // A letter cannot immediately follow a number. 
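 // For example, `123abc` is rejected as LEX_ERROR here rather than being
 // split into INTEGRAL(123) followed by ID(abc).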
+ if isLetter(tkn.lastChar) { + return LEX_ERROR, buffer.Bytes() + } + + return token, buffer.Bytes() +} + +func (tkn *Tokenizer) scanString(delim uint16, typ int) (int, []byte) { + var buffer bytes2.Buffer + for { + ch := tkn.lastChar + if ch == eofChar { + // Unterminated string. + return LEX_ERROR, buffer.Bytes() + } + + if ch != delim && ch != '\\' { + buffer.WriteByte(byte(ch)) + + // Scan ahead to the next interesting character. + start := tkn.bufPos + for ; tkn.bufPos < tkn.bufSize; tkn.bufPos++ { + ch = uint16(tkn.buf[tkn.bufPos]) + if ch == delim || ch == '\\' { + break + } + } + + buffer.Write(tkn.buf[start:tkn.bufPos]) + tkn.Position += (tkn.bufPos - start) + + if tkn.bufPos >= tkn.bufSize { + // Reached the end of the buffer without finding a delim or + // escape character. + tkn.next() + continue + } + + tkn.bufPos++ + tkn.Position++ + } + tkn.next() // Read one past the delim or escape character. + + if ch == '\\' { + if tkn.lastChar == eofChar { + // String terminates mid escape character. + return LEX_ERROR, buffer.Bytes() + } + if decodedChar := sqltypes.SQLDecodeMap[byte(tkn.lastChar)]; decodedChar == sqltypes.DontEscape { + ch = tkn.lastChar + } else { + ch = uint16(decodedChar) + } + + } else if ch == delim && tkn.lastChar != delim { + // Correctly terminated string, which is not a double delim. + break + } + + buffer.WriteByte(byte(ch)) + tkn.next() + } + + return typ, buffer.Bytes() +} + +func (tkn *Tokenizer) scanCommentType1(prefix string) (int, []byte) { + buffer := &bytes2.Buffer{} + buffer.WriteString(prefix) + for tkn.lastChar != eofChar { + if tkn.lastChar == '\n' { + tkn.consumeNext(buffer) + break + } + tkn.consumeNext(buffer) + } + return COMMENT, buffer.Bytes() +} + +func (tkn *Tokenizer) scanCommentType2() (int, []byte) { + buffer := &bytes2.Buffer{} + buffer.WriteString("/*") + for { + if tkn.lastChar == '*' { + tkn.consumeNext(buffer) + if tkn.lastChar == '/' { + tkn.consumeNext(buffer) + break + } + continue + } + if tkn.lastChar == eofChar { + return LEX_ERROR, buffer.Bytes() + } + tkn.consumeNext(buffer) + } + return COMMENT, buffer.Bytes() +} + +func (tkn *Tokenizer) scanMySQLSpecificComment() (int, []byte) { + buffer := &bytes2.Buffer{} + buffer.WriteString("/*!") + tkn.next() + for { + if tkn.lastChar == '*' { + tkn.consumeNext(buffer) + if tkn.lastChar == '/' { + tkn.consumeNext(buffer) + break + } + continue + } + if tkn.lastChar == eofChar { + return LEX_ERROR, buffer.Bytes() + } + tkn.consumeNext(buffer) + } + _, sql := ExtractMysqlComment(buffer.String()) + tkn.specialComment = NewStringTokenizer(sql) + return tkn.Scan() +} + +func (tkn *Tokenizer) consumeNext(buffer *bytes2.Buffer) { + if tkn.lastChar == eofChar { + // This should never happen. + panic("unexpected EOF") + } + buffer.WriteByte(byte(tkn.lastChar)) + tkn.next() +} + +func (tkn *Tokenizer) next() { + if tkn.bufPos >= tkn.bufSize && tkn.InStream != nil { + // Try and refill the buffer + var err error + tkn.bufPos = 0 + if tkn.bufSize, err = tkn.InStream.Read(tkn.buf); err != io.EOF && err != nil { + tkn.LastError = err + } + } + + if tkn.bufPos >= tkn.bufSize { + if tkn.lastChar != eofChar { + tkn.Position++ + tkn.lastChar = eofChar + } + } else { + tkn.Position++ + tkn.lastChar = uint16(tkn.buf[tkn.bufPos]) + tkn.bufPos++ + } +} + +// reset clears any internal state. 
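// Note that it leaves the input buffer, lastChar and Position untouched,
// so a reused Tokenizer resumes scanning where the previous statement ended.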
+func (tkn *Tokenizer) reset() { + tkn.ParseTree = nil + tkn.partialDDL = nil + tkn.specialComment = nil + tkn.posVarIndex = 0 + tkn.nesting = 0 + tkn.ForceEOF = false +} + +func isLetter(ch uint16) bool { + return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch == '@' +} + +func isCarat(ch uint16) bool { + return ch == '.' || ch == '\'' || ch == '"' || ch == '`' +} + +func digitVal(ch uint16) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch) - '0' + case 'a' <= ch && ch <= 'f': + return int(ch) - 'a' + 10 + case 'A' <= ch && ch <= 'F': + return int(ch) - 'A' + 10 + } + return 16 // larger than any legal digit val +} + +func isDigit(ch uint16) bool { + return '0' <= ch && ch <= '9' +} diff --git a/vendor/github.com/xwb1989/sqlparser/tracked_buffer.go b/vendor/github.com/xwb1989/sqlparser/tracked_buffer.go new file mode 100644 index 000000000..ec421a5fb --- /dev/null +++ b/vendor/github.com/xwb1989/sqlparser/tracked_buffer.go @@ -0,0 +1,140 @@ +/* +Copyright 2017 Google Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sqlparser + +import ( + "bytes" + "fmt" +) + +// NodeFormatter defines the signature of a custom node formatter +// function that can be given to TrackedBuffer for code generation. +type NodeFormatter func(buf *TrackedBuffer, node SQLNode) + +// TrackedBuffer is used to rebuild a query from the ast. +// bindLocations keeps track of locations in the buffer that +// use bind variables for efficient future substitutions. +// nodeFormatter is the formatting function the buffer will +// use to format a node. By default(nil), it's FormatNode. +// But you can supply a different formatting function if you +// want to generate a query that's different from the default. +type TrackedBuffer struct { + *bytes.Buffer + bindLocations []bindLocation + nodeFormatter NodeFormatter +} + +// NewTrackedBuffer creates a new TrackedBuffer. +func NewTrackedBuffer(nodeFormatter NodeFormatter) *TrackedBuffer { + return &TrackedBuffer{ + Buffer: new(bytes.Buffer), + nodeFormatter: nodeFormatter, + } +} + +// WriteNode function, initiates the writing of a single SQLNode tree by passing +// through to Myprintf with a default format string +func (buf *TrackedBuffer) WriteNode(node SQLNode) *TrackedBuffer { + buf.Myprintf("%v", node) + return buf +} + +// Myprintf mimics fmt.Fprintf(buf, ...), but limited to Node(%v), +// Node.Value(%s) and string(%s). It also allows a %a for a value argument, in +// which case it adds tracking info for future substitutions. +// +// The name must be something other than the usual Printf() to avoid "go vet" +// warnings due to our custom format specifiers. 
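// A sketch of typical use, assuming sel is a *Select obtained from Parse:
//
//	buf := NewTrackedBuffer(nil)
//	buf.Myprintf("select %v from %v", sel.SelectExprs, sel.From)
//	sql := buf.String()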
+func (buf *TrackedBuffer) Myprintf(format string, values ...interface{}) { + end := len(format) + fieldnum := 0 + for i := 0; i < end; { + lasti := i + for i < end && format[i] != '%' { + i++ + } + if i > lasti { + buf.WriteString(format[lasti:i]) + } + if i >= end { + break + } + i++ // '%' + switch format[i] { + case 'c': + switch v := values[fieldnum].(type) { + case byte: + buf.WriteByte(v) + case rune: + buf.WriteRune(v) + default: + panic(fmt.Sprintf("unexpected TrackedBuffer type %T", v)) + } + case 's': + switch v := values[fieldnum].(type) { + case []byte: + buf.Write(v) + case string: + buf.WriteString(v) + default: + panic(fmt.Sprintf("unexpected TrackedBuffer type %T", v)) + } + case 'v': + node := values[fieldnum].(SQLNode) + if buf.nodeFormatter == nil { + node.Format(buf) + } else { + buf.nodeFormatter(buf, node) + } + case 'a': + buf.WriteArg(values[fieldnum].(string)) + default: + panic("unexpected") + } + fieldnum++ + i++ + } +} + +// WriteArg writes a value argument into the buffer along with +// tracking information for future substitutions. arg must contain +// the ":" or "::" prefix. +func (buf *TrackedBuffer) WriteArg(arg string) { + buf.bindLocations = append(buf.bindLocations, bindLocation{ + offset: buf.Len(), + length: len(arg), + }) + buf.WriteString(arg) +} + +// ParsedQuery returns a ParsedQuery that contains bind +// locations for easy substitution. +func (buf *TrackedBuffer) ParsedQuery() *ParsedQuery { + return &ParsedQuery{Query: buf.String(), bindLocations: buf.bindLocations} +} + +// HasBindVars returns true if the parsed query uses bind vars. +func (buf *TrackedBuffer) HasBindVars() bool { + return len(buf.bindLocations) != 0 +} + +// BuildParsedQuery builds a ParsedQuery from the input. +func BuildParsedQuery(in string, vars ...interface{}) *ParsedQuery { + buf := NewTrackedBuffer(nil) + buf.Myprintf(in, vars...) 
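 // Any %a specifiers in the input have been recorded as bind locations by
 // WriteArg above, so the returned ParsedQuery supports later substitution.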
+ return buf.ParsedQuery() +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 5530affcd..fc5731f42 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -460,6 +460,12 @@ "revision": "7e6a47b300b10be9449610a6ff4fbae17d6e95b6", "revisionTime": "2018-01-16T16:19:11Z" }, + { + "checksumSHA1": "mA7isU/nIAT5ytwIzK65H0vlVqI=", + "path": "github.com/klauspost/compress/flate", + "revision": "5fb1f31b0a61e9858f12f39266e059848a5f1cea", + "revisionTime": "2018-04-02T19:26:10Z" + }, { "path": "github.com/klauspost/cpuid", "revision": "349c675778172472f5e8f3a3e0fe187e302e5a10", @@ -471,6 +477,12 @@ "revision": "cb6bfca970f6908083f26f39a79009d608efd5cd", "revisionTime": "2016-10-16T15:41:25Z" }, + { + "checksumSHA1": "o/9oGuVccfxPYuRdzvW0lv7Zbzg=", + "path": "github.com/klauspost/pgzip", + "revision": "90b2c57fba35a1dd05cb40f9200722763808d99b", + "revisionTime": "2018-06-06T15:09:39Z" + }, { "checksumSHA1": "ehsrWipiGIWqa4To8TmelIx06vI=", "path": "github.com/klauspost/reedsolomon", @@ -790,6 +802,42 @@ "revision": "173748da739a410c5b0b813b956f89ff94730b4c", "revisionTime": "2016-08-30T17:39:30Z" }, + { + "checksumSHA1": "MWqyOvDMkW+XYe2RJ5mplvut+aE=", + "path": "github.com/ugorji/go/codec", + "revision": "ded73eae5db7e7a0ef6f55aace87a2873c5d2b74", + "revisionTime": "2017-01-07T13:32:03Z" + }, + { + "checksumSHA1": "6ksZHYhLc3yOzTbcWKb3bDENhD4=", + "path": "github.com/xwb1989/sqlparser", + "revision": "120387863bf27d04bc07db8015110a6e96d0146c", + "revisionTime": "2018-06-06T15:21:19Z" + }, + { + "checksumSHA1": "L/Q8Ylbo+wnj5whDFfMxxwyxmdo=", + "path": "github.com/xwb1989/sqlparser/dependency/bytes2", + "revision": "120387863bf27d04bc07db8015110a6e96d0146c", + "revisionTime": "2018-06-06T15:21:19Z" + }, + { + "checksumSHA1": "f9K0yQdwD0Z2yc3bmDw2uqXt4hU=", + "path": "github.com/xwb1989/sqlparser/dependency/hack", + "revision": "120387863bf27d04bc07db8015110a6e96d0146c", + "revisionTime": "2018-06-06T15:21:19Z" + }, + { + "checksumSHA1": "xpu1JU/VZ7gGNbU5Ol9Nm1oS4tY=", + "path": "github.com/xwb1989/sqlparser/dependency/querypb", + "revision": "120387863bf27d04bc07db8015110a6e96d0146c", + "revisionTime": "2018-06-06T15:21:19Z" + }, + { + "checksumSHA1": "KbNIySCQgMG81TRMJp1IDRfSgv8=", + "path": "github.com/xwb1989/sqlparser/dependency/sqltypes", + "revision": "120387863bf27d04bc07db8015110a6e96d0146c", + "revisionTime": "2018-06-06T15:21:19Z" + }, { "checksumSHA1": "6NS7FWJl1FobB+Xfe4SzBGD+75g=", "path": "go.uber.org/atomic",