Improve conditional import expression recovery

This improves recovery when parsing conditional import expressions
by repositioning the synthetic closing ')' that was inserted by the
scanner at a suboptimal location.

This approach relies on the beforeSynthetic field being set correctly.
While the scanner still sets the 'next' and 'beforeSynthetic' fields
directly for efficiency, most other functions that update an existing
token stream have been revised to call 'setNext()' rather than setting
the 'next' field directly. This ensures that the 'beforeSynthetic' field
will be correctly updated.

Change-Id: Id631fd600c64d1feaf00593acb74a7070e354f07
Reviewed-on: https://dart-review.googlesource.com/52120
Reviewed-by: Brian Wilkerson <brianwilkerson@google.com>
Commit-Queue: Dan Rubel <danrubel@google.com>
This commit is contained in:
danrubel 2018-04-20 13:58:50 +00:00 committed by commit-bot@chromium.org
parent 1f7fa3e1ad
commit 10b441b16b
12 changed files with 111 additions and 74 deletions

View file

@@ -22,7 +22,12 @@ import 'package:front_end/src/fasta/parser.dart'
import 'package:front_end/src/fasta/scanner.dart' hide StringToken;
import 'package:front_end/src/scanner/errors.dart' show translateErrorToken;
import 'package:front_end/src/scanner/token.dart'
show StringToken, SyntheticBeginToken, SyntheticStringToken, SyntheticToken;
show
BeginToken,
StringToken,
SyntheticBeginToken,
SyntheticStringToken,
SyntheticToken;
import 'package:front_end/src/fasta/problems.dart' show unhandled;
import 'package:front_end/src/fasta/messages.dart'
@@ -2250,26 +2255,25 @@ class AstBuilder extends ScopeListener {
}
if (parameters == null && (getOrSet == null || optional('set', getOrSet))) {
Token previous = typeParameters?.endToken;
if (previous == null) {
Token token = typeParameters?.endToken;
if (token == null) {
if (name is AstNode) {
previous = name.endToken;
token = name.endToken;
} else if (name is _OperatorName) {
previous = name.name.endToken;
token = name.name.endToken;
} else {
throw new UnimplementedError();
}
}
Token leftParen =
new SyntheticBeginToken(TokenType.OPEN_PAREN, previous.end);
Token next = token.next;
int offset = next.charOffset;
BeginToken leftParen =
new SyntheticBeginToken(TokenType.OPEN_PAREN, offset);
token.setNext(leftParen);
Token rightParen =
new SyntheticToken(TokenType.CLOSE_PAREN, leftParen.offset);
rightParen.next = previous.next;
leftParen.next = rightParen;
previous.next = leftParen;
leftParen.previous = previous;
rightParen.previous = leftParen;
rightParen.next.previous = rightParen;
leftParen.setNext(new SyntheticToken(TokenType.CLOSE_PAREN, offset));
leftParen.endGroup = rightParen;
rightParen.setNext(next);
parameters = ast.formalParameterList(
leftParen, <FormalParameter>[], null, null, rightParen);
}

View file

@@ -24,19 +24,16 @@ class ToAnalyzerTokenStreamConverter {
/// to be an analyzer token stream by removing error tokens and reporting
/// those errors to the associated error listener.
Token convertTokens(Token firstToken) {
Token previous = new Token.eof(-1);
Token token = firstToken;
token.previous = previous;
previous.next = token;
while (!token.isEof) {
if (token.type.kind == BAD_INPUT_TOKEN) {
translateErrorToken(token, reportError);
previous.next = token.next;
token.next.previous = previous;
Token token = new Token.eof(-1)..setNext(firstToken);
Token next = firstToken;
while (!next.isEof) {
if (next.type.kind == BAD_INPUT_TOKEN) {
translateErrorToken(next, reportError);
token.setNext(next.next);
} else {
previous = token;
token = next;
}
token = token.next;
next = token.next;
}
return firstToken;
}

View file

@@ -50,7 +50,7 @@ class ImportDirectivesTest extends PartialCodeTest {
ParserErrorCode.EXPECTED_TOKEN
],
"import 'a.dart' if (_s_) '';",
failing: allExceptEof),
failing: ['functionNonVoid', 'getter', 'setter']),
new TestDescriptor(
'ifId',
"import 'a.dart' if (b",
@@ -59,8 +59,7 @@ class ImportDirectivesTest extends PartialCodeTest {
ParserErrorCode.EXPECTED_TOKEN,
ParserErrorCode.EXPECTED_STRING_LITERAL
],
"import 'a.dart' if (b) '';",
failing: allExceptEof),
"import 'a.dart' if (b) '';"),
new TestDescriptor(
'ifEquals',
"import 'a.dart' if (b ==",
@@ -70,8 +69,7 @@ class ImportDirectivesTest extends PartialCodeTest {
ScannerErrorCode.EXPECTED_TOKEN,
ParserErrorCode.EXPECTED_STRING_LITERAL
],
"import 'a.dart' if (b == '') '';",
failing: allExceptEof),
"import 'a.dart' if (b == '') '';"),
new TestDescriptor(
'ifCondition',
"import 'a.dart' if (b)",

View file

@@ -26,6 +26,7 @@ abstract class AbstractRecoveryTest extends FastaParserTestCase {
try {
validUnit =
parseCompilationUnit(validCode, codes: expectedErrorsInValidCode);
validateTokenStream(validUnit.beginToken);
} catch (e) {
// print('');
// print(' Errors in valid code.');
@@ -55,8 +56,14 @@ abstract class AbstractRecoveryTest extends FastaParserTestCase {
void validateTokenStream(Token token) {
while (!token.isEof) {
expect(token.end, lessThanOrEqualTo(token.next.offset));
token = token.next;
Token next = token.next;
expect(token.end, lessThanOrEqualTo(next.offset));
if (next.isSynthetic) {
if (const [')', ']', '}'].contains(next.lexeme)) {
expect(next.beforeSynthetic, token);
}
}
token = next;
}
}
}

View file

@@ -852,7 +852,7 @@ class ElementListener extends Listener {
Token synthesizeIdentifier(Token token) {
Token synthesizedToken =
new StringToken.fromString(TokenType.IDENTIFIER, '?', token.charOffset);
synthesizedToken.next = token.next;
synthesizedToken.setNext(token.next);
return synthesizedToken;
}

View file

@@ -63,12 +63,21 @@ class DottedNameIdentifierContext extends IdentifierContext {
Token ensureIdentifier(Token token, Parser parser) {
Token identifier = token.next;
assert(identifier.kind != IDENTIFIER_TOKEN);
const followingValues = const ['.', '==', ')'];
if (identifier.isIdentifier) {
return identifier;
// DottedNameIdentifierContext are only used in conditional import
// expressions. Although some top level keywords such as `import` can be
// used as identifiers, they are more likely the start of the next
// directive or declaration.
if (!identifier.isTopLevelKeyword ||
isOneOfOrEof(identifier.next, followingValues)) {
return identifier;
}
}
if (looksLikeStartOfNextDeclaration(identifier) ||
isOneOfOrEof(identifier, const ['.', '==', ')'])) {
isOneOfOrEof(identifier, followingValues)) {
identifier = parser.insertSyntheticIdentifier(token, this,
message: fasta.templateExpectedIdentifier.withArguments(identifier));
} else {

View file

@@ -779,17 +779,14 @@ class Parser {
next = token.next;
}
if (next != leftParen.endGroup) {
reportRecoverableErrorWithToken(next, fasta.templateUnexpectedToken);
Token endGroup = leftParen.endGroup;
if (endGroup.isSynthetic) {
// The scanner did not place the synthetic ')' correctly, so move it.
// TODO(danrubel): Its costly to find the token before the endGroup.
// Consider beforeSynthetic field that points to the previous token
// only for synthetic tokens such as ')', '}', ']' so that the parser
// can easily move these to the correct location.
next = rewriter.moveSynthetic(token, endGroup);
} else {
reportRecoverableErrorWithToken(next, fasta.templateUnexpectedToken);
next = endGroup;
}
next = endGroup;
}
token = next;
assert(optional(')', token));
@@ -3165,12 +3162,12 @@ class Parser {
assert(value != '>');
Token replacement = new Token(TokenType.GT, next.charOffset);
if (identical(value, '>>')) {
replacement.next = new Token(TokenType.GT, next.charOffset + 1);
replacement.setNext(new Token(TokenType.GT, next.charOffset + 1));
} else if (identical(value, '>=')) {
replacement.next = new Token(TokenType.EQ, next.charOffset + 1);
replacement.setNext(new Token(TokenType.EQ, next.charOffset + 1));
} else if (identical(value, '>>=')) {
replacement.next = new Token(TokenType.GT, next.charOffset + 1);
replacement.next.next = new Token(TokenType.EQ, next.charOffset + 2);
replacement.setNext(new Token(TokenType.GT, next.charOffset + 1));
replacement.next.setNext(new Token(TokenType.EQ, next.charOffset + 2));
} else {
// Recovery
rewriteAndRecover(token, fasta.templateExpectedToken.withArguments('>'),
@@ -6513,7 +6510,7 @@ class Parser {
/// Create a short token chain from the [beginToken] and [endToken] and return
/// the [beginToken].
Token link(BeginToken beginToken, Token endToken) {
beginToken.next = endToken;
beginToken.setNext(endToken);
beginToken.endGroup = endToken;
return beginToken;
}

View file

@@ -34,30 +34,38 @@ class TokenStreamRewriter {
/// after the [previousToken]. Return the [previousToken].
Token insertTokenAfter(Token previousToken, Token insertedToken) {
Token afterToken = previousToken.next;
previousToken.next = insertedToken;
insertedToken.previous = previousToken;
previousToken.setNext(insertedToken);
Token lastReplacement = _lastTokenInChain(insertedToken);
lastReplacement.next = afterToken;
afterToken.previous = lastReplacement;
lastReplacement.setNext(afterToken);
return previousToken;
}
/// Move [endGroup] (a synthetic `)`, `]`, `}`, or `>` token) after [token]
/// in the token stream and return [endGroup].
Token moveSynthetic(Token token, Token endGroup) {
assert(endGroup.beforeSynthetic != null);
Token next = token.next;
endGroup.beforeSynthetic.setNext(endGroup.next);
token.setNext(endGroup);
endGroup.setNext(next);
endGroup.offset = next.offset;
return endGroup;
}
/// Replace the single token immediately following the [previousToken] with
/// the chain of tokens starting at the [replacementToken]. Return the
/// [replacementToken].
Token replaceTokenFollowing(Token previousToken, Token replacementToken) {
Token replacedToken = previousToken.next;
previousToken.next = replacementToken;
replacementToken.previous = previousToken;
previousToken.setNext(replacementToken);
(replacementToken as SimpleToken).precedingComments =
replacedToken.precedingComments;
Token lastReplacement = _lastTokenInChain(replacementToken);
lastReplacement.next = replacedToken.next;
replacedToken.next.previous = lastReplacement;
_lastTokenInChain(replacementToken).setNext(replacedToken.next);
return replacementToken;
}

View file

@@ -124,7 +124,7 @@ Token defaultRecoveryStrategy(
}
String value = new String.fromCharCodes(codeUnits);
return synthesizeToken(charOffset, value, TokenType.IDENTIFIER)
..next = next;
..setNext(next);
}
recoverExponent() {
@@ -137,7 +137,7 @@ Token defaultRecoveryStrategy(
recoverHexDigit() {
return synthesizeToken(errorTail.charOffset, "0", TokenType.INT)
..next = errorTail.next;
..setNext(errorTail.next);
}
recoverStringInterpolation() {
@@ -164,8 +164,7 @@ Token defaultRecoveryStrategy(
if (errorTail == null) {
error = next;
} else {
errorTail.next = next;
next.previous = errorTail;
errorTail.setNext(next);
}
errorTail = next;
next = next.next;
@@ -204,23 +203,21 @@
if (goodTail == null) {
good = current;
} else {
goodTail.next = current;
current.previous = goodTail;
goodTail.setNext(current);
}
beforeGoodTail = goodTail;
goodTail = current;
}
error.previous = new Token.eof(-1)..next = error;
new Token.eof(-1).setNext(error);
Token tail;
if (good != null) {
errorTail.next = good;
good.previous = errorTail;
errorTail.setNext(good);
tail = goodTail;
} else {
tail = errorTail;
}
if (!tail.isEof) tail.next = new Token.eof(tail.end)..previous = tail;
if (!tail.isEof) tail.setNext(new Token.eof(tail.end));
return error;
}

View file

@@ -61,12 +61,15 @@ UnlinkedUnitBuilder computeUnlinkedUnit(List<int> salt, List<int> content) {
/// Exclude all `native 'xyz';` token sequences.
void _excludeNativeClauses(Token token) {
for (; token.kind != EOF_TOKEN; token = token.next) {
if (optional('native', token) &&
token.next.kind == STRING_TOKEN &&
optional(';', token.next.next)) {
token.previous.next = token.next.next;
while (token.kind != EOF_TOKEN) {
Token next = token.next;
if (optional('native', next) &&
next.next.kind == STRING_TOKEN &&
optional(';', next.next.next)) {
next = next.next.next;
token.setNext(next);
}
token = next;
}
}

View file

@@ -3,6 +3,8 @@
// BSD-style license that can be found in the LICENSE file.
import 'package:front_end/src/fasta/parser/token_stream_rewriter.dart';
import 'package:front_end/src/fasta/scanner.dart'
show ScannerResult, scanString;
import 'package:front_end/src/fasta/scanner/token.dart';
import 'package:front_end/src/scanner/token.dart' show Token;
import 'package:test/test.dart';
@@ -77,6 +79,23 @@ abstract class TokenStreamRewriterTest {
expect(b.next, same(c));
}
void test_moveSynthetic() {
ScannerResult scanResult = scanString('Foo(bar; baz=0;');
expect(scanResult.hasErrors, isTrue);
Token open = scanResult.tokens.next.next;
expect(open.lexeme, '(');
Token close = open.endGroup;
expect(close.isSynthetic, isTrue);
expect(close.next.isEof, isTrue);
var rewriter = new TokenStreamRewriter();
Token result = rewriter.moveSynthetic(open.next, close);
expect(result, close);
expect(open.endGroup, close);
expect(open.next.next, close);
expect(close.next.isEof, isFalse);
}
void test_replaceTokenFollowing_multiple() {
var a = _makeToken(0, 'a');
var b = _makeToken(1, 'b');

View file

@@ -229,9 +229,7 @@ class ScannerTest_Replacement extends ScannerTestBase {
token = token.next;
}
if (!token.previous.isEof) {
var head = new analyzer.Token.eof(-1);
token.previous = head;
head.next = token;
new analyzer.Token.eof(-1).setNext(token);
}
return token;
}