mirror of
https://github.com/dart-lang/sdk
synced 2024-09-15 22:31:50 +00:00
Make service protocol respect isolate lifecycle
Status: - Currently the getIsolate API tries to guess the status of the isolate when it is in between the IsolateRunnable and PauseStart, by reporting it as already in the PauseStart status. This can potentially make flaky all the tests that use PauseStart to synchronize with the actual status of the Isolate. - In the previous situation the timestamp of the event is guessed too, potentially reporting a wrong value. - Even with the previous fix there is still a race condition that can lead to a getIsolate response with a PauseStart status being sent before the actual PauseStart event. Changes: - Fall back to a None pause event if the isolate is in between IsolateRunnable and PauseStart, avoiding double posting. - Send the PauseStart event before the actual status change, partially avoiding the race conditions. - Set the pause timestamp before the actual status change, fully avoiding the race condition. Closes https://github.com/dart-lang/sdk/issues/28624 R=asiva@google.com, rmacnak@google.com Review-Url: https://codereview.chromium.org/3006883002 .
This commit is contained in:
parent
3124001cc7
commit
5534568ff1
|
@ -38,81 +38,86 @@ var tests = [
|
|||
var completer = new Completer();
|
||||
var sub;
|
||||
final Isolate firstIsolate = isolate;
|
||||
print("First isolate is: ${firstIsolate.id}");
|
||||
Isolate secondIsolate;
|
||||
sub = await isolate.vm.listenEventStream(VM.kDebugStream, (ServiceEvent c) {
|
||||
print("Event ${c.kind} on ${c.isolate.id}");
|
||||
switch (step) {
|
||||
case 0:
|
||||
expect(c.kind, equals("Resume"),
|
||||
expect(c.kind, equals(ServiceEvent.kResume),
|
||||
reason: "First isolate should resume");
|
||||
expect(c.isolate.id, equals(firstIsolate.id),
|
||||
reason: "First isolate should resume");
|
||||
break;
|
||||
case 1:
|
||||
expect(c.kind, equals("PauseStart"),
|
||||
expect(c.kind, equals(ServiceEvent.kPauseStart),
|
||||
reason: "Second isolate should pause on start");
|
||||
expect(c.isolate.id, equals(isolate.vm.isolates[1].id),
|
||||
reason: "Second isolate should pause on start");
|
||||
secondIsolate = c.isolate;
|
||||
print("Second isolate is: ${secondIsolate.id}");
|
||||
print("Resuming second isolate");
|
||||
secondIsolate.resume();
|
||||
break;
|
||||
case 2:
|
||||
expect(c.kind, equals("Resume"),
|
||||
expect(c.kind, equals(ServiceEvent.kResume),
|
||||
reason: "Second isolate should resume");
|
||||
expect(c.isolate.id, equals(secondIsolate.id),
|
||||
reason: "Second isolate should resume");
|
||||
break;
|
||||
case 3:
|
||||
expect(c.kind, equals("PauseBreakpoint"),
|
||||
expect(c.kind, equals(ServiceEvent.kPauseBreakpoint),
|
||||
reason: "First isolate should stop at debugger()");
|
||||
expect(c.isolate.id, equals(firstIsolate.id),
|
||||
reason: "First isolate should stop at debugger()");
|
||||
print("Resuming first isolate");
|
||||
firstIsolate.resume();
|
||||
break;
|
||||
case 4:
|
||||
expect(c.kind, equals("Resume"),
|
||||
expect(c.kind, equals(ServiceEvent.kResume),
|
||||
reason: "First isolate should resume (1)");
|
||||
expect(c.isolate.id, equals(firstIsolate.id),
|
||||
reason: "First isolate should resume (1)");
|
||||
break;
|
||||
case 5:
|
||||
expect(c.kind, equals("PauseBreakpoint"),
|
||||
expect(c.kind, equals(ServiceEvent.kPauseBreakpoint),
|
||||
reason: "First & Second isolate should stop at debugger()");
|
||||
break;
|
||||
case 6:
|
||||
expect(c.kind, equals("PauseBreakpoint"),
|
||||
expect(c.kind, equals(ServiceEvent.kPauseBreakpoint),
|
||||
reason: "First & Second isolate should stop at debugger()");
|
||||
print("Resuming second isolate");
|
||||
secondIsolate.resume();
|
||||
break;
|
||||
case 7:
|
||||
expect(c.kind, equals("Resume"),
|
||||
expect(c.kind, equals(ServiceEvent.kResume),
|
||||
reason: "Second isolate should resume before the exception");
|
||||
expect(c.isolate.id, equals(secondIsolate.id),
|
||||
reason: "Second isolate should resume before the exception");
|
||||
break;
|
||||
case 8:
|
||||
expect(c.kind, equals("PauseExit"),
|
||||
expect(c.kind, equals(ServiceEvent.kPauseExit),
|
||||
reason: "Second isolate should exit at the exception");
|
||||
expect(c.isolate.id, equals(secondIsolate.id),
|
||||
reason: "Second isolate should exit at the exception");
|
||||
print("Resuming first isolate");
|
||||
firstIsolate.resume();
|
||||
break;
|
||||
case 9:
|
||||
expect(c.kind, equals("Resume"),
|
||||
expect(c.kind, equals(ServiceEvent.kResume),
|
||||
reason: "First isolate should resume after the exception");
|
||||
expect(c.isolate.id, equals(firstIsolate.id),
|
||||
reason: "First isolate should resume after the exception");
|
||||
break;
|
||||
case 10:
|
||||
expect(c.kind, equals("PauseBreakpoint"),
|
||||
reason: "First isolate "
|
||||
"should stop at debugger() after exception.\n"
|
||||
"Probably the second resumed even though it was not expect "
|
||||
"to do it.");
|
||||
expect(c.isolate.id, equals(firstIsolate.id),
|
||||
reason: "First "
|
||||
"isolate should stop at debugger() after exception.\n"
|
||||
"Probably the second resumed even though it was not expect "
|
||||
"to do it.");
|
||||
expect(c.kind, equals(ServiceEvent.kPauseBreakpoint),
|
||||
reason: "First isolate should stop at debugger() after "
|
||||
"exception.");
|
||||
completer.complete();
|
||||
break;
|
||||
default:
|
||||
|
@ -122,6 +127,7 @@ var tests = [
|
|||
}
|
||||
step++;
|
||||
});
|
||||
print("Resuming first isolate");
|
||||
firstIsolate.resume();
|
||||
await completer.future;
|
||||
// We wait 1 second to account for delays in the service protocol.
|
||||
|
|
|
@ -6,12 +6,9 @@
|
|||
get_allocation_samples_test: Pass, RuntimeError # Inconsistent stack trace
|
||||
reload_sources_test: Pass, Slow # Reload is slow on the bots
|
||||
get_retained_size_rpc_test: Pass, RuntimeError # Issue 28193
|
||||
pause_on_start_and_exit_test: Pass, RuntimeError # Issue 28624
|
||||
pause_on_start_then_step_test: Pass, RuntimeError # Issue 28624
|
||||
isolate_lifecycle_test: Pass, RuntimeError # Issue 24174
|
||||
field_script_test: Pass, RuntimeError
|
||||
get_isolate_rpc_test: Pass, RuntimeError # Issue 29324
|
||||
issue_30555_test: Pass, RuntimeError
|
||||
|
||||
[ $system == windows && $mode == debug && $checked ]
|
||||
async_scope_test: Pass, Slow
|
||||
|
|
|
@ -1995,11 +1995,22 @@ void Isolate::PrintJSON(JSONStream* stream, bool ref) {
|
|||
ASSERT((debugger() == NULL) || (debugger()->PauseEvent() == NULL));
|
||||
ServiceEvent pause_event(this, ServiceEvent::kNone);
|
||||
jsobj.AddProperty("pauseEvent", &pause_event);
|
||||
} else if (message_handler()->is_paused_on_start() ||
|
||||
message_handler()->should_pause_on_start()) {
|
||||
ASSERT((debugger() == NULL) || (debugger()->PauseEvent() == NULL));
|
||||
ServiceEvent pause_event(this, ServiceEvent::kPauseStart);
|
||||
jsobj.AddProperty("pauseEvent", &pause_event);
|
||||
} else if (message_handler()->should_pause_on_start()) {
|
||||
if (message_handler()->is_paused_on_start()) {
|
||||
ASSERT((debugger() == NULL) || (debugger()->PauseEvent() == NULL));
|
||||
ServiceEvent pause_event(this, ServiceEvent::kPauseStart);
|
||||
jsobj.AddProperty("pauseEvent", &pause_event);
|
||||
} else {
|
||||
// Isolate is runnable but not paused on start.
|
||||
// Some service clients get confused if they see:
|
||||
// NotRunnable -> Runnable -> PausedAtStart
|
||||
// Treat Runnable+ShouldPauseOnStart as NotRunnable so they see:
|
||||
// NonRunnable -> PausedAtStart
|
||||
// The should_pause_on_start flag is set to false after resume.
|
||||
ASSERT((debugger() == NULL) || (debugger()->PauseEvent() == NULL));
|
||||
ServiceEvent pause_event(this, ServiceEvent::kNone);
|
||||
jsobj.AddProperty("pauseEvent", &pause_event);
|
||||
}
|
||||
} else if (message_handler()->is_paused_on_exit() &&
|
||||
((debugger() == NULL) || (debugger()->PauseEvent() == NULL))) {
|
||||
ServiceEvent pause_event(this, ServiceEvent::kPauseExit);
|
||||
|
|
|
@ -517,14 +517,8 @@ void MessageHandler::PausedOnStart(bool paused) {
|
|||
void MessageHandler::PausedOnStartLocked(MonitorLocker* ml, bool paused) {
|
||||
if (paused) {
|
||||
ASSERT(!is_paused_on_start_);
|
||||
is_paused_on_start_ = true;
|
||||
ASSERT(paused_timestamp_ == -1);
|
||||
paused_timestamp_ = OS::GetCurrentTimeMillis();
|
||||
} else {
|
||||
ASSERT(is_paused_on_start_);
|
||||
is_paused_on_start_ = false;
|
||||
paused_timestamp_ = -1;
|
||||
}
|
||||
if (is_paused_on_start_) {
|
||||
// Temporarily release the monitor when calling out to
|
||||
// NotifyPauseOnStart. This avoids a dead lock that can occur
|
||||
// when this message handler tries to post a message while a
|
||||
|
@ -532,12 +526,17 @@ void MessageHandler::PausedOnStartLocked(MonitorLocker* ml, bool paused) {
|
|||
ml->Exit();
|
||||
NotifyPauseOnStart();
|
||||
ml->Enter();
|
||||
is_paused_on_start_ = true;
|
||||
} else {
|
||||
ASSERT(is_paused_on_start_);
|
||||
ASSERT(paused_timestamp_ != -1);
|
||||
paused_timestamp_ = -1;
|
||||
// Resumed. Clear the resume request of the owning isolate.
|
||||
Isolate* owning_isolate = isolate();
|
||||
if (owning_isolate != NULL) {
|
||||
owning_isolate->GetAndClearResumeRequest();
|
||||
}
|
||||
is_paused_on_start_ = false;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -549,14 +548,8 @@ void MessageHandler::PausedOnExit(bool paused) {
|
|||
void MessageHandler::PausedOnExitLocked(MonitorLocker* ml, bool paused) {
|
||||
if (paused) {
|
||||
ASSERT(!is_paused_on_exit_);
|
||||
is_paused_on_exit_ = true;
|
||||
ASSERT(paused_timestamp_ == -1);
|
||||
paused_timestamp_ = OS::GetCurrentTimeMillis();
|
||||
} else {
|
||||
ASSERT(is_paused_on_exit_);
|
||||
is_paused_on_exit_ = false;
|
||||
paused_timestamp_ = -1;
|
||||
}
|
||||
if (is_paused_on_exit_) {
|
||||
// Temporarily release the monitor when calling out to
|
||||
// NotifyPauseOnExit. This avoids a dead lock that can
|
||||
// occur when this message handler tries to post a message
|
||||
|
@ -564,12 +557,17 @@ void MessageHandler::PausedOnExitLocked(MonitorLocker* ml, bool paused) {
|
|||
ml->Exit();
|
||||
NotifyPauseOnExit();
|
||||
ml->Enter();
|
||||
is_paused_on_exit_ = true;
|
||||
} else {
|
||||
ASSERT(is_paused_on_exit_);
|
||||
ASSERT(paused_timestamp_ != -1);
|
||||
paused_timestamp_ = -1;
|
||||
// Resumed. Clear the resume request of the owning isolate.
|
||||
Isolate* owning_isolate = isolate();
|
||||
if (owning_isolate != NULL) {
|
||||
owning_isolate->GetAndClearResumeRequest();
|
||||
}
|
||||
is_paused_on_exit_ = false;
|
||||
}
|
||||
}
|
||||
#endif // !defined(PRODUCT)
|
||||
|
|
|
@ -36,17 +36,13 @@ ServiceEvent::ServiceEvent(Isolate* isolate, EventKind event_kind)
|
|||
ASSERT(isolate == NULL ||
|
||||
!ServiceIsolate::IsServiceIsolateDescendant(isolate_));
|
||||
|
||||
if ((event_kind == ServiceEvent::kPauseStart) &&
|
||||
!isolate->message_handler()->is_paused_on_start()) {
|
||||
// We will pause on start but the message handler lacks a valid
|
||||
// paused timestamp because we haven't paused yet. Use the current time.
|
||||
timestamp_ = OS::GetCurrentTimeMillis();
|
||||
} else if ((event_kind == ServiceEvent::kPauseStart) ||
|
||||
(event_kind == ServiceEvent::kPauseExit)) {
|
||||
if ((event_kind == ServiceEvent::kPauseStart) ||
|
||||
(event_kind == ServiceEvent::kPauseExit)) {
|
||||
timestamp_ = isolate->message_handler()->paused_timestamp();
|
||||
} else if (event_kind == ServiceEvent::kResume) {
|
||||
timestamp_ = isolate->last_resume_timestamp();
|
||||
}
|
||||
ASSERT(timestamp_ > -1);
|
||||
}
|
||||
|
||||
void ServiceEvent::UpdateTimestamp() {
|
||||
|
|
Loading…
Reference in a new issue