2012-04-19 19:47:27 +00:00
|
|
|
// Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
|
|
|
|
// for details. All rights reserved. Use of this source code is governed by a
|
|
|
|
// BSD-style license that can be found in the LICENSE file.
|
|
|
|
|
2019-07-23 10:58:11 +00:00
|
|
|
#include <utility>
|
|
|
|
|
2012-04-19 19:47:27 +00:00
|
|
|
#include "vm/message_handler.h"
|
2014-08-12 23:19:53 +00:00
|
|
|
|
2012-04-19 19:47:27 +00:00
|
|
|
#include "vm/dart.h"
|
2019-07-23 10:58:11 +00:00
|
|
|
#include "vm/heap/safepoint.h"
|
2020-02-20 21:08:35 +00:00
|
|
|
#include "vm/isolate.h"
|
2014-08-12 23:19:53 +00:00
|
|
|
#include "vm/lockers.h"
|
2015-10-06 18:27:26 +00:00
|
|
|
#include "vm/object.h"
|
|
|
|
#include "vm/object_store.h"
|
2015-08-26 17:57:59 +00:00
|
|
|
#include "vm/os.h"
|
2014-08-12 23:19:53 +00:00
|
|
|
#include "vm/port.h"
|
2015-05-20 13:49:35 +00:00
|
|
|
#include "vm/thread_interrupter.h"
|
2012-04-19 19:47:27 +00:00
|
|
|
|
|
|
|
namespace dart {
|
|
|
|
|
2014-05-08 22:02:38 +00:00
|
|
|
DECLARE_FLAG(bool, trace_service_pause_events);
|
2012-04-19 19:47:27 +00:00
|
|
|
|
|
|
|
// Thread-pool task that pumps a MessageHandler's queues. It is a thin
// trampoline: all queue/pause/shutdown logic lives in
// MessageHandler::TaskCallback().
class MessageHandlerTask : public ThreadPool::Task {
 public:
  explicit MessageHandlerTask(MessageHandler* handler) : handler_(handler) {
    ASSERT(handler != nullptr);
  }

  virtual void Run() {
    ASSERT(handler_ != nullptr);
    // TaskCallback() may delete the handler (delete_me_), so the handler
    // must not be touched after this call returns.
    handler_->TaskCallback();
  }

 private:
  MessageHandler* handler_;  // Not owned.

  DISALLOW_COPY_AND_ASSIGN(MessageHandlerTask);
};
|
|
|
|
|
2015-10-06 18:27:26 +00:00
|
|
|
// static
|
|
|
|
const char* MessageHandler::MessageStatusString(MessageStatus status) {
|
|
|
|
switch (status) {
|
|
|
|
case kOK:
|
|
|
|
return "OK";
|
|
|
|
case kError:
|
|
|
|
return "Error";
|
|
|
|
case kShutdown:
|
|
|
|
return "Shutdown";
|
|
|
|
default:
|
|
|
|
UNREACHABLE();
|
|
|
|
return "Illegal";
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-04-19 19:47:27 +00:00
|
|
|
// Constructs an idle handler: both queues empty, no thread-pool task running,
// and no lifecycle callbacks registered yet (see Run()).
MessageHandler::MessageHandler()
    : queue_(new MessageQueue()),
      oob_queue_(new MessageQueue()),
      oob_message_handling_allowed_(true),
      paused_for_messages_(false),
      paused_(0),
#if !defined(PRODUCT)
      // Debugger/service pause bookkeeping is compiled out in PRODUCT mode.
      should_pause_on_start_(false),
      should_pause_on_exit_(false),
      is_paused_on_start_(false),
      is_paused_on_exit_(false),
      remembered_paused_on_exit_status_(kOK),
      paused_timestamp_(-1),  // -1 means "not currently paused".
#endif
      task_running_(false),
      delete_me_(false),
      pool_(nullptr),
      start_callback_(nullptr),
      end_callback_(nullptr),
      callback_data_(0) {
  ASSERT(queue_ != nullptr);
  ASSERT(oob_queue_ != nullptr);
}
|
|
|
|
|
|
|
|
MessageHandler::~MessageHandler() {
  // The handler owns both queues; drop them and null the pointers so that
  // stale use after destruction fails fast.
  delete queue_;
  delete oob_queue_;
  queue_ = nullptr;
  oob_queue_ = nullptr;
  pool_ = nullptr;  // Not owned.
}
|
|
|
|
|
|
|
|
// Display name used in tracing output; subclasses are expected to override.
const char* MessageHandler::name() const {
  return "<unnamed>";
}
|
|
|
|
|
|
|
|
#if defined(DEBUG)
|
2023-08-08 10:57:47 +00:00
|
|
|
void MessageHandler::CheckAccess() const {
|
2012-04-19 19:47:27 +00:00
|
|
|
// By default there is no checking.
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
|
|
|
// Hook invoked (outside monitor_) after a message is posted; subclasses can
// override to wake custom event loops.
void MessageHandler::MessageNotify(Message::Priority priority) {
  // By default, there is no custom message notification.
}
|
|
|
|
|
2021-08-18 18:22:53 +00:00
|
|
|
// Binds this handler to |pool| and schedules the first MessageHandlerTask.
// |start_callback| runs once on the task before message processing (for an
// isolate this runs main()); |end_callback| runs when the handler stops.
// Returns false (and rolls all registration back) if the pool refused the
// task.
bool MessageHandler::Run(ThreadPool* pool,
                         StartCallback start_callback,
                         EndCallback end_callback,
                         CallbackData data) {
  MonitorLocker ml(&monitor_);
  if (FLAG_trace_isolates) {
    OS::PrintErr(
        "[+] Starting message handler:\n"
        "\thandler: %s\n",
        name());
  }
  // Run() must only be called once, and never after RequestDeletion().
  ASSERT(pool_ == nullptr);
  ASSERT(!delete_me_);
  pool_ = pool;
  start_callback_ = start_callback;
  end_callback_ = end_callback;
  callback_data_ = data;
  // Mark the task as running *before* launching it so PostMessage() does not
  // race to start a second task.
  task_running_ = true;
  bool result = pool_->Run<MessageHandlerTask>(this);
  if (!result) {
    // Launch failed: undo all state set above so the handler stays idle.
    pool_ = nullptr;
    start_callback_ = nullptr;
    end_callback_ = nullptr;
    callback_data_ = 0;
    task_running_ = false;
  }
  return result;
}
|
|
|
|
|
2019-05-06 21:01:39 +00:00
|
|
|
// Enqueues |message| on the OOB or normal queue (by its priority), wakes a
// handler blocked in PauseAndHandleAllMessages(), and lazily (re)starts a
// pool task if none is running. The custom MessageNotify() hook is invoked
// after monitor_ is released to avoid lock-order problems.
void MessageHandler::PostMessage(std::unique_ptr<Message> message,
                                 bool before_events) {
  Message::Priority saved_priority;

  {
    MonitorLocker ml(&monitor_);
    if (FLAG_trace_isolates) {
      Isolate* source_isolate = Isolate::Current();
      if (source_isolate != nullptr) {
        OS::PrintErr(
            "[>] Posting message:\n"
            "\tlen: %" Pd "\n\tsource: (%" Pd64
            ") %s\n\tdest: %s\n"
            "\tdest_port: %" Pd64 "\n",
            message->Size(), static_cast<int64_t>(source_isolate->main_port()),
            source_isolate->name(), name(), message->dest_port());
      } else {
        // Posted from native code with no current isolate.
        OS::PrintErr(
            "[>] Posting message:\n"
            "\tlen: %" Pd
            "\n\tsource: <native code>\n"
            "\tdest: %s\n"
            "\tdest_port: %" Pd64 "\n",
            message->Size(), name(), message->dest_port());
      }
    }

    // Read the priority before std::move() consumes the message.
    saved_priority = message->priority();
    if (message->IsOOB()) {
      oob_queue_->Enqueue(std::move(message), before_events);
    } else {
      queue_->Enqueue(std::move(message), before_events);
    }
    // Wake a handler blocked waiting for messages (see
    // PauseAndHandleAllMessages()).
    if (paused_for_messages_) {
      ml.Notify();
    }

    // If the handler is pool-backed but no task is currently scheduled,
    // start one so this message gets processed.
    if (pool_ != nullptr && !task_running_) {
      ASSERT(!delete_me_);
      task_running_ = true;
      const bool launched_successfully = pool_->Run<MessageHandlerTask>(this);
      ASSERT(launched_successfully);
    }
  }

  // Invoke any custom message notification.
  MessageNotify(saved_priority);
}
|
|
|
|
|
2019-05-06 21:01:39 +00:00
|
|
|
// Removes and returns the next message to process, always preferring the OOB
// queue; the normal queue is consulted only when |min_priority| permits.
// Returns nullptr if nothing eligible is queued.
// TODO(turnidge): Add assert that monitor_ is held here.
std::unique_ptr<Message> MessageHandler::DequeueMessage(
    Message::Priority min_priority) {
  // OOB messages always take precedence.
  auto result = oob_queue_->Dequeue();
  if (result != nullptr) {
    return result;
  }
  // Fall back to the normal queue only when the caller accepts
  // priorities below kOOBPriority.
  if (min_priority < Message::kOOBPriority) {
    return queue_->Dequeue();
  }
  return result;  // nullptr: nothing eligible.
}
|
|
|
|
|
2015-10-06 18:27:26 +00:00
|
|
|
// Drops all pending OOB messages; used on shutdown so no further OOB work is
// processed (see HandleMessages()).
void MessageHandler::ClearOOBQueue() {
  oob_queue_->Clear();
}
|
|
|
|
|
|
|
|
// Drains eligible messages, calling HandleMessage() for each with monitor_
// temporarily released. |ml| must hold monitor_ on entry and holds it again
// on return. OOB messages are always processed; normal messages only when
// |allow_normal_messages| and not paused (and at most one unless
// |allow_multiple_normal_messages|). Returns the worst status seen.
MessageHandler::MessageStatus MessageHandler::HandleMessages(
    MonitorLocker* ml,
    bool allow_normal_messages,
    bool allow_multiple_normal_messages) {
  ASSERT(monitor_.IsOwnedByCurrentThread());

  // Scheduling of the mutator thread during the isolate start can cause this
  // thread to safepoint.
  // We want to avoid holding the message handler monitor during the safepoint
  // operation to avoid possible deadlocks, which can occur if other threads are
  // sending messages to this message handler.
  //
  // If isolate() returns nullptr [StartIsolateScope] does nothing.
  ml->Exit();
  StartIsolateScope start_isolate(isolate());
  ml->Enter();

  auto idle_time_handler =
      isolate() != nullptr ? isolate()->group()->idle_time_handler() : nullptr;

  MessageStatus max_status = kOK;
  Message::Priority min_priority =
      ((allow_normal_messages && !paused()) ? Message::kNormalPriority
                                            : Message::kOOBPriority);
  std::unique_ptr<Message> message = DequeueMessage(min_priority);
  while (message != nullptr) {
    intptr_t message_len = message->Size();
    if (FLAG_trace_isolates) {
      OS::PrintErr(
          "[<] Handling message:\n"
          "\tlen: %" Pd
          "\n"
          "\thandler: %s\n"
          "\tport: %" Pd64 "\n",
          message_len, name(), message->dest_port());
    }

    // Release the monitor_ temporarily while we handle the message.
    // The monitor was acquired in MessageHandler::TaskCallback().
    ml->Exit();
    // Capture fields needed after HandleMessage() consumes the message.
    Message::Priority saved_priority = message->priority();
    Dart_Port saved_dest_port = message->dest_port();
    MessageStatus status = kOK;
    {
      // Message handling is active work, so suspend idle-time accounting.
      DisableIdleTimerScope disable_idle_timer(idle_time_handler);
      status = HandleMessage(std::move(message));
    }
    if (status > max_status) {
      max_status = status;
    }
    ml->Enter();
    if (FLAG_trace_isolates) {
      OS::PrintErr(
          "[.] Message handled (%s):\n"
          "\tlen: %" Pd
          "\n"
          "\thandler: %s\n"
          "\tport: %" Pd64 "\n",
          MessageStatusString(status), message_len, name(), saved_dest_port);
    }
    // If we are shutting down, do not process any more messages.
    if (status == kShutdown) {
      ClearOOBQueue();
      break;
    }

    // Remember time since the last message. Don't consider OOB messages so
    // using Observatory doesn't trigger additional idle tasks.
    if ((FLAG_idle_timeout_micros != 0) &&
        (saved_priority == Message::kNormalPriority)) {
      if (idle_time_handler != nullptr) {
        idle_time_handler->UpdateStartIdleTime();
      }
    }

    // Some callers want to process only one normal message and then quit. At
    // the same time it is OK to process multiple OOB messages.
    if ((saved_priority == Message::kNormalPriority) &&
        !allow_multiple_normal_messages) {
      // We processed one normal message. Allow no more.
      allow_normal_messages = false;
    }

    // Reevaluate the minimum allowable priority. The paused state
    // may have changed as part of handling the message. We may also
    // have encountered an error during message processing.
    //
    // Even if we encounter an error, we still process pending OOB
    // messages so that we don't lose the message notification.
    min_priority = (((max_status == kOK) && allow_normal_messages && !paused())
                        ? Message::kNormalPriority
                        : Message::kOOBPriority);
    message = DequeueMessage(min_priority);
  }
  return max_status;
}
|
|
|
|
|
2015-10-06 18:27:26 +00:00
|
|
|
// Processes at most one normal message (plus any pending OOB messages) on the
// caller's thread. Only valid for handlers not backed by a thread pool.
MessageHandler::MessageStatus MessageHandler::HandleNextMessage() {
  // We can only call HandleNextMessage when this handler is not
  // assigned to a thread pool.
  MonitorLocker ml(&monitor_);
  ASSERT(pool_ == nullptr);
  ASSERT(!delete_me_);
#if defined(DEBUG)
  CheckAccess();
#endif
  return HandleMessages(&ml, true, false);
}
|
|
|
|
|
2018-01-18 16:38:54 +00:00
|
|
|
// Blocks (up to |timeout_millis|) waiting for messages, handling OOB messages
// as they arrive, then drains all pending messages. Runs on the handler's
// pool task; the wait happens at a safepoint so GC etc. can proceed.
MessageHandler::MessageStatus MessageHandler::PauseAndHandleAllMessages(
    int64_t timeout_millis) {
  MonitorLocker ml(&monitor_, /*no_safepoint_scope=*/false);
  ASSERT(task_running_);
  ASSERT(!delete_me_);
#if defined(DEBUG)
  CheckAccess();
#endif
  // Signal PostMessage() that we need a Notify() when something arrives.
  paused_for_messages_ = true;
  while (queue_->IsEmpty() && oob_queue_->IsEmpty()) {
    Monitor::WaitResult wr;
    {
      // Ensure this thread is at a safepoint while we wait for new messages to
      // arrive.
      TransitionVMToNative transition(Thread::Current());
      wr = ml.Wait(timeout_millis);
    }
    ASSERT(task_running_);
    ASSERT(!delete_me_);
    if (wr == Monitor::kTimedOut) {
      break;
    }
    if (queue_->IsEmpty()) {
      // There are only OOB messages. Handle them and then continue waiting for
      // normal messages unless there is an error.
      MessageStatus status = HandleMessages(&ml, false, false);
      if (status != kOK) {
        paused_for_messages_ = false;
        return status;
      }
    }
  }
  paused_for_messages_ = false;
  // Drain everything that is pending (normal and OOB, multiple allowed).
  return HandleMessages(&ml, true, true);
}
|
|
|
|
|
2015-10-06 18:27:26 +00:00
|
|
|
// Processes only pending OOB messages. No-op (returns kOK) while OOB handling
// is suppressed by an AcquiredQueues scope.
MessageHandler::MessageStatus MessageHandler::HandleOOBMessages() {
  if (!oob_message_handling_allowed_) {
    return kOK;
  }
  MonitorLocker ml(&monitor_);
  ASSERT(!delete_me_);
#if defined(DEBUG)
  CheckAccess();
#endif
  return HandleMessages(&ml, false, false);
}
|
|
|
|
|
2017-07-19 16:15:48 +00:00
|
|
|
#if !defined(PRODUCT)
|
2016-02-03 23:33:40 +00:00
|
|
|
bool MessageHandler::ShouldPauseOnStart(MessageStatus status) const {
|
|
|
|
Isolate* owning_isolate = isolate();
|
2023-04-10 18:15:12 +00:00
|
|
|
if (owning_isolate == nullptr) {
|
2016-02-03 23:33:40 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
// If we are restarting or shutting down, we do not want to honor
|
|
|
|
// should_pause_on_start or should_pause_on_exit.
|
2023-01-31 18:28:59 +00:00
|
|
|
return (status != MessageHandler::kShutdown) && should_pause_on_start() &&
|
|
|
|
owning_isolate->is_runnable();
|
2014-10-27 16:57:21 +00:00
|
|
|
}
|
|
|
|
|
2016-02-03 23:33:40 +00:00
|
|
|
bool MessageHandler::ShouldPauseOnExit(MessageStatus status) const {
|
|
|
|
Isolate* owning_isolate = isolate();
|
2023-04-10 18:15:12 +00:00
|
|
|
if (owning_isolate == nullptr) {
|
2016-02-03 23:33:40 +00:00
|
|
|
return false;
|
|
|
|
}
|
2023-01-31 18:28:59 +00:00
|
|
|
return (status != MessageHandler::kShutdown) && should_pause_on_exit() &&
|
|
|
|
owning_isolate->is_runnable();
|
2016-02-03 23:33:40 +00:00
|
|
|
}
|
2017-07-19 16:15:48 +00:00
|
|
|
#endif
|
2016-02-03 23:33:40 +00:00
|
|
|
|
|
|
|
// True if at least one OOB message is queued.
bool MessageHandler::HasOOBMessages() {
  MonitorLocker ml(&monitor_);
  return !oob_queue_->IsEmpty();
}
|
|
|
|
|
[vm] Make reloading of isolate groups use new safepoint-level mechanism
The current hot-reload implementation [0] will perform a reload by
first sending OOB messages to all isolates and waiting until those OOB
messages are being handled. The handler of the OOB message will block
the thread (and unschedule isolate) and notify the thread performing
reload it's ready.
This requires that all isolates within a group can actually run & block.
This is the case for the VM implementation of isolates (as they are
run an unlimited size thread pool).
Though flutter seems to multiplex several engine isolates on the same OS
thread. Reloading can then result in one engine isolate performing
reload waiting for another to act on the OOB message (which it will not
do as it's multiplexed on the same thread as the former).
Now that we have a more flexible safepointing mechanism (introduced in
[1]) we can utilize for hot reloading by introducing a new "reloading"
safepoint level.
Reload safepoints
-----------------------
We introduce a new safepoint level (SafepointLevel::kGCAndDeoptAndReload).
Being at a "reload safepoint" implies being at a "deopt safepoint"
which implies being at a "gc safepoint".
Code has to explicitly opt-into making safepoint checks participate /
check into "reload safepoints" using [ReloadParticipationScope]. We do
that at certain well-defined places where reload is possible (e.g. event
loop boundaries, descheduling of isolates, OOB message processing, ...).
While running under [NoReloadScope] we disable checking into "reload
safepoints".
Initiator of hot-reload
-----------------------
When a mutator initiates a reload operation (e.g. as part of a
`ReloadSources` `vm-service` API call) it will use a
[ReloadSafepointOperationScope] to get all other mutators to a
safepoint.
For mutators that aren't already at a "reload safepoint", we'll
notify them via an OOB message (instead of scheduling kVMInterrupt).
While waiting for all mutators to check into a "reload safepoint", the
thread is itself at a safepoint (as other mutators may perform lower
level safepoint operations - e.g. GC, Deopt, ...)
Once all mutators are at a "reload safepoint" the thread will take
ownership of all safepoint levels.
Other mutators
-----------------------
Mutators can be at a "reload safepoint" already (e.g. isolate is not
scheduled). If they try to exit safepoint they will block until the
reload operation is finished.
Mutators that are not at a "reload safepoint" (e.g. executing Dart or VM
code) will be sent an OOB message indicating it should check into a
"reload safepoint". We assume mutators make progress until they can
process OOB message.
Mutators may run under a [NoReloadScope] when handling the OOB message.
In that case they will not check into the "reload safepoint" and simply
ignore the message. To ensure the thread will eventually check-in,
we'll make the destructor of [~NoReloadScope] check & send itself a new OOB
message indicating reload should happen. Eventually getting the mutator
to process the OOB message (which is a well-defined place where we can
check into the reload safepoint).
Non-isolate mutators such as the background compiler do not react to OOB
messages. This means that either those mutators have to be stopped (e.g.
bg compiler) before initiating a reload safepoint operation, the
threads have to explicitly opt-into participating in reload safepoints
or the threads have to deschedule themselves eventually.
Misc
----
Owning a reload safepoint operation implies also owning the deopt &
gc safepoint operation. Yet some code would like to ensure it actually
runs under a [DeoptSafepointOperationScope]/[GCSafepointOperationScope].
=> The `Thread::OwnsGCSafepoint()` handles that.
While performing hot-reload we may exercise common code (e.g. kernel
loader, ...) that acquires safepoint locks. Normally it's disallowed to
acquire safepoint locks while holding a safepoint operation (since
mutators may be stopped at places where they hold locks, creating
deadlock scenarios).
=> We explicitly opt code into participating in reload safepointing
requests. Those well-defined places aren't holding safepoint locks.
=> The `Thread::CanAcquireSafepointLocks()` will return `true` despite
owning a reload operation. (But if one also holds deopt/gc safepoint
operation it will return false)
Example where this matters: As part of hot-reload, we load kernel which
may create new symbols. The symbol creation code may acquire the symbol
lock and `InsertNewOrGet()` a symbol. This is safe as other mutators
don't hold the symbol lock at reload safepoints. The same cannot be said
for Deopt/GC safepoint operations - as they can interrupt code at many
more places where there's no guarantee that no locks are held.
[0] https://dart-review.googlesource.com/c/sdk/+/187461
[1] https://dart-review.googlesource.com/c/sdk/+/196927
Issue https://github.com/flutter/flutter/issues/124546
TEST=Newly added Reload_* tests.
Change-Id: I6842d7d2b284d043cc047fd702b7c5c7dd1fa3c5
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/296183
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Slava Egorov <vegorov@google.com>
2023-04-21 13:56:49 +00:00
|
|
|
#if defined(TESTING)
|
|
|
|
std::unique_ptr<Message> MessageHandler::StealOOBMessage() {
|
|
|
|
MonitorLocker ml(&monitor_);
|
|
|
|
ASSERT(!oob_queue_->IsEmpty());
|
|
|
|
return oob_queue_->Dequeue();
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2018-11-13 10:32:48 +00:00
|
|
|
// True if at least one normal-priority message is queued.
bool MessageHandler::HasMessages() {
  MonitorLocker ml(&monitor_);
  return !queue_->IsEmpty();
}
|
|
|
|
|
2012-04-19 19:47:27 +00:00
|
|
|
// Body of the MessageHandlerTask: honors pause-on-start/exit, runs the start
// callback once, drains messages, and on termination performs end-callback or
// self-deletion. `this` may be freed before this function returns.
void MessageHandler::TaskCallback() {
  ASSERT(Isolate::Current() == nullptr);
  MessageStatus status = kOK;
  bool run_end_callback = false;
  bool delete_me = false;
  EndCallback end_callback = nullptr;
  CallbackData callback_data = 0;
  {
    // We will occasionally release and reacquire this monitor in this
    // function. Whenever we reacquire the monitor we *must* process
    // all pending OOB messages, or we may miss a request for vm
    // shutdown.
    MonitorLocker ml(&monitor_);

    // This method is running on the message handler task. Which means no
    // other message handler tasks will be started until this one sets
    // [task_running_] to false.
    ASSERT(task_running_);

#if !defined(PRODUCT)
    if (ShouldPauseOnStart(kOK)) {
      if (!is_paused_on_start()) {
        PausedOnStartLocked(&ml, true);
      }
      // More messages may have come in before we (re)acquired the monitor.
      status = HandleMessages(&ml, false, false);
      if (ShouldPauseOnStart(status)) {
        // Still paused.
        ASSERT(oob_queue_->IsEmpty());
        task_running_ = false;  // No task in queue.
        return;
      } else {
        PausedOnStartLocked(&ml, false);
      }
    }
    if (is_paused_on_exit()) {
      status = HandleMessages(&ml, false, false);
      if (ShouldPauseOnExit(status)) {
        // Still paused.
        ASSERT(oob_queue_->IsEmpty());
        task_running_ = false;  // No task in queue.
        return;
      } else {
        PausedOnExitLocked(&ml, false);
        // Restore the status remembered when the exit pause began, unless we
        // are now shutting down.
        if (status != kShutdown) {
          status = remembered_paused_on_exit_status_;
        }
      }
    }
#endif  // !defined(PRODUCT)

    if (status == kOK) {
      if (start_callback_ != nullptr) {
        // Initialize the message handler by running its start function,
        // if we have one. For an isolate, this will run the isolate's
        // main() function.
        //
        // Release the monitor_ temporarily while we call the start callback.
        ml.Exit();
        status = start_callback_(callback_data_);
        ASSERT(Isolate::Current() == nullptr);
        start_callback_ = nullptr;  // Run at most once.
        ml.Enter();
      }

      // Handle any pending messages for this message handler.
      if (status != kShutdown) {
        status = HandleMessages(&ml, (status == kOK), true);
      }
    }

    // The isolate exits when it encounters an error or when it no
    // longer has live ports.
    if (status != kOK || !KeepAliveLocked()) {
#if !defined(PRODUCT)
      if (ShouldPauseOnExit(status)) {
        if (FLAG_trace_service_pause_events) {
          OS::PrintErr(
              "Isolate %s paused before exiting. "
              "Use the Observatory to release it.\n",
              name());
        }
        remembered_paused_on_exit_status_ = status;
        PausedOnExitLocked(&ml, true);
        // More messages may have come in while we released the monitor.
        status = HandleMessages(&ml, /*allow_normal_messages=*/false,
                                /*allow_multiple_normal_messages=*/false);
        if (ShouldPauseOnExit(status)) {
          // Still paused.
          ASSERT(oob_queue_->IsEmpty());
          task_running_ = false;  // No task in queue.
          return;
        } else {
          PausedOnExitLocked(&ml, false);
        }
      }
#endif  // !defined(PRODUCT)
      if (FLAG_trace_isolates) {
        if (status != kOK && thread() != nullptr) {
          const Error& error = Error::Handle(thread()->sticky_error());
          OS::PrintErr(
              "[-] Stopping message handler (%s):\n"
              "\thandler: %s\n"
              "\terror: %s\n",
              MessageStatusString(status), name(), error.ToCString());
        } else {
          OS::PrintErr(
              "[-] Stopping message handler (%s):\n"
              "\thandler: %s\n",
              MessageStatusString(status), name());
        }
      }
      pool_ = nullptr;
      // Decide if we have a callback before releasing the monitor.
      end_callback = end_callback_;
      callback_data = callback_data_;
      run_end_callback = end_callback_ != nullptr;
      delete_me = delete_me_;
    }

    // Clear task_running_ last. This allows other tasks to potentially start
    // for this message handler.
    ASSERT(oob_queue_->IsEmpty());
    task_running_ = false;
  }

  // The handler may have been deleted by another thread here if it is a native
  // message handler.

  // Message handlers either use delete_me or end_callback but not both.
  ASSERT(!delete_me || !run_end_callback);

  if (run_end_callback) {
    ASSERT(end_callback != nullptr);
    end_callback(callback_data);
    // The handler may have been deleted after this point.
  }
  if (delete_me) {
    delete this;
  }
}
|
|
|
|
|
|
|
|
// Notification that |port| was closed. The base implementation only traces;
// the port itself is managed by PortMap, not by this handler.
void MessageHandler::ClosePort(Dart_Port port) {
  if (FLAG_trace_isolates) {
    MonitorLocker ml(&monitor_);
    OS::PrintErr(
        "[-] Closing port:\n"
        "\thandler: %s\n"
        "\tport: %" Pd64 "\n",
        name(), port);
  }
}
|
|
|
|
|
|
|
|
// Notification that all of this handler's ports were closed: drop every
// queued message (normal and OOB), since no one can receive them anymore.
void MessageHandler::CloseAllPorts() {
  MonitorLocker ml(&monitor_);
  if (FLAG_trace_isolates) {
    OS::PrintErr(
        "[-] Closing all ports:\n"
        "\thandler: %s\n",
        name());
  }
  queue_->Clear();
  oob_queue_->Clear();
}
|
|
|
|
|
2016-08-04 22:34:24 +00:00
|
|
|
// Deletes the handler, or defers deletion to the currently running pool task
// (TaskCallback() checks delete_me_ when it finishes).
void MessageHandler::RequestDeletion() {
  {
    MonitorLocker ml(&monitor_);
    if (task_running_) {
      // This message handler currently has a task running on the thread pool.
      delete_me_ = true;
      return;
    }
  }

  // This message handler has no current task. Delete it.
  delete this;
}
|
|
|
|
|
2017-07-19 16:15:48 +00:00
|
|
|
#if !defined(PRODUCT)
// Dumps this handler's ports to stderr via PortMap (debugging aid).
void MessageHandler::DebugDump() {
  PortMap::DebugDumpForMessageHandler(this);
}
|
|
|
|
|
2016-02-03 23:33:40 +00:00
|
|
|
// Public wrapper that acquires monitor_ and delegates to the *Locked variant.
void MessageHandler::PausedOnStart(bool paused) {
  MonitorLocker ml(&monitor_);
  PausedOnStartLocked(&ml, paused);
}
|
|
|
|
|
2016-03-02 17:20:40 +00:00
|
|
|
// Enters (paused=true) or leaves (paused=false) the paused-on-start state.
// Caller must hold monitor_ via |ml|; it is briefly released around the
// notification callout.
void MessageHandler::PausedOnStartLocked(MonitorLocker* ml, bool paused) {
  if (paused) {
    ASSERT(!is_paused_on_start_);
    ASSERT(paused_timestamp_ == -1);
    paused_timestamp_ = OS::GetCurrentTimeMillis();
    // Temporarily release the monitor when calling out to
    // NotifyPauseOnStart. This avoids a dead lock that can occur
    // when this message handler tries to post a message while a
    // message is being posted to it.
    ml->Exit();
    NotifyPauseOnStart();
    ml->Enter();
    is_paused_on_start_ = true;
  } else {
    ASSERT(is_paused_on_start_);
    ASSERT(paused_timestamp_ != -1);
    paused_timestamp_ = -1;
    // Resumed. Clear the resume request of the owning isolate.
    Isolate* owning_isolate = isolate();
    if (owning_isolate != nullptr) {
      owning_isolate->GetAndClearResumeRequest();
    }
    is_paused_on_start_ = false;
  }
}
|
|
|
|
|
|
|
|
// Public wrapper that acquires monitor_ and delegates to the *Locked variant.
void MessageHandler::PausedOnExit(bool paused) {
  MonitorLocker ml(&monitor_);
  PausedOnExitLocked(&ml, paused);
}
|
|
|
|
|
2016-03-02 17:20:40 +00:00
|
|
|
// Enters (paused=true) or leaves (paused=false) the paused-on-exit state.
// Caller must hold monitor_ via |ml|; it is briefly released around the
// notification callout. Mirrors PausedOnStartLocked().
void MessageHandler::PausedOnExitLocked(MonitorLocker* ml, bool paused) {
  if (paused) {
    ASSERT(!is_paused_on_exit_);
    ASSERT(paused_timestamp_ == -1);
    paused_timestamp_ = OS::GetCurrentTimeMillis();
    // Temporarily release the monitor when calling out to
    // NotifyPauseOnExit. This avoids a dead lock that can
    // occur when this message handler tries to post a message
    // while a message is being posted to it.
    ml->Exit();
    NotifyPauseOnExit();
    ml->Enter();
    is_paused_on_exit_ = true;
  } else {
    ASSERT(is_paused_on_exit_);
    ASSERT(paused_timestamp_ != -1);
    paused_timestamp_ = -1;
    // Resumed. Clear the resume request of the owning isolate.
    Isolate* owning_isolate = isolate();
    if (owning_isolate != nullptr) {
      owning_isolate->GetAndClearResumeRequest();
    }
    is_paused_on_exit_ = false;
  }
}
|
2017-07-19 16:15:48 +00:00
|
|
|
#endif // !defined(PRODUCT)
|
2016-02-03 23:33:40 +00:00
|
|
|
|
2016-03-02 17:20:40 +00:00
|
|
|
// RAII scope giving the caller exclusive access to the handler's queues:
// holds monitor_ (via ml_) and suppresses OOB message handling for the
// duration of the scope.
MessageHandler::AcquiredQueues::AcquiredQueues(MessageHandler* handler)
    : handler_(handler), ml_(&handler->monitor_) {
  ASSERT(handler != nullptr);
  handler_->oob_message_handling_allowed_ = false;
}
|
|
|
|
|
|
|
|
// Re-enables OOB message handling; ml_'s destructor releases monitor_.
MessageHandler::AcquiredQueues::~AcquiredQueues() {
  ASSERT(handler_ != nullptr);
  handler_->oob_message_handling_allowed_ = true;
}
|
|
|
|
|
2012-04-19 19:47:27 +00:00
|
|
|
} // namespace dart
|