2016-05-17 19:19:06 +00:00
|
|
|
// Copyright (c) 2016, the Dart project authors. Please see the AUTHORS file
|
|
|
|
// for details. All rights reserved. Use of this source code is governed by a
|
|
|
|
// BSD-style license that can be found in the LICENSE file.
|
|
|
|
|
2016-10-26 07:26:03 +00:00
|
|
|
#ifndef RUNTIME_VM_ISOLATE_RELOAD_H_
|
|
|
|
#define RUNTIME_VM_ISOLATE_RELOAD_H_
|
2016-05-17 19:19:06 +00:00
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
#include <functional>
|
|
|
|
#include <memory>
|
|
|
|
|
2016-07-29 18:23:18 +00:00
|
|
|
#include "include/dart_tools_api.h"
|
|
|
|
|
2016-05-17 19:19:06 +00:00
|
|
|
#include "vm/globals.h"
|
|
|
|
#include "vm/growable_array.h"
|
2017-04-19 17:21:53 +00:00
|
|
|
#include "vm/hash_map.h"
|
2021-09-28 18:30:52 +00:00
|
|
|
#include "vm/heap/become.h"
|
2016-05-17 19:19:06 +00:00
|
|
|
#include "vm/log.h"
|
2017-02-23 20:40:48 +00:00
|
|
|
#include "vm/object.h"
|
2016-05-17 19:19:06 +00:00
|
|
|
|
|
|
|
DECLARE_FLAG(bool, trace_reload);
|
2016-07-15 17:52:20 +00:00
|
|
|
DECLARE_FLAG(bool, trace_reload_verbose);
|
2016-05-17 19:19:06 +00:00
|
|
|
|
|
|
|
// 'Trace Isolate Reload' TIR_Print
|
|
|
|
#if defined(_MSC_VER)
|
2016-11-08 21:54:47 +00:00
|
|
|
#define TIR_Print(format, ...) \
|
|
|
|
if (FLAG_trace_reload) Log::Current()->Print(format, __VA_ARGS__)
|
2016-05-17 19:19:06 +00:00
|
|
|
#else
|
2016-11-08 21:54:47 +00:00
|
|
|
#define TIR_Print(format, ...) \
|
|
|
|
if (FLAG_trace_reload) Log::Current()->Print(format, ##__VA_ARGS__)
|
2016-05-17 19:19:06 +00:00
|
|
|
#endif
|
|
|
|
|
2016-07-15 17:52:20 +00:00
|
|
|
// 'Verbose Trace Isolate Reload' VTIR_Print
|
|
|
|
#if defined(_MSC_VER)
|
2016-11-08 21:54:47 +00:00
|
|
|
#define VTIR_Print(format, ...) \
|
|
|
|
if (FLAG_trace_reload_verbose) Log::Current()->Print(format, __VA_ARGS__)
|
2016-07-15 17:52:20 +00:00
|
|
|
#else
|
2016-11-08 21:54:47 +00:00
|
|
|
#define VTIR_Print(format, ...) \
|
|
|
|
if (FLAG_trace_reload_verbose) Log::Current()->Print(format, ##__VA_ARGS__)
|
2016-07-15 17:52:20 +00:00
|
|
|
#endif
|
|
|
|
|
2017-08-24 16:25:53 +00:00
|
|
|
#if !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
|
|
|
|
|
2016-05-17 19:19:06 +00:00
|
|
|
namespace dart {
|
|
|
|
|
2016-07-29 18:23:18 +00:00
|
|
|
class BitVector;
|
2016-05-17 19:19:06 +00:00
|
|
|
class GrowableObjectArray;
|
|
|
|
class Isolate;
|
|
|
|
class Library;
|
2019-11-15 14:08:45 +00:00
|
|
|
class ObjectLocator;
|
2016-07-29 18:23:18 +00:00
|
|
|
class ObjectPointerVisitor;
|
|
|
|
class ObjectStore;
|
|
|
|
class Script;
|
2016-05-17 19:19:06 +00:00
|
|
|
class UpdateClassesVisitor;
|
|
|
|
|
2016-07-26 18:13:28 +00:00
|
|
|
// Rewrites instances of a single class whose field layout changed during a
// hot reload: objects are collected via AddObject() and replicated with the
// new layout in CreateMorphedCopies().
class InstanceMorpher : public ZoneAllocated {
 public:
  // Creates a new [InstanceMorpher] based on the [from]/[to] class
  // descriptions.
  static InstanceMorpher* CreateFromClassDescriptors(
      Zone* zone,
      SharedClassTable* shared_class_table,
      const Class& from,
      const Class& to);

  InstanceMorpher(Zone* zone,
                  classid_t cid,
                  SharedClassTable* shared_class_table,
                  ZoneGrowableArray<intptr_t>* mapping,
                  ZoneGrowableArray<intptr_t>* new_fields_offsets);
  virtual ~InstanceMorpher() {}

  // Adds an object to be morphed.
  void AddObject(ObjectPtr object);

  // Create the morphed objects based on the before() list.
  void CreateMorphedCopies(Become* become);

  // Append the morpher info to JSON array.
  void AppendTo(JSONArray* array);

  // Returns the cid associated with the from_ and to_ class.
  intptr_t cid() const { return cid_; }

  // Dumps the field mappings for the [cid()] class.
  void Dump() const;

 private:
  Zone* zone_;
  // Class id shared by the before/after versions of the class.
  classid_t cid_;
  SharedClassTable* shared_class_table_;
  // Maps old-layout field positions to new-layout positions.
  ZoneGrowableArray<intptr_t>* mapping_;
  ZoneGrowableArray<intptr_t>* new_fields_offsets_;

  // Instances collected via AddObject(), awaiting morphing.
  GrowableArray<const Instance*> before_;
};
|
|
|
|
|
|
|
|
// Base class describing why a reload was cancelled; reasons are accumulated
// by IsolateGroupReloadContext and reported to the service/embedder.
class ReasonForCancelling : public ZoneAllocated {
 public:
  // The zone parameter is unused here; subclasses allocate from it.
  explicit ReasonForCancelling(Zone* zone) {}
  virtual ~ReasonForCancelling() {}

  // Reports a reason for cancelling reload.
  void Report(IsolateGroupReloadContext* context);

  // Conversion to a VM error object.
  // Default implementation calls ToString.
  virtual ErrorPtr ToError();

  // Conversion to a string object.
  // Default implementation calls ToError.
  virtual StringPtr ToString();

  // Append the reason to JSON array.
  virtual void AppendTo(JSONArray* array);

  // Concrete subclasses must override either ToError or ToString.
  // (The default implementations delegate to each other, so overriding
  // neither would recurse.)
};
|
|
|
|
|
|
|
|
// Abstract class for also capturing the from_ and to_ class.
// Used by cancellation reasons that concern a specific class pair
// (e.g. an incompatible schema change between the old and new class).
class ClassReasonForCancelling : public ReasonForCancelling {
 public:
  ClassReasonForCancelling(Zone* zone, const Class& from, const Class& to);
  // Appends the reason, including the from/to classes, to a JSON array.
  void AppendTo(JSONArray* array);

 protected:
  const Class& from_;  // Class before the reload.
  const Class& to_;    // Replacement class after the reload.
};
|
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
class IsolateGroupReloadContext {
|
2016-05-17 19:19:06 +00:00
|
|
|
public:
|
2019-11-15 14:08:45 +00:00
|
|
|
IsolateGroupReloadContext(IsolateGroup* isolate,
|
|
|
|
SharedClassTable* shared_class_table,
|
|
|
|
JSONStream* js);
|
|
|
|
~IsolateGroupReloadContext();
|
2016-05-17 19:19:06 +00:00
|
|
|
|
2018-09-27 21:47:39 +00:00
|
|
|
// If kernel_buffer is provided, the VM takes ownership when Reload is called.
|
2019-11-15 14:08:45 +00:00
|
|
|
bool Reload(bool force_reload,
|
2016-11-17 19:29:12 +00:00
|
|
|
const char* root_script_url = NULL,
|
2018-09-26 14:10:53 +00:00
|
|
|
const char* packages_url = NULL,
|
|
|
|
const uint8_t* kernel_buffer = NULL,
|
|
|
|
intptr_t kernel_buffer_size = 0);
|
2016-05-17 19:19:06 +00:00
|
|
|
|
2016-08-02 16:36:46 +00:00
|
|
|
// All zone allocated objects must be allocated from this zone.
|
|
|
|
Zone* zone() const { return zone_; }
|
2016-05-17 19:19:06 +00:00
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
bool UseSavedSizeTableForGC() const {
|
|
|
|
return saved_size_table_.load(std::memory_order_relaxed) != nullptr;
|
2019-10-21 16:26:39 +00:00
|
|
|
}
|
2019-09-10 12:48:16 +00:00
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
IsolateGroup* isolate_group() const { return isolate_group_; }
|
|
|
|
bool reload_aborted() const { return HasReasonsForCancelling(); }
|
2016-07-29 18:23:18 +00:00
|
|
|
bool reload_skipped() const { return reload_skipped_; }
|
2020-04-25 05:21:27 +00:00
|
|
|
ErrorPtr error() const;
|
2019-11-15 14:08:45 +00:00
|
|
|
int64_t start_time_micros() const { return start_time_micros_; }
|
2016-07-29 18:23:18 +00:00
|
|
|
int64_t reload_timestamp() const { return reload_timestamp_; }
|
|
|
|
|
2016-08-02 16:36:46 +00:00
|
|
|
static Dart_FileModifiedCallback file_modified_callback() {
|
|
|
|
return file_modified_callback_;
|
|
|
|
}
|
|
|
|
static void SetFileModifiedCallback(Dart_FileModifiedCallback callback) {
|
|
|
|
file_modified_callback_ = callback;
|
|
|
|
}
|
|
|
|
|
|
|
|
private:
|
2019-11-15 14:08:45 +00:00
|
|
|
intptr_t GetClassSizeForHeapWalkAt(classid_t cid);
|
2019-09-06 22:06:30 +00:00
|
|
|
void DiscardSavedClassTable(bool is_rollback);
|
2016-05-17 19:19:06 +00:00
|
|
|
|
2016-07-26 18:13:28 +00:00
|
|
|
// Tells whether there are reasons for cancelling the reload.
|
|
|
|
bool HasReasonsForCancelling() const {
|
|
|
|
return !reasons_to_cancel_reload_.is_empty();
|
|
|
|
}
|
|
|
|
|
|
|
|
// Record problem for this reload.
|
|
|
|
void AddReasonForCancelling(ReasonForCancelling* reason);
|
|
|
|
|
2016-07-28 18:07:15 +00:00
|
|
|
// Reports all reasons for cancelling reload.
|
2016-07-26 18:13:28 +00:00
|
|
|
void ReportReasonsForCancelling();
|
|
|
|
|
2017-05-01 06:28:01 +00:00
|
|
|
// Reports the details of a reload operation.
|
2019-11-15 14:08:45 +00:00
|
|
|
void ReportOnJSON(JSONStream* stream, intptr_t final_library_count);
|
2016-07-28 18:07:15 +00:00
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
// Ensures there is a instance morpher for [cid], if not it will use
|
|
|
|
// [instance_morpher]
|
|
|
|
void EnsureHasInstanceMorpherFor(classid_t cid,
|
|
|
|
InstanceMorpher* instance_morpher);
|
2016-07-26 18:13:28 +00:00
|
|
|
|
|
|
|
// Tells whether instance in the heap must be morphed.
|
2016-11-08 21:54:47 +00:00
|
|
|
bool HasInstanceMorphers() const { return !instance_morphers_.is_empty(); }
|
2016-07-26 18:13:28 +00:00
|
|
|
|
2016-08-02 23:11:30 +00:00
|
|
|
// Called by both FinalizeLoading and FinalizeFailedLoad.
|
2019-11-15 14:08:45 +00:00
|
|
|
void CommonFinalizeTail(intptr_t final_library_count);
|
2016-08-02 16:36:46 +00:00
|
|
|
|
|
|
|
// Report back through the observatory channels.
|
|
|
|
void ReportError(const Error& error);
|
|
|
|
void ReportSuccess();
|
2016-07-29 18:23:18 +00:00
|
|
|
|
2016-05-17 19:19:06 +00:00
|
|
|
void VisitObjectPointers(ObjectPointerVisitor* visitor);
|
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
void GetRootLibUrl(const char* root_script_url);
|
|
|
|
char* CompileToKernel(bool force_reload,
|
|
|
|
const char* packages_url,
|
|
|
|
const uint8_t** kernel_buffer,
|
|
|
|
intptr_t* kernel_buffer_size);
|
2020-04-30 07:52:24 +00:00
|
|
|
void BuildModifiedLibrariesClosure(BitVector* modified_libs);
|
2019-11-15 14:08:45 +00:00
|
|
|
void FindModifiedSources(bool force_reload,
|
2018-02-07 19:35:39 +00:00
|
|
|
Dart_SourceFile** modified_sources,
|
2018-08-03 11:03:08 +00:00
|
|
|
intptr_t* count,
|
|
|
|
const char* packages_url);
|
2019-11-15 14:08:45 +00:00
|
|
|
bool ScriptModifiedSince(const Script& script, int64_t since);
|
2016-07-29 18:23:18 +00:00
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
void CheckpointSharedClassTable();
|
2016-05-17 19:19:06 +00:00
|
|
|
|
2021-09-28 18:30:52 +00:00
|
|
|
void MorphInstancesPhase1Allocate(ObjectLocator* locator, Become* become);
|
|
|
|
void MorphInstancesPhase2Become(Become* become);
|
2016-05-17 19:19:06 +00:00
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
void ForEachIsolate(std::function<void(Isolate*)> callback);
|
2016-05-17 19:19:06 +00:00
|
|
|
|
2016-08-02 16:36:46 +00:00
|
|
|
// The zone used for all reload related allocations.
|
|
|
|
Zone* zone_;
|
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
IsolateGroup* isolate_group_;
|
|
|
|
SharedClassTable* shared_class_table_;
|
2016-07-26 18:13:28 +00:00
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
int64_t start_time_micros_ = -1;
|
|
|
|
int64_t reload_timestamp_ = -1;
|
|
|
|
bool reload_skipped_ = false;
|
|
|
|
bool reload_finalized_ = false;
|
|
|
|
JSONStream* js_;
|
|
|
|
intptr_t num_old_libs_ = -1;
|
2016-07-26 18:13:28 +00:00
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
intptr_t saved_num_cids_ = -1;
|
|
|
|
std::atomic<intptr_t*> saved_size_table_;
|
|
|
|
intptr_t num_received_libs_ = -1;
|
|
|
|
intptr_t bytes_received_libs_ = -1;
|
|
|
|
intptr_t num_received_classes_ = -1;
|
|
|
|
intptr_t num_received_procedures_ = -1;
|
|
|
|
intptr_t num_saved_libs_ = -1;
|
2016-07-26 18:13:28 +00:00
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
// Required trait for the instance_morpher_by_cid_;
|
2016-07-26 18:13:28 +00:00
|
|
|
struct MorpherTrait {
|
|
|
|
typedef InstanceMorpher* Value;
|
|
|
|
typedef intptr_t Key;
|
|
|
|
typedef InstanceMorpher* Pair;
|
|
|
|
|
|
|
|
static Key KeyOf(Pair kv) { return kv->cid(); }
|
|
|
|
static Value ValueOf(Pair kv) { return kv; }
|
2021-04-14 10:04:48 +00:00
|
|
|
static uword Hash(Key key) { return Utils::WordHash(key); }
|
2016-07-26 18:13:28 +00:00
|
|
|
static bool IsKeyEqual(Pair kv, Key key) { return kv->cid() == key; }
|
|
|
|
};
|
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
// Collect the necessary instance transformation for schema changes.
|
|
|
|
GrowableArray<InstanceMorpher*> instance_morphers_;
|
|
|
|
|
|
|
|
// Collects the reasons for cancelling the reload.
|
|
|
|
GrowableArray<ReasonForCancelling*> reasons_to_cancel_reload_;
|
|
|
|
|
2016-07-26 18:13:28 +00:00
|
|
|
// Hash map from cid to InstanceMorpher.
|
2019-11-15 14:08:45 +00:00
|
|
|
DirectChainedHashMap<MorpherTrait> instance_morpher_by_cid_;
|
|
|
|
|
|
|
|
// A bit vector indicating which of the original libraries were modified.
|
|
|
|
BitVector* modified_libs_ = nullptr;
|
|
|
|
|
2020-04-30 07:52:24 +00:00
|
|
|
// A bit vector indicating which of the original libraries were modified,
|
|
|
|
// or where a transitive dependency was modified.
|
|
|
|
BitVector* modified_libs_transitive_ = nullptr;
|
|
|
|
|
|
|
|
// A bit vector indicating which of the saved libraries that transitively
|
|
|
|
// depend on a modified libary.
|
|
|
|
BitVector* saved_libs_transitive_updated_ = nullptr;
|
|
|
|
|
2019-11-15 14:08:45 +00:00
|
|
|
String& root_lib_url_;
|
2020-04-25 05:21:27 +00:00
|
|
|
ObjectPtr* from() { return reinterpret_cast<ObjectPtr*>(&root_url_prefix_); }
|
|
|
|
StringPtr root_url_prefix_;
|
|
|
|
StringPtr old_root_url_prefix_;
|
|
|
|
ObjectPtr* to() {
|
|
|
|
return reinterpret_cast<ObjectPtr*>(&old_root_url_prefix_);
|
2019-11-15 14:08:45 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
friend class Isolate;
|
|
|
|
friend class Class; // AddStaticFieldMapping, AddEnumBecomeMapping.
|
|
|
|
friend class Library;
|
|
|
|
friend class ObjectLocator;
|
|
|
|
friend class MarkFunctionsForRecompilation; // IsDirty.
|
|
|
|
friend class ReasonForCancelling;
|
2021-01-16 15:42:53 +00:00
|
|
|
friend class ProgramReloadContext;
|
2020-02-20 21:08:35 +00:00
|
|
|
friend class IsolateGroup; // GetClassSizeForHeapWalkAt
|
2021-01-15 23:32:02 +00:00
|
|
|
friend class UntaggedObject; // GetClassSizeForHeapWalkAt
|
2019-11-15 14:08:45 +00:00
|
|
|
|
|
|
|
static Dart_FileModifiedCallback file_modified_callback_;
|
|
|
|
};
|
|
|
|
|
2021-01-16 15:42:53 +00:00
|
|
|
// Per-program (isolate-group program state) side of a reload: checkpoints
// classes and libraries, loads the new kernel, maps old entities to their
// replacements, and commits or rolls back. Owned alongside the group-wide
// IsolateGroupReloadContext it was created with.
class ProgramReloadContext {
 public:
  ProgramReloadContext(
      std::shared_ptr<IsolateGroupReloadContext> group_reload_context,
      IsolateGroup* isolate_group);
  ~ProgramReloadContext();

  // All zone allocated objects must be allocated from this zone.
  Zone* zone() const { return zone_; }

  IsolateGroupReloadContext* group_reload_context() {
    return group_reload_context_.get();
  }

  // Identity predicates pairing old program entities with their reloaded
  // counterparts.
  static bool IsSameLibrary(const Library& a_lib, const Library& b_lib);
  static bool IsSameClass(const Class& a, const Class& b);

 private:
  bool IsDirty(const Library& lib);

  // Prefers old classes when we are in the middle of a reload.
  ClassPtr GetClassForHeapWalkAt(intptr_t cid);
  void DiscardSavedClassTable(bool is_rollback);

  void RegisterClass(const Class& new_cls);

  // Finds the library private key for |replacement_or_new| or return null
  // if |replacement_or_new| is new.
  StringPtr FindLibraryPrivateKey(const Library& replacement_or_new);

  void VisitObjectPointers(ObjectPointerVisitor* visitor);

  IsolateGroup* isolate_group() { return isolate_group_; }
  ObjectStore* object_store();

  void EnsuredUnoptimizedCodeForStack();
  void DeoptimizeDependentCode();

  // The reload pipeline, in execution order; phase 4 either commits
  // (prepare + finish) or rolls back.
  void ReloadPhase1AllocateStorageMapsAndCheckpoint();
  void CheckpointClasses();
  ObjectPtr ReloadPhase2LoadKernel(kernel::Program* program,
                                   const String& root_lib_url);
  void ReloadPhase3FinalizeLoading();
  void ReloadPhase4CommitPrepare();
  void ReloadPhase4CommitFinish();
  void ReloadPhase4Rollback();

  void CheckpointLibraries();

  void RollbackClasses();
  void RollbackLibraries();

#ifdef DEBUG
  void VerifyMaps();
#endif

  void CommitBeforeInstanceMorphing();
  void CommitAfterInstanceMorphing();
  void PostCommit();

  // Invalidation of compiled state that may reference replaced entities.
  void RunInvalidationVisitors();
  void InvalidateKernelInfos(
      Zone* zone,
      const GrowableArray<const KernelProgramInfo*>& kernel_infos);
  void InvalidateFunctions(Zone* zone,
                           const GrowableArray<const Function*>& functions);
  void InvalidateFields(Zone* zone,
                        const GrowableArray<const Field*>& fields,
                        const GrowableArray<const Instance*>& instances);
  void ResetUnoptimizedICsOnStack();
  void ResetMegamorphicCaches();
  void InvalidateWorld();

  // Per-library bookkeeping collected during checkpointing.
  struct LibraryInfo {
    bool dirty;
  };

  // The zone used for all reload related allocations.
  Zone* zone_;
  std::shared_ptr<IsolateGroupReloadContext> group_reload_context_;
  IsolateGroup* isolate_group_;
  intptr_t saved_num_cids_ = -1;
  intptr_t saved_num_tlc_cids_ = -1;
  // Checkpointed class tables; non-null only mid-reload.
  std::atomic<ClassPtr*> saved_class_table_;
  std::atomic<ClassPtr*> saved_tlc_class_table_;
  MallocGrowableArray<LibraryInfo> library_infos_;

  ClassPtr OldClassOrNull(const Class& replacement_or_new);
  LibraryPtr OldLibraryOrNull(const Library& replacement_or_new);
  LibraryPtr OldLibraryOrNullBaseMoved(const Library& replacement_or_new);

  void BuildLibraryMapping();
  void BuildRemovedClassesSet();
  void ValidateReload();

  void AddClassMapping(const Class& replacement_or_new, const Class& original);
  void AddLibraryMapping(const Library& replacement_or_new,
                         const Library& original);
  void AddStaticFieldMapping(const Field& old_field, const Field& new_field);
  void AddBecomeMapping(const Object& old, const Object& neu);
  void RebuildDirectSubclasses();

  // Forwarding machinery used to atomically swap old objects for new ones.
  Become become_;

  // GC-visited pointer range: fields between from() and to() are visited by
  // VisitObjectPointers, so keep them contiguous.
  ObjectPtr* from() {
    return reinterpret_cast<ObjectPtr*>(&old_classes_set_storage_);
  }
  ArrayPtr old_classes_set_storage_;
  ArrayPtr class_map_storage_;
  ArrayPtr removed_class_set_storage_;
  ArrayPtr old_libraries_set_storage_;
  ArrayPtr library_map_storage_;
  LibraryPtr saved_root_library_;
  GrowableObjectArrayPtr saved_libraries_;
  ObjectPtr* to() { return reinterpret_cast<ObjectPtr*>(&saved_libraries_); }

  friend class Isolate;
  friend class IsolateGroup;
  friend class Class;  // AddStaticFieldMapping, AddEnumBecomeMapping.
  friend class Library;
  friend class ObjectLocator;
  friend class MarkFunctionsForRecompilation;  // IsDirty.
  friend class ReasonForCancelling;
  friend class IsolateGroupReloadContext;
};
|
|
|
|
|
2019-08-20 00:49:52 +00:00
|
|
|
// Resets call-site caches (ICs, megamorphic/switchable calls, edge counters)
// in existing code after a reload, so stale targets are not invoked.
class CallSiteResetter : public ValueObject {
 public:
  explicit CallSiteResetter(Zone* zone);

  void ZeroEdgeCounters(const Function& function);
  void ResetCaches(const Code& code);
  void ResetCaches(const ObjectPool& pool);
  void Reset(const ICData& ic);
  void ResetSwitchableCalls(const Code& code);

 private:
  Zone* zone_;
  // Handles reused across reset operations to avoid repeated allocation.
  Instructions& instrs_;
  ObjectPool& pool_;
  Object& object_;
  String& name_;
  Class& new_cls_;
  Library& new_lib_;
  Function& new_function_;
  Field& new_field_;
  Array& entries_;
  Function& old_target_;
  Function& new_target_;
  Function& caller_;
  Array& args_desc_array_;
  Array& ic_data_array_;
  Array& edge_counters_;
  PcDescriptors& descriptors_;
  ICData& ic_data_;
};
|
|
|
|
|
[vm/concurrency] Final support for hot-reload of multi-isolate groups
This is the initial implementation of hot reload with multi-isolate
groups.
Implementation:
As before, when a service API call triggers a reload it will be routed
as an OOB message to a specific isolate (**). As opposed to before, that
isolate has now to coordinate with all other isolates, ensuring that it
"owns" the reload and all other isolates are waiting in a state that
allows reload.
This is implemented as a [ReloadOperationScope] which first participates
in other reloads (if there are any) and then owns the reload. It will
send a new kind of service message to all other registered isolates. All
of them have to check in before reload can proceed. If a new isolate
is about to join the group, it will participate when registering the
isolate. If an old isolate wants to die, it will participate when
unregistering the isolate.
This means that in addition to the existing StackOverFlow checks that
can process OOB messages and therefore reload, we'll have isolate
registration and unregistration as well as a new
Isolate::kCheckForReload OOB message handler where an isolate can
participate in a reload.
We consider the isolate group to be reloadable if the main isolate has
loaded the program and set the root library. Helper isolates don't need
to load any more kernel code and only initialize core libraries, so it's
fine to reload them during this time.
(**) The reason we continue to send reload service API calls to any
isolate in an isolate group is that re-loading might involve calling out
to the embedder's tag handler. Doing so currently requires an active
isolate.
If we allowed a subset of dart_api.h (the subset needed by the tag
handler) to be used only with an active IsolateGroup instead of an
active Isolate we could remove this requirement.
Edge cases:
There are various edge cases to consider. The main edge case is that we currently
maintain an upper limit to the number of isolates executing in parallel
(to ensure each can have big enough chunk of new space, i.e. TLAB).
If there are more isolates with active work they are waiting until one
of the exiting ones "yields". To ensure progress, if any such actively
running isolate gets a request to participate in a reload, it will mark
its own thread as "blocked" and therefore "yields", so another isolate
can make progress until all isolates are participating and the reload
can start.
Marking an isolate as "blocked" happens by exiting that isolate. It will
free up it's TLAB, decrease active mutator count and (if running on VM's
thread pool) also temporarily increase the thread pool size.
The side-effect of this is that it will use one pthread per isolate
during reload. In the future we can extend this first implementation, by
specially handling isolates that don't have a message handler running.
Doing so would require careful consideration to avoid races.
Testing:
In order to test this we use a small helper framework for reload tests.
The helper framework will, similar to real world reload e.g. in flutter,
will spawn a subprocess. It will use the service API to trigger reloads
in this subprocess.
To synchronize between the reload driver and the application being
reloaded it allows watching for events to be printed to stdout/stderr.
The reload test itself can be written - similar to multitests - with
annotations such as `// @include-in-relload-0` in them. The testing
framework will then generate multiple application versions that all get
compiled to kernel.
For simplicity we generate the kernel using the standalone VM with
`--snapshot-kind=kernel` and avoid using the incremental compiler.
There are 4 different tests exercising different aspects of
multi-isolate reload:
vm/dart_2/isolates/reload_active_stack_test:
Performs a reload while a fixed number of isolates have an active
stack, thereby ensuring e.g. that all frames of all isolate mutator
stacks get deoptimized, ...
vm/dart_2/isolates/reload_no_active_stack_test:
Similar to the test above, but instead of having an active stack the
isolates can yield to the event loop, possibly be even descheduled
vm/dart_2/isolates/reload_many_isolates_test:
Similar to the test above, but this test uses many more isolates.
vm/dart_2/isolates/reload_many_isolates_live_and_die_test:
Performs a reload where isolates get spawned and die all the time.
There are always P isolates alive at any given point in time, each
of them spawns children when their parent has died.
Performing a reload catches isolates as various stages of their
lifecycle and can therefore cover a lot of corner cases.
TEST=vm/dart_2/isolates/reload_*_test.dart
Issue https://github.com/dart-lang/sdk/issues/36097
Change-Id: I97039b4084de040b7f2e22f5832a40d57ba398d5
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/187461
Commit-Queue: Martin Kustermann <kustermann@google.com>
Reviewed-by: Alexander Aprelev <aam@google.com>
2021-03-02 18:57:02 +00:00
|
|
|
// Coordinates hot reload across the isolates of a group: every isolate
// registers here, and during a reload all registered isolates must check in
// (pause) before the reloading thread may proceed.
class ReloadHandler {
 public:
  ReloadHandler() {}
  ~ReloadHandler() {}

  // Called when an isolate joins/leaves the group; (un)registration also
  // participates in any in-progress reload.
  void RegisterIsolate();
  void UnregisterIsolate();
  // Called from isolate-mutator checkpoints (e.g. OOB message handling) to
  // participate in a requested reload.
  void CheckForReload();

 private:
  friend class ReloadOperationScope;

  // Entry/exit points used by ReloadOperationScope to own a reload.
  void PauseIsolatesForReloadLocked();
  void ResumeIsolatesLocked();
  void ParticipateIfReloadRequested(SafepointMonitorLocker* ml,
                                    bool is_registered,
                                    bool allow_later_retry);

  intptr_t registered_isolate_count_ = 0;

  // Guards reload ownership; non-null while a thread owns the reload.
  Monitor monitor_;
  Thread* reloading_thread_ = nullptr;

  // Tracks how many registered isolates have checked in for the reload.
  Monitor checkin_monitor_;
  intptr_t isolates_checked_in_ = 0;
};
|
|
|
|
|
|
|
|
// RAII scope that makes the current thread own a reload of its isolate
// group: construction pauses all isolates via the group's ReloadHandler,
// destruction resumes them (including on early exit).
class ReloadOperationScope : public ThreadStackResource {
 public:
  explicit ReloadOperationScope(Thread* thread)
      : ThreadStackResource(thread), isolate_group_(thread->isolate_group()) {
    isolate_group_->reload_handler()->PauseIsolatesForReloadLocked();
  }

  ~ReloadOperationScope() {
    isolate_group_->reload_handler()->ResumeIsolatesLocked();
  }

 private:
  IsolateGroup* isolate_group_;
};
|
|
|
|
|
2016-05-17 19:19:06 +00:00
|
|
|
} // namespace dart
|
|
|
|
|
2017-08-24 16:25:53 +00:00
|
|
|
#endif // !defined(PRODUCT) && !defined(DART_PRECOMPILED_RUNTIME)
|
|
|
|
|
2016-11-08 21:54:47 +00:00
|
|
|
#endif // RUNTIME_VM_ISOLATE_RELOAD_H_
|