[vm/bytecode] Bootstrapping VM from bytecode

Previously, the core snapshot was generated from the AST
(because --enable-interpreter/--use-bytecode-compiler was not specified
when building the core snapshot).

As a result, CL
https://dart.googlesource.com/sdk/+/da8cb470cc94830a98d49532e8d5d1a5b3d80f8b,
which declared libraries in bytecode, also removed bytecode entirely from
the core snapshot in the Dart SDK.

This CL enables bytecode by default if the --bytecode argument is
passed to gn.py. This enables the JIT compiler to compile from bytecode
(the interpreter is still disabled by default but can be enabled
using --enable-interpreter). The core snapshot and other
snapshots now contain bytecode.
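For illustration, a bytecode-enabled JIT build might be configured and
exercised as follows; the mode/arch values, the build target, and
script.dart are ordinary options and placeholders chosen for the example,
not part of this CL:

  ./tools/gn.py --mode=release --arch=x64 --bytecode
  ./tools/build.py --mode=release --arch=x64 runtime
  # JIT-compiles from bytecode by default; add --enable-interpreter
  # to interpret bytecode instead.
  out/ReleaseX64/dart --enable-interpreter script.dart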

This change revealed a number of bugs, which are fixed in this CL:

* _Closure fields were treated as unboxing candidates, which triggered
  an assertion in LoadFieldTOS in the interpreter.

* Several places needed to load class declarations on demand if they were
  not loaded yet (now handled via Class::EnsureDeclarationLoaded()).

* Canonicalization of TypeRef objects which are not fully initialized
  may cause duplicate entries in the hash table of canonical
  TypeArguments, which triggers assertions when the hash table is rehashed.
  The solution is to avoid canonicalization of non-root recursive types
  and of recursive type arguments. Also, TypeRef::Canonicalize and
  TypeRef::Hash are reverted to assert and work only if the referenced
  type has been set (see the sketch after this list).

* Native wrapper classes are eagerly stamped as type-finalized,
  which caused assertion failures when reading their class declarations
  from bytecode.

* When building the flow graph for FFI trampolines, the kernel offset of
  the library (which is now declared in bytecode) was queried. Added a
  special case to Function::KernelDataProgramOffset().

* In interpreter-only mode with a simulator (e.g. SIMARM64), if the
  simulator has not been entered before code is interrupted by a stack
  overflow check, the simulator returns get_sp() = 0, which was
  mistakenly treated as a stack overflow.

* The test standalone_2/io/platform_resolved_executable_test.dart
  spawns a sub-process but did not pass the VM options along to it.
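
To see the TypeRef hashing hazard in isolation: if an object is inserted
into a canonical hash table while its hash still depends on state that is
filled in later, the table can end up with duplicate or unreachable
entries. The following is a minimal standalone C++ analogy, not VM code;
all names in it are hypothetical, and it deliberately violates the
container's invariant to demonstrate the bug class:

  #include <cstddef>
  #include <iostream>
  #include <unordered_set>

  // Stand-in for a recursive type reference whose target is set later.
  struct TypeRefLike {
    const int* target = nullptr;  // null until the recursive type is complete
  };

  struct HashByTarget {
    std::size_t operator()(const TypeRefLike* t) const {
      // The hash depends on the referenced value, so it changes once
      // `target` is assigned after the object was already inserted.
      return t->target ? static_cast<std::size_t>(*t->target) : 0;
    }
  };

  int main() {
    std::unordered_set<const TypeRefLike*, HashByTarget> canonical;
    canonical.rehash(64);  // enough buckets that the two hashes differ

    TypeRefLike ref;         // not fully initialized yet
    canonical.insert(&ref);  // filed under hash 0: premature canonicalization

    int target = 42;
    ref.target = &target;    // completed only after insertion

    // A lookup now probes the bucket for the new hash and misses the old
    // entry, so re-inserting the very same key creates a duplicate.
    canonical.insert(&ref);
    std::cout << canonical.size() << "\n";  // prints 2: duplicate entries
  }

This is why the CL defers canonicalization of non-root recursive types and
their type arguments until the root recursive type (and hence every
TypeRef in it) is fully set up.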

Change-Id: I81bc4f1a4c6725cfa246a435ebe5d8abe43abc67
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/107199
Commit-Queue: Alexander Markov <alexmarkov@google.com>
Reviewed-by: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Régis Crelier <regis@google.com>
Authored by Alexander Markov on 2019-06-26 18:25:26 +00:00; committed by commit-bot@chromium.org
parent c1c24c2e19
commit 7b50b7a8e6
11 changed files with 161 additions and 59 deletions

View file

@@ -146,6 +146,10 @@ config("dart_config") {
include_dirs += [ "../third_party/tcmalloc/gperftools/src" ]
}
if (dart_platform_bytecode) {
defines += [ "DART_USE_BYTECODE" ]
}
if (is_fuchsia) {
if (using_fuchsia_sdk) {
# TODO(chinmaygarde): Currently these targets need to be built in the

View file

@@ -4,6 +4,8 @@
#include "vm/bootstrap.h"
#include <memory>
#include "include/dart_api.h"
#include "vm/class_finalizer.h"
@@ -50,11 +52,18 @@ static void Finish(Thread* thread) {
Class& cls = Class::Handle(zone, object_store->closure_class());
ClassFinalizer::LoadClassMembers(cls);
// Make sure _Closure fields are not marked as unboxing candidates
// as they are accessed with plain loads.
const Array& fields = Array::Handle(zone, cls.fields());
Field& field = Field::Handle(zone);
for (intptr_t i = 0; i < fields.Length(); ++i) {
field ^= fields.At(i);
field.set_is_unboxing_candidate(false);
}
#if defined(DEBUG)
// Verify that closure field offsets are identical in Dart and C++.
const Array& fields = Array::Handle(zone, cls.fields());
ASSERT(fields.Length() == 6);
Field& field = Field::Handle(zone);
field ^= fields.At(0);
ASSERT(field.Offset() == Closure::instantiator_type_arguments_offset());
field ^= fields.At(1);

View file

@@ -195,14 +195,19 @@ bool ClassFinalizer::ProcessPendingClasses() {
#if defined(DEBUG)
for (intptr_t i = 0; i < class_array.Length(); i++) {
cls ^= class_array.At(i);
ASSERT(cls.is_declaration_loaded());
ASSERT(cls.is_declared_in_bytecode() || cls.is_declaration_loaded());
}
#endif
// Finalize types in all classes.
for (intptr_t i = 0; i < class_array.Length(); i++) {
cls ^= class_array.At(i);
FinalizeTypesInClass(cls);
if (cls.is_declared_in_bytecode()) {
cls.EnsureDeclarationLoaded();
ASSERT(cls.is_type_finalized());
} else {
FinalizeTypesInClass(cls);
}
}
if (FLAG_print_classes) {
@@ -407,6 +412,7 @@ intptr_t ClassFinalizer::ExpandAndFinalizeTypeArguments(
// The type class does not need to be finalized in order to finalize the type.
// Also, the type parameters of the type class must be finalized.
Class& type_class = Class::Handle(zone, type.type_class());
type_class.EnsureDeclarationLoaded();
if (!type_class.is_type_finalized()) {
FinalizeTypeParameters(type_class, pending_types);
}
@@ -1003,7 +1009,7 @@ static void MarkImplemented(Zone* zone, const Class& iface) {
void ClassFinalizer::FinalizeTypesInClass(const Class& cls) {
Thread* thread = Thread::Current();
HANDLESCOPE(thread);
ASSERT(cls.is_declaration_loaded());
cls.EnsureDeclarationLoaded();
if (cls.is_type_finalized()) {
return;
}
@@ -1190,12 +1196,7 @@ RawError* ClassFinalizer::LoadClassMembers(const Class& cls) {
LongJumpScope jump;
if (setjmp(*jump.Set()) == 0) {
#if !defined(DART_PRECOMPILED_RUNTIME)
if (!cls.is_declaration_loaded()) {
// Loading of class declaration can be postponed until needed
// if class comes from bytecode.
ASSERT(cls.is_declared_in_bytecode());
kernel::BytecodeReader::LoadClassDeclaration(cls);
}
cls.EnsureDeclarationLoaded();
#endif
ASSERT(cls.is_type_finalized());
ClassFinalizer::FinalizeClass(cls);

View file

@@ -137,7 +137,7 @@ void BytecodeMetadataHelper::ReadLibrary(const Library& library) {
&Array::Handle(helper_->zone_, GetBytecodeComponent()));
BytecodeReaderHelper bytecode_reader(&H, active_class_, &bytecode_component);
AlternativeReadingScope alt(&bytecode_reader.reader(),
library.bytecode_offset());
bytecode_component.GetLibraryIndexOffset());
bytecode_reader.FindAndReadSpecificLibrary(
library, bytecode_component.GetNumLibraries());
}
@@ -527,6 +527,35 @@ void BytecodeReaderHelper::ReadClosureDeclaration(const Function& function,
closure.SetSignatureType(signature_type);
}
static bool IsNonCanonical(const AbstractType& type) {
return type.IsTypeRef() || (type.IsType() && !type.IsCanonical());
}
static bool HasNonCanonicalTypes(Zone* zone, const Function& func) {
auto& type = AbstractType::Handle(zone);
for (intptr_t i = 0; i < func.NumParameters(); ++i) {
type = func.ParameterTypeAt(i);
if (IsNonCanonical(type)) {
return true;
}
}
type = func.result_type();
if (IsNonCanonical(type)) {
return true;
}
const auto& type_params = TypeArguments::Handle(zone, func.type_parameters());
if (!type_params.IsNull()) {
for (intptr_t i = 0; i < type_params.Length(); ++i) {
type = type_params.TypeAt(i);
type = TypeParameter::Cast(type).bound();
if (IsNonCanonical(type)) {
return true;
}
}
}
return false;
}
RawType* BytecodeReaderHelper::ReadFunctionSignature(
const Function& func,
bool has_optional_positional_params,
@@ -581,7 +610,14 @@ RawType* BytecodeReaderHelper::ReadFunctionSignature(
// Finalize function type.
type = func.SignatureType();
type = ClassFinalizer::FinalizeType(*(active_class_->klass), type);
ClassFinalizer::FinalizationKind finalization = ClassFinalizer::kCanonicalize;
if (pending_recursive_types_ != nullptr && HasNonCanonicalTypes(Z, func)) {
// This function type is a part of recursive type. Avoid canonicalization
// as not all TypeRef objects are filled up at this point.
finalization = ClassFinalizer::kFinalize;
}
type =
ClassFinalizer::FinalizeType(*(active_class_->klass), type, finalization);
return Type::Cast(type).raw();
}
@@ -1244,7 +1280,7 @@ RawObject* BytecodeReaderHelper::ReadObjectContents(uint32_t header) {
}
return cls;
}
RawClass* cls = library.LookupClassAllowPrivate(class_name);
RawClass* cls = library.LookupLocalClass(class_name);
NoSafepointScope no_safepoint_scope(thread_);
if (cls == Class::null()) {
FATAL2("Unable to find class %s in %s", class_name.ToCString(),
@@ -1581,10 +1617,7 @@ RawObject* BytecodeReaderHelper::ReadType(intptr_t tag) {
return AbstractType::void_type().raw();
case kSimpleType: {
const Class& cls = Class::CheckedHandle(Z, ReadObject());
if (!cls.is_declaration_loaded()) {
ASSERT(cls.is_declared_in_bytecode());
BytecodeReader::LoadClassDeclaration(cls);
}
cls.EnsureDeclarationLoaded();
return cls.DeclarationType();
}
case kTypeParameter: {
@@ -1615,10 +1648,7 @@ RawObject* BytecodeReaderHelper::ReadType(intptr_t tag) {
}
case kGenericType: {
const Class& cls = Class::CheckedHandle(Z, ReadObject());
if (!cls.is_declaration_loaded()) {
ASSERT(cls.is_declared_in_bytecode());
BytecodeReader::LoadClassDeclaration(cls);
}
cls.EnsureDeclarationLoaded();
const TypeArguments& type_arguments =
TypeArguments::CheckedHandle(Z, ReadObject());
const Type& type = Type::Handle(
@@ -1629,10 +1659,7 @@ RawObject* BytecodeReaderHelper::ReadType(intptr_t tag) {
case kRecursiveGenericType: {
const intptr_t id = reader_.ReadUInt();
const Class& cls = Class::CheckedHandle(Z, ReadObject());
if (!cls.is_declaration_loaded()) {
ASSERT(cls.is_declared_in_bytecode());
BytecodeReader::LoadClassDeclaration(cls);
}
cls.EnsureDeclarationLoaded();
const auto saved_pending_recursive_types = pending_recursive_types_;
if (id == 0) {
pending_recursive_types_ = &GrowableObjectArray::Handle(
@@ -1643,8 +1670,10 @@ RawObject* BytecodeReaderHelper::ReadType(intptr_t tag) {
TypeRef::Handle(Z, TypeRef::New(AbstractType::null_abstract_type()));
pending_recursive_types_->Add(type_ref);
reading_type_arguments_of_recursive_type_ = true;
const TypeArguments& type_arguments =
TypeArguments::CheckedHandle(Z, ReadObject());
reading_type_arguments_of_recursive_type_ = false;
ASSERT(id == pending_recursive_types_->Length() - 1);
ASSERT(pending_recursive_types_->At(id) == type_ref.raw());
@@ -1655,6 +1684,11 @@ RawObject* BytecodeReaderHelper::ReadType(intptr_t tag) {
Z, Type::New(cls, type_arguments, TokenPosition::kNoSource));
type_ref.set_type(type);
type.SetIsFinalized();
if (id != 0) {
// Do not canonicalize non-root recursive types
// as not all TypeRef objects are filled up at this point.
return type.raw();
}
return type.Canonicalize();
}
case kRecursiveTypeRef: {
@@ -1770,6 +1804,8 @@ RawScript* BytecodeReaderHelper::ReadSourceFile(const String& uri,
}
RawTypeArguments* BytecodeReaderHelper::ReadTypeArguments() {
const bool is_recursive = reading_type_arguments_of_recursive_type_;
reading_type_arguments_of_recursive_type_ = false;
const intptr_t length = reader_.ReadUInt();
TypeArguments& type_arguments =
TypeArguments::ZoneHandle(Z, TypeArguments::New(length));
@@ -1778,6 +1814,14 @@ RawTypeArguments* BytecodeReaderHelper::ReadTypeArguments() {
type ^= ReadObject();
type_arguments.SetTypeAt(i, type);
}
if (is_recursive) {
// Avoid canonicalization of type arguments of recursive type
// as not all TypeRef objects are filled up at this point.
// Type arguments will be canonicalized when the root recursive
// type is canonicalized.
ASSERT(pending_recursive_types_ != nullptr);
return type_arguments.raw();
}
return type_arguments.Canonicalize();
}
@@ -2221,13 +2265,17 @@ void BytecodeReaderHelper::ReadClassDeclaration(const Class& cls) {
// Its cid is set in Class::New / Isolate::RegisterClass /
// ClassTable::Register, unless it was loaded for expression evaluation.
ASSERT(cls.is_declared_in_bytecode());
ASSERT(!cls.is_declaration_loaded());
ASSERT(!cls.is_declaration_loaded() || loading_native_wrappers_library_);
const intptr_t flags = reader_.ReadUInt();
const bool has_pragma = (flags & kHasPragmaFlag) != 0;
// Set early to enable access to type_parameters().
cls.set_is_declaration_loaded();
// TODO(alexmarkov): revise early stamping of native wrapper classes
// as loaded.
if (!cls.is_declaration_loaded()) {
cls.set_is_declaration_loaded();
}
const auto& script = Script::CheckedHandle(Z, ReadObject());
cls.set_script(script);
@@ -2294,13 +2342,6 @@ void BytecodeReaderHelper::ReadClassDeclaration(const Class& cls) {
library.AddClassMetadata(cls, top_level_class, TokenPosition::kNoSource,
0, annotations_offset);
if (has_pragma) {
// TODO(alexmarkov): read annotations right away using
// annotations_offset.
NoOOBMessageScope no_msg_scope(thread_);
NoReloadScope no_reload_scope(thread_->isolate(), thread_);
library.GetMetadata(cls);
}
}
}
}
@@ -2310,7 +2351,11 @@ void BytecodeReaderHelper::ReadClassDeclaration(const Class& cls) {
bytecode_component_->GetMembersOffset());
// All types are finalized if loading from bytecode.
cls.set_is_type_finalized();
// TODO(alexmarkov): revise early stamping of native wrapper classes
// as type-finalized.
if (!cls.is_type_finalized()) {
cls.set_is_type_finalized();
}
// TODO(alexmarkov): move this to class finalization.
ClassFinalizer::RegisterClassInHierarchy(Z, cls);
@@ -2379,10 +2424,11 @@ void BytecodeReaderHelper::ReadLibraryDeclaration(const Library& library,
}
} else {
if (lookup_classes) {
cls = library.LookupClassAllowPrivate(name);
cls = library.LookupLocalClass(name);
}
if (lookup_classes && !cls.IsNull()) {
ASSERT(!cls.is_declaration_loaded());
ASSERT(!cls.is_declaration_loaded() ||
loading_native_wrappers_library_);
cls.set_script(script);
} else {
cls = Class::New(library, name, script, TokenPosition::kNoSource,
@@ -2395,6 +2441,13 @@ void BytecodeReaderHelper::ReadLibraryDeclaration(const Library& library,
cls.set_is_declared_in_bytecode(true);
cls.set_bytecode_offset(class_offset);
if (loading_native_wrappers_library_ || !register_class) {
AlternativeReadingScope alt(&reader_, class_offset);
ReadClassDeclaration(cls);
AlternativeReadingScope alt2(&reader_, cls.bytecode_offset());
ReadMembers(cls, /* discard_fields = */ false);
}
}
ASSERT(!library.Loaded());

View file

@@ -225,6 +225,7 @@ class BytecodeReaderHelper : public ValueObject {
Class& scoped_function_class_;
Library* expression_evaluation_library_ = nullptr;
bool loading_native_wrappers_library_ = false;
bool reading_type_arguments_of_recursive_type_ = false;
DISALLOW_COPY_AND_ASSIGN(BytecodeReaderHelper);
};

View file

@@ -5,6 +5,8 @@
#include "include/dart_api.h"
#include "include/dart_native_api.h"
#include <memory>
#include "lib/stacktrace.h"
#include "platform/assert.h"
#include "vm/class_finalizer.h"
@@ -5133,6 +5135,7 @@ DART_EXPORT Dart_Handle Dart_GetType(Dart_Handle library,
return Api::NewError("Type '%s' not found in library '%s'.",
name_str.ToCString(), lib_name.ToCString());
}
cls.EnsureDeclarationLoaded();
CHECK_ERROR_HANDLE(cls.VerifyEntryPoint());
if (cls.NumTypeArguments() == 0) {
if (number_of_type_arguments != 0) {

View file

@@ -32,6 +32,12 @@ constexpr bool kDartPrecompiledRuntime = true;
constexpr bool kDartPrecompiledRuntime = false;
#endif
#if defined(DART_USE_BYTECODE)
constexpr bool kDartUseBytecode = true;
#else
constexpr bool kDartUseBytecode = false;
#endif
// List of all flags in the VM.
// Flags can be one of three categories:
// * P roduct flags: Can be set in any of the deployment modes, including in
@@ -191,7 +197,7 @@ constexpr bool kDartPrecompiledRuntime = false;
D(trace_zones, bool, false, "Traces allocation sizes in the zone.") \
P(truncating_left_shift, bool, true, \
"Optimize left shift to truncate if possible") \
P(use_bytecode_compiler, bool, false, "Compile from bytecode") \
P(use_bytecode_compiler, bool, kDartUseBytecode, "Compile from bytecode") \
P(use_compactor, bool, false, "Compact the heap during old-space GC.") \
P(use_cha_deopt, bool, true, \
"Use class hierarchy analysis even if it can cause deoptimization.") \

View file

@@ -3545,6 +3545,21 @@ RawObject* Class::EvaluateCompiledExpression(
arguments, type_arguments);
}
void Class::EnsureDeclarationLoaded() const {
if (!is_declaration_loaded()) {
#if defined(DART_PRECOMPILED_RUNTIME)
UNREACHABLE();
#else
// Loading of class declaration can be postponed until needed
// if class comes from bytecode.
ASSERT(is_declared_in_bytecode());
kernel::BytecodeReader::LoadClassDeclaration(*this);
ASSERT(is_declaration_loaded());
ASSERT(is_type_finalized());
#endif
}
}
// Ensure that top level parsing of the class has been done.
RawError* Class::EnsureIsFinalized(Thread* thread) const {
// Finalized classes have already been parsed.
@@ -7749,7 +7764,8 @@ RawExternalTypedData* Function::KernelData() const {
intptr_t Function::KernelDataProgramOffset() const {
ASSERT(!is_declared_in_bytecode());
if (IsNoSuchMethodDispatcher() || IsInvokeFieldDispatcher()) {
if (IsNoSuchMethodDispatcher() || IsInvokeFieldDispatcher() ||
IsFfiTrampoline()) {
return 0;
}
Object& data = Object::Handle(raw_ptr()->data_);
@@ -17693,14 +17709,7 @@ bool Type::IsEquivalent(const Instance& other, TrailPtr trail) const {
for (intptr_t i = 0; i < from_index; i++) {
type_arg = type_args.TypeAt(i);
other_type_arg = other_type_args.TypeAt(i);
// Type arguments may not match if they are TypeRefs without
// underlying type (which will be set later).
ASSERT(
type_arg.IsEquivalent(other_type_arg, trail) ||
(type_arg.IsTypeRef() &&
TypeRef::Cast(type_arg).type() == AbstractType::null()) ||
(other_type_arg.IsTypeRef() &&
TypeRef::Cast(other_type_arg).type() == AbstractType::null()));
ASSERT(type_arg.IsEquivalent(other_type_arg, trail));
}
}
#endif
@@ -18159,10 +18168,9 @@ RawAbstractType* TypeRef::Canonicalize(TrailPtr trail) const {
// TODO(regis): Try to reduce the number of nodes required to represent the
// referenced recursive type.
AbstractType& ref_type = AbstractType::Handle(type());
if (!ref_type.IsNull()) {
ref_type = ref_type.Canonicalize(trail);
set_type(ref_type);
}
ASSERT(!ref_type.IsNull());
ref_type = ref_type.Canonicalize(trail);
set_type(ref_type);
return raw();
}
@@ -18188,11 +18196,15 @@ void TypeRef::EnumerateURIs(URIs* uris) const {
}
intptr_t TypeRef::Hash() const {
// Do not calculate the hash of the referenced type to avoid divergence.
// TypeRef can participate in type canonicalization even before referenced
// type is set, so its hash should not rely on referenced type.
const intptr_t kTypeRefHash = 37;
return kTypeRefHash;
// Do not use hash of the referenced type because
// - we could be in process of calculating it (as TypeRef is used to
// represent recursive references to types).
// - referenced type might be incomplete (e.g. not all its
// type arguments are set).
const AbstractType& ref_type = AbstractType::Handle(type());
ASSERT(!ref_type.IsNull());
const uint32_t result = Class::Handle(ref_type.type_class()).id();
return FinalizeHash(result, kHashBits);
}
RawTypeRef* TypeRef::New() {

View file

@@ -1309,6 +1309,10 @@ class Class : public Object {
const Array& param_values,
const TypeArguments& type_param_values) const;
// Load class declaration (super type, interfaces, type parameters and
// number of type arguments) if it is not loaded yet.
void EnsureDeclarationLoaded() const;
RawError* EnsureIsFinalized(Thread* thread) const;
// Allocate a class used for VM internal objects.

View file

@@ -2219,6 +2219,13 @@ static void HandleOSRRequest(Thread* thread) {
DEFINE_RUNTIME_ENTRY(StackOverflow, 0) {
#if defined(USING_SIMULATOR)
uword stack_pos = Simulator::Current()->get_sp();
// If simulator was never called (for example, in pure
// interpreted mode) it may return 0 as a value of SPREG.
if (stack_pos == 0) {
// Use any reasonable value which would not be treated
// as stack overflow.
stack_pos = thread->saved_stack_limit();
}
#else
uword stack_pos = OSThread::GetCurrentStackPointer();
#endif
@@ -2296,7 +2303,8 @@ DEFINE_RUNTIME_ENTRY(OptimizeInvokedFunction, 1) {
// If running with interpreter, do the unoptimized compilation first.
const bool optimizing_compilation = function.ShouldCompilerOptimize();
ASSERT(FLAG_enable_interpreter || optimizing_compilation);
ASSERT((!optimizing_compilation) || function.HasCode());
ASSERT((!optimizing_compilation) || function.HasCode() ||
function.ForceOptimize());
#if defined(PRODUCT)
if (!optimizing_compilation ||

View file

@@ -23,7 +23,8 @@ void verify(String exePath, {String altPath}) {
env['PATH'] = altPath;
}
var processResult = Process.runSync(exePath, [scriptPath],
var processResult = Process.runSync(
exePath, [...Platform.executableArguments, scriptPath],
includeParentEnvironment: false, runInShell: true, environment: env);
if (processResult.exitCode != 0) {