[vm] Represent tagged pointers as C++ value types instead of C++ pointer types.

This works around bugs in UndefinedBehaviorSanitizer and Clang.
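For illustration only, a minimal sketch of the idea: the tagged pointer is carried as a small value class wrapping a word, so tag tests and comparisons are plain integer operations and the sanitizer never sees a Smi-tagged value masquerading as a (possibly null or misaligned) C++ object pointer. The name ObjectPtr matches the patch below, but UntaggedObject, kHeapObjectTag, the low-bit tag scheme, and the uword alias are assumptions of this sketch, not the SDK's actual declarations.

#include <cstdint>

using uword = uintptr_t;   // stand-in for the VM's machine-word typedef (assumption)

class UntaggedObject;      // stand-in for the heap-object layout class (assumption)

class ObjectPtr {          // value type: holds the tagged word itself
 public:
  explicit constexpr ObjectPtr(uword tagged) : tagged_(tagged) {}

  static constexpr uword kHeapObjectTag = 1;  // assumed: heap objects set the low bit

  // Tag tests and equality are integer arithmetic on the wrapped word.
  bool IsHeapObject() const { return (tagged_ & kHeapObjectTag) != 0; }
  bool IsSmi() const { return !IsHeapObject(); }
  bool operator==(ObjectPtr other) const { return tagged_ == other.tagged_; }
  bool operator!=(ObjectPtr other) const { return tagged_ != other.tagged_; }

  // Only strip the tag (and form a real C++ pointer) when the caller knows
  // this refers to a heap object.
  UntaggedObject* untag() const {
    return reinterpret_cast<UntaggedObject*>(tagged_ - kHeapObjectTag);
  }

 private:
  uword tagged_;  // previously: a RawObject* carrying the same tagged bits
};

Under the old pointer-typed representation, tagged words stored in RawObject* routinely look like null or misaligned C++ pointers, which is presumably why the build previously had to pass -fno-sanitize=null,alignment (the exclusion dropped in the first hunk below).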

Bug: b/28638298
Change-Id: I6be595f9664516019d28017d24559583a1ae3a21
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/144354
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
Author: Ryan Macnak <rmacnak@google.com>, 2020-04-25 05:21:27 +00:00
Committed by: commit-bot@chromium.org
parent 2e438d1baf
commit 6fe15f6df9
299 changed files with 9681 additions and 9573 deletions


@ -118,10 +118,7 @@ config("compiler") {
ldflags += [ "-fsanitize=thread" ]
}
if (is_ubsan) {
cflags += [
"-fsanitize=undefined",
"-fno-sanitize=null,alignment",
]
cflags += [ "-fsanitize=undefined" ]
ldflags += [ "-fsanitize=undefined" ]
}
}


@ -67,7 +67,7 @@ DEFINE_NATIVE_ENTRY(Double_div, 0, 2) {
return Double::New(left / right);
}
static RawInteger* DoubleToInteger(double val, const char* error_msg) {
static IntegerPtr DoubleToInteger(double val, const char* error_msg) {
if (isinf(val) || isnan(val)) {
const Array& args = Array::Handle(Array::New(1));
args.SetAt(0, String::Handle(String::New(error_msg)));


@ -13,7 +13,7 @@ namespace dart {
// Scan the stack until we hit the first function in the _AssertionError
// class. We then return the next frame's script taking inlining into account.
static RawScript* FindScript(DartFrameIterator* iterator) {
static ScriptPtr FindScript(DartFrameIterator* iterator) {
#if defined(DART_PRECOMPILED_RUNTIME)
// The precompiled runtime faces two issues in recovering the correct
// assertion text. First, the precompiled runtime does not include


@ -103,10 +103,10 @@ DEFINE_NATIVE_ENTRY(Ffi_address, 0, 1) {
return Integer::New(pointer.NativeAddress());
}
static RawObject* LoadValueNumeric(Zone* zone,
const Pointer& target,
classid_t type_cid,
const Integer& offset) {
static ObjectPtr LoadValueNumeric(Zone* zone,
const Pointer& target,
classid_t type_cid,
const Integer& offset) {
// TODO(36370): Make representation consistent with kUnboxedFfiIntPtr.
const size_t address =
target.NativeAddress() + static_cast<intptr_t>(offset.AsInt64Value());
@ -164,9 +164,9 @@ DEFINE_NATIVE_ENTRY(Ffi_loadPointer, 1, 2) {
return Pointer::New(type_arg, *reinterpret_cast<uword*>(address));
}
static RawObject* LoadValueStruct(Zone* zone,
const Pointer& target,
const AbstractType& instance_type_arg) {
static ObjectPtr LoadValueStruct(Zone* zone,
const Pointer& target,
const AbstractType& instance_type_arg) {
// Result is a struct class -- find <class name>.#fromPointer
// constructor and call it.
const Class& cls = Class::Handle(zone, instance_type_arg.type_class());


@ -51,8 +51,8 @@ DEFINE_NATIVE_ENTRY(Closure_equals, 0, 2) {
const Context& context_a = Context::Handle(zone, receiver.context());
const Context& context_b =
Context::Handle(zone, other_closure.context());
RawObject* receiver_a = context_a.At(0);
RawObject* receiver_b = context_b.At(0);
ObjectPtr receiver_a = context_a.At(0);
ObjectPtr receiver_b = context_b.At(0);
if ((receiver_a == receiver_b) &&
((func_a.raw() == func_b.raw()) ||
((func_a.name() == func_b.name()) &&


@ -158,7 +158,7 @@ DEFINE_NATIVE_ENTRY(Integer_equalToInteger, 0, 2) {
return Bool::Get(left.CompareWith(right) == 0).raw();
}
static RawInteger* ParseInteger(const String& value) {
static IntegerPtr ParseInteger(const String& value) {
// Used by both Integer_parse and Integer_fromEnvironment.
if (value.IsOneByteString()) {
// Quick conversion for unpadded integers in strings.
@ -202,9 +202,9 @@ DEFINE_NATIVE_ENTRY(Integer_fromEnvironment, 0, 3) {
return default_value.raw();
}
static RawInteger* ShiftOperationHelper(Token::Kind kind,
const Integer& value,
const Integer& amount) {
static IntegerPtr ShiftOperationHelper(Token::Kind kind,
const Integer& value,
const Integer& amount) {
if (amount.AsInt64Value() < 0) {
Exceptions::ThrowArgumentError(amount);
}


@ -109,36 +109,34 @@ DEFINE_NATIVE_ENTRY(SendPortImpl_sendInternal_, 0, 2) {
return Object::null();
}
class RawObjectPtrSetTraits {
class ObjectPtrSetTraitsLayout {
public:
static bool ReportStats() { return false; }
static const char* Name() { return "RawObjectPtrSetTraits"; }
static bool IsMatch(const RawObject* a, const RawObject* b) { return a == b; }
static bool IsMatch(const ObjectPtr a, const ObjectPtr b) { return a == b; }
static uword Hash(const RawObject* obj) {
return reinterpret_cast<uword>(obj);
}
static uword Hash(const ObjectPtr obj) { return static_cast<uword>(obj); }
};
static RawObject* ValidateMessageObject(Zone* zone,
Isolate* isolate,
const Object& obj) {
static ObjectPtr ValidateMessageObject(Zone* zone,
Isolate* isolate,
const Object& obj) {
TIMELINE_DURATION(Thread::Current(), Isolate, "ValidateMessageObject");
class SendMessageValidator : public ObjectPointerVisitor {
public:
SendMessageValidator(IsolateGroup* isolate_group,
WeakTable* visited,
MallocGrowableArray<RawObject*>* const working_set)
MallocGrowableArray<ObjectPtr>* const working_set)
: ObjectPointerVisitor(isolate_group),
visited_(visited),
working_set_(working_set) {}
private:
void VisitPointers(RawObject** from, RawObject** to) {
for (RawObject** raw = from; raw <= to; raw++) {
if (!(*raw)->IsHeapObject() || (*raw)->IsCanonical()) {
void VisitPointers(ObjectPtr* from, ObjectPtr* to) {
for (ObjectPtr* raw = from; raw <= to; raw++) {
if (!(*raw)->IsHeapObject() || (*raw)->ptr()->IsCanonical()) {
continue;
}
if (visited_->GetValueExclusive(*raw) == 1) {
@ -150,9 +148,9 @@ static RawObject* ValidateMessageObject(Zone* zone,
}
WeakTable* visited_;
MallocGrowableArray<RawObject*>* const working_set_;
MallocGrowableArray<ObjectPtr>* const working_set_;
};
if (!obj.raw()->IsHeapObject() || obj.raw()->IsCanonical()) {
if (!obj.raw()->IsHeapObject() || obj.raw()->ptr()->IsCanonical()) {
return obj.raw();
}
ClassTable* class_table = isolate->class_table();
@ -160,7 +158,7 @@ static RawObject* ValidateMessageObject(Zone* zone,
Class& klass = Class::Handle(zone);
Closure& closure = Closure::Handle(zone);
MallocGrowableArray<RawObject*> working_set;
MallocGrowableArray<ObjectPtr> working_set;
std::unique_ptr<WeakTable> visited(new WeakTable());
NoSafepointScope no_safepoint;
@ -170,7 +168,7 @@ static RawObject* ValidateMessageObject(Zone* zone,
working_set.Add(obj.raw());
while (!working_set.is_empty()) {
RawObject* raw = working_set.RemoveLast();
ObjectPtr raw = working_set.RemoveLast();
if (visited->GetValueExclusive(raw) > 0) {
continue;
@ -196,7 +194,7 @@ static RawObject* ValidateMessageObject(Zone* zone,
case kClosureCid: {
closure = Closure::RawCast(raw);
RawFunction* func = closure.function();
FunctionPtr func = closure.function();
// We only allow closure of top level methods or static functions in a
// class to be sent in isolate messages.
if (!Function::IsImplicitStaticClosureFunction(func)) {
@ -215,7 +213,7 @@ static RawObject* ValidateMessageObject(Zone* zone,
}
}
}
raw->VisitPointers(&visitor);
raw->ptr()->VisitPointers(&visitor);
}
isolate->set_forward_table_new(nullptr);
return obj.raw();


@ -74,7 +74,7 @@ DEFINE_NATIVE_ENTRY(Math_doublePow, 0, 2) {
}
// Returns the typed-data array store in '_Random._state' field.
static RawTypedData* GetRandomStateArray(const Instance& receiver) {
static TypedDataPtr GetRandomStateArray(const Instance& receiver) {
const Class& random_class = Class::Handle(receiver.clazz());
const Field& state_field =
Field::Handle(random_class.LookupFieldAllowPrivate(Symbols::_state()));
@ -107,7 +107,7 @@ DEFINE_NATIVE_ENTRY(Random_nextState, 0, 1) {
return Object::null();
}
RawTypedData* CreateRandomState(Zone* zone, uint64_t seed) {
TypedDataPtr CreateRandomState(Zone* zone, uint64_t seed) {
const TypedData& result =
TypedData::Handle(zone, TypedData::New(kTypedDataUint32ArrayCid, 2));
result.SetUint32(0, static_cast<uint32_t>(seed));


@ -21,14 +21,14 @@ namespace dart {
#if !defined(DART_PRECOMPILED_RUNTIME)
#define RETURN_OR_PROPAGATE(expr) \
RawObject* result = expr; \
ObjectPtr result = expr; \
if (IsErrorClassId(result->GetClassIdMayBeSmi())) { \
Exceptions::PropagateError(Error::Handle(Error::RawCast(result))); \
} \
return result;
static RawInstance* CreateMirror(const String& mirror_class_name,
const Array& constructor_arguments) {
static InstancePtr CreateMirror(const String& mirror_class_name,
const Array& constructor_arguments) {
const Library& mirrors_lib = Library::Handle(Library::MirrorsLibrary());
const String& constructor_name = Symbols::DotUnder();
@ -90,8 +90,8 @@ static void EnsureConstructorsAreCompiled(const Function& func) {
func.EnsureHasCode();
}
static RawInstance* CreateParameterMirrorList(const Function& func,
const Instance& owner_mirror) {
static InstancePtr CreateParameterMirrorList(const Function& func,
const Instance& owner_mirror) {
HANDLESCOPE(Thread::Current());
const intptr_t implicit_param_count = func.NumImplicitParameters();
const intptr_t non_implicit_param_count =
@ -184,8 +184,8 @@ static RawInstance* CreateParameterMirrorList(const Function& func,
return results.raw();
}
static RawInstance* CreateTypeVariableMirror(const TypeParameter& param,
const Instance& owner_mirror) {
static InstancePtr CreateTypeVariableMirror(const TypeParameter& param,
const Instance& owner_mirror) {
const Array& args = Array::Handle(Array::New(3));
args.SetAt(0, param);
args.SetAt(1, String::Handle(param.name()));
@ -195,7 +195,7 @@ static RawInstance* CreateTypeVariableMirror(const TypeParameter& param,
// We create a list in native code and let Dart code create the type mirror
// object and the ordered map.
static RawInstance* CreateTypeVariableList(const Class& cls) {
static InstancePtr CreateTypeVariableList(const Class& cls) {
const TypeArguments& args = TypeArguments::Handle(cls.type_parameters());
if (args.IsNull()) {
return Object::empty_array().raw();
@ -214,10 +214,10 @@ static RawInstance* CreateTypeVariableList(const Class& cls) {
return result.raw();
}
static RawInstance* CreateTypedefMirror(const Class& cls,
const AbstractType& type,
const Bool& is_declaration,
const Instance& owner_mirror) {
static InstancePtr CreateTypedefMirror(const Class& cls,
const AbstractType& type,
const Bool& is_declaration,
const Instance& owner_mirror) {
const Array& args = Array::Handle(Array::New(6));
args.SetAt(0, MirrorReference::Handle(MirrorReference::New(cls)));
args.SetAt(1, type);
@ -228,7 +228,7 @@ static RawInstance* CreateTypedefMirror(const Class& cls,
return CreateMirror(Symbols::_TypedefMirror(), args);
}
static RawInstance* CreateFunctionTypeMirror(const AbstractType& type) {
static InstancePtr CreateFunctionTypeMirror(const AbstractType& type) {
ASSERT(type.IsFunctionType());
const Class& cls = Class::Handle(Type::Cast(type).type_class());
const Function& func = Function::Handle(Type::Cast(type).signature());
@ -239,9 +239,9 @@ static RawInstance* CreateFunctionTypeMirror(const AbstractType& type) {
return CreateMirror(Symbols::_FunctionTypeMirror(), args);
}
static RawInstance* CreateMethodMirror(const Function& func,
const Instance& owner_mirror,
const AbstractType& instantiator) {
static InstancePtr CreateMethodMirror(const Function& func,
const Instance& owner_mirror,
const AbstractType& instantiator) {
const Array& args = Array::Handle(Array::New(6));
args.SetAt(0, MirrorReference::Handle(MirrorReference::New(func)));
@ -259,7 +259,7 @@ static RawInstance* CreateMethodMirror(const Function& func,
(static_cast<intptr_t>(func.IsGetterFunction()) << Mirrors::kGetter);
kind_flags |=
(static_cast<intptr_t>(func.IsSetterFunction()) << Mirrors::kSetter);
bool is_ctor = (func.kind() == RawFunction::kConstructor);
bool is_ctor = (func.kind() == FunctionLayout::kConstructor);
kind_flags |= (static_cast<intptr_t>(is_ctor) << Mirrors::kConstructor);
kind_flags |= (static_cast<intptr_t>(is_ctor && func.is_const())
<< Mirrors::kConstCtor);
@ -281,8 +281,8 @@ static RawInstance* CreateMethodMirror(const Function& func,
return CreateMirror(Symbols::_MethodMirror(), args);
}
static RawInstance* CreateVariableMirror(const Field& field,
const Instance& owner_mirror) {
static InstancePtr CreateVariableMirror(const Field& field,
const Instance& owner_mirror) {
const MirrorReference& field_ref =
MirrorReference::Handle(MirrorReference::New(field));
@ -301,10 +301,10 @@ static RawInstance* CreateVariableMirror(const Field& field,
return CreateMirror(Symbols::_VariableMirror(), args);
}
static RawInstance* CreateClassMirror(const Class& cls,
const AbstractType& type,
const Bool& is_declaration,
const Instance& owner_mirror) {
static InstancePtr CreateClassMirror(const Class& cls,
const AbstractType& type,
const Bool& is_declaration,
const Instance& owner_mirror) {
if (type.IsTypeRef()) {
AbstractType& ref_type = AbstractType::Handle(TypeRef::Cast(type).type());
ASSERT(!ref_type.IsTypeRef());
@ -351,7 +351,7 @@ static bool IsCensoredLibrary(const String& url) {
return false;
}
static RawInstance* CreateLibraryMirror(Thread* thread, const Library& lib) {
static InstancePtr CreateLibraryMirror(Thread* thread, const Library& lib) {
Zone* zone = thread->zone();
ASSERT(!lib.IsNull());
const Array& args = Array::Handle(zone, Array::New(3));
@ -368,24 +368,24 @@ static RawInstance* CreateLibraryMirror(Thread* thread, const Library& lib) {
return CreateMirror(Symbols::_LibraryMirror(), args);
}
static RawInstance* CreateCombinatorMirror(const Object& identifiers,
bool is_show) {
static InstancePtr CreateCombinatorMirror(const Object& identifiers,
bool is_show) {
const Array& args = Array::Handle(Array::New(2));
args.SetAt(0, identifiers);
args.SetAt(1, Bool::Get(is_show));
return CreateMirror(Symbols::_CombinatorMirror(), args);
}
static RawInstance* CreateLibraryDependencyMirror(Thread* thread,
const Instance& importer,
const Library& importee,
const Array& show_names,
const Array& hide_names,
const Object& metadata,
const LibraryPrefix& prefix,
const String& prefix_name,
const bool is_import,
const bool is_deferred) {
static InstancePtr CreateLibraryDependencyMirror(Thread* thread,
const Instance& importer,
const Library& importee,
const Array& show_names,
const Array& hide_names,
const Object& metadata,
const LibraryPrefix& prefix,
const String& prefix_name,
const bool is_import,
const bool is_deferred) {
const Instance& importee_mirror =
Instance::Handle(CreateLibraryMirror(thread, importee));
if (importee_mirror.IsNull()) {
@ -427,12 +427,12 @@ static RawInstance* CreateLibraryDependencyMirror(Thread* thread,
return CreateMirror(Symbols::_LibraryDependencyMirror(), args);
}
static RawInstance* CreateLibraryDependencyMirror(Thread* thread,
const Instance& importer,
const Namespace& ns,
const LibraryPrefix& prefix,
const bool is_import,
const bool is_deferred) {
static InstancePtr CreateLibraryDependencyMirror(Thread* thread,
const Instance& importer,
const Namespace& ns,
const LibraryPrefix& prefix,
const bool is_import,
const bool is_deferred) {
const Library& importee = Library::Handle(ns.library());
const Array& show_names = Array::Handle(ns.show_names());
const Array& hide_names = Array::Handle(ns.hide_names());
@ -453,7 +453,7 @@ static RawInstance* CreateLibraryDependencyMirror(Thread* thread,
prefix_name, is_import, is_deferred);
}
static RawGrowableObjectArray* CreateBytecodeLibraryDependencies(
static GrowableObjectArrayPtr CreateBytecodeLibraryDependencies(
Thread* thread,
const Library& lib,
const Instance& lib_mirror) {
@ -597,7 +597,7 @@ DEFINE_NATIVE_ENTRY(LibraryMirror_libraryDependencies, 0, 2) {
return deps.raw();
}
static RawInstance* CreateTypeMirror(const AbstractType& type) {
static InstancePtr CreateTypeMirror(const AbstractType& type) {
if (type.IsTypeRef()) {
AbstractType& ref_type = AbstractType::Handle(TypeRef::Cast(type).type());
ASSERT(!ref_type.IsTypeRef());
@ -652,7 +652,7 @@ static RawInstance* CreateTypeMirror(const AbstractType& type) {
return Instance::null();
}
static RawInstance* CreateIsolateMirror() {
static InstancePtr CreateIsolateMirror() {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
const String& debug_name = String::Handle(String::New(isolate->name()));
@ -697,8 +697,8 @@ static void VerifyMethodKindShifts() {
#endif
}
static RawAbstractType* InstantiateType(const AbstractType& type,
const AbstractType& instantiator) {
static AbstractTypePtr InstantiateType(const AbstractType& type,
const AbstractType& instantiator) {
// Generic function type parameters are not reified, but mapped to dynamic,
// i.e. all function type parameters are free with a null vector.
ASSERT(type.IsFinalized());
@ -1112,9 +1112,9 @@ DEFINE_NATIVE_ENTRY(ClassMirror_members, 0, 3) {
for (intptr_t i = 0; i < num_functions; i++) {
func ^= functions.At(i);
if (func.is_reflectable() &&
(func.kind() == RawFunction::kRegularFunction ||
func.kind() == RawFunction::kGetterFunction ||
func.kind() == RawFunction::kSetterFunction)) {
(func.kind() == FunctionLayout::kRegularFunction ||
func.kind() == FunctionLayout::kGetterFunction ||
func.kind() == FunctionLayout::kSetterFunction)) {
member_mirror =
CreateMethodMirror(func, owner_mirror, owner_instantiator);
member_mirrors.Add(member_mirror);
@ -1147,7 +1147,7 @@ DEFINE_NATIVE_ENTRY(ClassMirror_constructors, 0, 3) {
Function& func = Function::Handle();
for (intptr_t i = 0; i < num_functions; i++) {
func ^= functions.At(i);
if (func.is_reflectable() && func.kind() == RawFunction::kConstructor) {
if (func.is_reflectable() && func.kind() == FunctionLayout::kConstructor) {
constructor_mirror =
CreateMethodMirror(func, owner_mirror, owner_instantiator);
constructor_mirrors.Add(constructor_mirror);
@ -1200,9 +1200,9 @@ DEFINE_NATIVE_ENTRY(LibraryMirror_members, 0, 2) {
} else if (entry.IsFunction()) {
const Function& func = Function::Cast(entry);
if (func.is_reflectable() &&
(func.kind() == RawFunction::kRegularFunction ||
func.kind() == RawFunction::kGetterFunction ||
func.kind() == RawFunction::kSetterFunction)) {
(func.kind() == FunctionLayout::kRegularFunction ||
func.kind() == FunctionLayout::kGetterFunction ||
func.kind() == FunctionLayout::kSetterFunction)) {
member_mirror =
CreateMethodMirror(func, owner_mirror, AbstractType::Handle());
member_mirrors.Add(member_mirror);
@ -1443,7 +1443,7 @@ DEFINE_NATIVE_ENTRY(ClassMirror_invokeConstructor, 0, 5) {
Function::Handle(klass.LookupFunction(internal_constructor_name));
if (lookup_constructor.IsNull() ||
(lookup_constructor.kind() != RawFunction::kConstructor) ||
(lookup_constructor.kind() != FunctionLayout::kConstructor) ||
!lookup_constructor.is_reflectable()) {
ThrowNoSuchMethod(AbstractType::Handle(klass.RareType()),
external_constructor_name, explicit_args, arg_names,
@ -1640,9 +1640,9 @@ DEFINE_NATIVE_ENTRY(MethodMirror_source, 0, 1) {
return func.GetSource();
}
static RawInstance* CreateSourceLocation(const String& uri,
intptr_t line,
intptr_t column) {
static InstancePtr CreateSourceLocation(const String& uri,
intptr_t line,
intptr_t column) {
const Array& args = Array::Handle(Array::New(3));
args.SetAt(0, uri);
args.SetAt(1, Smi::Handle(Smi::New(line)));


@ -113,9 +113,9 @@ DEFINE_NATIVE_ENTRY(RegExp_getGroupNameMap, 0, 1) {
return Object::null();
}
static RawObject* ExecuteMatch(Zone* zone,
NativeArguments* arguments,
bool sticky) {
static ObjectPtr ExecuteMatch(Zone* zone,
NativeArguments* arguments,
bool sticky) {
const RegExp& regexp = RegExp::CheckedHandle(zone, arguments->NativeArgAt(0));
ASSERT(!regexp.IsNull());
GET_NON_NULL_NATIVE_ARGUMENT(String, subject, arguments->NativeArgAt(1));


@ -18,8 +18,8 @@ DECLARE_FLAG(bool, show_invisible_frames);
static const intptr_t kDefaultStackAllocation = 8;
static RawStackTrace* CurrentSyncStackTraceLazy(Thread* thread,
intptr_t skip_frames = 1) {
static StackTracePtr CurrentSyncStackTraceLazy(Thread* thread,
intptr_t skip_frames = 1) {
Zone* zone = thread->zone();
const auto& code_array = GrowableObjectArray::ZoneHandle(
@ -39,8 +39,8 @@ static RawStackTrace* CurrentSyncStackTraceLazy(Thread* thread,
return StackTrace::New(code_array_fixed, pc_offset_array_fixed);
}
static RawStackTrace* CurrentSyncStackTrace(Thread* thread,
intptr_t skip_frames = 1) {
static StackTracePtr CurrentSyncStackTrace(Thread* thread,
intptr_t skip_frames = 1) {
Zone* zone = thread->zone();
const Function& null_function = Function::ZoneHandle(zone);
@ -63,7 +63,7 @@ static RawStackTrace* CurrentSyncStackTrace(Thread* thread,
return StackTrace::New(code_array, pc_offset_array);
}
static RawStackTrace* CurrentStackTrace(
static StackTracePtr CurrentStackTrace(
Thread* thread,
bool for_async_function,
intptr_t skip_frames = 1,
@ -128,7 +128,7 @@ static RawStackTrace* CurrentStackTrace(
return result.raw();
}
RawStackTrace* GetStackTraceForException() {
StackTracePtr GetStackTraceForException() {
Thread* thread = Thread::Current();
return CurrentStackTrace(thread, false, 0);
}


@ -5,10 +5,11 @@
#ifndef RUNTIME_LIB_STACKTRACE_H_
#define RUNTIME_LIB_STACKTRACE_H_
#include "vm/tagged_pointer.h"
namespace dart {
class StackTrace;
class RawStackTrace;
// Creates a StackTrace object from the current stack. Skips the
// first skip_frames Dart frames.
@ -18,7 +19,7 @@ class RawStackTrace;
const StackTrace& GetCurrentStackTrace(int skip_frames);
// Creates a StackTrace object to be attached to an exception.
RawStackTrace* GetStackTraceForException();
StackTracePtr GetStackTraceForException();
} // namespace dart


@ -362,8 +362,7 @@ DEFINE_NATIVE_ENTRY(OneByteString_allocateFromOneByteList, 0, 3) {
}
String& string = String::Handle(OneByteString::New(length, space));
for (int i = 0; i < length; i++) {
intptr_t value =
Smi::Value(reinterpret_cast<RawSmi*>(array.At(start + i)));
intptr_t value = Smi::Value(static_cast<SmiPtr>(array.At(start + i)));
OneByteString::SetCharAt(string, i, value);
}
return string.raw();
@ -376,8 +375,7 @@ DEFINE_NATIVE_ENTRY(OneByteString_allocateFromOneByteList, 0, 3) {
}
String& string = String::Handle(OneByteString::New(length, space));
for (int i = 0; i < length; i++) {
intptr_t value =
Smi::Value(reinterpret_cast<RawSmi*>(array.At(start + i)));
intptr_t value = Smi::Value(static_cast<SmiPtr>(array.At(start + i)));
OneByteString::SetCharAt(string, i, value);
}
return string.raw();
@ -458,8 +456,7 @@ DEFINE_NATIVE_ENTRY(TwoByteString_allocateFromTwoByteList, 0, 3) {
const String& string =
String::Handle(zone, TwoByteString::New(length, space));
for (int i = 0; i < length; i++) {
intptr_t value =
Smi::Value(reinterpret_cast<RawSmi*>(array.At(start + i)));
intptr_t value = Smi::Value(static_cast<SmiPtr>(array.At(start + i)));
TwoByteString::SetCharAt(string, i, value);
}
return string.raw();
@ -471,8 +468,7 @@ DEFINE_NATIVE_ENTRY(TwoByteString_allocateFromTwoByteList, 0, 3) {
const String& string =
String::Handle(zone, TwoByteString::New(length, space));
for (int i = 0; i < length; i++) {
intptr_t value =
Smi::Value(reinterpret_cast<RawSmi*>(array.At(start + i)));
intptr_t value = Smi::Value(static_cast<SmiPtr>(array.At(start + i)));
TwoByteString::SetCharAt(string, i, value);
}
return string.raw();


@ -85,12 +85,12 @@ DEFINE_NATIVE_ENTRY(TypedDataView_typedData, 0, 1) {
}
template <typename DstType, typename SrcType>
static RawBool* CopyData(const Instance& dst,
const Instance& src,
const Smi& dst_start,
const Smi& src_start,
const Smi& length,
bool clamped) {
static BoolPtr CopyData(const Instance& dst,
const Instance& src,
const Smi& dst_start,
const Smi& src_start,
const Smi& length,
bool clamped) {
const DstType& dst_array = DstType::Cast(dst);
const SrcType& src_array = SrcType::Cast(src);
const intptr_t dst_offset_in_bytes = dst_start.Value();


@ -133,7 +133,7 @@ static bool ToWasmValueTag(classid_t type, wasmer_value_tag* out) {
}
}
static RawObject* ToDartObject(wasmer_value_t ret) {
static ObjectPtr ToDartObject(wasmer_value_t ret) {
switch (ret.tag) {
case wasmer_value_tag::WASM_I32:
return Integer::New(ret.value.I32);
@ -165,7 +165,7 @@ static Dart_Handle ToDartApiObject(wasmer_value value, wasmer_value_tag type) {
}
}
RawExternalTypedData* WasmMemoryToExternalTypedData(wasmer_memory_t* memory) {
ExternalTypedDataPtr WasmMemoryToExternalTypedData(wasmer_memory_t* memory) {
uint8_t* data = wasmer_memory_data(memory);
uint32_t size = wasmer_memory_data_length(memory);
return ExternalTypedData::New(kExternalTypedDataUint8ArrayCid, data, size);
@ -191,7 +191,7 @@ std::ostream& operator<<(std::ostream& o, const wasmer_import_export_kind& io) {
}
}
RawString* DescribeModule(const wasmer_module_t* module) {
StringPtr DescribeModule(const wasmer_module_t* module) {
std::stringstream desc;
desc << "Imports:\n";


@ -16,7 +16,7 @@ namespace dart {
static const uint32_t kTestPcOffset = 0x4;
static const intptr_t kTestSpillSlotBitCount = 0;
static RawCompressedStackMaps* MapsFromBuilder(BitmapBuilder* bmap) {
static CompressedStackMapsPtr MapsFromBuilder(BitmapBuilder* bmap) {
CompressedStackMapsBuilder builder;
builder.AddEntry(kTestPcOffset, bmap, kTestSpillSlotBitCount);
return builder.Finalize();


@ -90,9 +90,9 @@ static void Finish(Thread* thread) {
ClassFinalizer::LoadClassMembers(cls);
}
static RawError* BootstrapFromKernel(Thread* thread,
const uint8_t* kernel_buffer,
intptr_t kernel_buffer_size) {
static ErrorPtr BootstrapFromKernel(Thread* thread,
const uint8_t* kernel_buffer,
intptr_t kernel_buffer_size) {
Zone* zone = thread->zone();
const char* error = nullptr;
std::unique_ptr<kernel::Program> program = kernel::Program::ReadFromBuffer(
@ -140,8 +140,8 @@ static RawError* BootstrapFromKernel(Thread* thread,
return Error::null();
}
RawError* Bootstrap::DoBootstrapping(const uint8_t* kernel_buffer,
intptr_t kernel_buffer_size) {
ErrorPtr Bootstrap::DoBootstrapping(const uint8_t* kernel_buffer,
intptr_t kernel_buffer_size) {
Thread* thread = Thread::Current();
Isolate* isolate = thread->isolate();
Zone* zone = thread->zone();
@ -167,8 +167,8 @@ RawError* Bootstrap::DoBootstrapping(const uint8_t* kernel_buffer,
return BootstrapFromKernel(thread, kernel_buffer, kernel_buffer_size);
}
#else
RawError* Bootstrap::DoBootstrapping(const uint8_t* kernel_buffer,
intptr_t kernel_buffer_size) {
ErrorPtr Bootstrap::DoBootstrapping(const uint8_t* kernel_buffer,
intptr_t kernel_buffer_size) {
UNREACHABLE();
return Error::null();
}


@ -7,11 +7,11 @@
#include "include/dart_api.h"
#include "vm/allocation.h"
#include "vm/tagged_pointer.h"
namespace dart {
// Forward declarations.
class RawError;
namespace kernel {
class Program;
}
@ -24,8 +24,8 @@ class Bootstrap : public AllStatic {
// bootstrapping.
// The caller of this function is responsible for managing the kernel
// program's memory.
static RawError* DoBootstrapping(const uint8_t* kernel_buffer,
intptr_t kernel_buffer_size);
static ErrorPtr DoBootstrapping(const uint8_t* kernel_buffer,
intptr_t kernel_buffer_size);
static void SetupNativeResolver();
static bool IsBootstrapResolver(Dart_NativeEntryResolver resolver);


@ -488,8 +488,8 @@ class BootstrapNatives : public AllStatic {
static const uint8_t* Symbol(Dart_NativeFunction* nf);
#define DECLARE_BOOTSTRAP_NATIVE(name, ignored) \
static RawObject* DN_##name(Thread* thread, Zone* zone, \
NativeArguments* arguments);
static ObjectPtr DN_##name(Thread* thread, Zone* zone, \
NativeArguments* arguments);
BOOTSTRAP_NATIVE_LIST(DECLARE_BOOTSTRAP_NATIVE)
#if !defined(DART_PRECOMPILED_RUNTIME)


@ -668,10 +668,10 @@ void ClassFinalizer::FinalizeTypeArguments(const Class& cls,
}
}
RawAbstractType* ClassFinalizer::FinalizeType(const Class& cls,
const AbstractType& type,
FinalizationKind finalization,
PendingTypes* pending_types) {
AbstractTypePtr ClassFinalizer::FinalizeType(const Class& cls,
const AbstractType& type,
FinalizationKind finalization,
PendingTypes* pending_types) {
// Only the 'root' type of the graph can be canonicalized, after all depending
// types have been bound checked.
ASSERT((pending_types == NULL) || (finalization < kCanonicalize));
@ -1186,7 +1186,7 @@ void ClassFinalizer::FinalizeClass(const Class& cls) {
}
}
RawError* ClassFinalizer::LoadClassMembers(const Class& cls) {
ErrorPtr ClassFinalizer::LoadClassMembers(const Class& cls) {
ASSERT(Thread::Current()->IsMutatorThread());
LongJumpScope jump;
if (setjmp(*jump.Set()) == 0) {
@ -1474,21 +1474,21 @@ class CidRewriteVisitor : public ObjectVisitor {
return old_to_new_cids_[cid];
}
void VisitObject(RawObject* obj) {
void VisitObject(ObjectPtr obj) {
if (obj->IsClass()) {
RawClass* cls = Class::RawCast(obj);
ClassPtr cls = Class::RawCast(obj);
cls->ptr()->id_ = Map(cls->ptr()->id_);
} else if (obj->IsField()) {
RawField* field = Field::RawCast(obj);
FieldPtr field = Field::RawCast(obj);
field->ptr()->guarded_cid_ = Map(field->ptr()->guarded_cid_);
field->ptr()->is_nullable_ = Map(field->ptr()->is_nullable_);
} else if (obj->IsTypeParameter()) {
RawTypeParameter* param = TypeParameter::RawCast(obj);
TypeParameterPtr param = TypeParameter::RawCast(obj);
param->ptr()->parameterized_class_id_ =
Map(param->ptr()->parameterized_class_id_);
} else if (obj->IsType()) {
RawType* type = Type::RawCast(obj);
RawObject* id = type->ptr()->type_class_id_;
TypePtr type = Type::RawCast(obj);
ObjectPtr id = type->ptr()->type_class_id_;
if (!id->IsHeapObject()) {
type->ptr()->type_class_id_ =
Smi::New(Map(Smi::Value(Smi::RawCast(id))));
@ -1499,7 +1499,7 @@ class CidRewriteVisitor : public ObjectVisitor {
if (old_cid != new_cid) {
// Don't touch objects that are unchanged. In particular, Instructions,
// which are write-protected.
obj->SetClassId(new_cid);
obj->ptr()->SetClassId(new_cid);
}
}
}
@ -1558,30 +1558,30 @@ void ClassFinalizer::RemapClassIds(intptr_t* old_to_new_cid) {
// In the Dart VM heap the following instances directly use cids for the
// computation of canonical hash codes:
//
// * RawType (due to RawType::type_class_id_)
// * RawTypeParameter (due to RawTypeParameter::parameterized_class_id_)
// * RawType (due to TypeLayout::type_class_id_)
// * RawTypeParameter (due to TypeParameterLayout::parameterized_class_id_)
//
// The following instances use cids for the computation of canonical hash codes
// indirectly:
//
// * RawTypeRef (due to RawTypeRef::type_->type_class_id)
// * RawType (due to RawType::signature_'s result/parameter types)
// * RawTypeRef (due to TypeRefLayout::type_->type_class_id)
// * RawType (due to TypeLayout::signature_'s result/parameter types)
// * RawTypeArguments (due to type references)
// * RawInstance (due to instance fields)
// * RawArray (due to type arguments & array entries)
//
// Caching of the canonical hash codes happens for:
//
// * RawType::hash_
// * RawTypeParameter::hash_
// * RawTypeArguments::hash_
// * TypeLayout::hash_
// * TypeParameterLayout::hash_
// * TypeArgumentsLayout::hash_
// * RawInstance (weak table)
// * RawArray (weak table)
//
// No caching of canonical hash codes (i.e. it gets re-computed every time)
// happens for:
//
// * RawTypeRef (computed via RawTypeRef::type_->type_class_id)
// * RawTypeRef (computed via TypeRefLayout::type_->type_class_id)
//
// Usages of canonical hash codes are:
//
@ -1596,7 +1596,7 @@ class ClearTypeHashVisitor : public ObjectVisitor {
type_(Type::Handle(zone)),
type_args_(TypeArguments::Handle(zone)) {}
void VisitObject(RawObject* obj) {
void VisitObject(ObjectPtr obj) {
if (obj->IsTypeParameter()) {
type_param_ ^= obj;
type_param_.SetHash(0);


@ -27,7 +27,7 @@ class ClassFinalizer : public AllStatic {
// Finalize given type while parsing class cls.
// Also canonicalize and bound check type if applicable.
static RawAbstractType* FinalizeType(
static AbstractTypePtr FinalizeType(
const Class& cls,
const AbstractType& type,
FinalizationKind finalization = kCanonicalize,
@ -69,7 +69,7 @@ class ClassFinalizer : public AllStatic {
// and fields of the class.
//
// Returns Error::null() if there is no loading error.
static RawError* LoadClassMembers(const Class& cls);
static ErrorPtr LoadClassMembers(const Class& cls);
#if !defined(DART_PRECOMPILED_RUNTIME)
// Verify that the classes have been properly prefinalized. This is


@ -9,7 +9,7 @@
namespace dart {
static RawClass* CreateTestClass(const char* name) {
static ClassPtr CreateTestClass(const char* name) {
const String& class_name =
String::Handle(Symbols::New(Thread::Current(), name));
const Script& script = Script::Handle();


@ -71,7 +71,7 @@ SharedClassTable::~SharedClassTable() {
NOT_IN_PRODUCT(free(trace_allocation_table_.load()));
}
void ClassTable::set_table(RawClass** table) {
void ClassTable::set_table(ClassPtr* table) {
Isolate* isolate = Isolate::Current();
ASSERT(isolate != nullptr);
table_.store(table);
@ -82,7 +82,7 @@ ClassTable::ClassTable(SharedClassTable* shared_class_table)
: top_(kNumPredefinedCids),
capacity_(0),
table_(NULL),
old_class_tables_(new MallocGrowableArray<RawClass**>()),
old_class_tables_(new MallocGrowableArray<ClassPtr*>()),
shared_class_table_(shared_class_table) {
if (Dart::vm_isolate() == NULL) {
ASSERT(kInitialCapacity >= kNumPredefinedCids);
@ -91,14 +91,14 @@ ClassTable::ClassTable(SharedClassTable* shared_class_table)
// Don't use set_table because caller is supposed to set up isolates
// cached copy when constructing ClassTable. Isolate::Current might not
// be available at this point yet.
table_.store(static_cast<RawClass**>(calloc(capacity_, sizeof(RawClass*))));
table_.store(static_cast<ClassPtr*>(calloc(capacity_, sizeof(ClassPtr))));
} else {
// Duplicate the class table from the VM isolate.
ClassTable* vm_class_table = Dart::vm_isolate()->class_table();
capacity_ = vm_class_table->capacity_;
// Note that [calloc] will zero-initialize the memory.
RawClass** table =
static_cast<RawClass**>(calloc(capacity_, sizeof(RawClass*)));
ClassPtr* table =
static_cast<ClassPtr*>(calloc(capacity_, sizeof(ClassPtr)));
// The following cids don't have a corresponding class object in Dart code.
// We therefore need to initialize them eagerly.
for (intptr_t i = kObjectCid; i < kInstanceCid; i++) {
@ -124,7 +124,7 @@ ClassTable::~ClassTable() {
free(table_.load());
}
void ClassTable::AddOldTable(RawClass** old_class_table) {
void ClassTable::AddOldTable(ClassPtr* old_class_table) {
ASSERT(Thread::Current()->IsMutatorThread());
old_class_tables_->Add(old_class_table);
}
@ -224,8 +224,8 @@ void ClassTable::Grow(intptr_t new_capacity) {
ASSERT(new_capacity > capacity_);
auto old_table = table_.load();
auto new_table = static_cast<RawClass**>(
malloc(new_capacity * sizeof(RawClass*))); // NOLINT
auto new_table = static_cast<ClassPtr*>(
malloc(new_capacity * sizeof(ClassPtr))); // NOLINT
intptr_t i;
for (i = 0; i < capacity_; i++) {
// Don't use memmove, which changes this from a relaxed atomic operation
@ -335,9 +335,9 @@ void SharedClassTable::Unregister(intptr_t index) {
void ClassTable::Remap(intptr_t* old_to_new_cid) {
ASSERT(Thread::Current()->IsAtSafepoint());
const intptr_t num_cids = NumCids();
std::unique_ptr<RawClass*[]> cls_by_old_cid(new RawClass*[num_cids]);
std::unique_ptr<ClassPtr[]> cls_by_old_cid(new ClassPtr[num_cids]);
auto* table = table_.load();
memmove(cls_by_old_cid.get(), table, sizeof(RawClass*) * num_cids);
memmove(cls_by_old_cid.get(), table, sizeof(ClassPtr) * num_cids);
for (intptr_t i = 0; i < num_cids; i++) {
table[old_to_new_cid[i]] = cls_by_old_cid[i];
}
@ -372,8 +372,8 @@ void ClassTable::VisitObjectPointers(ObjectPointerVisitor* visitor) {
visitor->set_gc_root_type("class table");
if (top_ != 0) {
auto* table = table_.load();
RawObject** from = reinterpret_cast<RawObject**>(&table[0]);
RawObject** to = reinterpret_cast<RawObject**>(&table[top_ - 1]);
ObjectPtr* from = reinterpret_cast<ObjectPtr*>(&table[0]);
ObjectPtr* to = reinterpret_cast<ObjectPtr*>(&table[top_ - 1]);
visitor->VisitPointers(from, to);
}
visitor->clear_gc_root_type();
@ -413,14 +413,14 @@ void ClassTable::Print() {
continue;
}
cls = At(i);
if (cls.raw() != reinterpret_cast<RawClass*>(0)) {
if (cls.raw() != nullptr) {
name = cls.Name();
OS::PrintErr("%" Pd ": %s\n", i, name.ToCString());
}
}
}
void ClassTable::SetAt(intptr_t index, RawClass* raw_cls) {
void ClassTable::SetAt(intptr_t index, ClassPtr raw_cls) {
// This is called by snapshot reader and class finalizer.
ASSERT(index < capacity_);
const intptr_t size =


@ -15,6 +15,7 @@
#include "vm/class_id.h"
#include "vm/flags.h"
#include "vm/globals.h"
#include "vm/tagged_pointer.h"
namespace dart {
@ -30,7 +31,6 @@ class JSONStream;
template <typename T>
class MallocGrowableArray;
class ObjectPointerVisitor;
class RawClass;
// Wraps a 64-bit integer to represent the bitmap of unboxed fields
// stored in the shared class table.
@ -258,12 +258,12 @@ class ClassTable {
SharedClassTable* shared_class_table() const { return shared_class_table_; }
void CopyBeforeHotReload(RawClass*** copy, intptr_t* copy_num_cids) {
void CopyBeforeHotReload(ClassPtr** copy, intptr_t* copy_num_cids) {
// The [IsolateReloadContext] will need to maintain a copy of the old class
// table until instances have been morphed.
const intptr_t num_cids = NumCids();
const intptr_t bytes = sizeof(RawClass*) * num_cids;
auto class_table = static_cast<RawClass**>(malloc(bytes));
const intptr_t bytes = sizeof(ClassPtr) * num_cids;
auto class_table = static_cast<ClassPtr*>(malloc(bytes));
auto table = table_.load();
for (intptr_t i = 0; i < num_cids; i++) {
// Don't use memmove, which changes this from a relaxed atomic operation
@ -282,7 +282,7 @@ class ClassTable {
// here).
}
void ResetAfterHotReload(RawClass** old_table,
void ResetAfterHotReload(ClassPtr* old_table,
intptr_t num_old_cids,
bool is_rollback) {
// The [IsolateReloadContext] is no longer source-of-truth for GC after we
@ -307,7 +307,7 @@ class ClassTable {
}
// Thread-safe.
RawClass* At(intptr_t index) const {
ClassPtr At(intptr_t index) const {
ASSERT(IsValidIndex(index));
return table_.load()[index];
}
@ -316,7 +316,7 @@ class ClassTable {
return shared_class_table_->SizeAt(index);
}
void SetAt(intptr_t index, RawClass* raw_cls);
void SetAt(intptr_t index, ClassPtr raw_cls);
bool IsValidIndex(intptr_t index) const {
return shared_class_table_->IsValidIndex(index);
@ -357,7 +357,7 @@ class ClassTable {
#ifndef PRODUCT
// Describes layout of heap stats for code generation. See offset_extractor.cc
struct ArrayLayout {
struct ArrayTraits {
static intptr_t elements_start_offset() { return 0; }
static constexpr intptr_t kElementSize = sizeof(uint8_t);
@ -387,20 +387,20 @@ class ClassTable {
static const int kInitialCapacity = SharedClassTable::kInitialCapacity;
static const int kCapacityIncrement = SharedClassTable::kCapacityIncrement;
void AddOldTable(RawClass** old_table);
void AddOldTable(ClassPtr* old_table);
void Grow(intptr_t index);
RawClass** table() { return table_.load(); }
void set_table(RawClass** table);
ClassPtr* table() { return table_.load(); }
void set_table(ClassPtr* table);
intptr_t top_;
intptr_t capacity_;
// Copy-on-write is used for table_, with old copies stored in
// old_class_tables_.
AcqRelAtomic<RawClass**> table_;
MallocGrowableArray<RawClass**>* old_class_tables_;
AcqRelAtomic<ClassPtr*> table_;
MallocGrowableArray<ClassPtr*>* old_class_tables_;
SharedClassTable* shared_class_table_;
DISALLOW_COPY_AND_ASSIGN(ClassTable);

File diff suppressed because it is too large.


@ -57,7 +57,7 @@ class SerializationCluster : public ZoneAllocated {
virtual ~SerializationCluster() {}
// Add [object] to the cluster and push its outgoing references.
virtual void Trace(Serializer* serializer, RawObject* object) = 0;
virtual void Trace(Serializer* serializer, ObjectPtr object) = 0;
// Write the cluster type and information needed to allocate the cluster's
// objects. For fixed sized objects, this is just the object count. For
@ -104,8 +104,8 @@ class DeserializationCluster : public ZoneAllocated {
class SmiObjectIdPair {
public:
SmiObjectIdPair() : smi_(NULL), id_(0) {}
RawSmi* smi_;
SmiObjectIdPair() : smi_(nullptr), id_(0) {}
SmiPtr smi_;
intptr_t id_;
bool operator==(const SmiObjectIdPair& other) const {
@ -115,7 +115,7 @@ class SmiObjectIdPair {
class SmiObjectIdPairTrait {
public:
typedef RawSmi* Key;
typedef SmiPtr Key;
typedef intptr_t Value;
typedef SmiObjectIdPair Pair;
@ -162,7 +162,7 @@ class Serializer : public ThreadStackResource {
void AddVMIsolateBaseObjects();
void AddBaseObject(RawObject* base_object,
void AddBaseObject(ObjectPtr base_object,
const char* type = nullptr,
const char* name = nullptr) {
intptr_t ref = AssignRef(base_object);
@ -181,7 +181,7 @@ class Serializer : public ThreadStackResource {
}
}
intptr_t AssignRef(RawObject* object) {
intptr_t AssignRef(ObjectPtr object) {
ASSERT(IsAllocatedReference(next_ref_index_));
if (object->IsHeapObject()) {
// The object id weak table holds image offsets for Instructions instead
@ -190,7 +190,7 @@ class Serializer : public ThreadStackResource {
heap_->SetObjectId(object, next_ref_index_);
ASSERT(heap_->GetObjectId(object) == next_ref_index_);
} else {
RawSmi* smi = Smi::RawCast(object);
SmiPtr smi = Smi::RawCast(object);
SmiObjectIdPair* existing_pair = smi_ids_.Lookup(smi);
if (existing_pair != NULL) {
ASSERT(existing_pair->id_ == kUnallocatedReference);
@ -205,15 +205,15 @@ class Serializer : public ThreadStackResource {
return next_ref_index_++;
}
void Push(RawObject* object);
void Push(ObjectPtr object);
void AddUntracedRef() { num_written_objects_++; }
void Trace(RawObject* object);
void Trace(ObjectPtr object);
void UnexpectedObject(RawObject* object, const char* message);
void UnexpectedObject(ObjectPtr object, const char* message);
#if defined(SNAPSHOT_BACKTRACE)
RawObject* ParentOf(const Object& object);
ObjectPtr ParentOf(const Object& object);
#endif
SerializationCluster* NewClusterForClass(intptr_t cid);
@ -240,9 +240,7 @@ class Serializer : public ThreadStackResource {
WriteStream* stream() { return &stream_; }
intptr_t bytes_written() { return stream_.bytes_written(); }
void TraceStartWritingObject(const char* type,
RawObject* obj,
RawString* name);
void TraceStartWritingObject(const char* type, ObjectPtr obj, StringPtr name);
void TraceEndWritingObject();
// Writes raw data to the stream (basic type).
@ -263,7 +261,7 @@ class Serializer : public ThreadStackResource {
}
void Align(intptr_t alignment) { stream_.Align(alignment); }
void WriteRootRef(RawObject* object, const char* name = nullptr) {
void WriteRootRef(ObjectPtr object, const char* name = nullptr) {
intptr_t id = WriteRefId(object);
WriteUnsigned(id);
if (profile_writer_ != nullptr) {
@ -271,7 +269,7 @@ class Serializer : public ThreadStackResource {
}
}
void WriteElementRef(RawObject* object, intptr_t index) {
void WriteElementRef(ObjectPtr object, intptr_t index) {
intptr_t id = WriteRefId(object);
WriteUnsigned(id);
if (profile_writer_ != nullptr) {
@ -289,7 +287,7 @@ class Serializer : public ThreadStackResource {
// explicitly connected in the heap, for example an object referenced
// by the global object pool is in reality referenced by the code which
// caused this reference to be added to the global object pool.
void AttributeElementRef(RawObject* object, intptr_t index) {
void AttributeElementRef(ObjectPtr object, intptr_t index) {
intptr_t id = WriteRefId(object);
if (profile_writer_ != nullptr) {
profile_writer_->AttributeReferenceTo(
@ -300,7 +298,7 @@ class Serializer : public ThreadStackResource {
}
}
void WritePropertyRef(RawObject* object, const char* property) {
void WritePropertyRef(ObjectPtr object, const char* property) {
intptr_t id = WriteRefId(object);
WriteUnsigned(id);
if (profile_writer_ != nullptr) {
@ -312,7 +310,7 @@ class Serializer : public ThreadStackResource {
}
}
void WriteOffsetRef(RawObject* object, intptr_t offset) {
void WriteOffsetRef(ObjectPtr object, intptr_t offset) {
intptr_t id = WriteRefId(object);
WriteUnsigned(id);
if (profile_writer_ != nullptr) {
@ -335,20 +333,20 @@ class Serializer : public ThreadStackResource {
}
template <typename T, typename... P>
void WriteFromTo(T* obj, P&&... args) {
RawObject** from = obj->from();
RawObject** to = obj->to_snapshot(kind(), args...);
for (RawObject** p = from; p <= to; p++) {
WriteOffsetRef(*p, (p - reinterpret_cast<RawObject**>(obj->ptr())) *
sizeof(RawObject*));
void WriteFromTo(T obj, P&&... args) {
ObjectPtr* from = obj->ptr()->from();
ObjectPtr* to = obj->ptr()->to_snapshot(kind(), args...);
for (ObjectPtr* p = from; p <= to; p++) {
WriteOffsetRef(*p, (p - reinterpret_cast<ObjectPtr*>(obj->ptr())) *
sizeof(ObjectPtr));
}
}
template <typename T, typename... P>
void PushFromTo(T* obj, P&&... args) {
RawObject** from = obj->from();
RawObject** to = obj->to_snapshot(kind(), args...);
for (RawObject** p = from; p <= to; p++) {
void PushFromTo(T obj, P&&... args) {
ObjectPtr* from = obj->ptr()->from();
ObjectPtr* to = obj->ptr()->to_snapshot(kind(), args...);
for (ObjectPtr* p = from; p <= to; p++) {
Push(*p);
}
}
@ -358,15 +356,15 @@ class Serializer : public ThreadStackResource {
}
void WriteCid(intptr_t cid) {
COMPILE_ASSERT(RawObject::kClassIdTagSize <= 32);
COMPILE_ASSERT(ObjectLayout::kClassIdTagSize <= 32);
Write<int32_t>(cid);
}
void WriteInstructions(RawInstructions* instr,
void WriteInstructions(InstructionsPtr instr,
uint32_t unchecked_offset,
RawCode* code,
CodePtr code,
intptr_t index);
uint32_t GetDataOffset(RawObject* object) const;
uint32_t GetDataOffset(ObjectPtr object) const;
void TraceDataOffset(uint32_t offset);
intptr_t GetDataSize() const;
@ -383,9 +381,9 @@ class Serializer : public ThreadStackResource {
// Returns the reference ID for the object. Fails for objects that have not
// been allocated a reference ID yet, so should be used only after all
// WriteAlloc calls.
intptr_t WriteRefId(RawObject* object) {
intptr_t WriteRefId(ObjectPtr object) {
if (!object->IsHeapObject()) {
RawSmi* smi = Smi::RawCast(object);
SmiPtr smi = Smi::RawCast(object);
auto const id = smi_ids_.Lookup(smi)->id_;
if (IsAllocatedReference(id)) return id;
FATAL("Missing ref");
@ -423,7 +421,7 @@ class Serializer : public ThreadStackResource {
WriteStream stream_;
ImageWriter* image_writer_;
SerializationCluster** clusters_by_cid_;
GrowableArray<RawObject*> stack_;
GrowableArray<ObjectPtr> stack_;
intptr_t num_cids_;
intptr_t num_base_objects_;
intptr_t num_written_objects_;
@ -438,7 +436,7 @@ class Serializer : public ThreadStackResource {
V8SnapshotProfileWriter* profile_writer_ = nullptr;
struct ProfilingObject {
RawObject* object_ = nullptr;
ObjectPtr object_ = nullptr;
intptr_t id_ = 0;
intptr_t stream_start_ = 0;
intptr_t cid_ = -1;
@ -446,7 +444,7 @@ class Serializer : public ThreadStackResource {
OffsetsTable* offsets_table_ = nullptr;
#if defined(SNAPSHOT_BACKTRACE)
RawObject* current_parent_;
ObjectPtr current_parent_;
GrowableArray<Object*> parent_pairs_;
#endif
@ -474,8 +472,8 @@ class Serializer : public ThreadStackResource {
struct SerializerWritingObjectScope {
SerializerWritingObjectScope(Serializer* serializer,
const char* type,
RawObject* object,
RawString* name)
ObjectPtr object,
StringPtr name)
: serializer_(serializer) {
serializer_->TraceStartWritingObject(type, object, name);
}
@ -537,14 +535,14 @@ class Deserializer : public ThreadStackResource {
//
// Returns ApiError::null() on success and an ApiError with an an appropriate
// message otherwise.
RawApiError* VerifyImageAlignment();
ApiErrorPtr VerifyImageAlignment();
void ReadProgramSnapshot(ObjectStore* object_store);
void ReadVMSnapshot();
void AddVMIsolateBaseObjects();
static void InitializeHeader(RawObject* raw,
static void InitializeHeader(ObjectPtr raw,
intptr_t cid,
intptr_t size,
bool is_canonical = false);
@ -568,35 +566,35 @@ class Deserializer : public ThreadStackResource {
void Advance(intptr_t value) { stream_.Advance(value); }
void Align(intptr_t alignment) { stream_.Align(alignment); }
void AddBaseObject(RawObject* base_object) { AssignRef(base_object); }
void AddBaseObject(ObjectPtr base_object) { AssignRef(base_object); }
void AssignRef(RawObject* object) {
void AssignRef(ObjectPtr object) {
ASSERT(next_ref_index_ <= num_objects_);
refs_->ptr()->data()[next_ref_index_] = object;
next_ref_index_++;
}
RawObject* Ref(intptr_t index) const {
ObjectPtr Ref(intptr_t index) const {
ASSERT(index > 0);
ASSERT(index <= num_objects_);
return refs_->ptr()->data()[index];
}
RawObject* ReadRef() { return Ref(ReadUnsigned()); }
ObjectPtr ReadRef() { return Ref(ReadUnsigned()); }
template <typename T, typename... P>
void ReadFromTo(T* obj, P&&... params) {
RawObject** from = obj->from();
RawObject** to_snapshot = obj->to_snapshot(kind(), params...);
RawObject** to = obj->to(params...);
for (RawObject** p = from; p <= to_snapshot; p++) {
void ReadFromTo(T obj, P&&... params) {
ObjectPtr* from = obj->ptr()->from();
ObjectPtr* to_snapshot = obj->ptr()->to_snapshot(kind(), params...);
ObjectPtr* to = obj->ptr()->to(params...);
for (ObjectPtr* p = from; p <= to_snapshot; p++) {
*p = ReadRef();
}
// This is necessary because, unlike Object::Allocate, the clustered
// deserializer allocates object without null-initializing them. Instead,
// each deserialization cluster is responsible for initializing every field,
// ensuring that every field is written to exactly once.
for (RawObject** p = to_snapshot + 1; p <= to; p++) {
for (ObjectPtr* p = to_snapshot + 1; p <= to; p++) {
*p = Object::null();
}
}
@ -606,12 +604,12 @@ class Deserializer : public ThreadStackResource {
}
intptr_t ReadCid() {
COMPILE_ASSERT(RawObject::kClassIdTagSize <= 32);
COMPILE_ASSERT(ObjectLayout::kClassIdTagSize <= 32);
return Read<int32_t>();
}
void ReadInstructions(RawCode* code, intptr_t index, intptr_t start_index);
RawObject* GetObjectAt(uint32_t offset) const;
void ReadInstructions(CodePtr code, intptr_t index, intptr_t start_index);
ObjectPtr GetObjectAt(uint32_t offset) const;
void SkipHeader() { stream_.SetPosition(Snapshot::kHeaderSize); }
@ -652,7 +650,7 @@ class Deserializer : public ThreadStackResource {
intptr_t num_objects_;
intptr_t num_clusters_;
intptr_t code_order_length_ = 0;
RawArray* refs_;
ArrayPtr refs_;
intptr_t next_ref_index_;
DeserializationCluster** clusters_;
FieldTable* field_table_;
@ -723,11 +721,11 @@ class FullSnapshotReader {
Thread* thread);
~FullSnapshotReader() {}
RawApiError* ReadVMSnapshot();
RawApiError* ReadProgramSnapshot();
ApiErrorPtr ReadVMSnapshot();
ApiErrorPtr ReadProgramSnapshot();
private:
RawApiError* ConvertToApiError(char* message);
ApiErrorPtr ConvertToApiError(char* message);
Snapshot::Kind kind_;
Thread* thread_;


@ -11,7 +11,7 @@
namespace dart {
void DescriptorList::AddDescriptor(RawPcDescriptors::Kind kind,
void DescriptorList::AddDescriptor(PcDescriptorsLayout::Kind kind,
intptr_t pc_offset,
intptr_t deopt_id,
TokenPosition token_pos,
@ -20,19 +20,20 @@ void DescriptorList::AddDescriptor(RawPcDescriptors::Kind kind,
// yield index 0 is reserved for normal entry.
RELEASE_ASSERT(yield_index != 0);
ASSERT((kind == RawPcDescriptors::kRuntimeCall) ||
(kind == RawPcDescriptors::kBSSRelocation) ||
(kind == RawPcDescriptors::kOther) ||
(yield_index != RawPcDescriptors::kInvalidYieldIndex) ||
ASSERT((kind == PcDescriptorsLayout::kRuntimeCall) ||
(kind == PcDescriptorsLayout::kBSSRelocation) ||
(kind == PcDescriptorsLayout::kOther) ||
(yield_index != PcDescriptorsLayout::kInvalidYieldIndex) ||
(deopt_id != DeoptId::kNone));
// When precompiling, we only use pc descriptors for exceptions,
// relocations and yield indices.
if (!FLAG_precompiled_mode || try_index != -1 ||
yield_index != RawPcDescriptors::kInvalidYieldIndex ||
kind == RawPcDescriptors::kBSSRelocation) {
yield_index != PcDescriptorsLayout::kInvalidYieldIndex ||
kind == PcDescriptorsLayout::kBSSRelocation) {
const int32_t kind_and_metadata =
RawPcDescriptors::KindAndMetadata::Encode(kind, try_index, yield_index);
PcDescriptorsLayout::KindAndMetadata::Encode(kind, try_index,
yield_index);
PcDescriptors::EncodeInteger(&encoded_data_, kind_and_metadata);
PcDescriptors::EncodeInteger(&encoded_data_, pc_offset - prev_pc_offset);
@ -48,7 +49,7 @@ void DescriptorList::AddDescriptor(RawPcDescriptors::Kind kind,
}
}
RawPcDescriptors* DescriptorList::FinalizePcDescriptors(uword entry_point) {
PcDescriptorsPtr DescriptorList::FinalizePcDescriptors(uword entry_point) {
if (encoded_data_.length() == 0) {
return Object::empty_descriptors().raw();
}
@ -82,7 +83,7 @@ void CompressedStackMapsBuilder::AddEntry(intptr_t pc_offset,
last_pc_offset_ = pc_offset;
}
RawCompressedStackMaps* CompressedStackMapsBuilder::Finalize() const {
CompressedStackMapsPtr CompressedStackMapsBuilder::Finalize() const {
if (encoded_bytes_.length() == 0) return CompressedStackMaps::null();
return CompressedStackMaps::NewInlined(encoded_bytes_);
}
@ -248,7 +249,7 @@ const char* CompressedStackMapsIterator::ToCString() const {
return ToCString(Thread::Current()->zone());
}
RawExceptionHandlers* ExceptionHandlerList::FinalizeExceptionHandlers(
ExceptionHandlersPtr ExceptionHandlerList::FinalizeExceptionHandlers(
uword entry_point) const {
intptr_t num_handlers = Length();
if (num_handlers == 0) {
@ -360,7 +361,7 @@ void CatchEntryMovesMapBuilder::EndMapping() {
}
}
RawTypedData* CatchEntryMovesMapBuilder::FinalizeCatchEntryMovesMap() {
TypedDataPtr CatchEntryMovesMapBuilder::FinalizeCatchEntryMovesMap() {
TypedData& td = TypedData::Handle(TypedData::New(
kTypedDataInt8ArrayCid, stream_.bytes_written(), Heap::kOld));
NoSafepointScope no_safepoint;
@ -518,12 +519,12 @@ void CodeSourceMapBuilder::EndCodeSourceRange(int32_t pc_offset,
BufferAdvancePC(pc_offset - buffered_pc_offset_);
}
void CodeSourceMapBuilder::NoteDescriptor(RawPcDescriptors::Kind kind,
void CodeSourceMapBuilder::NoteDescriptor(PcDescriptorsLayout::Kind kind,
int32_t pc_offset,
TokenPosition pos) {
const uint8_t kCanThrow =
RawPcDescriptors::kIcCall | RawPcDescriptors::kUnoptStaticCall |
RawPcDescriptors::kRuntimeCall | RawPcDescriptors::kOther;
PcDescriptorsLayout::kIcCall | PcDescriptorsLayout::kUnoptStaticCall |
PcDescriptorsLayout::kRuntimeCall | PcDescriptorsLayout::kOther;
if ((kind & kCanThrow) != 0) {
BufferChangePosition(pos);
BufferAdvancePC(pc_offset - buffered_pc_offset_);
@ -552,14 +553,14 @@ intptr_t CodeSourceMapBuilder::GetFunctionId(intptr_t inline_id) {
return inlined_functions_.Length() - 1;
}
RawArray* CodeSourceMapBuilder::InliningIdToFunction() {
ArrayPtr CodeSourceMapBuilder::InliningIdToFunction() {
if (inlined_functions_.Length() == 0) {
return Object::empty_array().raw();
}
return Array::MakeFixedLength(inlined_functions_);
}
RawCodeSourceMap* CodeSourceMapBuilder::Finalize() {
CodeSourceMapPtr CodeSourceMapBuilder::Finalize() {
if (!stack_traces_only_) {
FlushBuffer();
}


@ -26,14 +26,14 @@ class DescriptorList : public ZoneAllocated {
~DescriptorList() {}
void AddDescriptor(RawPcDescriptors::Kind kind,
void AddDescriptor(PcDescriptorsLayout::Kind kind,
intptr_t pc_offset,
intptr_t deopt_id,
TokenPosition token_pos,
intptr_t try_index,
intptr_t yield_index);
RawPcDescriptors* FinalizePcDescriptors(uword entry_point);
PcDescriptorsPtr FinalizePcDescriptors(uword entry_point);
private:
GrowableArray<uint8_t> encoded_data_;
@ -55,7 +55,7 @@ class CompressedStackMapsBuilder : public ZoneAllocated {
BitmapBuilder* bitmap,
intptr_t spill_slot_bit_count);
RawCompressedStackMaps* Finalize() const;
CompressedStackMapsPtr Finalize() const;
private:
intptr_t last_pc_offset_ = 0;
@ -199,7 +199,7 @@ class ExceptionHandlerList : public ZoneAllocated {
return false;
}
RawExceptionHandlers* FinalizeExceptionHandlers(uword entry_point) const;
ExceptionHandlersPtr FinalizeExceptionHandlers(uword entry_point) const;
private:
GrowableArray<struct HandlerDesc> list_;
@ -215,7 +215,7 @@ class CatchEntryMovesMapBuilder : public ZoneAllocated {
void NewMapping(intptr_t pc_offset);
void Append(const CatchEntryMove& move);
void EndMapping();
RawTypedData* FinalizeCatchEntryMovesMap();
TypedDataPtr FinalizeCatchEntryMovesMap();
private:
class TrieNode;
@ -263,13 +263,13 @@ class CodeSourceMapBuilder : public ZoneAllocated {
void StartInliningInterval(int32_t pc_offset, intptr_t inline_id);
void BeginCodeSourceRange(int32_t pc_offset);
void EndCodeSourceRange(int32_t pc_offset, TokenPosition pos);
void NoteDescriptor(RawPcDescriptors::Kind kind,
void NoteDescriptor(PcDescriptorsLayout::Kind kind,
int32_t pc_offset,
TokenPosition pos);
void NoteNullCheck(int32_t pc_offset, TokenPosition pos, intptr_t name_index);
RawArray* InliningIdToFunction();
RawCodeSourceMap* Finalize();
ArrayPtr InliningIdToFunction();
CodeSourceMapPtr Finalize();
private:
intptr_t GetFunctionId(intptr_t inline_id);


@ -96,7 +96,8 @@ TEST_CASE(StackMapGC) {
const PcDescriptors& descriptors =
PcDescriptors::Handle(code.pc_descriptors());
int call_count = 0;
PcDescriptors::Iterator iter(descriptors, RawPcDescriptors::kUnoptStaticCall);
PcDescriptors::Iterator iter(descriptors,
PcDescriptorsLayout::kUnoptStaticCall);
CompressedStackMapsBuilder compressed_maps_builder;
while (iter.MoveNext()) {
compressed_maps_builder.AddEntry(iter.PcOffset(), stack_bitmap, 0);
@ -142,7 +143,7 @@ ISOLATE_UNIT_TEST_CASE(DescriptorList_TokenPositions) {
sizeof(token_positions) / sizeof(token_positions[0]);
for (intptr_t i = 0; i < num_token_positions; i++) {
descriptors->AddDescriptor(RawPcDescriptors::kRuntimeCall, 0, 0,
descriptors->AddDescriptor(PcDescriptorsLayout::kRuntimeCall, 0, 0,
TokenPosition(token_positions[i]), 0, 1);
}
@ -151,7 +152,7 @@ ISOLATE_UNIT_TEST_CASE(DescriptorList_TokenPositions) {
ASSERT(!finalized_descriptors.IsNull());
PcDescriptors::Iterator it(finalized_descriptors,
RawPcDescriptors::kRuntimeCall);
PcDescriptorsLayout::kRuntimeCall);
intptr_t i = 0;
while (it.MoveNext()) {

View file

@ -14,10 +14,6 @@ namespace dart {
// Forward declaration.
class Code;
class ICData;
class RawArray;
class RawCode;
class RawFunction;
class RawObject;
#if defined(TARGET_ARCH_IA32)
// Stack-allocated class to create a scope where the specified region
@ -47,13 +43,13 @@ class CodePatcher : public AllStatic {
// Return the target address of the static call before return_address
// in given code.
static RawCode* GetStaticCallTargetAt(uword return_address, const Code& code);
static CodePtr GetStaticCallTargetAt(uword return_address, const Code& code);
// Get instance call information. Returns the call target and sets the output
// parameter data if non-NULL.
static RawCode* GetInstanceCallAt(uword return_address,
const Code& caller_code,
Object* data);
static CodePtr GetInstanceCallAt(uword return_address,
const Code& caller_code,
Object* data);
// Change the state of an instance call by patching the corresponding object
// pool entries (non-IA32) or instructions (IA32).
@ -69,9 +65,9 @@ class CodePatcher : public AllStatic {
// Return target of an unoptimized static call and its ICData object
// (calls target via a stub).
static RawFunction* GetUnoptimizedStaticCallAt(uword return_address,
const Code& code,
ICData* ic_data);
static FunctionPtr GetUnoptimizedStaticCallAt(uword return_address,
const Code& code,
ICData* ic_data);
static void InsertDeoptimizationCallAt(uword start);
@ -88,14 +84,14 @@ class CodePatcher : public AllStatic {
const Code& caller_code,
const Object& data,
const Code& target);
static RawObject* GetSwitchableCallDataAt(uword return_address,
const Code& caller_code);
static RawCode* GetSwitchableCallTargetAt(uword return_address,
const Code& caller_code);
static ObjectPtr GetSwitchableCallDataAt(uword return_address,
const Code& caller_code);
static CodePtr GetSwitchableCallTargetAt(uword return_address,
const Code& caller_code);
static RawCode* GetNativeCallAt(uword return_address,
const Code& caller_code,
NativeFunction* target);
static CodePtr GetNativeCallAt(uword return_address,
const Code& caller_code,
NativeFunction* target);
static void PatchNativeCallAt(uword return_address,
const Code& caller_code,

View file

@ -12,8 +12,8 @@
namespace dart {
RawCode* CodePatcher::GetStaticCallTargetAt(uword return_address,
const Code& code) {
CodePtr CodePatcher::GetStaticCallTargetAt(uword return_address,
const Code& code) {
ASSERT(code.ContainsInstructionAt(return_address));
CallPattern call(return_address, code);
return call.TargetCode();
@ -31,9 +31,9 @@ void CodePatcher::InsertDeoptimizationCallAt(uword start) {
UNREACHABLE();
}
RawCode* CodePatcher::GetInstanceCallAt(uword return_address,
const Code& caller_code,
Object* data) {
CodePtr CodePatcher::GetInstanceCallAt(uword return_address,
const Code& caller_code,
Object* data) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
ICCallPattern call(return_address, caller_code);
if (data != NULL) {
@ -65,9 +65,9 @@ void CodePatcher::PatchInstanceCallAtWithMutatorsStopped(
call.SetTargetCode(target);
}
RawFunction* CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
const Code& caller_code,
ICData* ic_data_result) {
FunctionPtr CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
const Code& caller_code,
ICData* ic_data_result) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
ICCallPattern static_call(return_address, caller_code);
ICData& ic_data = ICData::Handle();
@ -108,8 +108,8 @@ void CodePatcher::PatchSwitchableCallAtWithMutatorsStopped(
}
}
RawCode* CodePatcher::GetSwitchableCallTargetAt(uword return_address,
const Code& caller_code) {
CodePtr CodePatcher::GetSwitchableCallTargetAt(uword return_address,
const Code& caller_code) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
BareSwitchableCallPattern call(return_address, caller_code);
@ -120,8 +120,8 @@ RawCode* CodePatcher::GetSwitchableCallTargetAt(uword return_address,
}
}
RawObject* CodePatcher::GetSwitchableCallDataAt(uword return_address,
const Code& caller_code) {
ObjectPtr CodePatcher::GetSwitchableCallDataAt(uword return_address,
const Code& caller_code) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
BareSwitchableCallPattern call(return_address, caller_code);
@ -144,9 +144,9 @@ void CodePatcher::PatchNativeCallAt(uword return_address,
});
}
RawCode* CodePatcher::GetNativeCallAt(uword return_address,
const Code& code,
NativeFunction* target) {
CodePtr CodePatcher::GetNativeCallAt(uword return_address,
const Code& code,
NativeFunction* target) {
ASSERT(code.ContainsInstructionAt(return_address));
NativeCallPattern call(return_address, code);
*target = call.native_function();

View file

@ -24,8 +24,8 @@ class PoolPointerCall : public ValueObject {
intptr_t pp_index() const { return index_; }
RawCode* Target() const {
return reinterpret_cast<RawCode*>(object_pool_.ObjectAt(pp_index()));
CodePtr Target() const {
return static_cast<CodePtr>(object_pool_.ObjectAt(pp_index()));
}
void SetTarget(const Code& target) const {
@ -42,8 +42,8 @@ class PoolPointerCall : public ValueObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(PoolPointerCall);
};
RawCode* CodePatcher::GetStaticCallTargetAt(uword return_address,
const Code& code) {
CodePtr CodePatcher::GetStaticCallTargetAt(uword return_address,
const Code& code) {
ASSERT(code.ContainsInstructionAt(return_address));
PoolPointerCall call(return_address, code);
return call.Target();
@ -67,9 +67,9 @@ void CodePatcher::InsertDeoptimizationCallAt(uword start) {
UNREACHABLE();
}
RawCode* CodePatcher::GetInstanceCallAt(uword return_address,
const Code& caller_code,
Object* data) {
CodePtr CodePatcher::GetInstanceCallAt(uword return_address,
const Code& caller_code,
Object* data) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
ICCallPattern call(return_address, caller_code);
if (data != NULL) {
@ -101,9 +101,9 @@ void CodePatcher::PatchInstanceCallAtWithMutatorsStopped(
call.SetTargetCode(target);
}
RawFunction* CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
const Code& code,
ICData* ic_data_result) {
FunctionPtr CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
const Code& code,
ICData* ic_data_result) {
ASSERT(code.ContainsInstructionAt(return_address));
ICCallPattern static_call(return_address, code);
ICData& ic_data = ICData::Handle();
@ -144,8 +144,8 @@ void CodePatcher::PatchSwitchableCallAtWithMutatorsStopped(
}
}
RawCode* CodePatcher::GetSwitchableCallTargetAt(uword return_address,
const Code& caller_code) {
CodePtr CodePatcher::GetSwitchableCallTargetAt(uword return_address,
const Code& caller_code) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
BareSwitchableCallPattern call(return_address, caller_code);
@ -156,8 +156,8 @@ RawCode* CodePatcher::GetSwitchableCallTargetAt(uword return_address,
}
}
RawObject* CodePatcher::GetSwitchableCallDataAt(uword return_address,
const Code& caller_code) {
ObjectPtr CodePatcher::GetSwitchableCallDataAt(uword return_address,
const Code& caller_code) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
BareSwitchableCallPattern call(return_address, caller_code);
@ -180,9 +180,9 @@ void CodePatcher::PatchNativeCallAt(uword return_address,
});
}
RawCode* CodePatcher::GetNativeCallAt(uword return_address,
const Code& caller_code,
NativeFunction* target) {
CodePtr CodePatcher::GetNativeCallAt(uword return_address,
const Code& caller_code,
NativeFunction* target) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
NativeCallPattern call(return_address, caller_code);
*target = call.native_function();
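
The hunks in this file replace reinterpret_cast on C++ pointer types with static_cast on value types, which is the point of this CL: CodePtr and friends are now small value classes wrapping the tagged word rather than RawCode* pointers. A rough sketch of the shape that makes those static_casts legal (illustrative only; the SDK's actual ObjectPtr/CodePtr declarations carry more machinery):

#include <cstdint>

using uword = uintptr_t;

// Illustrative value-type tagged pointer: the tagged address is held by
// value, so converting to and from uword is an ordinary explicit
// conversion instead of a reinterpret_cast on a fake C++ pointer.
class ObjectPtr {
 public:
  ObjectPtr() : tagged_(0) {}
  explicit ObjectPtr(uword tagged) : tagged_(tagged) {}
  explicit operator uword() const { return tagged_; }
  bool operator==(const ObjectPtr& other) const {
    return tagged_ == other.tagged_;
  }

 private:
  uword tagged_;
};

// "Derived" pointers are further value wrappers, convertible from the base
// wrapper, so both static_cast<CodePtr>(pool_entry) and
// static_cast<CodePtr>(imm) are plain value conversions.
class CodePtr : public ObjectPtr {
 public:
  using ObjectPtr::ObjectPtr;
  CodePtr() {}
  explicit CodePtr(ObjectPtr obj) : ObjectPtr(static_cast<uword>(obj)) {}
};

int main() {
  uword imm = 0x10;                            // stand-in for a patched immediate
  CodePtr target = static_cast<CodePtr>(imm);  // value conversion, no pointer UB
  return static_cast<uword>(target) == imm ? 0 : 1;
}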

View file

@ -29,8 +29,8 @@ ASSEMBLER_TEST_GENERATE(IcDataAccess, assembler) {
const String& function_name =
String::Handle(Symbols::New(thread, "callerFunction"));
const Function& function = Function::Handle(Function::New(
function_name, RawFunction::kRegularFunction, true, false, false, false,
false, owner_class, TokenPosition::kNoSource));
function_name, FunctionLayout::kRegularFunction, true, false, false,
false, false, owner_class, TokenPosition::kNoSource));
const String& target_name = String::Handle(String::New("targetFunction"));
const intptr_t kTypeArgsLen = 0;

View file

@ -29,8 +29,8 @@ ASSEMBLER_TEST_GENERATE(IcDataAccess, assembler) {
const String& function_name =
String::Handle(Symbols::New(thread, "callerFunction"));
const Function& function = Function::Handle(Function::New(
function_name, RawFunction::kRegularFunction, true, false, false, false,
false, owner_class, TokenPosition::kNoSource));
function_name, FunctionLayout::kRegularFunction, true, false, false,
false, false, owner_class, TokenPosition::kNoSource));
const String& target_name = String::Handle(String::New("targetFunction"));
const intptr_t kTypeArgsLen = 0;

View file

@ -26,8 +26,8 @@ class UnoptimizedCall : public ValueObject {
ASSERT(IsValid());
}
RawObject* ic_data() const {
return *reinterpret_cast<RawObject**>(start_ + 1);
ObjectPtr ic_data() const {
return *reinterpret_cast<ObjectPtr*>(start_ + 1);
}
static const int kMovInstructionSize = 5;
@ -89,20 +89,20 @@ class InstanceCall : public UnoptimizedCall {
#endif // DEBUG
}
RawObject* data() const { return *reinterpret_cast<RawObject**>(start_ + 1); }
ObjectPtr data() const { return *reinterpret_cast<ObjectPtr*>(start_ + 1); }
void set_data(const Object& data) const {
uword* cache_addr = reinterpret_cast<uword*>(start_ + 1);
uword imm = reinterpret_cast<uword>(data.raw());
uword imm = static_cast<uword>(data.raw());
*cache_addr = imm;
}
RawCode* target() const {
CodePtr target() const {
const uword imm = *reinterpret_cast<uword*>(start_ + 6);
return reinterpret_cast<RawCode*>(imm);
return static_cast<CodePtr>(imm);
}
void set_target(const Code& target) const {
uword* target_addr = reinterpret_cast<uword*>(start_ + 6);
uword imm = reinterpret_cast<uword>(target.raw());
uword imm = static_cast<uword>(target.raw());
*target_addr = imm;
}
@ -142,14 +142,14 @@ class StaticCall : public ValueObject {
return (code_bytes[0] == 0xBF) && (code_bytes[5] == 0xFF);
}
RawCode* target() const {
CodePtr target() const {
const uword imm = *reinterpret_cast<uword*>(start_ + 1);
return reinterpret_cast<RawCode*>(imm);
return static_cast<CodePtr>(imm);
}
void set_target(const Code& target) const {
uword* target_addr = reinterpret_cast<uword*>(start_ + 1);
uword imm = reinterpret_cast<uword>(target.raw());
uword imm = static_cast<uword>(target.raw());
*target_addr = imm;
CPU::FlushICache(start_ + 1, sizeof(imm));
}
@ -169,8 +169,8 @@ class StaticCall : public ValueObject {
DISALLOW_IMPLICIT_CONSTRUCTORS(StaticCall);
};
RawCode* CodePatcher::GetStaticCallTargetAt(uword return_address,
const Code& code) {
CodePtr CodePatcher::GetStaticCallTargetAt(uword return_address,
const Code& code) {
ASSERT(code.ContainsInstructionAt(return_address));
StaticCall call(return_address);
return call.target();
@ -194,9 +194,9 @@ void CodePatcher::InsertDeoptimizationCallAt(uword start) {
UNREACHABLE();
}
RawCode* CodePatcher::GetInstanceCallAt(uword return_address,
const Code& caller_code,
Object* data) {
CodePtr CodePatcher::GetInstanceCallAt(uword return_address,
const Code& caller_code,
Object* data) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
InstanceCall call(return_address);
if (data != NULL) {
@ -232,9 +232,9 @@ void CodePatcher::PatchInstanceCallAtWithMutatorsStopped(
call.set_target(target);
}
RawFunction* CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
const Code& caller_code,
ICData* ic_data_result) {
FunctionPtr CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
const Code& caller_code,
ICData* ic_data_result) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
UnoptimizedStaticCall static_call(return_address);
ICData& ic_data = ICData::Handle();
@ -263,15 +263,15 @@ void CodePatcher::PatchSwitchableCallAtWithMutatorsStopped(
UNREACHABLE();
}
RawCode* CodePatcher::GetSwitchableCallTargetAt(uword return_address,
const Code& caller_code) {
CodePtr CodePatcher::GetSwitchableCallTargetAt(uword return_address,
const Code& caller_code) {
// Switchable instance calls only generated for precompilation.
UNREACHABLE();
return Code::null();
}
RawObject* CodePatcher::GetSwitchableCallDataAt(uword return_address,
const Code& caller_code) {
ObjectPtr CodePatcher::GetSwitchableCallDataAt(uword return_address,
const Code& caller_code) {
// Switchable instance calls only generated for precompilation.
UNREACHABLE();
return Object::null();
@ -284,9 +284,9 @@ void CodePatcher::PatchNativeCallAt(uword return_address,
UNREACHABLE();
}
RawCode* CodePatcher::GetNativeCallAt(uword return_address,
const Code& caller_code,
NativeFunction* target) {
CodePtr CodePatcher::GetNativeCallAt(uword return_address,
const Code& caller_code,
NativeFunction* target) {
UNREACHABLE();
return NULL;
}
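
On IA32 the call patterns above locate their operands as 32-bit immediates embedded directly in the instruction stream (at start_ + 1 and start_ + 6) and overwrite them in place rather than going through an object pool. A schematic sketch of that kind of immediate patching (the offsets are the caller's business here; the real patcher stores a tagged uword and flushes the instruction cache via CPU::FlushICache):

#include <cstdint>
#include <cstring>

// Overwrite the 32-bit immediate located `imm_offset` bytes into a call
// pattern. Schematic only: the VM code validates the pattern bytes first
// and flushes the instruction cache afterwards.
void PatchImmediateAt(uint8_t* pattern_start,
                      int imm_offset,
                      uint32_t new_value) {
  std::memcpy(pattern_start + imm_offset, &new_value, sizeof(new_value));
  // CPU::FlushICache(pattern_start + imm_offset, sizeof(new_value));
}

uint32_t ReadImmediateAt(const uint8_t* pattern_start, int imm_offset) {
  uint32_t value;
  std::memcpy(&value, pattern_start + imm_offset, sizeof(value));
  return value;
}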

View file

@ -29,8 +29,8 @@ ASSEMBLER_TEST_GENERATE(IcDataAccess, assembler) {
const String& function_name =
String::Handle(Symbols::New(thread, "callerFunction"));
const Function& function = Function::Handle(Function::New(
function_name, RawFunction::kRegularFunction, true, false, false, false,
false, owner_class, TokenPosition::kNoSource));
function_name, FunctionLayout::kRegularFunction, true, false, false,
false, false, owner_class, TokenPosition::kNoSource));
const String& target_name = String::Handle(String::New("targetFunction"));
const intptr_t kTypeArgsLen = 0;

View file

@ -75,7 +75,7 @@ class UnoptimizedCall : public ValueObject {
intptr_t argument_index() const { return argument_index_; }
RawCode* target() const {
CodePtr target() const {
Code& code = Code::Handle();
code ^= object_pool_.ObjectAt(code_index_);
return code.raw();
@ -128,7 +128,7 @@ class InstanceCall : public UnoptimizedCall {
#endif // DEBUG
}
RawObject* data() const { return object_pool_.ObjectAt(argument_index()); }
ObjectPtr data() const { return object_pool_.ObjectAt(argument_index()); }
void set_data(const Object& data) const {
ASSERT(data.IsArray() || data.IsICData() || data.IsMegamorphicCache());
object_pool_.SetObjectAt(argument_index(), data);
@ -149,7 +149,7 @@ class UnoptimizedStaticCall : public UnoptimizedCall {
#endif // DEBUG
}
RawObject* ic_data() const { return object_pool_.ObjectAt(argument_index()); }
ObjectPtr ic_data() const { return object_pool_.ObjectAt(argument_index()); }
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(UnoptimizedStaticCall);
@ -194,7 +194,7 @@ class PoolPointerCall : public ValueObject {
ASSERT(Object::Handle(object_pool_.ObjectAt(code_index_)).IsCode());
}
RawCode* Target() const {
CodePtr Target() const {
Code& code = Code::Handle();
code ^= object_pool_.ObjectAt(code_index_);
return code.raw();
@ -228,7 +228,7 @@ class SwitchableCallBase : public ValueObject {
intptr_t data_index() const { return data_index_; }
intptr_t target_index() const { return target_index_; }
RawObject* data() const { return object_pool_.ObjectAt(data_index()); }
ObjectPtr data() const { return object_pool_.ObjectAt(data_index()); }
void SetData(const Object& data) const {
ASSERT(!Object::Handle(object_pool_.ObjectAt(data_index())).IsCode());
@ -321,8 +321,8 @@ class SwitchableCall : public SwitchableCallBase {
// No need to flush the instruction cache, since the code is not modified.
}
RawCode* target() const {
return reinterpret_cast<RawCode*>(object_pool_.ObjectAt(target_index()));
CodePtr target() const {
return static_cast<CodePtr>(object_pool_.ObjectAt(target_index()));
}
};
@ -395,7 +395,7 @@ class BareSwitchableCall : public SwitchableCallBase {
object_pool_.SetRawValueAt(target_index(), target.MonomorphicEntryPoint());
}
RawCode* target() const {
CodePtr target() const {
const uword pc = object_pool_.RawValueAt(target_index());
auto rct = IsolateGroup::Current()->reverse_pc_lookup_cache();
if (rct->Contains(pc)) {
@ -409,8 +409,8 @@ class BareSwitchableCall : public SwitchableCallBase {
}
};
RawCode* CodePatcher::GetStaticCallTargetAt(uword return_address,
const Code& code) {
CodePtr CodePatcher::GetStaticCallTargetAt(uword return_address,
const Code& code) {
ASSERT(code.ContainsInstructionAt(return_address));
PoolPointerCall call(return_address, code);
return call.Target();
@ -430,9 +430,9 @@ void CodePatcher::PatchPoolPointerCallAt(uword return_address,
call.SetTarget(new_target);
}
RawCode* CodePatcher::GetInstanceCallAt(uword return_address,
const Code& caller_code,
Object* data) {
CodePtr CodePatcher::GetInstanceCallAt(uword return_address,
const Code& caller_code,
Object* data) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
InstanceCall call(return_address, caller_code);
if (data != NULL) {
@ -468,9 +468,9 @@ void CodePatcher::InsertDeoptimizationCallAt(uword start) {
UNREACHABLE();
}
RawFunction* CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
const Code& caller_code,
ICData* ic_data_result) {
FunctionPtr CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
const Code& caller_code,
ICData* ic_data_result) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
UnoptimizedStaticCall static_call(return_address, caller_code);
ICData& ic_data = ICData::Handle();
@ -511,8 +511,8 @@ void CodePatcher::PatchSwitchableCallAtWithMutatorsStopped(
}
}
RawCode* CodePatcher::GetSwitchableCallTargetAt(uword return_address,
const Code& caller_code) {
CodePtr CodePatcher::GetSwitchableCallTargetAt(uword return_address,
const Code& caller_code) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
BareSwitchableCall call(return_address, caller_code);
@ -523,8 +523,8 @@ RawCode* CodePatcher::GetSwitchableCallTargetAt(uword return_address,
}
}
RawObject* CodePatcher::GetSwitchableCallDataAt(uword return_address,
const Code& caller_code) {
ObjectPtr CodePatcher::GetSwitchableCallDataAt(uword return_address,
const Code& caller_code) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
if (FLAG_precompiled_mode && FLAG_use_bare_instructions) {
BareSwitchableCall call(return_address, caller_code);
@ -547,9 +547,9 @@ void CodePatcher::PatchNativeCallAt(uword return_address,
});
}
RawCode* CodePatcher::GetNativeCallAt(uword return_address,
const Code& caller_code,
NativeFunction* target) {
CodePtr CodePatcher::GetNativeCallAt(uword return_address,
const Code& caller_code,
NativeFunction* target) {
ASSERT(caller_code.ContainsInstructionAt(return_address));
NativeCall call(return_address, caller_code);
*target = call.native_function();

View file

@ -29,8 +29,8 @@ ASSEMBLER_TEST_GENERATE(IcDataAccess, assembler) {
const String& function_name =
String::Handle(Symbols::New(thread, "callerFunction"));
const Function& function = Function::Handle(Function::New(
function_name, RawFunction::kRegularFunction, true, false, false, false,
false, owner_class, TokenPosition::kNoSource));
function_name, FunctionLayout::kRegularFunction, true, false, false,
false, false, owner_class, TokenPosition::kNoSource));
const String& target_name = String::Handle(String::New("targetFunction"));
const intptr_t kTypeArgsLen = 0;

View file

@ -79,8 +79,7 @@ static char* FindCharacter(char* str, char goal, char* limit) {
return NULL;
}
RawObject* CompilationTraceLoader::CompileTrace(uint8_t* buffer,
intptr_t size) {
ObjectPtr CompilationTraceLoader::CompileTrace(uint8_t* buffer, intptr_t size) {
// First compile functions named in the trace.
char* cursor = reinterpret_cast<char*>(buffer);
char* limit = cursor + size;
@ -125,7 +124,7 @@ RawObject* CompilationTraceLoader::CompileTrace(uint8_t* buffer,
arguments_descriptor = ArgumentsDescriptor::NewBoxed(kTypeArgsLen, argc);
dispatcher = closure_class.GetInvocationDispatcher(
Symbols::Call(), arguments_descriptor,
RawFunction::kInvokeFieldDispatcher, true /* create_if_absent */);
FunctionLayout::kInvokeFieldDispatcher, true /* create_if_absent */);
error_ = CompileFunction(dispatcher);
if (error_.IsError()) {
return error_.raw();
@ -160,9 +159,9 @@ RawObject* CompilationTraceLoader::CompileTrace(uint8_t* buffer,
// compile the getter, create its method extractor and compile that.
// - If looking for a getter and we only have a const field, evaluate the const
// field.
RawObject* CompilationTraceLoader::CompileTriple(const char* uri_cstr,
const char* cls_cstr,
const char* func_cstr) {
ObjectPtr CompilationTraceLoader::CompileTriple(const char* uri_cstr,
const char* cls_cstr,
const char* func_cstr) {
uri_ = Symbols::New(thread_, uri_cstr);
class_name_ = Symbols::New(thread_, cls_cstr);
function_name_ = Symbols::New(thread_, func_cstr);
@ -363,7 +362,7 @@ RawObject* CompilationTraceLoader::CompileTriple(const char* uri_cstr,
return Object::null();
}
RawObject* CompilationTraceLoader::CompileFunction(const Function& function) {
ObjectPtr CompilationTraceLoader::CompileFunction(const Function& function) {
if (function.is_abstract() || function.HasCode()) {
return Object::null();
}
@ -612,7 +611,7 @@ TypeFeedbackLoader::~TypeFeedbackLoader() {
delete[] cid_map_;
}
RawObject* TypeFeedbackLoader::LoadFeedback(ReadStream* stream) {
ObjectPtr TypeFeedbackLoader::LoadFeedback(ReadStream* stream) {
stream_ = stream;
error_ = CheckHeader();
@ -656,7 +655,7 @@ RawObject* TypeFeedbackLoader::LoadFeedback(ReadStream* stream) {
return Error::null();
}
RawObject* TypeFeedbackLoader::CheckHeader() {
ObjectPtr TypeFeedbackLoader::CheckHeader() {
const char* expected_version = Version::SnapshotString();
ASSERT(expected_version != NULL);
const intptr_t version_len = strlen(expected_version);
@ -710,7 +709,7 @@ RawObject* TypeFeedbackLoader::CheckHeader() {
return Error::null();
}
RawObject* TypeFeedbackLoader::LoadClasses() {
ObjectPtr TypeFeedbackLoader::LoadClasses() {
num_cids_ = ReadInt();
cid_map_ = new intptr_t[num_cids_];
@ -731,7 +730,7 @@ RawObject* TypeFeedbackLoader::LoadClasses() {
return Error::null();
}
RawObject* TypeFeedbackLoader::LoadFields() {
ObjectPtr TypeFeedbackLoader::LoadFields() {
for (intptr_t cid = kNumPredefinedCids; cid < num_cids_; cid++) {
cls_ = ReadClassByName();
bool skip = cls_.IsNull();
@ -799,7 +798,7 @@ RawObject* TypeFeedbackLoader::LoadFields() {
return Error::null();
}
RawObject* TypeFeedbackLoader::LoadFunction() {
ObjectPtr TypeFeedbackLoader::LoadFunction() {
bool skip = false;
cls_ = ReadClassByName();
@ -813,7 +812,7 @@ RawObject* TypeFeedbackLoader::LoadFunction() {
}
func_name_ = ReadString(); // Without private mangling.
RawFunction::Kind kind = static_cast<RawFunction::Kind>(ReadInt());
FunctionLayout::Kind kind = static_cast<FunctionLayout::Kind>(ReadInt());
intptr_t token_pos = ReadInt();
intptr_t usage = ReadInt();
intptr_t inlining_depth = ReadInt();
@ -923,8 +922,8 @@ RawObject* TypeFeedbackLoader::LoadFunction() {
return Error::null();
}
RawFunction* TypeFeedbackLoader::FindFunction(RawFunction::Kind kind,
intptr_t token_pos) {
FunctionPtr TypeFeedbackLoader::FindFunction(FunctionLayout::Kind kind,
intptr_t token_pos) {
if (cls_name_.Equals(Symbols::TopLevel())) {
func_ = lib_.LookupFunctionAllowPrivate(func_name_);
} else {
@ -933,7 +932,7 @@ RawFunction* TypeFeedbackLoader::FindFunction(RawFunction::Kind kind,
if (!func_.IsNull()) {
// Found regular method.
} else if (kind == RawFunction::kMethodExtractor) {
} else if (kind == FunctionLayout::kMethodExtractor) {
ASSERT(Field::IsGetterName(func_name_));
// Without private mangling:
String& name = String::Handle(zone_, Field::NameFromGetter(func_name_));
@ -945,7 +944,7 @@ RawFunction* TypeFeedbackLoader::FindFunction(RawFunction::Kind kind,
} else {
func_ = Function::null();
}
} else if (kind == RawFunction::kDynamicInvocationForwarder) {
} else if (kind == FunctionLayout::kDynamicInvocationForwarder) {
// Without private mangling:
String& name = String::Handle(
zone_, Function::DemangleDynamicInvocationForwarderName(func_name_));
@ -957,7 +956,7 @@ RawFunction* TypeFeedbackLoader::FindFunction(RawFunction::Kind kind,
} else {
func_ = Function::null();
}
} else if (kind == RawFunction::kClosureFunction) {
} else if (kind == FunctionLayout::kClosureFunction) {
// Note this lookup relies on parent functions appearing before child
// functions in the serialized feedback, so the parent will have already
// been compiled unoptimized and the child function created and added to
@ -984,7 +983,7 @@ RawFunction* TypeFeedbackLoader::FindFunction(RawFunction::Kind kind,
}
if (!func_.IsNull()) {
if (kind == RawFunction::kImplicitClosureFunction) {
if (kind == FunctionLayout::kImplicitClosureFunction) {
func_ = func_.ImplicitClosureFunction();
}
if (func_.is_abstract() || (func_.kind() != kind)) {
@ -995,7 +994,7 @@ RawFunction* TypeFeedbackLoader::FindFunction(RawFunction::Kind kind,
return func_.raw();
}
RawClass* TypeFeedbackLoader::ReadClassByName() {
ClassPtr TypeFeedbackLoader::ReadClassByName() {
uri_ = ReadString();
cls_name_ = ReadString();
@ -1021,7 +1020,7 @@ RawClass* TypeFeedbackLoader::ReadClassByName() {
return cls_.raw();
}
RawString* TypeFeedbackLoader::ReadString() {
StringPtr TypeFeedbackLoader::ReadString() {
intptr_t len = stream_->ReadUnsigned();
const char* cstr =
reinterpret_cast<const char*>(stream_->AddressOfCurrentPosition());
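
CompileTrace above scans a byte buffer with FindCharacter and hands each (uri, class, function) triple to CompileTriple. Assuming one comma-separated triple per line, which is what the cursor/limit scanning suggests, a minimal standalone parser for the same shape looks roughly like this:

#include <sstream>
#include <string>
#include <vector>

// Minimal sketch of the trace format the loader consumes: one
// "uri,class,function" triple per line. The real loader works on a raw
// byte buffer and resolves each triple against the loaded libraries.
struct TraceTriple {
  std::string uri, cls, func;
};

std::vector<TraceTriple> ParseTrace(const std::string& buffer) {
  std::vector<TraceTriple> out;
  std::istringstream lines(buffer);
  std::string line;
  while (std::getline(lines, line)) {
    std::istringstream fields(line);
    TraceTriple t;
    if (std::getline(fields, t.uri, ',') &&
        std::getline(fields, t.cls, ',') &&
        std::getline(fields, t.func)) {
      out.push_back(t);
    }
  }
  return out;
}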

View file

@ -35,13 +35,13 @@ class CompilationTraceLoader : public ValueObject {
public:
explicit CompilationTraceLoader(Thread* thread);
RawObject* CompileTrace(uint8_t* buffer, intptr_t buffer_length);
ObjectPtr CompileTrace(uint8_t* buffer, intptr_t buffer_length);
private:
RawObject* CompileTriple(const char* uri_cstr,
const char* cls_cstr,
const char* func_cstr);
RawObject* CompileFunction(const Function& function);
ObjectPtr CompileTriple(const char* uri_cstr,
const char* cls_cstr,
const char* func_cstr);
ObjectPtr CompileFunction(const Function& function);
void SpeculateInstanceCallTargets(const Function& function);
Thread* thread_;
@ -95,17 +95,17 @@ class TypeFeedbackLoader : public ValueObject {
explicit TypeFeedbackLoader(Thread* thread);
~TypeFeedbackLoader();
RawObject* LoadFeedback(ReadStream* stream);
ObjectPtr LoadFeedback(ReadStream* stream);
private:
RawObject* CheckHeader();
RawObject* LoadClasses();
RawObject* LoadFields();
RawObject* LoadFunction();
RawFunction* FindFunction(RawFunction::Kind kind, intptr_t token_pos);
ObjectPtr CheckHeader();
ObjectPtr LoadClasses();
ObjectPtr LoadFields();
ObjectPtr LoadFunction();
FunctionPtr FindFunction(FunctionLayout::Kind kind, intptr_t token_pos);
RawClass* ReadClassByName();
RawString* ReadString();
ClassPtr ReadClassByName();
StringPtr ReadString();
intptr_t ReadInt() { return stream_->Read<int32_t>(); }
Thread* thread_;

View file

@ -851,7 +851,7 @@ void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
if (has_one_target) {
const Function& target = targets.FirstTarget();
RawFunction::Kind function_kind = target.kind();
FunctionLayout::Kind function_kind = target.kind();
if (flow_graph()->CheckForInstanceCall(instr, function_kind) ==
FlowGraph::ToCheck::kNoCheck) {
StaticCallInstr* call = StaticCallInstr::FromCall(

View file

@ -657,7 +657,7 @@ void DispatchTableGenerator::ComputeSelectorOffsets() {
table_size_ = fitter.TableSize();
}
RawArray* DispatchTableGenerator::BuildCodeArray() {
ArrayPtr DispatchTableGenerator::BuildCodeArray() {
auto& entries = Array::Handle(zone_, Array::New(table_size_, Heap::kOld));
for (intptr_t i = 0; i < table_rows_.length(); i++) {
table_rows_[i]->FillTable(classes_, entries);

View file

@ -93,7 +93,7 @@ class DispatchTableGenerator {
// Build up an array of Code objects, used to serialize the information
// deserialized as a DispatchTable at runtime.
RawArray* BuildCodeArray();
ArrayPtr BuildCodeArray();
private:
void ReadTableSelectorInfo();

View file

@ -134,7 +134,7 @@ static void Jump(const Error& error) {
Thread::Current()->long_jump_base()->Jump(1, error);
}
RawError* Precompiler::CompileAll() {
ErrorPtr Precompiler::CompileAll() {
LongJumpScope jump;
if (setjmp(*jump.Set()) == 0) {
Precompiler precompiler(Thread::Current());
@ -608,7 +608,7 @@ void Precompiler::CollectCallbackFields() {
if (subcls.is_allocated()) {
// Add dispatcher to cls.
dispatcher = subcls.GetInvocationDispatcher(
field_name, args_desc, RawFunction::kInvokeFieldDispatcher,
field_name, args_desc, FunctionLayout::kInvokeFieldDispatcher,
/* create_if_absent = */ true);
if (FLAG_trace_precompiler) {
THR_Print("Added invoke-field-dispatcher for %s to %s\n",
@ -806,7 +806,7 @@ void Precompiler::AddTypesOf(const Function& function) {
}
Code& code = Code::Handle(Z, function.CurrentCode());
if (code.IsNull()) {
ASSERT(function.kind() == RawFunction::kSignatureFunction);
ASSERT(function.kind() == FunctionLayout::kSignatureFunction);
} else {
const ExceptionHandlers& handlers =
ExceptionHandlers::Handle(Z, code.exception_handlers());
@ -940,8 +940,8 @@ void Precompiler::AddConstObject(const class Instance& instance) {
precompiler_(precompiler),
subinstance_(Object::Handle()) {}
virtual void VisitPointers(RawObject** first, RawObject** last) {
for (RawObject** current = first; current <= last; current++) {
virtual void VisitPointers(ObjectPtr* first, ObjectPtr* last) {
for (ObjectPtr* current = first; current <= last; current++) {
subinstance_ = *current;
if (subinstance_.IsInstance()) {
precompiler_->AddConstObject(Instance::Cast(subinstance_));
@ -956,16 +956,17 @@ void Precompiler::AddConstObject(const class Instance& instance) {
};
ConstObjectVisitor visitor(this, I);
instance.raw()->VisitPointers(&visitor);
instance.raw()->ptr()->VisitPointers(&visitor);
}
void Precompiler::AddClosureCall(const Array& arguments_descriptor) {
const Class& cache_class =
Class::Handle(Z, I->object_store()->closure_class());
const Function& dispatcher = Function::Handle(
Z, cache_class.GetInvocationDispatcher(
Symbols::Call(), arguments_descriptor,
RawFunction::kInvokeFieldDispatcher, true /* create_if_absent */));
const Function& dispatcher =
Function::Handle(Z, cache_class.GetInvocationDispatcher(
Symbols::Call(), arguments_descriptor,
FunctionLayout::kInvokeFieldDispatcher,
true /* create_if_absent */));
AddFunction(dispatcher);
}
@ -1181,7 +1182,7 @@ void Precompiler::AddAnnotatedRoots() {
if ((type == EntryPointPragma::kAlways ||
type == EntryPointPragma::kGetterOnly) &&
function.kind() != RawFunction::kConstructor &&
function.kind() != FunctionLayout::kConstructor &&
!function.IsSetterFunction()) {
function2 = function.ImplicitClosureFunction();
AddFunction(function2);
@ -1191,7 +1192,7 @@ void Precompiler::AddAnnotatedRoots() {
AddInstantiatedClass(cls);
}
}
if (function.kind() == RawFunction::kImplicitGetter &&
if (function.kind() == FunctionLayout::kImplicitGetter &&
!implicit_getters.IsNull()) {
for (intptr_t i = 0; i < implicit_getters.Length(); ++i) {
field ^= implicit_getters.At(i);
@ -1200,7 +1201,7 @@ void Precompiler::AddAnnotatedRoots() {
}
}
}
if (function.kind() == RawFunction::kImplicitSetter &&
if (function.kind() == FunctionLayout::kImplicitSetter &&
!implicit_setters.IsNull()) {
for (intptr_t i = 0; i < implicit_setters.Length(); ++i) {
field ^= implicit_setters.At(i);
@ -1209,7 +1210,7 @@ void Precompiler::AddAnnotatedRoots() {
}
}
}
if (function.kind() == RawFunction::kImplicitStaticGetter &&
if (function.kind() == FunctionLayout::kImplicitStaticGetter &&
!implicit_static_getters.IsNull()) {
for (intptr_t i = 0; i < implicit_static_getters.Length(); ++i) {
field ^= implicit_static_getters.At(i);
@ -1281,7 +1282,7 @@ void Precompiler::CheckForNewDynamicFunctions() {
if (IsSent(selector3)) {
AddFunction(function);
}
} else if (function.kind() == RawFunction::kRegularFunction) {
} else if (function.kind() == FunctionLayout::kRegularFunction) {
selector2 = Field::LookupGetterSymbol(selector);
if (IsSent(selector2)) {
metadata = kernel::ProcedureAttributesOf(function, Z);
@ -1300,12 +1301,12 @@ void Precompiler::CheckForNewDynamicFunctions() {
}
}
if (function.kind() == RawFunction::kImplicitSetter ||
function.kind() == RawFunction::kSetterFunction ||
function.kind() == RawFunction::kRegularFunction) {
if (function.kind() == FunctionLayout::kImplicitSetter ||
function.kind() == FunctionLayout::kSetterFunction ||
function.kind() == FunctionLayout::kRegularFunction) {
selector2 = Function::CreateDynamicInvocationForwarderName(selector);
if (IsSent(selector2)) {
if (function.kind() == RawFunction::kImplicitSetter) {
if (function.kind() == FunctionLayout::kImplicitSetter) {
field = function.accessor_field();
metadata = kernel::ProcedureAttributesOf(field, Z);
} else if (!found_metadata) {
@ -1333,7 +1334,7 @@ class NameFunctionsTraits {
String::Cast(a).Equals(String::Cast(b));
}
static uword Hash(const Object& obj) { return String::Cast(obj).Hash(); }
static RawObject* NewKey(const String& str) { return str.raw(); }
static ObjectPtr NewKey(const String& str) { return str.raw(); }
};
typedef UnorderedHashMap<NameFunctionsTraits> Table;
@ -1710,7 +1711,7 @@ void Precompiler::AttachOptimizedTypeTestingStub() {
GrowableHandlePtrArray<const AbstractType>* types)
: type_(AbstractType::Handle(zone)), types_(types) {}
void VisitObject(RawObject* obj) {
void VisitObject(ObjectPtr obj) {
if (obj->GetClassId() == kTypeCid || obj->GetClassId() == kTypeRefCid) {
type_ ^= obj;
types_->Add(type_);
@ -2157,7 +2158,7 @@ struct CodeKeyTraits {
typedef UnorderedHashSet<CodeKeyTraits> CodeSet;
#if defined(DEBUG)
RawFunction* Precompiler::FindUnvisitedRetainedFunction() {
FunctionPtr Precompiler::FindUnvisitedRetainedFunction() {
class CodeChecker : public CodeVisitor {
public:
CodeChecker()
@ -2200,7 +2201,7 @@ void Precompiler::Obfuscate() {
GrowableHandlePtrArray<const Script>* scripts)
: script_(Script::Handle(zone)), scripts_(scripts) {}
void VisitObject(RawObject* obj) {
void VisitObject(ObjectPtr obj) {
if (obj->GetClassId() == kScriptCid) {
script_ ^= obj;
scripts_->Add(Script::Cast(script_));
@ -2563,10 +2564,10 @@ bool PrecompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
return is_compiled;
}
static RawError* PrecompileFunctionHelper(Precompiler* precompiler,
CompilationPipeline* pipeline,
const Function& function,
bool optimized) {
static ErrorPtr PrecompileFunctionHelper(Precompiler* precompiler,
CompilationPipeline* pipeline,
const Function& function,
bool optimized) {
// Check that we optimize, except if the function is not optimizable.
ASSERT(CompilerState::Current().is_aot());
ASSERT(!function.IsOptimizable() || optimized);
@ -2638,10 +2639,10 @@ static RawError* PrecompileFunctionHelper(Precompiler* precompiler,
return Error::null();
}
RawError* Precompiler::CompileFunction(Precompiler* precompiler,
Thread* thread,
Zone* zone,
const Function& function) {
ErrorPtr Precompiler::CompileFunction(Precompiler* precompiler,
Thread* thread,
Zone* zone,
const Function& function) {
VMTagScope tagScope(thread, VMTag::kCompileUnoptimizedTagId);
TIMELINE_FUNCTION_COMPILATION_DURATION(thread, "CompileFunction", function);
@ -2780,8 +2781,8 @@ void Obfuscator::InitializeRenamingMap(Isolate* isolate) {
PreventRenaming("_NamespaceImpl");
}
RawString* Obfuscator::ObfuscationState::RenameImpl(const String& name,
bool atomic) {
StringPtr Obfuscator::ObfuscationState::RenameImpl(const String& name,
bool atomic) {
ASSERT(name.IsSymbol());
renamed_ ^= renames_.GetOrNull(name);
@ -2857,7 +2858,7 @@ void Obfuscator::ObfuscationState::NextName() {
}
}
RawString* Obfuscator::ObfuscationState::NewAtomicRename(
StringPtr Obfuscator::ObfuscationState::NewAtomicRename(
bool should_be_private) {
do {
NextName();
@ -2869,8 +2870,8 @@ RawString* Obfuscator::ObfuscationState::NewAtomicRename(
return renamed_.raw();
}
RawString* Obfuscator::ObfuscationState::BuildRename(const String& name,
bool atomic) {
StringPtr Obfuscator::ObfuscationState::BuildRename(const String& name,
bool atomic) {
if (atomic) {
return NewAtomicRename(name.CharAt(0) == '_');
}
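
The ConstObjectVisitor hunk above shows the visitor signature moving from RawObject** to ObjectPtr*: the object walker hands the visitor a contiguous, inclusive range of tagged-pointer slots. A stripped-down sketch of that idiom (ObjectPtr is again a stand-in value wrapper, not the VM's definition):

#include <cstdint>

class ObjectPtr {  // stand-in value-type tagged pointer
 public:
  explicit ObjectPtr(uintptr_t tagged = 0) : tagged_(tagged) {}

 private:
  uintptr_t tagged_;
};

class ObjectPointerVisitor {
 public:
  virtual ~ObjectPointerVisitor() = default;
  // `first` and `last` bound an inclusive range of slots, matching the
  // `current <= last` loop in the hunk above.
  virtual void VisitPointers(ObjectPtr* first, ObjectPtr* last) = 0;
};

class SlotCounter : public ObjectPointerVisitor {
 public:
  void VisitPointers(ObjectPtr* first, ObjectPtr* last) override {
    for (ObjectPtr* current = first; current <= last; current++) {
      ++count_;
    }
  }
  intptr_t count() const { return count_; }

 private:
  intptr_t count_ = 0;
};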

View file

@ -25,7 +25,6 @@ class Error;
class Field;
class Function;
class GrowableObjectArray;
class RawError;
class SequenceNode;
class String;
class ParsedJSONObject;
@ -192,12 +191,12 @@ typedef DirectChainedHashMap<InstanceKeyValueTrait> InstanceSet;
class Precompiler : public ValueObject {
public:
static RawError* CompileAll();
static ErrorPtr CompileAll();
static RawError* CompileFunction(Precompiler* precompiler,
Thread* thread,
Zone* zone,
const Function& function);
static ErrorPtr CompileFunction(Precompiler* precompiler,
Thread* thread,
Zone* zone,
const Function& function);
// Returns true if get:runtimeType is not overloaded by any class.
bool get_runtime_type_is_unique() const {
@ -269,7 +268,7 @@ class Precompiler : public ValueObject {
void DropClasses();
void DropLibraries();
DEBUG_ONLY(RawFunction* FindUnvisitedRetainedFunction());
DEBUG_ONLY(FunctionPtr FindUnvisitedRetainedFunction());
void Obfuscate();
@ -348,7 +347,7 @@ class FunctionsTraits {
return String::Cast(obj).Hash();
}
}
static RawObject* NewKey(const Function& function) { return function.raw(); }
static ObjectPtr NewKey(const Function& function) { return function.raw(); }
};
typedef UnorderedHashSet<FunctionsTraits> UniqueFunctionsSet;
@ -406,7 +405,7 @@ class Obfuscator : public ValueObject {
//
// This method is guaranteed to return the same value for the same
// input and it always preserves leading '_' even for atomic renames.
RawString* Rename(const String& name, bool atomic = false) {
StringPtr Rename(const String& name, bool atomic = false) {
if (state_ == NULL) {
return name.raw();
}
@ -444,13 +443,13 @@ class Obfuscator : public ValueObject {
static const intptr_t kSavedStateRenamesIndex = 1;
static const intptr_t kSavedStateSize = 2;
static RawArray* GetRenamesFromSavedState(const Array& saved_state) {
static ArrayPtr GetRenamesFromSavedState(const Array& saved_state) {
Array& renames = Array::Handle();
renames ^= saved_state.At(kSavedStateRenamesIndex);
return renames.raw();
}
static RawString* GetNameFromSavedState(const Array& saved_state) {
static StringPtr GetNameFromSavedState(const Array& saved_state) {
String& name = String::Handle();
name ^= saved_state.At(kSavedStateNameIndex);
return name.raw();
@ -494,7 +493,7 @@ class Obfuscator : public ValueObject {
//
// This method is guaranteed to return the same value for the same
// input.
RawString* RenameImpl(const String& name, bool atomic);
StringPtr RenameImpl(const String& name, bool atomic);
// Register an identity (name -> name) mapping in the renaming map.
//
@ -511,11 +510,11 @@ class Obfuscator : public ValueObject {
// For non-atomic renames BuildRename ensures that private mangled
// identifiers (_ident@key) are renamed consistently with non-mangled
// counterparts (_ident).
RawString* BuildRename(const String& name, bool atomic);
StringPtr BuildRename(const String& name, bool atomic);
// Generate a new rename. If |should_be_private| is set to true
// then we prefix returned identifier with '_'.
RawString* NewAtomicRename(bool should_be_private);
StringPtr NewAtomicRename(bool should_be_private);
// Update next_ to generate the next free rename.
void NextName();
@ -549,7 +548,7 @@ class Obfuscator {
Obfuscator(Thread* thread, const String& private_key) {}
~Obfuscator() {}
RawString* Rename(const String& name, bool atomic = false) {
StringPtr Rename(const String& name, bool atomic = false) {
return name.raw();
}
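
The Rename contract documented above guarantees the same output for the same input and preserves a leading '_' so library privacy survives obfuscation. A sketch of that contract outside the VM, with a deliberately simplistic counter-based name generator (the VM's scheme differs):

#include <string>
#include <unordered_map>

class Renamer {
 public:
  std::string Rename(const std::string& name) {
    auto it = renames_.find(name);
    if (it != renames_.end()) return it->second;  // stable for the same input
    const bool is_private = !name.empty() && name[0] == '_';
    // Keep the leading '_' so private names stay private after renaming.
    std::string fresh =
        std::string(is_private ? "_r" : "r") + std::to_string(counter_++);
    renames_[name] = fresh;
    return fresh;
  }

 private:
  std::unordered_map<std::string, std::string> renames_;
  int counter_ = 0;
};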

View file

@ -127,10 +127,10 @@ void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
/* R1: new object end address. */ \
/* R2: allocation size. */ \
{ \
__ CompareImmediate(R2, target::RawObject::kSizeTagMaxSizeTag); \
__ CompareImmediate(R2, target::ObjectLayout::kSizeTagMaxSizeTag); \
__ mov(R3, \
Operand(R2, LSL, \
target::RawObject::kTagBitsSizeTagPos - \
target::ObjectLayout::kTagBitsSizeTagPos - \
target::ObjectAlignment::kObjectAlignmentLog2), \
LS); \
__ mov(R3, Operand(0), HI); \
@ -2060,10 +2060,10 @@ static void TryAllocateOneByteString(Assembler* assembler,
// R1: new object end address.
// R2: allocation size.
{
const intptr_t shift = target::RawObject::kTagBitsSizeTagPos -
const intptr_t shift = target::ObjectLayout::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2;
__ CompareImmediate(R2, target::RawObject::kSizeTagMaxSizeTag);
__ CompareImmediate(R2, target::ObjectLayout::kSizeTagMaxSizeTag);
__ mov(R3, Operand(R2, LSL, shift), LS);
__ mov(R3, Operand(0), HI);

View file

@ -149,9 +149,9 @@ static int GetScaleFactor(intptr_t size) {
/* R1: new object end address. */ \
/* R2: allocation size. */ \
{ \
__ CompareImmediate(R2, target::RawObject::kSizeTagMaxSizeTag); \
__ CompareImmediate(R2, target::ObjectLayout::kSizeTagMaxSizeTag); \
__ LslImmediate(R2, R2, \
target::RawObject::kTagBitsSizeTagPos - \
target::ObjectLayout::kTagBitsSizeTagPos - \
target::ObjectAlignment::kObjectAlignmentLog2); \
__ csel(R2, ZR, R2, HI); \
\
@ -2131,10 +2131,10 @@ static void TryAllocateOneByteString(Assembler* assembler,
// R1: new object end address.
// R2: allocation size.
{
const intptr_t shift = target::RawObject::kTagBitsSizeTagPos -
const intptr_t shift = target::ObjectLayout::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2;
__ CompareImmediate(R2, target::RawObject::kSizeTagMaxSizeTag);
__ CompareImmediate(R2, target::ObjectLayout::kSizeTagMaxSizeTag);
__ LslImmediate(R2, R2, shift);
__ csel(R2, R2, ZR, LS);

View file

@ -135,9 +135,9 @@ void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
/* EDI: allocation size. */ \
{ \
Label size_tag_overflow, done; \
__ cmpl(EDI, Immediate(target::RawObject::kSizeTagMaxSizeTag)); \
__ cmpl(EDI, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag)); \
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump); \
__ shll(EDI, Immediate(target::RawObject::kTagBitsSizeTagPos - \
__ shll(EDI, Immediate(target::ObjectLayout::kTagBitsSizeTagPos - \
target::ObjectAlignment::kObjectAlignmentLog2)); \
__ jmp(&done, Assembler::kNearJump); \
\
@ -2075,9 +2075,9 @@ static void TryAllocateOneByteString(Assembler* assembler,
// EDI: allocation size.
{
Label size_tag_overflow, done;
__ cmpl(EDI, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ cmpl(EDI, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
__ shll(EDI, Immediate(target::RawObject::kTagBitsSizeTagPos -
__ shll(EDI, Immediate(target::ObjectLayout::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done, Assembler::kNearJump);

View file

@ -137,9 +137,9 @@ void AsmIntrinsifier::GrowableArray_Allocate(Assembler* assembler,
/* R13: scratch register. */ \
{ \
Label size_tag_overflow, done; \
__ cmpq(RDI, Immediate(target::RawObject::kSizeTagMaxSizeTag)); \
__ cmpq(RDI, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag)); \
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump); \
__ shlq(RDI, Immediate(target::RawObject::kTagBitsSizeTagPos - \
__ shlq(RDI, Immediate(target::ObjectLayout::kTagBitsSizeTagPos - \
target::ObjectAlignment::kObjectAlignmentLog2)); \
__ jmp(&done, Assembler::kNearJump); \
\
@ -2105,9 +2105,9 @@ static void TryAllocateOneByteString(Assembler* assembler,
// RDI: allocation size.
{
Label size_tag_overflow, done;
__ cmpq(RDI, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ cmpq(RDI, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
__ shlq(RDI, Immediate(target::RawObject::kTagBitsSizeTagPos -
__ shlq(RDI, Immediate(target::ObjectLayout::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done, Assembler::kNearJump);
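
The intrinsifier hunks above (across the four architecture ports) emit the same size-tag computation for a fresh allocation: a size small enough to fit is shifted into the tag field, anything larger leaves the field zero and the size is recovered later from the class or length (the size_tag_overflow path). Under the kSizeTagPos/kSizeTagSize values asserted elsewhere in this change, treating kTagBitsSizeTagPos as the same bit position and using an illustrative alignment shift, the logic in plain C++ is roughly:

#include <cstdint>

constexpr uint32_t kSizeTagPos = 8;           // from the assertions in this CL
constexpr uint32_t kSizeTagSize = 8;
constexpr uint32_t kObjectAlignmentLog2 = 4;  // illustrative; target-dependent
constexpr uint32_t kSizeTagMaxSizeTag =
    ((1u << kSizeTagSize) - 1) << kObjectAlignmentLog2;

// Returns the tag bits encoding `allocation_size`, or 0 if it is too large
// to be encoded (the "size_tag_overflow" path in the stubs above).
uint32_t SizeTagBits(uint32_t allocation_size) {
  if (allocation_size > kSizeTagMaxSizeTag) return 0;
  return allocation_size << (kSizeTagPos - kObjectAlignmentLog2);
}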

View file

@ -1741,7 +1741,7 @@ void Assembler::StoreIntoObject(Register object,
// in progress
// If so, call the WriteBarrier stub, which will either add object to the
// store buffer (case 1) or add value to the marking stack (case 2).
// Compare RawObject::StorePointer.
// Compare ObjectLayout::StorePointer.
Label done;
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done);
@ -1749,7 +1749,7 @@ void Assembler::StoreIntoObject(Register object,
if (!lr_reserved) Push(LR);
ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
and_(TMP, LR, Operand(TMP, LSR, target::RawObject::kBarrierOverlapShift));
and_(TMP, LR, Operand(TMP, LSR, target::ObjectLayout::kBarrierOverlapShift));
ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
tst(TMP, Operand(LR));
if (value != kWriteBarrierValueReg) {
@ -1805,7 +1805,7 @@ void Assembler::StoreIntoArray(Register object,
// in progress
// If so, call the WriteBarrier stub, which will either add object to the
// store buffer (case 1) or add value to the marking stack (case 2).
// Compare RawObject::StorePointer.
// Compare ObjectLayout::StorePointer.
Label done;
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done);
@ -1813,7 +1813,7 @@ void Assembler::StoreIntoArray(Register object,
if (!lr_reserved) Push(LR);
ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
and_(TMP, LR, Operand(TMP, LSR, target::RawObject::kBarrierOverlapShift));
and_(TMP, LR, Operand(TMP, LSR, target::ObjectLayout::kBarrierOverlapShift));
ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
tst(TMP, Operand(LR));
@ -1853,7 +1853,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
StoreIntoObjectFilter(object, value, &done, kValueCanBeSmi, kJumpToNoUpdate);
ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
tst(TMP, Operand(1 << target::RawObject::kOldAndNotRememberedBit));
tst(TMP, Operand(1 << target::ObjectLayout::kOldAndNotRememberedBit));
b(&done, ZERO);
Stop("Store buffer update is required");
@ -1974,30 +1974,30 @@ void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
}
void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(classid_t) == sizeof(uint16_t));
Lsr(result, tags, Operand(target::RawObject::kClassIdTagPos), AL);
Lsr(result, tags, Operand(target::ObjectLayout::kClassIdTagPos), AL);
}
void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
ASSERT(target::RawObject::kSizeTagPos == 8);
ASSERT(target::RawObject::kSizeTagSize == 8);
ASSERT(target::ObjectLayout::kSizeTagPos == 8);
ASSERT(target::ObjectLayout::kSizeTagSize == 8);
Lsr(result, tags,
Operand(target::RawObject::kSizeTagPos -
Operand(target::ObjectLayout::kSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2),
AL);
AndImmediate(result, result,
(Utils::NBitMask(target::RawObject::kSizeTagSize)
(Utils::NBitMask(target::ObjectLayout::kSizeTagSize)
<< target::ObjectAlignment::kObjectAlignmentLog2));
}
void Assembler::LoadClassId(Register result, Register object, Condition cond) {
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::RawObject::kClassIdTagPos / kBitsPerByte;
target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
ldrh(result, FieldAddress(object, class_id_offset), cond);
}
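
The StoreIntoObject fast path above folds both barrier conditions (an old, not-yet-remembered object receiving a new-space value, and any store while incremental marking is in progress) into one shift, one AND of the two tag bytes, and a test against the thread's write-barrier mask. A schematic C++ rendering of that check (the shift amount and mask are parameters here, not the VM's constants):

#include <cstdint>

// Mirrors `value_tags & (object_tags >> kBarrierOverlapShift)` followed by
// the test against the thread's write_barrier_mask: a non-zero result means
// the slow-path WriteBarrier stub must run.
bool NeedsWriteBarrier(uint8_t object_tags,
                       uint8_t value_tags,
                       uint8_t write_barrier_mask,
                       int barrier_overlap_shift) {
  const uint8_t combined = static_cast<uint8_t>(
      value_tags & (object_tags >> barrier_overlap_shift));
  return (combined & write_barrier_mask) != 0;
}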

View file

@ -1198,8 +1198,8 @@ class Assembler : public AssemblerBase {
// before the code can be used.
//
// The necessary information for the "linker" (i.e. the relocation
// information) is stored in [RawCode::static_calls_target_table_]: an entry
// of the form
// information) is stored in [CodeLayout::static_calls_target_table_]: an
// entry of the form
//
// (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)
//
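
For reference, the table the comment above describes is conceptually a list of (kind-and-offset, target code, target function) records that a later pass walks to resolve pc-relative calls. A rough rendering of that shape with placeholder field types, not the VM's classes:

#include <cstdint>
#include <vector>

// One relocation record per unresolved pc-relative call. kind_and_offset
// packs the call kind together with the pc offset of the call instruction.
struct StaticCallRecord {
  intptr_t kind_and_offset;
  const void* target_code;      // <target-code>
  const void* target_function;  // <target-function>
};

using StaticCallsTargetTable = std::vector<StaticCallRecord>;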

View file

@ -982,14 +982,15 @@ void Assembler::StoreIntoObject(Register object,
// in progress
// If so, call the WriteBarrier stub, which will either add object to the
// store buffer (case 1) or add value to the marking stack (case 2).
// Compare RawObject::StorePointer.
// Compare ObjectLayout::StorePointer.
Label done;
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done);
}
ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
and_(TMP, TMP2, Operand(TMP, LSR, target::RawObject::kBarrierOverlapShift));
and_(TMP, TMP2,
Operand(TMP, LSR, target::ObjectLayout::kBarrierOverlapShift));
tst(TMP, Operand(BARRIER_MASK));
b(&done, ZERO);
@ -1043,14 +1044,15 @@ void Assembler::StoreIntoArray(Register object,
// in progress
// If so, call the WriteBarrier stub, which will either add object to the
// store buffer (case 1) or add value to the marking stack (case 2).
// Compare RawObject::StorePointer.
// Compare ObjectLayout::StorePointer.
Label done;
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done);
}
ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
ldr(TMP2, FieldAddress(value, target::Object::tags_offset()), kUnsignedByte);
and_(TMP, TMP2, Operand(TMP, LSR, target::RawObject::kBarrierOverlapShift));
and_(TMP, TMP2,
Operand(TMP, LSR, target::ObjectLayout::kBarrierOverlapShift));
tst(TMP, Operand(BARRIER_MASK));
b(&done, ZERO);
if (!lr_reserved) Push(LR);
@ -1076,7 +1078,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
StoreIntoObjectFilter(object, value, &done, kValueCanBeSmi, kJumpToNoUpdate);
ldr(TMP, FieldAddress(object, target::Object::tags_offset()), kUnsignedByte);
tsti(TMP, Immediate(1 << target::RawObject::kOldAndNotRememberedBit));
tsti(TMP, Immediate(1 << target::ObjectLayout::kOldAndNotRememberedBit));
b(&done, ZERO);
Stop("Store buffer update is required");
@ -1128,26 +1130,26 @@ void Assembler::StoreInternalPointer(Register object,
}
void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(classid_t) == sizeof(uint16_t));
LsrImmediate(result, tags, target::RawObject::kClassIdTagPos, kWord);
LsrImmediate(result, tags, target::ObjectLayout::kClassIdTagPos, kWord);
}
void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
ASSERT(target::RawObject::kSizeTagPos == 8);
ASSERT(target::RawObject::kSizeTagSize == 8);
ubfx(result, tags, target::RawObject::kSizeTagPos,
target::RawObject::kSizeTagSize);
ASSERT(target::ObjectLayout::kSizeTagPos == 8);
ASSERT(target::ObjectLayout::kSizeTagSize == 8);
ubfx(result, tags, target::ObjectLayout::kSizeTagPos,
target::ObjectLayout::kSizeTagSize);
LslImmediate(result, result, target::ObjectAlignment::kObjectAlignmentLog2);
}
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::RawObject::kClassIdTagPos / kBitsPerByte;
target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
LoadFromOffset(result, object, class_id_offset - kHeapObjectTag,
kUnsignedHalfword);
}
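
ExtractClassIdFromTags and ExtractInstanceSizeFromTags above depend on the tag-word layout asserted in this change: the class id in bits 16..31 and the alignment-scaled instance size in bits 8..15. The same decoding in plain C++, with an illustrative alignment shift:

#include <cstdint>

constexpr int kClassIdTagPos = 16;            // asserted above
constexpr int kClassIdTagSize = 16;
constexpr int kSizeTagPos = 8;                // asserted above
constexpr int kSizeTagSize = 8;
constexpr int kObjectAlignmentLog2 = 4;       // illustrative; target-dependent

uint32_t ClassIdFromTags(uint32_t tags) {
  return (tags >> kClassIdTagPos) & ((1u << kClassIdTagSize) - 1);
}

uint32_t InstanceSizeFromTags(uint32_t tags) {
  const uint32_t encoded = (tags >> kSizeTagPos) & ((1u << kSizeTagSize) - 1);
  return encoded << kObjectAlignmentLog2;  // sizes are stored pre-shifted
}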

View file

@ -1655,8 +1655,8 @@ class Assembler : public AssemblerBase {
// the code can be used.
//
// The necessary information for the "linker" (i.e. the relocation
// information) is stored in [RawCode::static_calls_target_table_]: an entry
// of the form
// information) is stored in [CodeLayout::static_calls_target_table_]: an
// entry of the form
//
// (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)
//

View file

@ -2551,7 +2551,7 @@ ASSEMBLER_TEST_GENERATE(LoadObjectNull, assembler) {
}
ASSEMBLER_TEST_RUN(LoadObjectNull, test) {
EXPECT_EQ(Object::null(), test->InvokeWithCodeAndThread<RawObject*>());
EXPECT_EQ(Object::null(), test->InvokeWithCodeAndThread<ObjectPtr>());
}
// PushObject null.
@ -2566,7 +2566,7 @@ ASSEMBLER_TEST_GENERATE(PushObjectNull, assembler) {
}
ASSEMBLER_TEST_RUN(PushObjectNull, test) {
EXPECT_EQ(Object::null(), test->InvokeWithCodeAndThread<RawObject*>());
EXPECT_EQ(Object::null(), test->InvokeWithCodeAndThread<ObjectPtr>());
}
// CompareObject null.
@ -2584,7 +2584,7 @@ ASSEMBLER_TEST_GENERATE(CompareObjectNull, assembler) {
}
ASSEMBLER_TEST_RUN(CompareObjectNull, test) {
EXPECT_EQ(Bool::True().raw(), test->InvokeWithCodeAndThread<RawObject*>());
EXPECT_EQ(Bool::True().raw(), test->InvokeWithCodeAndThread<ObjectPtr>());
}
ASSEMBLER_TEST_GENERATE(LoadObjectTrue, assembler) {
@ -2597,7 +2597,7 @@ ASSEMBLER_TEST_GENERATE(LoadObjectTrue, assembler) {
}
ASSEMBLER_TEST_RUN(LoadObjectTrue, test) {
EXPECT_EQ(Bool::True().raw(), test->InvokeWithCodeAndThread<RawObject*>());
EXPECT_EQ(Bool::True().raw(), test->InvokeWithCodeAndThread<ObjectPtr>());
}
ASSEMBLER_TEST_GENERATE(LoadObjectFalse, assembler) {
@ -2610,7 +2610,7 @@ ASSEMBLER_TEST_GENERATE(LoadObjectFalse, assembler) {
}
ASSEMBLER_TEST_RUN(LoadObjectFalse, test) {
EXPECT_EQ(Bool::False().raw(), test->InvokeWithCodeAndThread<RawObject*>());
EXPECT_EQ(Bool::False().raw(), test->InvokeWithCodeAndThread<ObjectPtr>());
}
ASSEMBLER_TEST_GENERATE(CSelTrue, assembler) {

View file

@ -1934,7 +1934,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
StoreIntoObjectFilter(object, value, &done, kValueCanBeSmi, kJumpToNoUpdate);
testb(FieldAddress(object, target::Object::tags_offset()),
Immediate(1 << target::RawObject::kOldAndNotRememberedBit));
Immediate(1 << target::ObjectLayout::kOldAndNotRememberedBit));
j(ZERO, &done, Assembler::kNearJump);
Stop("Store buffer update is required");
@ -2622,11 +2622,11 @@ void Assembler::EmitGenericShift(int rm,
}
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::RawObject::kClassIdTagPos / kBitsPerByte;
target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
movzxw(result, FieldAddress(object, class_id_offset));
}
@ -2652,11 +2652,11 @@ void Assembler::SmiUntagOrCheckClass(Register object,
Register scratch,
Label* is_smi) {
ASSERT(kSmiTagShift == 1);
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::RawObject::kClassIdTagPos / kBitsPerByte;
target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
// Untag optimistically. Tag bit is shifted into the CARRY.
SmiUntag(object);
@ -2682,8 +2682,8 @@ void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
Bind(&join);
} else {
ASSERT(result != object);
static const intptr_t kSmiCidSource = kSmiCid
<< target::RawObject::kClassIdTagPos;
static const intptr_t kSmiCidSource =
kSmiCid << target::ObjectLayout::kClassIdTagPos;
// Make a dummy "Object" whose cid is kSmiCid.
movl(result, Immediate(reinterpret_cast<int32_t>(&kSmiCidSource) + 1));
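
LoadClassIdMayBeSmi above uses a small trick: a static word whose class-id bit field already holds kSmiCid acts as a dummy object header, so Smis and heap objects can share one tag-word load. A sketch of the idea (the cid value is illustrative, and the real code stores an intptr_t and adds the heap-object tag of 1 to its address):

#include <cstdint>

constexpr int kClassIdTagPos = 16;       // from the assertions above
constexpr uint32_t kSmiCid = 59;         // illustrative cid value
constexpr uintptr_t kHeapObjectTag = 1;

// Dummy "tag word" whose class-id field holds kSmiCid.
static const uint32_t kSmiCidSource = kSmiCid << kClassIdTagPos;

// Read the class id from the tag word of a tagged object pointer.
uint32_t ClassIdOfTagged(uintptr_t tagged) {
  const uint32_t* tags =
      reinterpret_cast<const uint32_t*>(tagged - kHeapObjectTag);
  return *tags >> kClassIdTagPos;
}

uint32_t LoadClassIdMayBeSmi(uintptr_t value) {
  const bool is_smi = (value & kHeapObjectTag) == 0;
  // For Smis, point at the dummy word (plus the tag) so the same load works.
  const uintptr_t object =
      is_smi ? reinterpret_cast<uintptr_t>(&kSmiCidSource) + kHeapObjectTag
             : value;
  return ClassIdOfTagged(object);
}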

View file

@ -17,8 +17,8 @@ ASSEMBLER_TEST_EXTERN(StoreIntoObject);
ASSEMBLER_TEST_RUN(StoreIntoObject, test) {
#define TEST_CODE(value, growable_array, thread) \
test->Invoke<void, RawObject*, RawObject*, Thread*>(value, growable_array, \
thread)
test->Invoke<void, ObjectPtr, ObjectPtr, Thread*>(value, growable_array, \
thread)
const Array& old_array = Array::Handle(Array::New(3, Heap::kOld));
const Array& new_array = Array::Handle(Array::New(3, Heap::kNew));
@ -38,7 +38,7 @@ ASSEMBLER_TEST_RUN(StoreIntoObject, test) {
for (int i = -128; i < 128; i++) {
smi = Smi::New(i);
TEST_CODE(smi.raw(), grow_old_array.raw(), thread);
EXPECT(reinterpret_cast<RawArray*>(smi.raw()) == grow_old_array.data());
EXPECT(static_cast<ArrayPtr>(smi.raw()) == grow_old_array.data());
EXPECT(!thread->StoreBufferContains(grow_old_array.raw()));
}

View file

@ -1374,14 +1374,14 @@ void Assembler::StoreIntoObject(Register object,
// in progress
// If so, call the WriteBarrier stub, which will either add object to the
// store buffer (case 1) or add value to the marking stack (case 2).
// Compare RawObject::StorePointer.
// Compare ObjectLayout::StorePointer.
Label done;
if (can_be_smi == kValueCanBeSmi) {
testq(value, Immediate(kSmiTagMask));
j(ZERO, &done, kNearJump);
}
movb(TMP, FieldAddress(object, target::Object::tags_offset()));
shrl(TMP, Immediate(target::RawObject::kBarrierOverlapShift));
shrl(TMP, Immediate(target::ObjectLayout::kBarrierOverlapShift));
andl(TMP, Address(THR, target::Thread::write_barrier_mask_offset()));
testb(FieldAddress(value, target::Object::tags_offset()), TMP);
j(ZERO, &done, kNearJump);
@ -1426,14 +1426,14 @@ void Assembler::StoreIntoArray(Register object,
// in progress
// If so, call the WriteBarrier stub, which will either add object to the
// store buffer (case 1) or add value to the marking stack (case 2).
// Compare RawObject::StorePointer.
// Compare ObjectLayout::StorePointer.
Label done;
if (can_be_smi == kValueCanBeSmi) {
testq(value, Immediate(kSmiTagMask));
j(ZERO, &done, kNearJump);
}
movb(TMP, FieldAddress(object, target::Object::tags_offset()));
shrl(TMP, Immediate(target::RawObject::kBarrierOverlapShift));
shrl(TMP, Immediate(target::ObjectLayout::kBarrierOverlapShift));
andl(TMP, Address(THR, target::Thread::write_barrier_mask_offset()));
testb(FieldAddress(value, target::Object::tags_offset()), TMP);
j(ZERO, &done, kNearJump);
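
The comments in the two hunks above describe the barrier fast path: shift the object's tag byte by kBarrierOverlapShift, AND it with the thread's barrier mask and with the value's tag byte, and only call the WriteBarrier stub when the result is non-zero. A sketch of that predicate, with bit positions chosen so the shift lines the "old and not remembered" bit up with the "new space" bit; the real ObjectLayout constants are assumptions here and may differ.

#include <cstdint>
#include <cstdio>

// Assumed layout: the object's "old and not remembered" bit sits exactly
// kBarrierOverlapShift above the value's "new space" bit, so one shift plus
// one AND detects "old object stores new value". The thread's barrier mask
// widens the check to the marking bits only while incremental marking runs.
constexpr int kBarrierOverlapShift = 2;
constexpr uint8_t kNewBit = 1u << 2;
constexpr uint8_t kOldAndNotRememberedBit = kNewBit << kBarrierOverlapShift;

bool NeedsWriteBarrier(uint8_t object_tags, uint8_t value_tags, uint8_t barrier_mask) {
  return (((object_tags >> kBarrierOverlapShift) & barrier_mask) & value_tags) != 0;
}

int main() {
  const uint8_t old_unremembered = kOldAndNotRememberedBit;  // old object, not in store buffer
  const uint8_t new_value = kNewBit;                         // value lives in new space
  const uint8_t mask = kNewBit;                              // no marking in progress
  printf("old->new store needs barrier: %d\n",
         NeedsWriteBarrier(old_unremembered, new_value, mask));        // 1: add to store buffer
  printf("old->old store needs barrier: %d\n",
         NeedsWriteBarrier(old_unremembered, /*value_tags=*/0, mask));  // 0: skip the stub
}
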
@ -1461,7 +1461,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
StoreIntoObjectFilter(object, value, &done, kValueCanBeSmi, kJumpToNoUpdate);
testb(FieldAddress(object, target::Object::tags_offset()),
Immediate(1 << target::RawObject::kOldAndNotRememberedBit));
Immediate(1 << target::ObjectLayout::kOldAndNotRememberedBit));
j(ZERO, &done, Assembler::kNearJump);
Stop("Store buffer update is required");
@ -2104,31 +2104,31 @@ void Assembler::EmitGenericShift(bool wide,
}
void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(classid_t) == sizeof(uint16_t));
movl(result, tags);
shrl(result, Immediate(target::RawObject::kClassIdTagPos));
shrl(result, Immediate(target::ObjectLayout::kClassIdTagPos));
}
void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
ASSERT(target::RawObject::kSizeTagPos == 8);
ASSERT(target::RawObject::kSizeTagSize == 8);
ASSERT(target::ObjectLayout::kSizeTagPos == 8);
ASSERT(target::ObjectLayout::kSizeTagSize == 8);
movzxw(result, tags);
shrl(result, Immediate(target::RawObject::kSizeTagPos -
shrl(result, Immediate(target::ObjectLayout::kSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2));
AndImmediate(result,
Immediate(Utils::NBitMask(target::RawObject::kSizeTagSize)
Immediate(Utils::NBitMask(target::ObjectLayout::kSizeTagSize)
<< target::ObjectAlignment::kObjectAlignmentLog2));
}
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(classid_t) == sizeof(uint16_t));
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::RawObject::kClassIdTagPos / kBitsPerByte;
target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
movzxw(result, FieldAddress(object, class_id_offset));
}
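
The ASSERTs above pin down the tag layout this code relies on: a 16-bit class id starting at bit 16 and an 8-bit size tag starting at bit 8, scaled by the object alignment. A small decoding sketch under those constants; the alignment log2 is an assumption, and the assembler folds the two size shifts into a single shift plus a mask.

#include <cstdint>
#include <cstdio>

constexpr int kClassIdTagPos = 16;       // per the ASSERTs in the diff
constexpr int kClassIdTagSize = 16;
constexpr int kSizeTagPos = 8;
constexpr int kSizeTagSize = 8;
constexpr int kObjectAlignmentLog2 = 4;  // assumed 16-byte allocation unit

uint32_t ExtractClassId(uint32_t tags) {
  return (tags >> kClassIdTagPos) & ((1u << kClassIdTagSize) - 1);
}

uint32_t ExtractInstanceSize(uint32_t tags) {
  // The stored size tag counts allocation units, so scale it back up.
  const uint32_t size_tag = (tags >> kSizeTagPos) & ((1u << kSizeTagSize) - 1);
  return size_tag << kObjectAlignmentLog2;
}

int main() {
  const uint32_t tags = (57u << kClassIdTagPos) | (3u << kSizeTagPos);
  printf("cid=%u size=%u\n", ExtractClassId(tags), ExtractInstanceSize(tags));  // cid=57 size=48
}
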
@ -2154,12 +2154,12 @@ void Assembler::SmiUntagOrCheckClass(Register object,
intptr_t class_id,
Label* is_smi) {
ASSERT(kSmiTagShift == 1);
ASSERT(target::RawObject::kClassIdTagPos == 16);
ASSERT(target::RawObject::kClassIdTagSize == 16);
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(classid_t) == sizeof(uint16_t));
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::RawObject::kClassIdTagPos / kBitsPerByte;
target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
// Untag optimistically. Tag bit is shifted into the CARRY.
SmiUntag(object);

View file

@ -945,8 +945,8 @@ class Assembler : public AssemblerBase {
// before the code can be used.
//
// The necessary information for the "linker" (i.e. the relocation
// information) is stored in [RawCode::static_calls_target_table_]: an entry
// of the form
// information) is stored in [CodeLayout::static_calls_target_table_]: an
// entry of the form
//
// (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)
//
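
The reworded comment describes each static-call record as a (kind and pc-offset, target code, target function) triple kept for the final linking step. A hedged sketch of one way such a record could be packed and unpacked; the bit split, field types, and names below are invented for illustration and are not the VM's encoding.

#include <cstdint>
#include <cstdio>

// Assumed packing: low bits carry the call kind, the rest carry the pc offset.
enum CallKind : uint32_t { kPcRelativeCall = 1, kCallViaCode = 2 };
constexpr int kKindBits = 2;

struct StaticCallRecord {
  uint32_t kind_and_offset;     // kind | (pc_offset << kKindBits)
  const void* target_code;      // Code to link the call site against
  const void* target_function;  // Function the call resolves to (may be null)
};

StaticCallRecord MakeRecord(CallKind kind, uint32_t pc_offset,
                            const void* code, const void* function) {
  return {kind | (pc_offset << kKindBits), code, function};
}

int main() {
  StaticCallRecord r = MakeRecord(kPcRelativeCall, 0x40, nullptr, nullptr);
  printf("kind=%u pc_offset=%u\n",
         r.kind_and_offset & ((1u << kKindBits) - 1),
         r.kind_and_offset >> kKindBits);  // kind=1 pc_offset=64
}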

View file

@ -232,7 +232,7 @@ void Disassembler::DisassembleCodeHelper(const char* function_fullname,
Object& obj = Object::Handle(zone);
for (intptr_t i = code.pointer_offsets_length() - 1; i >= 0; i--) {
const uword addr = code.GetPointerOffsetAt(i) + code.PayloadStart();
obj = *reinterpret_cast<RawObject**>(addr);
obj = *reinterpret_cast<ObjectPtr*>(addr);
THR_Print(" %d : %#" Px " '%s'\n", code.GetPointerOffsetAt(i), addr,
obj.ToCString());
}
@ -301,20 +301,20 @@ void Disassembler::DisassembleCodeHelper(const char* function_fullname,
String& var_name = String::Handle(zone);
for (intptr_t i = 0; i < var_desc_length; i++) {
var_name = var_descriptors.GetName(i);
RawLocalVarDescriptors::VarInfo var_info;
LocalVarDescriptorsLayout::VarInfo var_info;
var_descriptors.GetInfo(i, &var_info);
const int8_t kind = var_info.kind();
if (kind == RawLocalVarDescriptors::kSavedCurrentContext) {
if (kind == LocalVarDescriptorsLayout::kSavedCurrentContext) {
THR_Print(" saved current CTX reg offset %d\n", var_info.index());
} else {
if (kind == RawLocalVarDescriptors::kContextLevel) {
if (kind == LocalVarDescriptorsLayout::kContextLevel) {
THR_Print(" context level %d scope %d", var_info.index(),
var_info.scope_id);
} else if (kind == RawLocalVarDescriptors::kStackVar) {
} else if (kind == LocalVarDescriptorsLayout::kStackVar) {
THR_Print(" stack var '%s' offset %d", var_name.ToCString(),
var_info.index());
} else {
ASSERT(kind == RawLocalVarDescriptors::kContextVar);
ASSERT(kind == LocalVarDescriptorsLayout::kContextVar);
THR_Print(" context var '%s' level %d offset %d",
var_name.ToCString(), var_info.scope_id, var_info.index());
}

View file

@ -1995,7 +1995,7 @@ void Disassembler::DecodeInstruction(char* hex_buffer,
for (intptr_t i = 0; i < offsets_length; i++) {
uword addr = code.GetPointerOffsetAt(i) + code.PayloadStart();
if ((pc <= addr) && (addr < (pc + instruction_length))) {
*object = &Object::Handle(*reinterpret_cast<RawObject**>(addr));
*object = &Object::Handle(*reinterpret_cast<ObjectPtr*>(addr));
break;
}
}

View file

@ -61,7 +61,7 @@ class BlockBuilder : public ValueObject {
ReturnInstr* instr = new ReturnInstr(
TokenPos(), value, CompilerState::Current().GetNextDeoptId(),
RawPcDescriptors::kInvalidYieldIndex, representation);
PcDescriptorsLayout::kInvalidYieldIndex, representation);
AddInstruction(instr);
entry_->set_last_instruction(instr);
}

View file

@ -744,7 +744,7 @@ void ConstantPropagator::VisitOneByteStringFromCharCode(
const intptr_t ch_code = Smi::Cast(o).Value();
ASSERT(ch_code >= 0);
if (ch_code < Symbols::kMaxOneCharCodeSymbol) {
RawString** table = Symbols::PredefinedAddress();
StringPtr* table = Symbols::PredefinedAddress();
SetValue(instr, String::ZoneHandle(Z, table[ch_code]));
} else {
SetValue(instr, non_constant_);
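
This hunk constant-folds OneByteString.fromCharCode when the char code is below the one-character-symbol cutoff and otherwise marks the result non-constant. A rough model of that fold, using a locally built table in place of Symbols::PredefinedAddress() and an assumed cutoff value.

#include <array>
#include <cstdio>
#include <optional>
#include <string>

constexpr int kMaxOneCharCodeSymbol = 0xFF;  // assumed cutoff

const std::string* OneCharSymbol(int ch_code) {
  // Stand-in for the VM's table of predefined one-character symbols.
  static const auto table = [] {
    std::array<std::string, kMaxOneCharCodeSymbol> t;
    for (int i = 0; i < kMaxOneCharCodeSymbol; ++i) t[i] = std::string(1, static_cast<char>(i));
    return t;
  }();
  return &table[ch_code];
}

std::optional<std::string> FoldOneByteStringFromCharCode(int ch_code) {
  if (ch_code >= 0 && ch_code < kMaxOneCharCodeSymbol) {
    return *OneCharSymbol(ch_code);  // constant-folded result
  }
  return std::nullopt;  // propagate "non-constant"
}

int main() {
  printf("%s\n", FoldOneByteStringFromCharCode(65)->c_str());                     // prints A
  printf("constant? %d\n", FoldOneByteStringFromCharCode(0x1F600).has_value());   // 0
}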

View file

@ -36,7 +36,7 @@ class ConstantPropagator : public FlowGraphVisitor {
static void OptimizeBranches(FlowGraph* graph);
// Used to initialize the abstract value of definitions.
static RawObject* Unknown() { return Object::unknown_constant().raw(); }
static ObjectPtr Unknown() { return Object::unknown_constant().raw(); }
private:
void Analyze();
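
Unknown() above is the sentinel every definition's abstract value starts at; constant propagation then only ever raises a value, either to a concrete constant or to non-constant. A tiny sketch of that three-level join with an invented value type standing in for Object; this is the textbook lattice, not the ConstantPropagator API.

#include <cstdint>
#include <cstdio>

// Lattice: Unknown (bottom) < Constant(v) < NonConstant (top).
struct AbstractValue {
  enum Kind { kUnknown, kConstant, kNonConstant } kind = kUnknown;
  int64_t value = 0;  // only meaningful for kConstant
};

AbstractValue Join(const AbstractValue& a, const AbstractValue& b) {
  if (a.kind == AbstractValue::kUnknown) return b;
  if (b.kind == AbstractValue::kUnknown) return a;
  if (a.kind == AbstractValue::kConstant && b.kind == AbstractValue::kConstant &&
      a.value == b.value) {
    return a;  // the same constant flows in on every path
  }
  return {AbstractValue::kNonConstant, 0};
}

int main() {
  AbstractValue phi = Join({AbstractValue::kConstant, 3}, {AbstractValue::kUnknown, 0});
  printf("kind=%d value=%lld\n", phi.kind, static_cast<long long>(phi.value));  // constant 3
}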

View file

@ -31,9 +31,9 @@ static bool IsRepresentable(const Integer& value, Representation rep) {
}
}
static RawInteger* BinaryIntegerEvaluateRaw(const Integer& left,
const Integer& right,
Token::Kind token_kind) {
static IntegerPtr BinaryIntegerEvaluateRaw(const Integer& left,
const Integer& right,
Token::Kind token_kind) {
switch (token_kind) {
case Token::kTRUNCDIV:
FALL_THROUGH;
@ -71,9 +71,9 @@ static RawInteger* BinaryIntegerEvaluateRaw(const Integer& left,
return Integer::null();
}
static RawInteger* UnaryIntegerEvaluateRaw(const Integer& value,
Token::Kind token_kind,
Zone* zone) {
static IntegerPtr UnaryIntegerEvaluateRaw(const Integer& value,
Token::Kind token_kind,
Zone* zone) {
switch (token_kind) {
case Token::kNEGATE:
return value.ArithmeticOp(Token::kMUL, Smi::Handle(zone, Smi::New(-1)),
@ -110,12 +110,12 @@ int64_t Evaluator::TruncateTo(int64_t v, Representation r) {
}
}
RawInteger* Evaluator::BinaryIntegerEvaluate(const Object& left,
const Object& right,
Token::Kind token_kind,
bool is_truncating,
Representation representation,
Thread* thread) {
IntegerPtr Evaluator::BinaryIntegerEvaluate(const Object& left,
const Object& right,
Token::Kind token_kind,
bool is_truncating,
Representation representation,
Thread* thread) {
if (!left.IsInteger() || !right.IsInteger()) {
return Integer::null();
}
@ -148,10 +148,10 @@ RawInteger* Evaluator::BinaryIntegerEvaluate(const Object& left,
return result.raw();
}
RawInteger* Evaluator::UnaryIntegerEvaluate(const Object& value,
Token::Kind token_kind,
Representation representation,
Thread* thread) {
IntegerPtr Evaluator::UnaryIntegerEvaluate(const Object& value,
Token::Kind token_kind,
Representation representation,
Thread* thread) {
if (!value.IsInteger()) {
return Integer::null();
}
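
These evaluator changes fold integer ops at compile time and, via TruncateTo, clip the folded value to the width of the target representation. A compact model of the evaluate-then-truncate step; the representation set, the op selection, and the division guards are trimmed down and assumed.

#include <cstdint>
#include <cstdio>
#include <optional>

enum Representation { kUnboxedInt32, kUnboxedInt64 };

int64_t TruncateTo(int64_t v, Representation r) {
  switch (r) {
    case kUnboxedInt32:
      return static_cast<int32_t>(static_cast<uint32_t>(v));  // wrap to 32 bits
    case kUnboxedInt64:
      return v;
  }
  return v;
}

std::optional<int64_t> BinaryIntegerEvaluate(int64_t left, int64_t right, char op,
                                             bool is_truncating, Representation rep) {
  int64_t result = 0;
  switch (op) {
    case '+':  // compute in unsigned arithmetic to avoid signed overflow
      result = static_cast<int64_t>(static_cast<uint64_t>(left) + static_cast<uint64_t>(right));
      break;
    case '*':
      result = static_cast<int64_t>(static_cast<uint64_t>(left) * static_cast<uint64_t>(right));
      break;
    case '/':
      if (right == 0 || (left == INT64_MIN && right == -1)) return std::nullopt;
      result = left / right;
      break;
    default:
      return std::nullopt;  // unsupported op: leave the instruction alone
  }
  return is_truncating ? TruncateTo(result, rep) : result;
}

int main() {
  auto v = BinaryIntegerEvaluate(0x7FFFFFFF, 1, '+', /*is_truncating=*/true, kUnboxedInt32);
  printf("%lld\n", static_cast<long long>(*v));  // -2147483648 after 32-bit truncation
}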

View file

@ -24,19 +24,19 @@ class Evaluator : public AllStatic {
// Evaluates a binary integer operation and returns a pointer to a
// canonicalized RawInteger.
static RawInteger* BinaryIntegerEvaluate(const Object& left,
const Object& right,
Token::Kind token_kind,
bool is_truncating,
Representation representation,
Thread* thread);
static IntegerPtr BinaryIntegerEvaluate(const Object& left,
const Object& right,
Token::Kind token_kind,
bool is_truncating,
Representation representation,
Thread* thread);
// Evaluates a unary integer operation and returns a pointer to a
// canonicalized RawInteger.
static RawInteger* UnaryIntegerEvaluate(const Object& value,
Token::Kind token_kind,
Representation representation,
Thread* thread);
static IntegerPtr UnaryIntegerEvaluate(const Object& value,
Token::Kind token_kind,
Representation representation,
Thread* thread);
// Evaluates a binary double operation and returns the result.
static double EvaluateDoubleOp(const double left,

View file

@ -410,7 +410,7 @@ bool FlowGraph::IsReceiver(Definition* def) const {
FlowGraph::ToCheck FlowGraph::CheckForInstanceCall(
InstanceCallInstr* call,
RawFunction::Kind kind) const {
FunctionLayout::Kind kind) const {
if (!FLAG_use_cha_deopt && !isolate()->all_classes_finalized()) {
// Even if class or function are private, lazy class finalization
// may later add overriding methods.
@ -472,7 +472,7 @@ FlowGraph::ToCheck FlowGraph::CheckForInstanceCall(
}
const String& method_name =
(kind == RawFunction::kMethodExtractor)
(kind == FunctionLayout::kMethodExtractor)
? String::Handle(zone(), Field::NameFromGetter(call->function_name()))
: call->function_name();

View file

@ -236,7 +236,7 @@ class FlowGraph : public ZoneAllocated {
// Return value indicates that the call needs no check at all,
// just a null check, or a full class check.
ToCheck CheckForInstanceCall(InstanceCallInstr* call,
RawFunction::Kind kind) const;
FunctionLayout::Kind kind) const;
Thread* thread() const { return thread_; }
Zone* zone() const { return thread()->zone(); }

View file

@ -252,7 +252,7 @@ bool FlowGraphCompiler::CanOSRFunction() const {
void FlowGraphCompiler::InsertBSSRelocation(BSS::Relocation reloc) {
const intptr_t offset = assembler()->InsertAlignedRelocation(reloc);
AddDescriptor(RawPcDescriptors::kBSSRelocation, /*pc_offset=*/offset,
AddDescriptor(PcDescriptorsLayout::kBSSRelocation, /*pc_offset=*/offset,
/*deopt_id=*/DeoptId::kNone, TokenPosition::kNoSource,
/*try_index=*/-1);
}
@ -456,7 +456,7 @@ void FlowGraphCompiler::RecordCatchEntryMoves(Environment* env,
void FlowGraphCompiler::EmitCallsiteMetadata(TokenPosition token_pos,
intptr_t deopt_id,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
Environment* env) {
AddCurrentDescriptor(kind, deopt_id, token_pos);
@ -471,14 +471,15 @@ void FlowGraphCompiler::EmitCallsiteMetadata(TokenPosition token_pos,
} else {
// Add deoptimization continuation point after the call and before the
// arguments are removed.
AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id_after,
token_pos);
}
}
}
void FlowGraphCompiler::EmitYieldPositionMetadata(TokenPosition token_pos,
intptr_t yield_index) {
AddDescriptor(RawPcDescriptors::kOther, assembler()->CodeSize(),
AddDescriptor(PcDescriptorsLayout::kOther, assembler()->CodeSize(),
DeoptId::kNone, token_pos, CurrentTryIndex(), yield_index);
}
@ -488,7 +489,7 @@ void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) {
// Instructions that can be deoptimization targets need to record kDeopt
// PcDescriptor corresponding to their deopt id. GotoInstr records its
// own so that it can control the placement.
AddCurrentDescriptor(RawPcDescriptors::kDeopt, instr->deopt_id(),
AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, instr->deopt_id(),
instr->token_pos());
}
AllocateRegistersLocally(instr);
@ -732,7 +733,7 @@ void FlowGraphCompiler::SetNeedsStackTrace(intptr_t try_index) {
exception_handlers_list_->SetNeedsStackTrace(try_index);
}
void FlowGraphCompiler::AddDescriptor(RawPcDescriptors::Kind kind,
void FlowGraphCompiler::AddDescriptor(PcDescriptorsLayout::Kind kind,
intptr_t pc_offset,
intptr_t deopt_id,
TokenPosition token_pos,
@ -740,13 +741,13 @@ void FlowGraphCompiler::AddDescriptor(RawPcDescriptors::Kind kind,
intptr_t yield_index) {
code_source_map_builder_->NoteDescriptor(kind, pc_offset, token_pos);
// Don't emit deopt-descriptors in AOT mode.
if (FLAG_precompiled_mode && (kind == RawPcDescriptors::kDeopt)) return;
if (FLAG_precompiled_mode && (kind == PcDescriptorsLayout::kDeopt)) return;
pc_descriptors_list_->AddDescriptor(kind, pc_offset, deopt_id, token_pos,
try_index, yield_index);
}
// Uses current pc position and try-index.
void FlowGraphCompiler::AddCurrentDescriptor(RawPcDescriptors::Kind kind,
void FlowGraphCompiler::AddCurrentDescriptor(PcDescriptorsLayout::Kind kind,
intptr_t deopt_id,
TokenPosition token_pos) {
AddDescriptor(kind, assembler()->CodeSize(), deopt_id, token_pos,
@ -1123,7 +1124,7 @@ void FlowGraphCompiler::FinalizePcDescriptors(const Code& code) {
code.set_pc_descriptors(descriptors);
}
RawArray* FlowGraphCompiler::CreateDeoptInfo(compiler::Assembler* assembler) {
ArrayPtr FlowGraphCompiler::CreateDeoptInfo(compiler::Assembler* assembler) {
// No deopt information if we precompile (no deoptimization allowed).
if (FLAG_precompiled_mode) {
return Array::empty_array().raw();
@ -1185,8 +1186,8 @@ void FlowGraphCompiler::FinalizeVarDescriptors(const Code& code) {
// descriptor for IrregexpFunction.
ASSERT(parsed_function().scope() == nullptr);
var_descs = LocalVarDescriptors::New(1);
RawLocalVarDescriptors::VarInfo info;
info.set_kind(RawLocalVarDescriptors::kSavedCurrentContext);
LocalVarDescriptorsLayout::VarInfo info;
info.set_kind(LocalVarDescriptorsLayout::kSavedCurrentContext);
info.scope_id = 0;
info.begin_pos = TokenPosition::kMinSource;
info.end_pos = TokenPosition::kMinSource;
@ -1285,7 +1286,7 @@ bool FlowGraphCompiler::TryIntrinsifyHelper() {
// there are no checks necessary in any case and we can therefore intrinsify
// them even in checked mode and strong mode.
switch (parsed_function().function().kind()) {
case RawFunction::kImplicitGetter: {
case FunctionLayout::kImplicitGetter: {
Field& field = Field::Handle(function().accessor_field());
ASSERT(!field.IsNull());
#if defined(DEBUG)
@ -1308,7 +1309,7 @@ bool FlowGraphCompiler::TryIntrinsifyHelper() {
}
return false;
}
case RawFunction::kImplicitSetter: {
case FunctionLayout::kImplicitSetter: {
if (!isolate()->argument_type_checks()) {
Field& field = Field::Handle(function().accessor_field());
ASSERT(!field.IsNull());
@ -1329,7 +1330,7 @@ bool FlowGraphCompiler::TryIntrinsifyHelper() {
break;
}
#if !defined(TARGET_ARCH_IA32)
case RawFunction::kMethodExtractor: {
case FunctionLayout::kMethodExtractor: {
auto& extracted_method = Function::ZoneHandle(
parsed_function().function().extracted_method_closure());
auto& klass = Class::Handle(extracted_method.Owner());
@ -1371,7 +1372,7 @@ bool FlowGraphCompiler::TryIntrinsifyHelper() {
void FlowGraphCompiler::GenerateStubCall(TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
intptr_t deopt_id,
Environment* env) {
@ -1474,7 +1475,7 @@ void FlowGraphCompiler::GenerateStaticCall(intptr_t deopt_id,
->raw();
call_ic_data = call_ic_data.Original();
}
AddCurrentDescriptor(RawPcDescriptors::kRewind, deopt_id, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kRewind, deopt_id, token_pos);
EmitUnoptimizedStaticCall(args_info.size_with_type_args, deopt_id,
token_pos, locs, call_ic_data, entry_kind);
}
@ -2086,8 +2087,8 @@ bool FlowGraphCompiler::LookupMethodFor(int class_id,
if (class_id < 0) return false;
if (class_id >= isolate->class_table()->NumCids()) return false;
RawClass* raw_class = isolate->class_table()->At(class_id);
if (raw_class == NULL) return false;
ClassPtr raw_class = isolate->class_table()->At(class_id);
if (raw_class == nullptr) return false;
Class& cls = Class::Handle(zone, raw_class);
if (cls.IsNull()) return false;
if (!cls.is_finalized()) return false;
@ -2203,7 +2204,7 @@ void FlowGraphCompiler::EmitTestAndCall(const CallTargets& targets,
// Do not use the code from the function, but let the code be patched so
// that we can record the outgoing edges to other code.
const Function& function = *targets.TargetAt(smi_case)->target;
GenerateStaticDartCall(deopt_id, token_index, RawPcDescriptors::kOther,
GenerateStaticDartCall(deopt_id, token_index, PcDescriptorsLayout::kOther,
locs, function, entry_kind);
__ Drop(args_info.size_with_type_args);
if (match_found != NULL) {
@ -2253,7 +2254,7 @@ void FlowGraphCompiler::EmitTestAndCall(const CallTargets& targets,
// Do not use the code from the function, but let the code be patched so
// that we can record the outgoing edges to other code.
const Function& function = *targets.TargetAt(i)->target;
GenerateStaticDartCall(deopt_id, token_index, RawPcDescriptors::kOther,
GenerateStaticDartCall(deopt_id, token_index, PcDescriptorsLayout::kOther,
locs, function, entry_kind);
__ Drop(args_info.size_with_type_args);
if (!is_last_check || add_megamorphic_call) {
@ -2557,7 +2558,7 @@ void ThrowErrorSlowPathCode::EmitNativeCode(FlowGraphCompiler* compiler) {
__ CallRuntime(runtime_entry_, num_args_);
}
const intptr_t deopt_id = instruction()->deopt_id();
compiler->AddDescriptor(RawPcDescriptors::kOther,
compiler->AddDescriptor(PcDescriptorsLayout::kOther,
compiler->assembler()->CodeSize(), deopt_id,
instruction()->token_pos(), try_index_);
AddMetadataForRuntimeCall(compiler);

View file

@ -181,9 +181,9 @@ class CompilerDeoptInfo : public ZoneAllocated {
}
virtual ~CompilerDeoptInfo() {}
RawTypedData* CreateDeoptInfo(FlowGraphCompiler* compiler,
DeoptInfoBuilder* builder,
const Array& deopt_table);
TypedDataPtr CreateDeoptInfo(FlowGraphCompiler* compiler,
DeoptInfoBuilder* builder,
const Array& deopt_table);
// No code needs to be generated.
virtual void GenerateCode(FlowGraphCompiler* compiler, intptr_t stub_ix) {}
@ -602,27 +602,27 @@ class FlowGraphCompiler : public ValueObject {
void GenerateStubCall(TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
intptr_t deopt_id = DeoptId::kNone,
Environment* env = nullptr);
void GeneratePatchableCall(TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs);
void GenerateDartCall(intptr_t deopt_id,
TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind = Code::EntryKind::kNormal);
void GenerateStaticDartCall(
intptr_t deopt_id,
TokenPosition token_pos,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind = Code::EntryKind::kNormal);
@ -785,7 +785,7 @@ class FlowGraphCompiler : public ValueObject {
// `pending_deoptimization_env`.
void EmitCallsiteMetadata(TokenPosition token_pos,
intptr_t deopt_id,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
Environment* env = nullptr);
@ -823,16 +823,16 @@ class FlowGraphCompiler : public ValueObject {
const Array& handler_types,
bool needs_stacktrace);
void SetNeedsStackTrace(intptr_t try_index);
void AddCurrentDescriptor(RawPcDescriptors::Kind kind,
void AddCurrentDescriptor(PcDescriptorsLayout::Kind kind,
intptr_t deopt_id,
TokenPosition token_pos);
void AddDescriptor(
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
intptr_t pc_offset,
intptr_t deopt_id,
TokenPosition token_pos,
intptr_t try_index,
intptr_t yield_index = RawPcDescriptors::kInvalidYieldIndex);
intptr_t yield_index = PcDescriptorsLayout::kInvalidYieldIndex);
// Add NullCheck information for the current PC.
void AddNullCheck(TokenPosition token_pos, const String& name);
@ -851,7 +851,7 @@ class FlowGraphCompiler : public ValueObject {
void FinalizeExceptionHandlers(const Code& code);
void FinalizePcDescriptors(const Code& code);
RawArray* CreateDeoptInfo(compiler::Assembler* assembler);
ArrayPtr CreateDeoptInfo(compiler::Assembler* assembler);
void FinalizeStackMaps(const Code& code);
void FinalizeVarDescriptors(const Code& code);
void FinalizeCatchEntryMovesMap(const Code& code);
@ -929,9 +929,9 @@ class FlowGraphCompiler : public ValueObject {
void AddStubCallTarget(const Code& code);
void AddDispatchTableCallTarget(const compiler::TableSelector* selector);
RawArray* edge_counters_array() const { return edge_counters_array_.raw(); }
ArrayPtr edge_counters_array() const { return edge_counters_array_.raw(); }
RawArray* InliningIdToFunction() const;
ArrayPtr InliningIdToFunction() const;
void BeginCodeSourceRange();
void EndCodeSourceRange(TokenPosition token_pos);
@ -1029,13 +1029,13 @@ class FlowGraphCompiler : public ValueObject {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl);
RawSubtypeTestCache* GenerateInlineInstanceof(
SubtypeTestCachePtr GenerateInlineInstanceof(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl);
RawSubtypeTestCache* GenerateInstantiatedTypeWithArgumentsTest(
SubtypeTestCachePtr GenerateInstantiatedTypeWithArgumentsTest(
TokenPosition token_pos,
const AbstractType& dst_type,
compiler::Label* is_instance_lbl,
@ -1047,19 +1047,19 @@ class FlowGraphCompiler : public ValueObject {
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_lbl);
RawSubtypeTestCache* GenerateUninstantiatedTypeTest(
SubtypeTestCachePtr GenerateUninstantiatedTypeTest(
TokenPosition token_pos,
const AbstractType& dst_type,
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_label);
RawSubtypeTestCache* GenerateFunctionTypeTest(
SubtypeTestCachePtr GenerateFunctionTypeTest(
TokenPosition token_pos,
const AbstractType& dst_type,
compiler::Label* is_instance_lbl,
compiler::Label* is_not_instance_label);
RawSubtypeTestCache* GenerateSubtype1TestCacheLookup(
SubtypeTestCachePtr GenerateSubtype1TestCacheLookup(
TokenPosition token_pos,
const Class& type_class,
compiler::Label* is_instance_lbl,
@ -1076,7 +1076,7 @@ class FlowGraphCompiler : public ValueObject {
TypeTestStubKind GetTypeTestStubKindForTypeParameter(
const TypeParameter& type_param);
RawSubtypeTestCache* GenerateCallSubtypeTestStub(
SubtypeTestCachePtr GenerateCallSubtypeTestStub(
TypeTestStubKind test_kind,
Register instance_reg,
Register instantiator_type_arguments_reg,

View file

@ -99,9 +99,9 @@ void FlowGraphCompiler::ExitIntrinsicMode() {
intrinsic_mode_ = false;
}
RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
DeoptInfoBuilder* builder,
const Array& deopt_table) {
TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
DeoptInfoBuilder* builder,
const Array& deopt_table) {
if (deopt_env_ == NULL) {
++builder->current_info_number_;
return TypedData::null();
@ -228,7 +228,7 @@ void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
// R2: instantiator type arguments (if used).
// R1: function type arguments (if used).
// R3: type test cache.
RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
TypeTestStubKind test_kind,
Register instance_reg,
Register instantiator_type_arguments_reg,
@ -274,7 +274,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
// be completed.
// R0: instance being type checked (preserved).
// Clobbers R1, R2.
RawSubtypeTestCache*
SubtypeTestCachePtr
FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
TokenPosition token_pos,
const AbstractType& type,
@ -417,7 +417,7 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
// TODO(srdjan): Implement a quicker subtype check, as type test
// arrays can grow too high, but they may be useful when optimizing
// code (type-feedback).
RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
SubtypeTestCachePtr FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
TokenPosition token_pos,
const Class& type_class,
compiler::Label* is_instance_lbl,
@ -455,7 +455,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
// Generates inlined check if 'type' is a type parameter or type itself
// R0: instance (preserved).
RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
SubtypeTestCachePtr FlowGraphCompiler::GenerateUninstantiatedTypeTest(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
@ -540,7 +540,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
// Generates function type check.
//
// See [GenerateUninstantiatedTypeTest] for calling convention.
RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
SubtypeTestCachePtr FlowGraphCompiler::GenerateFunctionTypeTest(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
@ -573,7 +573,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
// Note that this inlined code must be followed by the runtime_call code, as it
// may fall through to it. Otherwise, this inline code will jump to the label
// is_instance or to the label is_not_instance.
RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
SubtypeTestCachePtr FlowGraphCompiler::GenerateInlineInstanceof(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
@ -668,7 +668,7 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
__ LoadUniqueObject(TypeTestABI::kDstTypeReg, type);
__ LoadUniqueObject(TypeTestABI::kSubtypeTestCacheReg, test_cache);
GenerateStubCall(token_pos, StubCode::InstanceOf(),
/*kind=*/RawPcDescriptors::kOther, locs, deopt_id);
/*kind=*/PcDescriptorsLayout::kOther, locs, deopt_id);
__ b(&done);
}
__ Bind(&is_not_instance);
@ -809,7 +809,7 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
sub_type_cache_offset, PP);
__ blx(R9);
}
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kOther, locs);
__ Bind(&done);
}
@ -1016,7 +1016,7 @@ void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs) {
__ BranchLinkPatchable(stub);
EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
@ -1025,7 +1025,7 @@ void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
@ -1035,7 +1035,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
TokenPosition token_pos,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
@ -1063,7 +1063,7 @@ void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
intptr_t argument_count,
LocationSummary* locs) {
__ CallRuntime(entry, argument_count);
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kOther, locs);
}
void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
@ -1109,8 +1109,8 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
__ LoadFromOffset(kWord, R0, SP,
(ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
__ LoadUniqueObject(R9, ic_data);
GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs,
entry_kind);
GenerateDartCall(deopt_id, token_pos, stub, PcDescriptorsLayout::kIcCall,
locs, entry_kind);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -1134,7 +1134,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
__ ldr(LR, compiler::FieldAddress(CODE_REG, entry_point_offset));
__ blx(LR);
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kIcCall, locs);
EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kIcCall, locs);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -1189,16 +1189,19 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
if (try_index == kInvalidTryIndex) {
try_index = CurrentTryIndex();
}
AddDescriptor(RawPcDescriptors::kOther, assembler()->CodeSize(),
AddDescriptor(PcDescriptorsLayout::kOther, assembler()->CodeSize(),
DeoptId::kNone, token_pos, try_index);
} else if (is_optimizing()) {
AddCurrentDescriptor(RawPcDescriptors::kOther, DeoptId::kNone, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone,
token_pos);
AddDeoptIndexAtCall(deopt_id_after);
} else {
AddCurrentDescriptor(RawPcDescriptors::kOther, DeoptId::kNone, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone,
token_pos);
// Add deoptimization continuation point after the call and before the
// arguments are removed.
AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id_after,
token_pos);
}
RecordCatchEntryMoves(pending_deoptimization_env_, try_index);
__ Drop(args_desc.SizeWithTypeArgs());
@ -1244,7 +1247,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
__ LoadUniqueObject(R9, data);
__ blx(LR);
EmitCallsiteMetadata(token_pos, DeoptId::kNone, RawPcDescriptors::kOther,
EmitCallsiteMetadata(token_pos, DeoptId::kNone, PcDescriptorsLayout::kOther,
locs);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -1260,7 +1263,7 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(R9, ic_data);
GenerateDartCall(deopt_id, token_pos, stub,
RawPcDescriptors::kUnoptStaticCall, locs, entry_kind);
PcDescriptorsLayout::kUnoptStaticCall, locs, entry_kind);
__ Drop(size_with_type_args);
}
@ -1283,7 +1286,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
}
// Do not use the code from the function, but let the code be patched so that
// we can record the outgoing edges to other code.
GenerateStaticDartCall(deopt_id, token_pos, RawPcDescriptors::kOther, locs,
GenerateStaticDartCall(deopt_id, token_pos, PcDescriptorsLayout::kOther, locs,
function, entry_kind);
__ Drop(size_with_type_args);
}
@ -1330,7 +1333,8 @@ Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
} else {
__ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id,
token_pos);
// Stub returns result in flags (result of a cmp, we need Z computed).
__ Drop(1); // Discard constant.
__ Pop(reg); // Restore 'reg'.
@ -1353,7 +1357,8 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
} else {
__ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id,
token_pos);
// Stub returns result in flags (result of a cmp, we need Z computed).
__ Pop(right);
__ Pop(left);
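
GenerateCallSubtypeTestStub above hands the instance, both type-argument vectors, and a SubtypeTestCache to a stub that memoizes earlier type-check outcomes. A stripped-down model of such a cache as a linear list of keyed entries; the key shape and the plain bool result are assumptions, not the stub's actual record layout.

#include <cstdint>
#include <cstdio>
#include <optional>
#include <utility>
#include <vector>

struct SubtypeTestKey {
  intptr_t cid;                     // class id of the instance being checked
  intptr_t instantiator_type_args;  // opaque ids standing in for TypeArguments
  intptr_t function_type_args;
  bool operator==(const SubtypeTestKey& o) const {
    return cid == o.cid && instantiator_type_args == o.instantiator_type_args &&
           function_type_args == o.function_type_args;
  }
};

class SubtypeTestCache {
 public:
  std::optional<bool> Lookup(const SubtypeTestKey& key) const {
    for (const auto& e : entries_) {
      if (e.first == key) return e.second;  // hit: skip the slow runtime check
    }
    return std::nullopt;  // miss: fall through to the runtime, then AddEntry()
  }
  void AddEntry(const SubtypeTestKey& key, bool result) { entries_.push_back({key, result}); }

 private:
  std::vector<std::pair<SubtypeTestKey, bool>> entries_;
};

int main() {
  SubtypeTestCache cache;
  SubtypeTestKey key{/*cid=*/77, /*instantiator=*/1, /*function=*/0};
  if (!cache.Lookup(key)) cache.AddEntry(key, true);  // first check goes slow, then is cached
  printf("cached result: %d\n", *cache.Lookup(key));  // 1
}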

View file

@ -93,9 +93,9 @@ void FlowGraphCompiler::ExitIntrinsicMode() {
intrinsic_mode_ = false;
}
RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
DeoptInfoBuilder* builder,
const Array& deopt_table) {
TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
DeoptInfoBuilder* builder,
const Array& deopt_table) {
if (deopt_env_ == NULL) {
++builder->current_info_number_;
return TypedData::null();
@ -217,7 +217,7 @@ void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
// R0: instance (must be preserved).
// R2: instantiator type arguments (if used).
// R1: function type arguments (if used).
RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
TypeTestStubKind test_kind,
Register instance_reg,
Register instantiator_type_arguments_reg,
@ -263,7 +263,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
// be completed.
// R0: instance being type checked (preserved).
// Clobbers R1, R2.
RawSubtypeTestCache*
SubtypeTestCachePtr
FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
TokenPosition token_pos,
const AbstractType& type,
@ -401,7 +401,7 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
// TODO(srdjan): Implement a quicker subtype check, as type test
// arrays can grow too high, but they may be useful when optimizing
// code (type-feedback).
RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
SubtypeTestCachePtr FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
TokenPosition token_pos,
const Class& type_class,
compiler::Label* is_instance_lbl,
@ -437,7 +437,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
// Generates inlined check if 'type' is a type parameter or type itself
// R0: instance (preserved).
RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
SubtypeTestCachePtr FlowGraphCompiler::GenerateUninstantiatedTypeTest(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
@ -515,7 +515,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
// Generates function type check.
//
// See [GenerateUninstantiatedTypeTest] for calling convention.
RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
SubtypeTestCachePtr FlowGraphCompiler::GenerateFunctionTypeTest(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
@ -545,7 +545,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
// Note that this inlined code must be followed by the runtime_call code, as it
// may fall through to it. Otherwise, this inline code will jump to the label
// is_instance or to the label is_not_instance.
RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
SubtypeTestCachePtr FlowGraphCompiler::GenerateInlineInstanceof(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
@ -634,7 +634,7 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
__ LoadUniqueObject(TypeTestABI::kDstTypeReg, type);
__ LoadUniqueObject(TypeTestABI::kSubtypeTestCacheReg, test_cache);
GenerateStubCall(token_pos, StubCode::InstanceOf(),
/*kind=*/RawPcDescriptors::kOther, locs, deopt_id);
/*kind=*/PcDescriptorsLayout::kOther, locs, deopt_id);
__ b(&done);
}
__ Bind(&is_not_instance);
@ -770,7 +770,7 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
sub_type_cache_offset);
__ blr(R9);
}
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kOther, locs);
__ Bind(&done);
}
@ -977,7 +977,7 @@ void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs) {
__ BranchLinkPatchable(stub);
EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
@ -986,7 +986,7 @@ void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
@ -996,7 +996,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
TokenPosition token_pos,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
@ -1024,7 +1024,7 @@ void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
intptr_t argument_count,
LocationSummary* locs) {
__ CallRuntime(entry, argument_count);
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kOther, locs);
}
void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
@ -1060,8 +1060,8 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
__ LoadObject(R6, parsed_function().function());
__ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
__ LoadUniqueObject(R5, ic_data);
GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs,
entry_kind);
GenerateDartCall(deopt_id, token_pos, stub, PcDescriptorsLayout::kIcCall,
locs, entry_kind);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -1091,7 +1091,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
__ ldr(LR, compiler::FieldAddress(CODE_REG, entry_point_offset));
__ blr(LR);
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kIcCall, locs);
EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kIcCall, locs);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -1143,16 +1143,19 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
if (try_index == kInvalidTryIndex) {
try_index = CurrentTryIndex();
}
AddDescriptor(RawPcDescriptors::kOther, assembler()->CodeSize(),
AddDescriptor(PcDescriptorsLayout::kOther, assembler()->CodeSize(),
DeoptId::kNone, token_pos, try_index);
} else if (is_optimizing()) {
AddCurrentDescriptor(RawPcDescriptors::kOther, DeoptId::kNone, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone,
token_pos);
AddDeoptIndexAtCall(deopt_id_after);
} else {
AddCurrentDescriptor(RawPcDescriptors::kOther, DeoptId::kNone, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone,
token_pos);
// Add deoptimization continuation point after the call and before the
// arguments are removed.
AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id_after,
token_pos);
}
RecordCatchEntryMoves(pending_deoptimization_env_, try_index);
__ Drop(args_desc.SizeWithTypeArgs());
@ -1207,7 +1210,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
}
__ blr(LR);
EmitCallsiteMetadata(token_pos, DeoptId::kNone, RawPcDescriptors::kOther,
EmitCallsiteMetadata(token_pos, DeoptId::kNone, PcDescriptorsLayout::kOther,
locs);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -1223,7 +1226,7 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(R5, ic_data);
GenerateDartCall(deopt_id, token_pos, stub,
RawPcDescriptors::kUnoptStaticCall, locs, entry_kind);
PcDescriptorsLayout::kUnoptStaticCall, locs, entry_kind);
__ Drop(size_with_type_args);
}
@ -1246,7 +1249,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
}
// Do not use the code from the function, but let the code be patched so that
// we can record the outgoing edges to other code.
GenerateStaticDartCall(deopt_id, token_pos, RawPcDescriptors::kOther, locs,
GenerateStaticDartCall(deopt_id, token_pos, PcDescriptorsLayout::kOther, locs,
function, entry_kind);
__ Drop(size_with_type_args);
}
@ -1282,7 +1285,8 @@ Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
} else {
__ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id,
token_pos);
// Stub returns result in flags (result of a cmp, we need Z computed).
// Discard constant.
// Restore 'reg'.
@ -1305,7 +1309,8 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
} else {
__ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id,
token_pos);
// Stub returns result in flags (result of a cmp, we need Z computed).
__ PopPair(right, left);
} else {

View file

@ -71,9 +71,9 @@ void FlowGraphCompiler::ExitIntrinsicMode() {
intrinsic_mode_ = false;
}
RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
DeoptInfoBuilder* builder,
const Array& deopt_table) {
TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
DeoptInfoBuilder* builder,
const Array& deopt_table) {
if (deopt_env_ == NULL) {
++builder->current_info_number_;
return TypedData::null();
@ -182,7 +182,7 @@ void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
compiler::Label* is_true,
compiler::Label* is_false) {
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
compiler::Immediate(static_cast<intptr_t>(Object::null()));
compiler::Label fall_through;
__ cmpl(bool_register, raw_null);
__ j(EQUAL, &fall_through, compiler::Assembler::kNearJump);
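
The reinterpret_cast to static_cast change in these ia32 immediates is possible once the raw null handle value is no longer a C++ pointer: assuming ObjectPtr-style handles are value types wrapping a word, an explicit integer conversion is all that is needed to form the immediate. A minimal sketch of that shape, with invented names and an arbitrary tagged value.

#include <cstdint>
#include <cstdio>

using uword = uintptr_t;

// Invented stand-in for a tagged-pointer value type: just a word with an
// explicit conversion to intptr_t, so it can be embedded as an immediate.
class ObjectPtr {
 public:
  constexpr explicit ObjectPtr(uword tagged) : tagged_(tagged) {}
  constexpr explicit operator intptr_t() const { return static_cast<intptr_t>(tagged_); }

 private:
  uword tagged_;
};

// Arbitrary tagged address standing in for Object::null().
constexpr ObjectPtr kNullObject(0x1001);

int main() {
  // A value conversion instead of a pointer reinterpretation: nothing here is
  // a C++ pointer, only an integer-like wrapper around the tagged word.
  const intptr_t raw_null = static_cast<intptr_t>(kNullObject);
  printf("raw null immediate: %#lx\n", static_cast<unsigned long>(raw_null));
}
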
@ -196,7 +196,7 @@ void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
}
// Clobbers ECX.
RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
TypeTestStubKind test_kind,
Register instance_reg,
Register instantiator_type_arguments_reg,
@ -207,7 +207,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
const SubtypeTestCache& type_test_cache =
SubtypeTestCache::ZoneHandle(zone(), SubtypeTestCache::New());
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
compiler::Immediate(static_cast<intptr_t>(Object::null()));
__ LoadObject(temp_reg, type_test_cache);
__ pushl(temp_reg); // Subtype test cache.
__ pushl(instance_reg); // Instance.
@ -249,7 +249,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
// be completed.
// EAX: instance (must survive).
// Clobbers ECX, EDI.
RawSubtypeTestCache*
SubtypeTestCachePtr
FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
TokenPosition token_pos,
const AbstractType& type,
@ -390,7 +390,7 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
// TODO(srdjan): Implement a quicker subtype check, as type test
// arrays can grow too high, but they may be useful when optimizing
// code (type-feedback).
RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
SubtypeTestCachePtr FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
TokenPosition token_pos,
const Class& type_class,
compiler::Label* is_instance_lbl,
@ -427,7 +427,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
// Generates inlined check if 'type' is a type parameter or type itself
// EAX: instance (preserved).
// Clobbers EDX, EDI, ECX.
RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
SubtypeTestCachePtr FlowGraphCompiler::GenerateUninstantiatedTypeTest(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
@ -438,7 +438,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
ASSERT(!type.IsFunctionType());
// Skip check if destination is a dynamic type.
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
compiler::Immediate(static_cast<intptr_t>(Object::null()));
if (type.IsTypeParameter()) {
const TypeParameter& type_param = TypeParameter::Cast(type);
@ -514,7 +514,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
// Generates function type check.
//
// See [GenerateUninstantiatedTypeTest] for calling convention.
RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
SubtypeTestCachePtr FlowGraphCompiler::GenerateFunctionTypeTest(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
@ -544,7 +544,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
// Note that this inlined code must be followed by the runtime_call code, as it
// may fall through to it. Otherwise, this inline code will jump to the label
// is_instance or to the label is_not_instance.
RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
SubtypeTestCachePtr FlowGraphCompiler::GenerateInlineInstanceof(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
@ -603,7 +603,7 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
__ pushl(ECX); // Store function type arguments.
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
compiler::Immediate(static_cast<intptr_t>(Object::null()));
compiler::Label is_instance, is_not_instance;
// 'null' is an instance of Null, Object*, Never*, void, and dynamic.
// In addition, 'null' is an instance of any nullable type.
@ -688,7 +688,7 @@ void FlowGraphCompiler::GenerateAssertAssignable(CompileType* receiver_type,
compiler::Label is_assignable, runtime_call;
if (Instance::NullIsAssignableTo(dst_type)) {
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
compiler::Immediate(static_cast<intptr_t>(Object::null()));
__ cmpl(EAX, raw_null);
__ j(EQUAL, &is_assignable);
}
@ -762,7 +762,7 @@ void FlowGraphCompiler::GenerateSetterIntrinsic(intptr_t offset) {
__ movl(EBX, compiler::Address(ESP, 1 * kWordSize)); // Value.
__ StoreIntoObject(EAX, compiler::FieldAddress(EAX, offset), EBX);
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
compiler::Immediate(static_cast<intptr_t>(Object::null()));
__ movl(EAX, raw_null);
__ ret();
}
@ -819,7 +819,7 @@ void FlowGraphCompiler::EmitPrologue() {
__ Comment("Initialize spill slots");
if (num_locals > 1 || (num_locals == 1 && args_desc_slot == -1)) {
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
compiler::Immediate(static_cast<intptr_t>(Object::null()));
__ movl(EAX, raw_null);
}
for (intptr_t i = 0; i < num_locals; ++i) {
@ -859,7 +859,7 @@ void FlowGraphCompiler::EmitCallToStub(const Code& stub) {
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
@ -869,7 +869,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
TokenPosition token_pos,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
@ -886,7 +886,7 @@ void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
intptr_t argument_count,
LocationSummary* locs) {
__ CallRuntime(entry, argument_count);
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kOther, locs);
}
void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
@ -900,7 +900,7 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(ECX, ic_data);
GenerateDartCall(deopt_id, token_pos, stub,
RawPcDescriptors::kUnoptStaticCall, locs, entry_kind);
PcDescriptorsLayout::kUnoptStaticCall, locs, entry_kind);
__ Drop(size_with_type_args);
}
@ -936,8 +936,8 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
__ movl(EBX, compiler::Address(
ESP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
__ LoadObject(ECX, ic_data);
GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs,
entry_kind);
GenerateDartCall(deopt_id, token_pos, stub, PcDescriptorsLayout::kIcCall,
locs, entry_kind);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -961,7 +961,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
__ call(compiler::FieldAddress(CODE_REG, entry_point_offset));
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kIcCall, locs);
EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kIcCall, locs);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -988,7 +988,7 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
__ call(compiler::FieldAddress(
CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
AddCurrentDescriptor(RawPcDescriptors::kOther, DeoptId::kNone, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone, token_pos);
RecordSafepoint(locs, slow_path_argument_count);
const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
// Precompilation not implemented on ia32 platform.
@ -998,7 +998,8 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
} else {
// Add deoptimization continuation point after the call and before the
// arguments are removed.
AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id_after,
token_pos);
}
RecordCatchEntryMoves(pending_deoptimization_env_, try_index);
__ Drop(args_desc.SizeWithTypeArgs());
@ -1030,7 +1031,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
}
// Do not use the code from the function, but let the code be patched so that
// we can record the outgoing edges to other code.
GenerateStaticDartCall(deopt_id, token_pos, RawPcDescriptors::kOther, locs,
GenerateStaticDartCall(deopt_id, token_pos, PcDescriptorsLayout::kOther, locs,
function, entry_kind);
__ Drop(size_with_type_args);
}
@ -1065,7 +1066,8 @@ Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
} else {
__ Call(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id,
token_pos);
// Stub returns result in flags (result of a cmpl, we need ZF computed).
__ popl(reg); // Discard constant.
__ popl(reg); // Restore 'reg'.
@ -1088,7 +1090,8 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
} else {
__ Call(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id,
token_pos);
// Stub returns result in flags (result of a cmpl, we need ZF computed).
__ popl(right);
__ popl(left);

View file

@ -94,9 +94,9 @@ void FlowGraphCompiler::ExitIntrinsicMode() {
intrinsic_mode_ = false;
}
RawTypedData* CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
DeoptInfoBuilder* builder,
const Array& deopt_table) {
TypedDataPtr CompilerDeoptInfo::CreateDeoptInfo(FlowGraphCompiler* compiler,
DeoptInfoBuilder* builder,
const Array& deopt_table) {
if (deopt_env_ == NULL) {
++builder->current_info_number_;
return TypedData::null();
@ -226,7 +226,7 @@ void FlowGraphCompiler::GenerateBoolToJump(Register bool_register,
// - RCX : function type arguments (if necessary).
//
// Preserves RAX/RCX/RDX.
RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
TypeTestStubKind test_kind,
Register instance_reg,
Register instantiator_type_arguments_reg,
@ -271,7 +271,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateCallSubtypeTestStub(
// be completed.
// RAX: instance (must survive).
// Clobbers R10.
RawSubtypeTestCache*
SubtypeTestCachePtr
FlowGraphCompiler::GenerateInstantiatedTypeWithArgumentsTest(
TokenPosition token_pos,
const AbstractType& type,
@ -422,7 +422,7 @@ bool FlowGraphCompiler::GenerateInstantiatedTypeNoArgumentsTest(
// TODO(srdjan): Implement a quicker subtype check, as type test
// arrays can grow too high, but they may be useful when optimizing
// code (type-feedback).
RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
SubtypeTestCachePtr FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
TokenPosition token_pos,
const Class& type_class,
compiler::Label* is_instance_lbl,
@ -465,7 +465,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateSubtype1TestCacheLookup(
// - RCX : function type arguments (if necessary).
//
// Preserves RAX/RCX/RDX.
RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
SubtypeTestCachePtr FlowGraphCompiler::GenerateUninstantiatedTypeTest(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
@ -540,7 +540,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateUninstantiatedTypeTest(
// Generates function type check.
//
// See [GenerateUninstantiatedTypeTest] for calling convention.
RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
SubtypeTestCachePtr FlowGraphCompiler::GenerateFunctionTypeTest(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
@ -567,7 +567,7 @@ RawSubtypeTestCache* FlowGraphCompiler::GenerateFunctionTypeTest(
// Note that this inlined code must be followed by the runtime_call code, as it
// may fall through to it. Otherwise, this inline code will jump to the label
// is_instance or to the label is_not_instance.
RawSubtypeTestCache* FlowGraphCompiler::GenerateInlineInstanceof(
SubtypeTestCachePtr FlowGraphCompiler::GenerateInlineInstanceof(
TokenPosition token_pos,
const AbstractType& type,
compiler::Label* is_instance_lbl,
@ -651,7 +651,7 @@ void FlowGraphCompiler::GenerateInstanceOf(TokenPosition token_pos,
__ LoadUniqueObject(TypeTestABI::kDstTypeReg, type);
__ LoadUniqueObject(TypeTestABI::kSubtypeTestCacheReg, test_cache);
GenerateStubCall(token_pos, StubCode::InstanceOf(),
/*kind=*/RawPcDescriptors::kOther, locs, deopt_id);
/*kind=*/PcDescriptorsLayout::kOther, locs, deopt_id);
__ jmp(&done, compiler::Assembler::kNearJump);
}
__ Bind(&is_not_instance);
@ -769,7 +769,7 @@ void FlowGraphCompiler::GenerateAssertAssignableViaTypeTestingStub(
__ call(compiler::FieldAddress(
kRegToCall, AbstractType::type_test_stub_entry_point_offset()));
}
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kOther, locs);
__ Bind(&done);
}
@ -978,7 +978,7 @@ void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs) {
__ CallPatchable(stub);
EmitCallsiteMetadata(token_pos, DeoptId::kNone, kind, locs);
@ -987,7 +987,7 @@ void FlowGraphCompiler::GeneratePatchableCall(TokenPosition token_pos,
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
TokenPosition token_pos,
const Code& stub,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
@ -997,7 +997,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
TokenPosition token_pos,
RawPcDescriptors::Kind kind,
PcDescriptorsLayout::Kind kind,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
@ -1025,7 +1025,7 @@ void FlowGraphCompiler::GenerateRuntimeCall(TokenPosition token_pos,
intptr_t argument_count,
LocationSummary* locs) {
__ CallRuntime(entry, argument_count);
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kOther, locs);
}
void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
@ -1039,7 +1039,7 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(intptr_t size_with_type_args,
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(RBX, ic_data);
GenerateDartCall(deopt_id, token_pos, stub,
RawPcDescriptors::kUnoptStaticCall, locs, entry_kind);
PcDescriptorsLayout::kUnoptStaticCall, locs, entry_kind);
__ Drop(size_with_type_args, RCX);
}
@ -1076,8 +1076,8 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(const Code& stub,
__ movq(RDX, compiler::Address(
RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
__ LoadUniqueObject(RBX, ic_data);
GenerateDartCall(deopt_id, token_pos, stub, RawPcDescriptors::kIcCall, locs,
entry_kind);
GenerateDartCall(deopt_id, token_pos, stub, PcDescriptorsLayout::kIcCall,
locs, entry_kind);
__ Drop(ic_data.SizeWithTypeArgs(), RCX);
}
@ -1101,7 +1101,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
__ call(compiler::FieldAddress(CODE_REG, entry_point_offset));
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kIcCall, locs);
EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kIcCall, locs);
__ Drop(ic_data.SizeWithTypeArgs(), RCX);
}
@ -1152,16 +1152,19 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
if (try_index == kInvalidTryIndex) {
try_index = CurrentTryIndex();
}
AddDescriptor(RawPcDescriptors::kOther, assembler()->CodeSize(),
AddDescriptor(PcDescriptorsLayout::kOther, assembler()->CodeSize(),
DeoptId::kNone, token_pos, try_index);
} else if (is_optimizing()) {
AddCurrentDescriptor(RawPcDescriptors::kOther, DeoptId::kNone, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone,
token_pos);
AddDeoptIndexAtCall(deopt_id_after);
} else {
AddCurrentDescriptor(RawPcDescriptors::kOther, DeoptId::kNone, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone,
token_pos);
// Add deoptimization continuation point after the call and before the
// arguments are removed.
AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id_after, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id_after,
token_pos);
}
RecordCatchEntryMoves(pending_deoptimization_env_, try_index);
__ Drop(args_desc.SizeWithTypeArgs(), RCX);
@ -1204,7 +1207,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
__ LoadUniqueObject(RBX, data);
__ call(RCX);
EmitCallsiteMetadata(token_pos, deopt_id, RawPcDescriptors::kOther, locs);
EmitCallsiteMetadata(token_pos, deopt_id, PcDescriptorsLayout::kOther, locs);
__ Drop(ic_data.SizeWithTypeArgs(), RCX);
}
@ -1227,7 +1230,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
}
// Do not use the code from the function, but let the code be patched so that
// we can record the outgoing edges to other code.
GenerateStaticDartCall(deopt_id, token_pos, RawPcDescriptors::kOther, locs,
GenerateStaticDartCall(deopt_id, token_pos, PcDescriptorsLayout::kOther, locs,
function, entry_kind);
__ Drop(size_with_type_args, RCX);
}
@ -1271,7 +1274,8 @@ Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
} else {
__ CallPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id,
token_pos);
// Stub returns result in flags (result of a cmpq, we need ZF computed).
__ popq(reg); // Discard constant.
__ popq(reg); // Restore 'reg'.
@ -1294,7 +1298,8 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(Register left,
} else {
__ CallPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(RawPcDescriptors::kRuntimeCall, deopt_id, token_pos);
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id,
token_pos);
// Stub returns result in flags (result of a cmpq, we need ZF computed).
__ popq(right);
__ popq(left);
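
The hunks above consistently re-scope the descriptor kinds — kOther, kDeopt, kIcCall, kRuntimeCall, kUnoptStaticCall, kOsrEntry — from RawPcDescriptors:: to PcDescriptorsLayout::; the enumerator names never change, only the class that owns them. A minimal, self-contained sketch of that shape (the class name and kinds below are illustrative stand-ins, not the VM's actual definitions), showing why every call site is touched only at the qualifier:

#include <cstdio>

// Illustrative stand-in for a *Layout class that owns a nested Kind enum.
class SampleDescriptorsLayout {
 public:
  enum Kind {
    kDeopt,
    kIcCall,
    kUnoptStaticCall,
    kRuntimeCall,
    kOsrEntry,
    kOther,
  };
};

// Call sites spell the enclosing scope once per use, so renaming the class is
// a pure find-and-replace of the qualifier, never of the enumerator names.
static void AddDescriptor(SampleDescriptorsLayout::Kind kind, int pc_offset) {
  std::printf("descriptor kind %d at pc offset %d\n",
              static_cast<int>(kind), pc_offset);
}

int main() {
  AddDescriptor(SampleDescriptorsLayout::kOther, 16);
  AddDescriptor(SampleDescriptorsLayout::kDeopt, 48);
  return 0;
}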

View file

@ -3872,7 +3872,7 @@ LocationSummary* JoinEntryInstr::MakeLocationSummary(Zone* zone,
void JoinEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(compiler->GetJumpLabel(this));
if (!compiler->is_optimizing()) {
compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
TokenPosition::kNoSource);
}
if (HasParallelMove()) {
@ -3899,7 +3899,7 @@ void TargetEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The deoptimization descriptor points after the edge counter code for
// uniformity with ARM, where we can reuse pattern matching code that
// matches backwards from the end of the pattern.
compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
TokenPosition::kNoSource);
}
if (HasParallelMove()) {
@ -3973,7 +3973,7 @@ void FunctionEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The deoptimization descriptor points after the edge counter code for
// uniformity with ARM, where we can reuse pattern matching code that
// matches backwards from the end of the pattern.
compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
TokenPosition::kNoSource);
}
if (HasParallelMove()) {
@ -4082,7 +4082,7 @@ void InitStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LoadObject(InitStaticFieldABI::kFieldReg,
Field::ZoneHandle(field().Original()));
compiler->GenerateStubCall(token_pos(), init_static_field_stub,
/*kind=*/RawPcDescriptors::kOther, locs(),
/*kind=*/PcDescriptorsLayout::kOther, locs(),
deopt_id());
__ Bind(&no_call);
}
@ -4116,7 +4116,7 @@ void InitInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// so deoptimization environment has to be adjusted.
// This adjustment is done in FlowGraph::AttachEnvironment.
compiler->GenerateStubCall(token_pos(), stub,
/*kind=*/RawPcDescriptors::kOther, locs(),
/*kind=*/PcDescriptorsLayout::kOther, locs(),
deopt_id());
__ Bind(&no_call);
}
@ -4136,7 +4136,7 @@ void ThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Code::ZoneHandle(compiler->zone(), object_store->throw_stub());
compiler->GenerateStubCall(token_pos(), throw_stub,
/*kind=*/RawPcDescriptors::kOther, locs(),
/*kind=*/PcDescriptorsLayout::kOther, locs(),
deopt_id());
// Issue(dartbug.com/41353): Right now we have to emit an extra breakpoint
// instruction: The ThrowInstr will terminate the current block. The very
@ -4164,7 +4164,7 @@ void ReThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler->SetNeedsStackTrace(catch_try_index());
compiler->GenerateStubCall(token_pos(), re_throw_stub,
/*kind=*/RawPcDescriptors::kOther, locs(),
/*kind=*/PcDescriptorsLayout::kOther, locs(),
deopt_id());
// Issue(dartbug.com/41353): Right now we have to emit an extra breakpoint
// instruction: The ThrowInstr will terminate the current block. The very
@ -4198,7 +4198,7 @@ void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ CompareObject(AssertBooleanABI::kObjectReg, Object::null_instance());
__ BranchIf(NOT_EQUAL, &done);
compiler->GenerateStubCall(token_pos(), assert_boolean_stub,
/*kind=*/RawPcDescriptors::kOther, locs(),
/*kind=*/PcDescriptorsLayout::kOther, locs(),
deopt_id());
__ Bind(&done);
}
@ -4489,7 +4489,7 @@ LocationSummary* InstanceCallInstr::MakeLocationSummary(Zone* zone,
return MakeCallSummary(zone, this);
}
static RawCode* TwoArgsSmiOpInlineCacheEntry(Token::Kind kind) {
static CodePtr TwoArgsSmiOpInlineCacheEntry(Token::Kind kind) {
if (!FLAG_two_args_smi_icd) {
return Code::null();
}
@ -4596,7 +4596,7 @@ void InstanceCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
} else {
// Unoptimized code.
compiler->AddCurrentDescriptor(RawPcDescriptors::kRewind, deopt_id(),
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kRewind, deopt_id(),
token_pos());
bool is_smi_two_args_op = false;
const Code& stub =
@ -4622,7 +4622,7 @@ bool InstanceCallInstr::MatchesCoreName(const String& name) {
return Library::IsPrivateCoreLibName(function_name(), name);
}
RawFunction* InstanceCallBaseInstr::ResolveForReceiverClass(
FunctionPtr InstanceCallBaseInstr::ResolveForReceiverClass(
const Class& cls,
bool allow_add /* = true */) {
const Array& args_desc_array = Array::Handle(GetArgumentsDescriptor());
@ -4719,7 +4719,7 @@ void DispatchTableCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler->EmitDispatchTableCall(cid_reg, selector()->offset,
arguments_descriptor);
compiler->EmitCallsiteMetadata(token_pos(), DeoptId::kNone,
RawPcDescriptors::kOther, locs());
PcDescriptorsLayout::kOther, locs());
if (selector()->called_on_null && !selector()->on_null_interface) {
Value* receiver = ArgumentValueAt(FirstArgIndex());
if (receiver->Type()->is_nullable()) {
@ -4850,7 +4850,7 @@ void PolymorphicInstanceCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
total_call_count(), !receiver_is_not_smi());
}
RawType* PolymorphicInstanceCallInstr::ComputeRuntimeType(
TypePtr PolymorphicInstanceCallInstr::ComputeRuntimeType(
const CallTargets& targets) {
bool is_string = true;
bool is_integer = true;

View file

@ -2797,7 +2797,7 @@ class ReturnInstr : public TemplateInstruction<1, NoThrow> {
ReturnInstr(TokenPosition token_pos,
Value* value,
intptr_t deopt_id,
intptr_t yield_index = RawPcDescriptors::kInvalidYieldIndex,
intptr_t yield_index = PcDescriptorsLayout::kInvalidYieldIndex,
Representation representation = kTagged)
: TemplateInstruction(deopt_id),
token_pos_(token_pos),
@ -3693,7 +3693,7 @@ struct ArgumentsInfo {
(type_args_len > 0 ? 1 : 0)),
argument_names(argument_names) {}
RawArray* ToArgumentsDescriptor() const {
ArrayPtr ToArgumentsDescriptor() const {
return ArgumentsDescriptor::New(type_args_len, count_without_type_args,
size_without_type_args, argument_names);
}
@ -3726,7 +3726,7 @@ class TemplateDartCall : public Definition {
}
}
RawString* Selector() {
StringPtr Selector() {
if (auto static_call = this->AsStaticCall()) {
return static_call->function().name();
} else if (auto instance_call = this->AsInstanceCall()) {
@ -3783,7 +3783,7 @@ class TemplateDartCall : public Definition {
intptr_t type_args_len() const { return type_args_len_; }
const Array& argument_names() const { return argument_names_; }
virtual TokenPosition token_pos() const { return token_pos_; }
RawArray* GetArgumentsDescriptor() const {
ArrayPtr GetArgumentsDescriptor() const {
return ArgumentsDescriptor::New(
type_args_len(), ArgumentCountWithoutTypeArgs(),
ArgumentsSizeWithoutTypeArgs(), argument_names());
@ -3927,7 +3927,7 @@ class InstanceCallBaseInstr : public TemplateDartCall<0> {
return result_type_->ToCid();
}
RawFunction* ResolveForReceiverClass(const Class& cls, bool allow_add = true);
FunctionPtr ResolveForReceiverClass(const Class& cls, bool allow_add = true);
Code::EntryKind entry_kind() const { return entry_kind_; }
void set_entry_kind(Code::EntryKind value) { entry_kind_ = value; }
@ -4124,7 +4124,7 @@ class PolymorphicInstanceCallInstr : public InstanceCallBaseInstr {
virtual Definition* Canonicalize(FlowGraph* graph);
static RawType* ComputeRuntimeType(const CallTargets& targets);
static TypePtr ComputeRuntimeType(const CallTargets& targets);
PRINT_OPERANDS_TO_SUPPORT
ADD_EXTRA_INFO_TO_S_EXPRESSION_SUPPORT
@ -5024,7 +5024,7 @@ class FfiCallInstr : public Definition {
class DebugStepCheckInstr : public TemplateInstruction<0, NoThrow> {
public:
DebugStepCheckInstr(TokenPosition token_pos,
RawPcDescriptors::Kind stub_kind,
PcDescriptorsLayout::Kind stub_kind,
intptr_t deopt_id)
: TemplateInstruction<0, NoThrow>(deopt_id),
token_pos_(token_pos),
@ -5041,7 +5041,7 @@ class DebugStepCheckInstr : public TemplateInstruction<0, NoThrow> {
private:
const TokenPosition token_pos_;
const RawPcDescriptors::Kind stub_kind_;
const PcDescriptorsLayout::Kind stub_kind_;
DISALLOW_COPY_AND_ASSIGN(DebugStepCheckInstr);
};
@ -5254,7 +5254,7 @@ class GuardFieldLengthInstr : public GuardFieldInstr {
// For a field of static type G<T0, ..., Tn> and a stored value of runtime
// type T checks that type arguments of T at G exactly match <T0, ..., Tn>
// and updates guarded state (RawField::static_type_exactness_state_)
// and updates guarded state (FieldLayout::static_type_exactness_state_)
// accordingly.
//
// See StaticTypeExactnessState for more information.
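
The il.h hunks above retype accessors such as ToArgumentsDescriptor, Selector, GetArgumentsDescriptor, ResolveForReceiverClass, and ComputeRuntimeType from RawArray*/RawString*/RawFunction*/RawType* to ArrayPtr/StringPtr/FunctionPtr/TypePtr, while call sites such as Array::Handle(GetArgumentsDescriptor()) in il.cc stay unchanged. A rough sketch of why a by-value wrapper keeps that call-site shape, assuming a simplified wrapper and handle (neither matches the VM's real types):

#include <cstdint>
#include <cstdio>

using uword = std::uintptr_t;

// Hypothetical word-sized value wrapper standing in for an ArrayPtr-style type.
class SampleArrayPtr {
 public:
  explicit SampleArrayPtr(uword tagged) : tagged_(tagged) {}
  uword tagged() const { return tagged_; }

 private:
  uword tagged_;  // the tagged pointer bits, carried by value
};

// Hypothetical handle: it stores whatever the accessor returns, so switching
// the accessor from a raw pointer type to a by-value wrapper leaves the
// Handle(SomeAccessor()) call-site pattern intact.
class SampleArrayHandle {
 public:
  explicit SampleArrayHandle(SampleArrayPtr ptr) : ptr_(ptr) {}
  SampleArrayPtr ptr() const { return ptr_; }

 private:
  SampleArrayPtr ptr_;
};

// Accessor shaped like ArrayPtr ToArgumentsDescriptor(): returns the wrapper
// by value instead of a raw pointer.
static SampleArrayPtr MakeArgumentsDescriptor() {
  return SampleArrayPtr(0x1000 | 0x1);  // made-up tagged word
}

int main() {
  const SampleArrayHandle descriptor(MakeArgumentsDescriptor());
  std::printf("tagged word: 0x%llx\n",
              static_cast<unsigned long long>(descriptor.ptr().tagged()));
  return 0;
}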

View file

@ -383,7 +383,7 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&stack_ok);
#endif
ASSERT(__ constant_pool_allowed());
if (yield_index() != RawPcDescriptors::kInvalidYieldIndex) {
if (yield_index() != PcDescriptorsLayout::kInvalidYieldIndex) {
compiler->EmitYieldPositionMetadata(token_pos(), yield_index());
}
__ LeaveDartFrameAndReturn(); // Disallows constant pool use.
@ -504,7 +504,7 @@ void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
__ blx(R2);
compiler->EmitCallsiteMetadata(token_pos(), deopt_id(),
RawPcDescriptors::kOther, locs());
PcDescriptorsLayout::kOther, locs());
__ Drop(argument_count);
}
@ -1154,9 +1154,9 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
: compiler::ObjectPoolBuilderEntry::kNotPatchable);
if (link_lazily()) {
compiler->GeneratePatchableCall(token_pos(), *stub,
RawPcDescriptors::kOther, locs());
PcDescriptorsLayout::kOther, locs());
} else {
compiler->GenerateStubCall(token_pos(), *stub, RawPcDescriptors::kOther,
compiler->GenerateStubCall(token_pos(), *stub, PcDescriptorsLayout::kOther,
locs());
}
__ Pop(result);
@ -1196,7 +1196,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// instruction. Therefore we emit the metadata here, 8 bytes (2 instructions)
// after the original mov.
compiler->EmitCallsiteMetadata(TokenPosition::kNoSource, DeoptId::kNone,
RawPcDescriptors::Kind::kOther, locs());
PcDescriptorsLayout::Kind::kOther, locs());
// Update information in the thread object and enter a safepoint.
if (CanExecuteGeneratedCodeInSafepoint()) {
@ -2450,7 +2450,7 @@ class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
compiler->SaveLiveRegisters(locs);
compiler->GenerateStubCall(TokenPosition::kNoSource, // No token position.
stub, RawPcDescriptors::kOther, locs);
stub, PcDescriptorsLayout::kOther, locs);
__ MoveRegister(result_, R0);
compiler->RestoreLiveRegisters(locs);
__ b(exit_label());
@ -2993,7 +2993,7 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
// data area to be initialized.
// R8: null
if (num_elements > 0) {
const intptr_t array_size = instance_size - sizeof(RawArray);
const intptr_t array_size = instance_size - sizeof(ArrayLayout);
__ LoadObject(R8, Object::null_object());
if (num_elements >= 2) {
__ mov(R9, compiler::Operand(R8));
@ -3003,7 +3003,7 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
__ LoadImmediate(R9, 0x1);
#endif // DEBUG
}
__ AddImmediate(R6, R0, sizeof(RawArray) - kHeapObjectTag);
__ AddImmediate(R6, R0, sizeof(ArrayLayout) - kHeapObjectTag);
if (array_size < (kInlineArraySize * compiler::target::kWordSize)) {
__ InitializeFieldsNoBarrierUnrolled(
R0, R6, 0, num_elements * compiler::target::kWordSize, R8, R9);
@ -3055,7 +3055,7 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& allocate_array_stub =
Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
compiler->GenerateStubCall(token_pos(), allocate_array_stub,
RawPcDescriptors::kOther, locs(), deopt_id());
PcDescriptorsLayout::kOther, locs(), deopt_id());
ASSERT(locs()->out(0).reg() == kResultReg);
}
@ -3318,8 +3318,8 @@ void InstantiateTypeArgumentsInstr::EmitNativeCode(
// Lookup cache in stub before calling runtime.
__ LoadObject(InstantiationABI::kUninstantiatedTypeArgumentsReg,
type_arguments());
compiler->GenerateStubCall(token_pos(), GetStub(), RawPcDescriptors::kOther,
locs());
compiler->GenerateStubCall(token_pos(), GetStub(),
PcDescriptorsLayout::kOther, locs());
__ Bind(&type_arguments_instantiated);
}
@ -3359,8 +3359,8 @@ class AllocateContextSlowPath
compiler->zone(), object_store->allocate_context_stub());
__ LoadImmediate(R1, instruction()->num_context_variables());
compiler->GenerateStubCall(instruction()->token_pos(),
allocate_context_stub, RawPcDescriptors::kOther,
locs);
allocate_context_stub,
PcDescriptorsLayout::kOther, locs);
ASSERT(instruction()->locs()->out(0).reg() == R0);
compiler->RestoreLiveRegisters(instruction()->locs());
__ b(exit_label());
@ -3410,7 +3410,7 @@ void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub());
__ LoadImmediate(R1, num_context_variables());
compiler->GenerateStubCall(token_pos(), allocate_context_stub,
RawPcDescriptors::kOther, locs());
PcDescriptorsLayout::kOther, locs());
}
LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
@ -3432,7 +3432,7 @@ void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& clone_context_stub =
Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
compiler->GenerateStubCall(token_pos(), clone_context_stub,
/*kind=*/RawPcDescriptors::kOther, locs());
/*kind=*/PcDescriptorsLayout::kOther, locs());
}
LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
@ -3453,7 +3453,7 @@ void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (compiler->is_optimizing()) {
compiler->AddDeoptIndexAtCall(deopt_id);
} else {
compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id,
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id,
TokenPosition::kNoSource);
}
}
@ -3540,7 +3540,7 @@ class CheckStackOverflowSlowPath
compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);
compiler->RecordCatchEntryMoves();
compiler->AddDescriptor(
RawPcDescriptors::kOther, compiler->assembler()->CodeSize(),
PcDescriptorsLayout::kOther, compiler->assembler()->CodeSize(),
instruction()->deopt_id(), instruction()->token_pos(),
compiler->CurrentTryIndex());
} else {
@ -3552,7 +3552,7 @@ class CheckStackOverflowSlowPath
if (compiler->isolate()->use_osr() && !compiler->is_optimizing() &&
instruction()->in_loop()) {
// In unoptimized code, record loop stack checks as possible OSR entries.
compiler->AddCurrentDescriptor(RawPcDescriptors::kOsrEntry,
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kOsrEntry,
instruction()->deopt_id(),
TokenPosition::kNoSource);
}
@ -3595,7 +3595,7 @@ void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// the stub above).
auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
compiler->EmitCallsiteMetadata(token_pos(), deopt_id(),
RawPcDescriptors::kOther, locs(),
PcDescriptorsLayout::kOther, locs(),
extended_env);
return;
}
@ -4780,6 +4780,7 @@ LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
!Isolate::Current()
->object_store()
->allocate_mint_with_fpu_regs_stub()
->ptr()
->InVMIsolateHeap())
? LocationSummary::kCallOnSharedSlowPath
: LocationSummary::kCallOnSlowPath));
@ -4826,7 +4827,7 @@ void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(!locs()->live_registers()->ContainsRegister(R0));
auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
compiler->GenerateStubCall(token_pos(), stub, RawPcDescriptors::kOther,
compiler->GenerateStubCall(token_pos(), stub, PcDescriptorsLayout::kOther,
locs(), DeoptId::kNone, extended_env);
} else {
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
@ -6392,7 +6393,7 @@ void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// the stub above).
auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
compiler->EmitCallsiteMetadata(token_pos(), deopt_id(),
RawPcDescriptors::kOther, locs(),
PcDescriptorsLayout::kOther, locs(),
extended_env);
CheckNullInstr::AddMetadataForRuntimeCall(this, compiler);
return;
@ -7300,7 +7301,7 @@ void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
// Add a deoptimization descriptor for deoptimizing instructions that
// may be inserted before this instruction.
compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
TokenPosition::kNoSource);
}
if (HasParallelMove()) {
@ -7474,7 +7475,7 @@ void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
const Code& stub = Code::ZoneHandle(
compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
compiler->GenerateStubCall(token_pos(), stub, RawPcDescriptors::kOther,
compiler->GenerateStubCall(token_pos(), stub, PcDescriptorsLayout::kOther,
locs());
}
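
In the InlineArrayAllocation hunks above, the payload size becomes instance_size - sizeof(ArrayLayout) and the first element starts at sizeof(ArrayLayout) - kHeapObjectTag, so the layout struct takes over the role the Raw header type played in the size arithmetic. A small worked sketch of that arithmetic with made-up sizes (the struct, tag value, and element count below are assumptions for illustration, not the VM's):

#include <cstdint>
#include <cstdio>

// Illustrative header layout for a fixed-length array object: a tag word,
// a type-arguments slot, and a length slot, followed by the element slots.
struct SampleArrayLayout {
  std::uintptr_t tags_;
  std::uintptr_t type_arguments_;
  std::uintptr_t length_;
};

int main() {
  const std::intptr_t kWordSize = sizeof(std::uintptr_t);
  const std::intptr_t kHeapObjectTag = 1;  // assumed tag bias on object pointers
  const std::intptr_t kHeaderSize =
      static_cast<std::intptr_t>(sizeof(SampleArrayLayout));
  const std::intptr_t num_elements = 4;

  // Total object size: header plus one word per element.
  const std::intptr_t instance_size = kHeaderSize + num_elements * kWordSize;

  // Mirrors the shape of the hunks above: the payload excludes the header,
  // and the first element sits at header size minus the pointer tag.
  const std::intptr_t array_size = instance_size - kHeaderSize;
  const std::intptr_t first_element_offset = kHeaderSize - kHeapObjectTag;

  std::printf("payload bytes: %ld, first element offset: %ld\n",
              static_cast<long>(array_size),
              static_cast<long>(first_element_offset));
  return 0;
}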

View file

@ -283,7 +283,7 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&stack_ok);
#endif
ASSERT(__ constant_pool_allowed());
if (yield_index() != RawPcDescriptors::kInvalidYieldIndex) {
if (yield_index() != PcDescriptorsLayout::kInvalidYieldIndex) {
compiler->EmitYieldPositionMetadata(token_pos(), yield_index());
}
__ LeaveDartFrame(); // Disallows constant pool use.
@ -400,7 +400,7 @@ void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
__ blr(R2);
compiler->EmitCallsiteMetadata(token_pos(), deopt_id(),
RawPcDescriptors::kOther, locs());
PcDescriptorsLayout::kOther, locs());
__ Drop(argument_count);
}
@ -837,7 +837,7 @@ Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
Location right = locs()->in(1);
if (right.IsConstant()) {
ASSERT(right.constant().IsSmi());
const int64_t imm = reinterpret_cast<int64_t>(right.constant().raw());
const int64_t imm = static_cast<int64_t>(right.constant().raw());
__ TestImmediate(left, imm);
} else {
__ tst(left, compiler::Operand(right.reg()));
@ -980,9 +980,9 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
: ObjectPool::Patchability::kNotPatchable);
if (link_lazily()) {
compiler->GeneratePatchableCall(token_pos(), *stub,
RawPcDescriptors::kOther, locs());
PcDescriptorsLayout::kOther, locs());
} else {
compiler->GenerateStubCall(token_pos(), *stub, RawPcDescriptors::kOther,
compiler->GenerateStubCall(token_pos(), *stub, PcDescriptorsLayout::kOther,
locs());
}
__ Pop(result);
@ -1017,7 +1017,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// instruction.
__ adr(temp, compiler::Immediate(Instr::kInstrSize));
compiler->EmitCallsiteMetadata(token_pos(), DeoptId::kNone,
RawPcDescriptors::Kind::kOther, locs());
PcDescriptorsLayout::Kind::kOther, locs());
__ StoreToOffset(temp, FPREG, kSavedCallerPcSlotFromFp * kWordSize);
@ -2108,7 +2108,7 @@ class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
compiler->SaveLiveRegisters(locs);
compiler->GenerateStubCall(TokenPosition::kNoSource, // No token position.
stub, RawPcDescriptors::kOther, locs);
stub, PcDescriptorsLayout::kOther, locs);
__ MoveRegister(result_, R0);
compiler->RestoreLiveRegisters(locs);
__ b(exit_label());
@ -2505,9 +2505,9 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
// data area to be initialized.
// R6: null
if (num_elements > 0) {
const intptr_t array_size = instance_size - sizeof(RawArray);
const intptr_t array_size = instance_size - sizeof(ArrayLayout);
__ LoadObject(R6, Object::null_object());
__ AddImmediate(R8, R0, sizeof(RawArray) - kHeapObjectTag);
__ AddImmediate(R8, R0, sizeof(ArrayLayout) - kHeapObjectTag);
if (array_size < (kInlineArraySize * kWordSize)) {
intptr_t current_offset = 0;
while (current_offset < array_size) {
@ -2566,7 +2566,7 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& allocate_array_stub =
Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
compiler->GenerateStubCall(token_pos(), allocate_array_stub,
RawPcDescriptors::kOther, locs(), deopt_id());
PcDescriptorsLayout::kOther, locs(), deopt_id());
ASSERT(locs()->out(0).reg() == kResultReg);
}
@ -2807,8 +2807,8 @@ void InstantiateTypeArgumentsInstr::EmitNativeCode(
// Lookup cache in stub before calling runtime.
__ LoadObject(InstantiationABI::kUninstantiatedTypeArgumentsReg,
type_arguments());
compiler->GenerateStubCall(token_pos(), GetStub(), RawPcDescriptors::kOther,
locs());
compiler->GenerateStubCall(token_pos(), GetStub(),
PcDescriptorsLayout::kOther, locs());
__ Bind(&type_arguments_instantiated);
}
@ -2849,8 +2849,8 @@ class AllocateContextSlowPath
__ LoadImmediate(R1, instruction()->num_context_variables());
compiler->GenerateStubCall(instruction()->token_pos(),
allocate_context_stub, RawPcDescriptors::kOther,
locs);
allocate_context_stub,
PcDescriptorsLayout::kOther, locs);
ASSERT(instruction()->locs()->out(0).reg() == R0);
compiler->RestoreLiveRegisters(instruction()->locs());
__ b(exit_label());
@ -2900,7 +2900,7 @@ void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub());
__ LoadImmediate(R1, num_context_variables());
compiler->GenerateStubCall(token_pos(), allocate_context_stub,
RawPcDescriptors::kOther, locs());
PcDescriptorsLayout::kOther, locs());
}
LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
@ -2922,7 +2922,7 @@ void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& clone_context_stub =
Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
compiler->GenerateStubCall(token_pos(), clone_context_stub,
/*kind=*/RawPcDescriptors::kOther, locs());
/*kind=*/PcDescriptorsLayout::kOther, locs());
}
LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
@ -2943,7 +2943,7 @@ void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (compiler->is_optimizing()) {
compiler->AddDeoptIndexAtCall(deopt_id);
} else {
compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id,
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id,
TokenPosition::kNoSource);
}
}
@ -3042,7 +3042,7 @@ class CheckStackOverflowSlowPath
compiler->RecordSafepoint(locs, kNumSlowPathArgs);
compiler->RecordCatchEntryMoves();
compiler->AddDescriptor(
RawPcDescriptors::kOther, compiler->assembler()->CodeSize(),
PcDescriptorsLayout::kOther, compiler->assembler()->CodeSize(),
instruction()->deopt_id(), instruction()->token_pos(),
compiler->CurrentTryIndex());
} else {
@ -3054,7 +3054,7 @@ class CheckStackOverflowSlowPath
if (compiler->isolate()->use_osr() && !compiler->is_optimizing() &&
instruction()->in_loop()) {
// In unoptimized code, record loop stack checks as possible OSR entries.
compiler->AddCurrentDescriptor(RawPcDescriptors::kOsrEntry,
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kOsrEntry,
instruction()->deopt_id(),
TokenPosition::kNoSource);
}
@ -3151,8 +3151,7 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
const bool right_needs_check =
!RangeUtils::IsWithin(right_range, 0, max_right - 1);
if (right_needs_check) {
__ CompareImmediate(right,
reinterpret_cast<int64_t>(Smi::New(max_right)));
__ CompareImmediate(right, static_cast<int64_t>(Smi::New(max_right)));
__ b(deopt, CS);
}
__ SmiUntag(TMP, right);
@ -3171,8 +3170,7 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
__ b(deopt, MI);
}
__ CompareImmediate(right,
reinterpret_cast<int64_t>(Smi::New(Smi::kBits)));
__ CompareImmediate(right, static_cast<int64_t>(Smi::New(Smi::kBits)));
__ csel(result, ZR, result, CS);
__ SmiUntag(TMP, right);
__ lslv(TMP, left, TMP);
@ -3184,8 +3182,7 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
} else {
if (right_needs_check) {
ASSERT(shift_left->CanDeoptimize());
__ CompareImmediate(right,
reinterpret_cast<int64_t>(Smi::New(Smi::kBits)));
__ CompareImmediate(right, static_cast<int64_t>(Smi::New(Smi::kBits)));
__ b(deopt, CS);
}
// Left is not a constant.
@ -3309,8 +3306,7 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case Token::kSHL:
ASSERT(result != left);
ASSERT(result != right);
__ CompareImmediate(right,
reinterpret_cast<int64_t>(Smi::New(Smi::kBits)));
__ CompareImmediate(right, static_cast<int64_t>(Smi::New(Smi::kBits)));
__ b(slow_path->entry_label(), CS);
__ SmiUntag(TMP, right);
@ -3322,8 +3318,7 @@ void CheckedSmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
case Token::kSHR:
ASSERT(result != left);
ASSERT(result != right);
__ CompareImmediate(right,
reinterpret_cast<int64_t>(Smi::New(Smi::kBits)));
__ CompareImmediate(right, static_cast<int64_t>(Smi::New(Smi::kBits)));
__ b(slow_path->entry_label(), CS);
__ SmiUntag(result, right);
@ -3542,7 +3537,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (locs()->in(1).IsConstant()) {
const Object& constant = locs()->in(1).constant();
ASSERT(constant.IsSmi());
const int64_t imm = reinterpret_cast<int64_t>(constant.raw());
const int64_t imm = static_cast<int64_t>(constant.raw());
switch (op_kind()) {
case Token::kADD: {
if (deopt == NULL) {
@ -3972,10 +3967,12 @@ LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
const bool stubs_in_vm_isolate = (Isolate::Current()
->object_store()
->allocate_mint_with_fpu_regs_stub()
->ptr()
->InVMIsolateHeap() ||
Isolate::Current()
->object_store()
->allocate_mint_without_fpu_regs_stub()
->ptr()
->InVMIsolateHeap());
const bool shared_slow_path_call = SlowPathSharingSupported(opt) &&
FLAG_use_bare_instructions &&
@ -4030,7 +4027,7 @@ void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(!locs()->live_registers()->ContainsRegister(R0));
auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
compiler->GenerateStubCall(token_pos(), stub, RawPcDescriptors::kOther,
compiler->GenerateStubCall(token_pos(), stub, PcDescriptorsLayout::kOther,
locs(), DeoptId::kNone, extended_env);
} else {
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
@ -5409,7 +5406,7 @@ void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (index_loc.IsConstant()) {
const Register length = length_loc.reg();
const Smi& index = Smi::Cast(index_loc.constant());
__ CompareImmediate(length, reinterpret_cast<int64_t>(index.raw()));
__ CompareImmediate(length, static_cast<int64_t>(index.raw()));
__ b(deopt, LS);
} else if (length_loc.IsConstant()) {
const Smi& length = Smi::Cast(length_loc.constant());
@ -5421,7 +5418,7 @@ void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ tst(index, compiler::Operand(index));
__ b(deopt, MI);
} else {
__ CompareImmediate(index, reinterpret_cast<int64_t>(length.raw()));
__ CompareImmediate(index, static_cast<int64_t>(length.raw()));
__ b(deopt, CS);
}
} else {
@ -6250,7 +6247,7 @@ void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
// Add a deoptimization descriptor for deoptimizing instructions that
// may be inserted before this instruction.
compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
TokenPosition::kNoSource);
}
if (HasParallelMove()) {
@ -6417,7 +6414,7 @@ void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
const Code& stub = Code::ZoneHandle(
compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
compiler->GenerateStubCall(token_pos(), stub, RawPcDescriptors::kOther,
compiler->GenerateStubCall(token_pos(), stub, PcDescriptorsLayout::kOther,
locs());
}
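
Several hunks above turn reinterpret_cast<int64_t>(Smi::New(...)) and reinterpret_cast<...>(constant.raw()) into static_cast. reinterpret_cast cannot convert a class-type value to an integer, so once the operand is a value wrapper rather than a raw pointer the conversion has to be spelled static_cast and resolved through a conversion. A freestanding sketch of that language rule, using a made-up word-sized wrapper rather than the VM's real Smi type:

#include <cstdint>
#include <cstdio>

// Hypothetical value-type stand-in for a tagged small-integer pointer.
class SampleSmiPtr {
 public:
  explicit SampleSmiPtr(std::intptr_t tagged) : tagged_(tagged) {}
  // An integral conversion makes static_cast<int64_t>(p) well formed;
  // reinterpret_cast<int64_t>(p) is ill formed once p is a class-type value.
  explicit operator std::intptr_t() const { return tagged_; }

 private:
  std::intptr_t tagged_;
};

// Assume the usual one-bit Smi tag: the tagged word is the value shifted left.
static SampleSmiPtr NewSmi(std::intptr_t value) {
  return SampleSmiPtr(value << 1);
}

int main() {
  // Value-type operand: static_cast goes through the conversion operator.
  const int64_t smi_imm = static_cast<int64_t>(NewSmi(32));

  // Pointer operand: converting the address still needs reinterpret_cast.
  int local = 0;
  const int64_t ptr_bits = reinterpret_cast<int64_t>(&local);

  std::printf("smi immediate: %lld, pointer bits: %lld\n",
              static_cast<long long>(smi_imm),
              static_cast<long long>(ptr_bits));
  return 0;
}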

View file

@ -1013,10 +1013,10 @@ ConstantInstr* FlowGraphDeserializer::DeserializeConstant(
DebugStepCheckInstr* FlowGraphDeserializer::DeserializeDebugStepCheck(
SExpList* sexp,
const InstrInfo& info) {
auto kind = RawPcDescriptors::kAnyKind;
auto kind = PcDescriptorsLayout::kAnyKind;
if (auto const kind_sexp = CheckSymbol(Retrieve(sexp, "stub_kind"))) {
if (!RawPcDescriptors::ParseKind(kind_sexp->value(), &kind)) {
StoreError(kind_sexp, "not a valid RawPcDescriptors::Kind name");
if (!PcDescriptorsLayout::ParseKind(kind_sexp->value(), &kind)) {
StoreError(kind_sexp, "not a valid PcDescriptorsLayout::Kind name");
return nullptr;
}
}
@ -1641,13 +1641,13 @@ bool FlowGraphDeserializer::ParseFunction(SExpList* list, Object* out) {
auto& function = Function::Cast(*out);
// Check the kind expected by the S-expression if one was specified.
if (auto const kind_sexp = CheckSymbol(list->ExtraLookupValue("kind"))) {
RawFunction::Kind kind;
if (!RawFunction::ParseKind(kind_sexp->value(), &kind)) {
FunctionLayout::Kind kind;
if (!FunctionLayout::ParseKind(kind_sexp->value(), &kind)) {
StoreError(kind_sexp, "unexpected function kind");
return false;
}
if (function.kind() != kind) {
auto const kind_str = RawFunction::KindToCString(function.kind());
auto const kind_str = FunctionLayout::KindToCString(function.kind());
StoreError(list, "retrieved function has kind %s", kind_str);
return false;
}
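
The deserializer hunks above parse kind names through PcDescriptorsLayout::ParseKind and FunctionLayout::ParseKind, each returning a bool and writing the result through an out-parameter so the caller can keep its default and report "not a valid ... Kind name" or "unexpected function kind". A compact sketch of that ParseKind/KindToCString pairing, with illustrative kinds that are not the VM's actual list:

#include <cstdio>
#include <cstring>

// Illustrative *Layout-style class exposing KindToCString / ParseKind as a
// matched pair; the kinds and strings here are stand-ins only.
class SampleFunctionLayout {
 public:
  enum Kind {
    kRegularFunction,
    kConstructor,
    kDynamicInvocationForwarder,
  };

  static const char* KindToCString(Kind kind) {
    switch (kind) {
      case kRegularFunction:            return "RegularFunction";
      case kConstructor:                return "Constructor";
      case kDynamicInvocationForwarder: return "DynamicInvocationForwarder";
    }
    return "Unknown";
  }

  // Returns false and leaves *out untouched for an unknown name, which is
  // what lets a caller fall back to a default and report a clean parse error.
  static bool ParseKind(const char* name, Kind* out) {
    static const Kind kAllKinds[] = {kRegularFunction, kConstructor,
                                     kDynamicInvocationForwarder};
    for (Kind kind : kAllKinds) {
      if (std::strcmp(name, KindToCString(kind)) == 0) {
        *out = kind;
        return true;
      }
    }
    return false;
  }
};

int main() {
  SampleFunctionLayout::Kind kind = SampleFunctionLayout::kRegularFunction;
  if (!SampleFunctionLayout::ParseKind("Constructor", &kind)) {
    std::printf("unexpected function kind\n");
    return 1;
  }
  std::printf("parsed kind: %s\n", SampleFunctionLayout::KindToCString(kind));
  return 0;
}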

View file

@ -140,7 +140,7 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ int3();
__ Bind(&done);
#endif
if (yield_index() != RawPcDescriptors::kInvalidYieldIndex) {
if (yield_index() != PcDescriptorsLayout::kInvalidYieldIndex) {
compiler->EmitYieldPositionMetadata(token_pos(), yield_index());
}
__ LeaveFrame();
@ -324,7 +324,7 @@ void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
} else {
if (compiler::Assembler::IsSafeSmi(value_) || value_.IsNull()) {
__ movl(LocationToStackSlotAddress(destination),
compiler::Immediate(reinterpret_cast<int32_t>(value_.raw())));
compiler::Immediate(static_cast<int32_t>(value_.raw())));
} else {
__ pushl(EAX);
__ LoadObjectSafely(EAX, value_);
@ -713,7 +713,7 @@ Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
Location right = locs()->in(1);
if (right.IsConstant()) {
ASSERT(right.constant().IsSmi());
const int32_t imm = reinterpret_cast<int32_t>(right.constant().raw());
const int32_t imm = static_cast<int32_t>(right.constant().raw());
__ testl(left, compiler::Immediate(imm));
} else {
__ testl(left, right.reg());
@ -856,7 +856,7 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const compiler::ExternalLabel label(
reinterpret_cast<uword>(native_c_function()));
__ movl(ECX, compiler::Immediate(label.address()));
compiler->GenerateStubCall(token_pos(), *stub, RawPcDescriptors::kOther,
compiler->GenerateStubCall(token_pos(), *stub, PcDescriptorsLayout::kOther,
locs());
__ popl(result);
@ -893,7 +893,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::Label get_pc;
__ call(&get_pc);
compiler->EmitCallsiteMetadata(TokenPosition::kNoSource, DeoptId::kNone,
RawPcDescriptors::Kind::kOther, locs());
PcDescriptorsLayout::Kind::kOther, locs());
__ Bind(&get_pc);
__ popl(temp);
__ movl(compiler::Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize), temp);
@ -1729,7 +1729,7 @@ void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ cmpl(value_cid_reg, compiler::Immediate(kNullCid));
} else {
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
compiler::Immediate(static_cast<intptr_t>(Object::null()));
__ cmpl(value_reg, raw_null);
}
}
@ -1853,7 +1853,7 @@ class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
compiler->SaveLiveRegisters(locs);
compiler->GenerateStubCall(TokenPosition::kNoSource, stub,
RawPcDescriptors::kOther, locs);
PcDescriptorsLayout::kOther, locs);
__ MoveRegister(result_, EAX);
compiler->RestoreLiveRegisters(locs);
__ jmp(exit_label());
@ -1924,7 +1924,7 @@ static void EnsureMutableBox(FlowGraphCompiler* compiler,
Register temp) {
compiler::Label done;
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
compiler::Immediate(static_cast<intptr_t>(Object::null()));
__ movl(box_reg, compiler::FieldAddress(instance_reg, offset));
__ cmpl(box_reg, raw_null);
__ j(NOT_EQUAL, &done);
@ -2227,10 +2227,10 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
// EDI: iterator which initially points to the start of the variable
// data area to be initialized.
if (num_elements > 0) {
const intptr_t array_size = instance_size - sizeof(RawArray);
const intptr_t array_size = instance_size - sizeof(ArrayLayout);
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
__ leal(EDI, compiler::FieldAddress(EAX, sizeof(RawArray)));
compiler::Immediate(static_cast<intptr_t>(Object::null()));
__ leal(EDI, compiler::FieldAddress(EAX, sizeof(ArrayLayout)));
if (array_size < (kInlineArraySize * kWordSize)) {
intptr_t current_offset = 0;
__ movl(EBX, raw_null);
@ -2282,7 +2282,7 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&slow_path);
compiler->GenerateStubCall(token_pos(), StubCode::AllocateArray(),
RawPcDescriptors::kOther, locs(), deopt_id());
PcDescriptorsLayout::kOther, locs(), deopt_id());
__ Bind(&done);
ASSERT(locs()->out(0).reg() == kResultReg);
}
@ -2493,8 +2493,8 @@ void InstantiateTypeArgumentsInstr::EmitNativeCode(
// Lookup cache in stub before calling runtime.
__ LoadObject(InstantiationABI::kUninstantiatedTypeArgumentsReg,
type_arguments());
compiler->GenerateStubCall(token_pos(), GetStub(), RawPcDescriptors::kOther,
locs());
compiler->GenerateStubCall(token_pos(), GetStub(),
PcDescriptorsLayout::kOther, locs());
__ Bind(&type_arguments_instantiated);
}
@ -2531,7 +2531,7 @@ class AllocateContextSlowPath
__ movl(EDX, compiler::Immediate(instruction()->num_context_variables()));
compiler->GenerateStubCall(instruction()->token_pos(),
StubCode::AllocateContext(),
RawPcDescriptors::kOther, locs);
PcDescriptorsLayout::kOther, locs);
ASSERT(instruction()->locs()->out(0).reg() == EAX);
compiler->RestoreLiveRegisters(instruction()->locs());
__ jmp(exit_label());
@ -2579,7 +2579,7 @@ void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ movl(EDX, compiler::Immediate(num_context_variables()));
compiler->GenerateStubCall(token_pos(), StubCode::AllocateContext(),
RawPcDescriptors::kOther, locs());
PcDescriptorsLayout::kOther, locs());
}
LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
@ -2598,7 +2598,7 @@ void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(locs()->out(0).reg() == EAX);
compiler->GenerateStubCall(token_pos(), StubCode::CloneContext(),
/*kind=*/RawPcDescriptors::kOther, locs());
/*kind=*/PcDescriptorsLayout::kOther, locs());
}
LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
@ -2619,7 +2619,7 @@ void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (compiler->is_optimizing()) {
compiler->AddDeoptIndexAtCall(deopt_id);
} else {
compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id,
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id,
TokenPosition::kNoSource);
}
}
@ -2693,7 +2693,7 @@ class CheckStackOverflowSlowPath
if (compiler->isolate()->use_osr() && !compiler->is_optimizing() &&
instruction()->in_loop()) {
// In unoptimized code, record loop stack checks as possible OSR entries.
compiler->AddCurrentDescriptor(RawPcDescriptors::kOsrEntry,
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kOsrEntry,
instruction()->deopt_id(),
TokenPosition::kNoSource);
}
@ -2793,8 +2793,8 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
const bool right_needs_check =
!RangeUtils::IsWithin(right_range, 0, max_right - 1);
if (right_needs_check) {
__ cmpl(right, compiler::Immediate(
reinterpret_cast<int32_t>(Smi::New(max_right))));
__ cmpl(right,
compiler::Immediate(static_cast<int32_t>(Smi::New(max_right))));
__ j(ABOVE_EQUAL, deopt);
}
__ SmiUntag(right);
@ -2814,8 +2814,8 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
__ j(NEGATIVE, deopt);
}
compiler::Label done, is_not_zero;
__ cmpl(right, compiler::Immediate(
reinterpret_cast<int32_t>(Smi::New(Smi::kBits))));
__ cmpl(right,
compiler::Immediate(static_cast<int32_t>(Smi::New(Smi::kBits))));
__ j(BELOW, &is_not_zero, compiler::Assembler::kNearJump);
__ xorl(left, left);
__ jmp(&done, compiler::Assembler::kNearJump);
@ -2830,8 +2830,8 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
} else {
if (right_needs_check) {
ASSERT(shift_left->CanDeoptimize());
__ cmpl(right, compiler::Immediate(
reinterpret_cast<int32_t>(Smi::New(Smi::kBits))));
__ cmpl(right,
compiler::Immediate(static_cast<int32_t>(Smi::New(Smi::kBits))));
__ j(ABOVE_EQUAL, deopt);
}
// Left is not a constant.
@ -5160,7 +5160,7 @@ LocationSummary* CheckClassInstr::MakeLocationSummary(Zone* zone,
void CheckClassInstr::EmitNullCheck(FlowGraphCompiler* compiler,
compiler::Label* deopt) {
const compiler::Immediate& raw_null =
compiler::Immediate(reinterpret_cast<intptr_t>(Object::null()));
compiler::Immediate(static_cast<intptr_t>(Object::null()));
__ cmpl(locs()->in(0).reg(), raw_null);
ASSERT(IsDeoptIfNull() || IsDeoptIfNotNull());
Condition cond = IsDeoptIfNull() ? EQUAL : NOT_EQUAL;
@ -5325,20 +5325,17 @@ void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ testl(index, index);
__ j(NEGATIVE, deopt);
} else {
__ cmpl(index,
compiler::Immediate(reinterpret_cast<int32_t>(length.raw())));
__ cmpl(index, compiler::Immediate(static_cast<int32_t>(length.raw())));
__ j(ABOVE_EQUAL, deopt);
}
} else if (index_loc.IsConstant()) {
const Smi& index = Smi::Cast(index_loc.constant());
if (length_loc.IsStackSlot()) {
const compiler::Address& length = LocationToStackSlotAddress(length_loc);
__ cmpl(length,
compiler::Immediate(reinterpret_cast<int32_t>(index.raw())));
__ cmpl(length, compiler::Immediate(static_cast<int32_t>(index.raw())));
} else {
Register length = length_loc.reg();
__ cmpl(length,
compiler::Immediate(reinterpret_cast<int32_t>(index.raw())));
__ cmpl(length, compiler::Immediate(static_cast<int32_t>(index.raw())));
}
__ j(BELOW_EQUAL, deopt);
} else if (length_loc.IsStackSlot()) {
@ -6084,7 +6081,7 @@ void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
// Add a deoptimization descriptor for deoptimizing instructions that
// may be inserted before this instruction.
compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
TokenPosition::kNoSource);
}
if (HasParallelMove()) {
@ -6272,7 +6269,7 @@ void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ xorl(ECX, ECX);
__ call(EBX);
compiler->EmitCallsiteMetadata(token_pos(), deopt_id(),
RawPcDescriptors::kOther, locs());
PcDescriptorsLayout::kOther, locs());
__ Drop(argument_count);
}
@ -6321,7 +6318,7 @@ LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Code& stub = Code::ZoneHandle(
compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
compiler->GenerateStubCall(token_pos(), stub, RawPcDescriptors::kOther,
compiler->GenerateStubCall(token_pos(), stub, PcDescriptorsLayout::kOther,
locs());
}

View file

@ -1025,7 +1025,7 @@ void NativeEntryInstr::PrintTo(BufferFormatter* f) const {
void ReturnInstr::PrintOperandsTo(BufferFormatter* f) const {
Instruction::PrintOperandsTo(f);
if (yield_index() != RawPcDescriptors::kInvalidYieldIndex) {
if (yield_index() != PcDescriptorsLayout::kInvalidYieldIndex) {
f->Print(", yield_index = %" Pd "", yield_index());
}
}

View file

@ -588,9 +588,9 @@ SExpression* FlowGraphSerializer::FunctionToSExp(const Function& func) {
AddExtraSymbol(sexp, "native_name", tmp_string_.ToCString());
}
}
if (func.kind() != RawFunction::Kind::kRegularFunction ||
if (func.kind() != FunctionLayout::Kind::kRegularFunction ||
FLAG_verbose_flow_graph_serialization) {
AddExtraSymbol(sexp, "kind", RawFunction::KindToCString(func.kind()));
AddExtraSymbol(sexp, "kind", FunctionLayout::KindToCString(func.kind()));
}
function_type_args_ = func.type_parameters();
if (auto const ta_sexp = NonEmptyTypeArgumentsToSExp(function_type_args_)) {
@ -1061,9 +1061,9 @@ void DebugStepCheckInstr::AddExtraInfoToSExpression(
SExpList* sexp,
FlowGraphSerializer* s) const {
Instruction::AddExtraInfoToSExpression(sexp, s);
if (stub_kind_ != RawPcDescriptors::kAnyKind ||
if (stub_kind_ != PcDescriptorsLayout::kAnyKind ||
FLAG_verbose_flow_graph_serialization) {
auto const stub_kind_name = RawPcDescriptors::KindToCString(stub_kind_);
auto const stub_kind_name = PcDescriptorsLayout::KindToCString(stub_kind_);
ASSERT(stub_kind_name != nullptr);
s->AddExtraSymbol(sexp, "stub_kind", stub_kind_name);
}

View file

@ -148,7 +148,7 @@ class FlowGraphSerializer : ValueObject {
return a.raw() == b.raw();
}
static uword Hash(const Object& obj) {
if (obj.IsSmi()) return reinterpret_cast<uword>(obj.raw());
if (obj.IsSmi()) return static_cast<uword>(obj.raw());
if (obj.IsInstance()) return Instance::Cast(obj).CanonicalizeHash();
return obj.GetClassId();
}

View file

@ -23,9 +23,9 @@ namespace dart {
Definition* const FlowGraphBuilderHelper::kPhiSelfReference = nullptr;
RawLibrary* LoadTestScript(const char* script,
Dart_NativeEntryResolver resolver,
const char* lib_uri) {
LibraryPtr LoadTestScript(const char* script,
Dart_NativeEntryResolver resolver,
const char* lib_uri) {
Dart_Handle api_lib;
{
TransitionVMToNative transition(Thread::Current());
@ -38,7 +38,7 @@ RawLibrary* LoadTestScript(const char* script,
return lib.raw();
}
RawFunction* GetFunction(const Library& lib, const char* name) {
FunctionPtr GetFunction(const Library& lib, const char* name) {
Thread* thread = Thread::Current();
const auto& func = Function::Handle(lib.LookupFunctionAllowPrivate(
String::Handle(Symbols::New(thread, name))));
@ -46,7 +46,7 @@ RawFunction* GetFunction(const Library& lib, const char* name) {
return func.raw();
}
RawClass* GetClass(const Library& lib, const char* name) {
ClassPtr GetClass(const Library& lib, const char* name) {
Thread* thread = Thread::Current();
const auto& cls = Class::Handle(
lib.LookupClassAllowPrivate(String::Handle(Symbols::New(thread, name))));
@ -54,15 +54,15 @@ RawClass* GetClass(const Library& lib, const char* name) {
return cls.raw();
}
RawTypeParameter* GetClassTypeParameter(const Class& klass, const char* name) {
TypeParameterPtr GetClassTypeParameter(const Class& klass, const char* name) {
const auto& param = TypeParameter::Handle(
klass.LookupTypeParameter(String::Handle(String::New(name))));
EXPECT(!param.IsNull());
return param.raw();
}
RawTypeParameter* GetFunctionTypeParameter(const Function& fun,
const char* name) {
TypeParameterPtr GetFunctionTypeParameter(const Function& fun,
const char* name) {
intptr_t fun_level = 0;
const auto& param = TypeParameter::Handle(
fun.LookupTypeParameter(String::Handle(String::New(name)), &fun_level));
@ -70,7 +70,7 @@ RawTypeParameter* GetFunctionTypeParameter(const Function& fun,
return param.raw();
}
RawObject* Invoke(const Library& lib, const char* name) {
ObjectPtr Invoke(const Library& lib, const char* name) {
// These tests rely on running unoptimized code to collect type feedback. The
// interpreter does not collect type feedback for interface calls, so set
// compilation threshold to 0 in order to compile invoked function

View file

@ -50,20 +50,18 @@ namespace dart {
class FlowGraph;
class Function;
class Library;
class RawFunction;
class RawLibrary;
RawLibrary* LoadTestScript(const char* script,
Dart_NativeEntryResolver resolver = nullptr,
const char* lib_uri = RESOLVED_USER_TEST_URI);
LibraryPtr LoadTestScript(const char* script,
Dart_NativeEntryResolver resolver = nullptr,
const char* lib_uri = RESOLVED_USER_TEST_URI);
RawFunction* GetFunction(const Library& lib, const char* name);
RawClass* GetClass(const Library& lib, const char* name);
RawTypeParameter* GetClassTypeParameter(const Class& klass, const char* name);
RawTypeParameter* GetFunctionTypeParameter(const Function& fun,
const char* name);
FunctionPtr GetFunction(const Library& lib, const char* name);
ClassPtr GetClass(const Library& lib, const char* name);
TypeParameterPtr GetClassTypeParameter(const Class& klass, const char* name);
TypeParameterPtr GetFunctionTypeParameter(const Function& fun,
const char* name);
RawObject* Invoke(const Library& lib, const char* name);
ObjectPtr Invoke(const Library& lib, const char* name);
class TestPipeline : public ValueObject {
public:
@ -291,7 +289,7 @@ class FlowGraphBuilderHelper {
static FlowGraph& MakeDummyGraph(Thread* thread) {
const Function& func = Function::ZoneHandle(Function::New(
String::Handle(Symbols::New(thread, "dummy")),
RawFunction::kRegularFunction,
FunctionLayout::kRegularFunction,
/*is_static=*/true,
/*is_const=*/false,
/*is_abstract=*/false,

View file

@ -219,7 +219,7 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&done);
#endif
ASSERT(__ constant_pool_allowed());
if (yield_index() != RawPcDescriptors::kInvalidYieldIndex) {
if (yield_index() != PcDescriptorsLayout::kInvalidYieldIndex) {
compiler->EmitYieldPositionMetadata(token_pos(), yield_index());
}
__ LeaveDartFrame(); // Disallows constant pool use.
@ -795,7 +795,7 @@ Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
Location right = locs()->in(1);
if (right.IsConstant()) {
ASSERT(right.constant().IsSmi());
const int64_t imm = reinterpret_cast<int64_t>(right.constant().raw());
const int64_t imm = static_cast<int64_t>(right.constant().raw());
__ TestImmediate(left_reg, compiler::Immediate(imm));
} else {
__ testq(left_reg, right.reg());
@ -920,7 +920,7 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LoadNativeEntry(RBX, &label,
compiler::ObjectPoolBuilderEntry::kPatchable);
compiler->GeneratePatchableCall(token_pos(), *stub,
RawPcDescriptors::kOther, locs());
PcDescriptorsLayout::kOther, locs());
} else {
if (is_bootstrap_native()) {
stub = &StubCode::CallBootstrapNative();
@ -933,7 +933,7 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
reinterpret_cast<uword>(native_c_function()));
__ LoadNativeEntry(RBX, &label,
compiler::ObjectPoolBuilderEntry::kNotPatchable);
compiler->GenerateStubCall(token_pos(), *stub, RawPcDescriptors::kOther,
compiler->GenerateStubCall(token_pos(), *stub, PcDescriptorsLayout::kOther,
locs());
}
__ popq(result);
@ -971,7 +971,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// 'movq'.
__ leaq(TMP, compiler::Address::AddressRIPRelative(0));
compiler->EmitCallsiteMetadata(TokenPosition::kNoSource, DeoptId::kNone,
RawPcDescriptors::Kind::kOther, locs());
PcDescriptorsLayout::Kind::kOther, locs());
__ movq(compiler::Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize), TMP);
if (CanExecuteGeneratedCodeInSafepoint()) {
@ -1301,7 +1301,7 @@ class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
compiler->SaveLiveRegisters(locs);
compiler->GenerateStubCall(TokenPosition::kNoSource, // No token position.
stub, RawPcDescriptors::kOther, locs);
stub, PcDescriptorsLayout::kOther, locs);
__ MoveRegister(result_, RAX);
compiler->RestoreLiveRegisters(locs);
__ jmp(exit_label());
@ -2485,9 +2485,9 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
// RDI: iterator which initially points to the start of the variable
// data area to be initialized.
if (num_elements > 0) {
const intptr_t array_size = instance_size - sizeof(RawArray);
const intptr_t array_size = instance_size - sizeof(ArrayLayout);
__ LoadObject(R12, Object::null_object());
__ leaq(RDI, compiler::FieldAddress(RAX, sizeof(RawArray)));
__ leaq(RDI, compiler::FieldAddress(RAX, sizeof(ArrayLayout)));
if (array_size < (kInlineArraySize * kWordSize)) {
intptr_t current_offset = 0;
while (current_offset < array_size) {
@ -2549,7 +2549,7 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& allocate_array_stub =
Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
compiler->GenerateStubCall(token_pos(), allocate_array_stub,
RawPcDescriptors::kOther, locs(), deopt_id());
PcDescriptorsLayout::kOther, locs(), deopt_id());
__ Bind(&done);
ASSERT(locs()->out(0).reg() == kResultReg);
}
@ -2800,8 +2800,8 @@ void InstantiateTypeArgumentsInstr::EmitNativeCode(
}
__ LoadObject(InstantiationABI::kUninstantiatedTypeArgumentsReg,
type_arguments());
compiler->GenerateStubCall(token_pos(), GetStub(), RawPcDescriptors::kOther,
locs());
compiler->GenerateStubCall(token_pos(), GetStub(),
PcDescriptorsLayout::kOther, locs());
__ Bind(&type_arguments_instantiated);
}
@ -2842,8 +2842,8 @@ class AllocateContextSlowPath
__ LoadImmediate(
R10, compiler::Immediate(instruction()->num_context_variables()));
compiler->GenerateStubCall(instruction()->token_pos(),
allocate_context_stub, RawPcDescriptors::kOther,
locs);
allocate_context_stub,
PcDescriptorsLayout::kOther, locs);
ASSERT(instruction()->locs()->out(0).reg() == RAX);
compiler->RestoreLiveRegisters(instruction()->locs());
__ jmp(exit_label());
@ -2894,7 +2894,7 @@ void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LoadImmediate(R10, compiler::Immediate(num_context_variables()));
compiler->GenerateStubCall(token_pos(), allocate_context_stub,
RawPcDescriptors::kOther, locs());
PcDescriptorsLayout::kOther, locs());
}
LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
@ -2916,7 +2916,7 @@ void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& clone_context_stub =
Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
compiler->GenerateStubCall(token_pos(), clone_context_stub,
/*kind=*/RawPcDescriptors::kOther, locs());
/*kind=*/PcDescriptorsLayout::kOther, locs());
}
LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
@ -2937,7 +2937,7 @@ void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (compiler->is_optimizing()) {
compiler->AddDeoptIndexAtCall(deopt_id);
} else {
compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, deopt_id,
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id,
TokenPosition::kNoSource);
}
}
@ -3020,7 +3020,7 @@ class CheckStackOverflowSlowPath
compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);
compiler->RecordCatchEntryMoves();
compiler->AddDescriptor(
RawPcDescriptors::kOther, compiler->assembler()->CodeSize(),
PcDescriptorsLayout::kOther, compiler->assembler()->CodeSize(),
instruction()->deopt_id(), instruction()->token_pos(),
compiler->CurrentTryIndex());
} else {
@ -3032,7 +3032,7 @@ class CheckStackOverflowSlowPath
if (compiler->isolate()->use_osr() && !compiler->is_optimizing() &&
instruction()->in_loop()) {
// In unoptimized code, record loop stack checks as possible OSR entries.
compiler->AddCurrentDescriptor(RawPcDescriptors::kOsrEntry,
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kOsrEntry,
instruction()->deopt_id(),
TokenPosition::kNoSource);
}
@ -3135,8 +3135,8 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
!RangeUtils::IsWithin(right_range, 0, max_right - 1);
if (right_needs_check) {
__ CompareImmediate(
right, compiler::Immediate(
reinterpret_cast<int64_t>(Smi::New(max_right))));
right,
compiler::Immediate(static_cast<int64_t>(Smi::New(max_right))));
__ j(ABOVE_EQUAL, deopt);
}
__ SmiUntag(right);
@ -3160,7 +3160,7 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
compiler::Label done, is_not_zero;
__ CompareImmediate(
right,
compiler::Immediate(reinterpret_cast<int64_t>(Smi::New(Smi::kBits))));
compiler::Immediate(static_cast<int64_t>(Smi::New(Smi::kBits))));
__ j(BELOW, &is_not_zero, compiler::Assembler::kNearJump);
__ xorq(left, left);
__ jmp(&done, compiler::Assembler::kNearJump);
@ -3177,7 +3177,7 @@ static void EmitSmiShiftLeft(FlowGraphCompiler* compiler,
ASSERT(shift_left->CanDeoptimize());
__ CompareImmediate(
right,
compiler::Immediate(reinterpret_cast<int64_t>(Smi::New(Smi::kBits))));
compiler::Immediate(static_cast<int64_t>(Smi::New(Smi::kBits))));
__ j(ABOVE_EQUAL, deopt);
}
// Left is not a constant.
@ -3509,8 +3509,7 @@ void CheckedSmiComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
static bool CanBeImmediate(const Object& constant) {
return constant.IsSmi() &&
compiler::Immediate(reinterpret_cast<int64_t>(constant.raw()))
.is_int32();
compiler::Immediate(static_cast<int64_t>(constant.raw())).is_int32();
}
static bool IsSmiValue(const Object& constant, intptr_t value) {
@ -3619,7 +3618,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (locs()->in(1).IsConstant()) {
const Object& constant = locs()->in(1).constant();
ASSERT(constant.IsSmi());
const int64_t imm = reinterpret_cast<int64_t>(constant.raw());
const int64_t imm = static_cast<int64_t>(constant.raw());
switch (op_kind()) {
case Token::kADD: {
__ AddImmediate(left, compiler::Immediate(imm));
@ -5616,8 +5615,8 @@ void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (index_loc.IsConstant()) {
Register length = length_loc.reg();
const Smi& index = Smi::Cast(index_loc.constant());
__ CompareImmediate(
length, compiler::Immediate(reinterpret_cast<int64_t>(index.raw())));
__ CompareImmediate(length,
compiler::Immediate(static_cast<int64_t>(index.raw())));
__ j(BELOW_EQUAL, deopt);
} else if (length_loc.IsConstant()) {
const Smi& length = Smi::Cast(length_loc.constant());
@ -5630,7 +5629,7 @@ void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ j(NEGATIVE, deopt);
} else {
__ CompareImmediate(
index, compiler::Immediate(reinterpret_cast<int64_t>(length.raw())));
index, compiler::Immediate(static_cast<int64_t>(length.raw())));
__ j(ABOVE_EQUAL, deopt);
}
} else {
@ -6514,7 +6513,7 @@ void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
// Add a deoptimization descriptor for deoptimizing instructions that
// may be inserted before this instruction.
compiler->AddCurrentDescriptor(RawPcDescriptors::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
TokenPosition::kNoSource);
}
if (HasParallelMove()) {
@ -6645,7 +6644,7 @@ void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
__ call(RCX);
compiler->EmitCallsiteMetadata(token_pos(), deopt_id(),
RawPcDescriptors::kOther, locs());
PcDescriptorsLayout::kOther, locs());
__ Drop(argument_count);
}
@ -6701,7 +6700,7 @@ void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
const Code& stub = Code::ZoneHandle(
compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
compiler->GenerateStubCall(token_pos(), stub, RawPcDescriptors::kOther,
compiler->GenerateStubCall(token_pos(), stub, PcDescriptorsLayout::kOther,
locs());
}
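
Editor's note: the x64 backend hunks above show the two mechanical substitutions that recur throughout this section. Descriptor kinds move from RawPcDescriptors to PcDescriptorsLayout, and immediates built from tagged values (Smi::New(...), constant.raw(), index.raw()) switch from reinterpret_cast<int64_t> to static_cast<int64_t>. The second change only type-checks because a value type can expose its tagged word through a conversion operator, whereas a C++ pointer type cannot be static_cast to an integer. A minimal sketch of that idea follows; the class name, members, and the explicit conversion operator are assumptions for illustration, not the SDK's actual declarations.

// Editor's sketch only; names and members are illustrative assumptions.
#include <cstdint>

class TaggedPtrSketch {
 public:
  explicit constexpr TaggedPtrSketch(uintptr_t tagged) : tagged_(tagged) {}
  // The explicit integral conversion is what lets call sites write
  // static_cast<int64_t>(ptr) where a raw pointer previously required
  // reinterpret_cast<int64_t>(ptr).
  explicit constexpr operator uintptr_t() const { return tagged_; }

 private:
  uintptr_t tagged_;  // the tagged word, held by value
};

static_assert(sizeof(TaggedPtrSketch) == sizeof(uintptr_t),
              "one machine word, so it passes and compares like a pointer");

inline int64_t AsImmediate(TaggedPtrSketch ptr) {
  // static_cast goes through the conversion operator (direct-initialization),
  // then the ordinary integral conversion to int64_t.
  return static_cast<int64_t>(static_cast<uintptr_t>(ptr));
}

Because the wrapper holds the same tagged word the old pointer did, the immediate that ends up in the generated code is unchanged; only the C++ typing of the cast changes.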

View file

@ -2285,7 +2285,7 @@ bool FlowGraphInliner::AlwaysInline(const Function& function) {
// replace them with inline FG before inlining introduces any superfluous
// AssertAssignable instructions.
if (function.IsDispatcherOrImplicitAccessor() &&
!(function.kind() == RawFunction::kDynamicInvocationForwarder &&
!(function.kind() == FunctionLayout::kDynamicInvocationForwarder &&
function.IsRecognized())) {
// Smaller or same size as the call.
return true;
@ -2298,7 +2298,7 @@ bool FlowGraphInliner::AlwaysInline(const Function& function) {
if (function.IsGetterFunction() || function.IsSetterFunction() ||
IsInlineableOperator(function) ||
(function.kind() == RawFunction::kConstructor)) {
(function.kind() == FunctionLayout::kConstructor)) {
const intptr_t count = function.optimized_instruction_count();
if ((count != 0) && (count < FLAG_inline_getters_setters_smaller_than)) {
return true;

View file

@ -53,31 +53,31 @@ class ParsedFunction;
//
// Note: native slots are expected to be non-nullable.
#define NATIVE_SLOTS_LIST(V) \
V(Array, RawArray, length, Smi, FINAL) \
V(Context, RawContext, parent, Context, FINAL) \
V(Closure, RawClosure, instantiator_type_arguments, TypeArguments, FINAL) \
V(Closure, RawClosure, delayed_type_arguments, TypeArguments, FINAL) \
V(Closure, RawClosure, function_type_arguments, TypeArguments, FINAL) \
V(Closure, RawClosure, function, Function, FINAL) \
V(Closure, RawClosure, context, Context, FINAL) \
V(Closure, RawClosure, hash, Context, VAR) \
V(GrowableObjectArray, RawGrowableObjectArray, length, Smi, VAR) \
V(GrowableObjectArray, RawGrowableObjectArray, data, Array, VAR) \
V(TypedDataBase, RawTypedDataBase, length, Smi, FINAL) \
V(TypedDataView, RawTypedDataView, offset_in_bytes, Smi, FINAL) \
V(TypedDataView, RawTypedDataView, data, Dynamic, FINAL) \
V(String, RawString, length, Smi, FINAL) \
V(LinkedHashMap, RawLinkedHashMap, index, TypedDataUint32Array, VAR) \
V(LinkedHashMap, RawLinkedHashMap, data, Array, VAR) \
V(LinkedHashMap, RawLinkedHashMap, hash_mask, Smi, VAR) \
V(LinkedHashMap, RawLinkedHashMap, used_data, Smi, VAR) \
V(LinkedHashMap, RawLinkedHashMap, deleted_keys, Smi, VAR) \
V(ArgumentsDescriptor, RawArray, type_args_len, Smi, FINAL) \
V(ArgumentsDescriptor, RawArray, positional_count, Smi, FINAL) \
V(ArgumentsDescriptor, RawArray, count, Smi, FINAL) \
V(ArgumentsDescriptor, RawArray, size, Smi, FINAL) \
V(PointerBase, RawPointerBase, data_field, Dynamic, FINAL) \
V(Type, RawType, arguments, TypeArguments, FINAL)
V(Array, ArrayLayout, length, Smi, FINAL) \
V(Context, ContextLayout, parent, Context, FINAL) \
V(Closure, ClosureLayout, instantiator_type_arguments, TypeArguments, FINAL) \
V(Closure, ClosureLayout, delayed_type_arguments, TypeArguments, FINAL) \
V(Closure, ClosureLayout, function_type_arguments, TypeArguments, FINAL) \
V(Closure, ClosureLayout, function, Function, FINAL) \
V(Closure, ClosureLayout, context, Context, FINAL) \
V(Closure, ClosureLayout, hash, Context, VAR) \
V(GrowableObjectArray, GrowableObjectArrayLayout, length, Smi, VAR) \
V(GrowableObjectArray, GrowableObjectArrayLayout, data, Array, VAR) \
V(TypedDataBase, TypedDataBaseLayout, length, Smi, FINAL) \
V(TypedDataView, TypedDataViewLayout, offset_in_bytes, Smi, FINAL) \
V(TypedDataView, TypedDataViewLayout, data, Dynamic, FINAL) \
V(String, StringLayout, length, Smi, FINAL) \
V(LinkedHashMap, LinkedHashMapLayout, index, TypedDataUint32Array, VAR) \
V(LinkedHashMap, LinkedHashMapLayout, data, Array, VAR) \
V(LinkedHashMap, LinkedHashMapLayout, hash_mask, Smi, VAR) \
V(LinkedHashMap, LinkedHashMapLayout, used_data, Smi, VAR) \
V(LinkedHashMap, LinkedHashMapLayout, deleted_keys, Smi, VAR) \
V(ArgumentsDescriptor, ArrayLayout, type_args_len, Smi, FINAL) \
V(ArgumentsDescriptor, ArrayLayout, positional_count, Smi, FINAL) \
V(ArgumentsDescriptor, ArrayLayout, count, Smi, FINAL) \
V(ArgumentsDescriptor, ArrayLayout, size, Smi, FINAL) \
V(PointerBase, PointerBaseLayout, data_field, Dynamic, FINAL) \
V(Type, TypeLayout, arguments, TypeArguments, FINAL)
// Slot is an abstraction that describes a readable (and possibly writeable)
// location within an object.
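
Editor's note: NATIVE_SLOTS_LIST above is an X-macro table. Each V(holder, layout class, field, type, mutability) row records which *Layout class declares the field a slot reads, which is why the second column is renamed together with the Raw* classes. Client code supplies its own definition of V to stamp the table out into enumerators, offsets, or switch cases. The expansion below is an editor's illustration of the technique with an invented list and macro names; it is not the SDK's actual use of NATIVE_SLOTS_LIST.

// Editor's sketch: a hypothetical X-macro expansion in the same shape.
#define EXAMPLE_SLOTS_LIST(V)                                                  \
  V(Array, ArrayLayout, length, Smi, FINAL)                                    \
  V(Closure, ClosureLayout, context, Context, FINAL)                           \
  V(GrowableObjectArray, GrowableObjectArrayLayout, data, Array, VAR)

// Stamp out one enumerator per row: kArray_length, kClosure_context, ...
#define DEFINE_EXAMPLE_SLOT(ClassName, UnderlyingLayout, FieldName, cid, mut)  \
  k##ClassName##_##FieldName,

enum class ExampleSlotKind {
  EXAMPLE_SLOTS_LIST(DEFINE_EXAMPLE_SLOT)
};
#undef DEFINE_EXAMPLE_SLOT

Swapping in a different definition of V (for example, one that emits a field offset from the named layout class) reuses the same table, so a single rename of the layout column propagates everywhere the list is expanded.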

View file

@ -48,8 +48,8 @@ TEST_CASE(SlotFromGuardedField) {
const Function& dummy_function = Function::ZoneHandle(
Function::New(String::Handle(Symbols::New(thread, "foo")),
RawFunction::kRegularFunction, false, false, false, false,
false, dummy_class, TokenPosition::kMinSource));
FunctionLayout::kRegularFunction, false, false, false,
false, false, dummy_class, TokenPosition::kMinSource));
const Field& field = Field::Handle(
Field::New(String::Handle(Symbols::New(thread, "field")),

View file

@ -171,7 +171,7 @@ ISOLATE_UNIT_TEST_CASE(TypePropagator_Refinement) {
const Function& target_func = Function::ZoneHandle(Function::New(
String::Handle(Symbols::New(thread, "dummy2")),
RawFunction::kRegularFunction,
FunctionLayout::kRegularFunction,
/*is_static=*/true,
/*is_const=*/false,
/*is_abstract=*/false,

View file

@ -26,7 +26,7 @@ static YieldPoints* GetYieldPointsFromGraph(FlowGraph* flow_graph) {
while (!it.Done()) {
if (auto return_instr = it.Current()->AsReturn()) {
if (return_instr->yield_index() !=
RawPcDescriptors::kInvalidYieldIndex) {
PcDescriptorsLayout::kInvalidYieldIndex) {
ASSERT(return_instr->yield_index() > 0);
array->Add(
Pair(return_instr->yield_index(), return_instr->token_pos()));
@ -42,9 +42,9 @@ static YieldPoints* GetYieldPointsFromGraph(FlowGraph* flow_graph) {
static YieldPoints* GetYieldPointsFromCode(const Code& code) {
auto array = new YieldPoints();
const auto& pc_descriptor = PcDescriptors::Handle(code.pc_descriptors());
PcDescriptors::Iterator it(pc_descriptor, RawPcDescriptors::kOther);
PcDescriptors::Iterator it(pc_descriptor, PcDescriptorsLayout::kOther);
while (it.MoveNext()) {
if (it.YieldIndex() != RawPcDescriptors::kInvalidYieldIndex) {
if (it.YieldIndex() != PcDescriptorsLayout::kInvalidYieldIndex) {
array->Add(Pair(it.YieldIndex(), it.TokenPos()));
}
}

View file

@ -749,8 +749,8 @@ bool CallSpecializer::TryInlineImplicitInstanceGetter(InstanceCallInstr* call) {
field = field.CloneFromOriginal();
}
switch (
flow_graph()->CheckForInstanceCall(call, RawFunction::kImplicitGetter)) {
switch (flow_graph()->CheckForInstanceCall(call,
FunctionLayout::kImplicitGetter)) {
case FlowGraph::ToCheck::kCheckNull:
AddCheckNull(call->Receiver(), call->function_name(), call->deopt_id(),
call->env(), call);
@ -795,7 +795,7 @@ bool CallSpecializer::TryInlineInstanceSetter(InstanceCallInstr* instr) {
return false;
}
const Function& target = targets.FirstTarget();
if (target.kind() != RawFunction::kImplicitSetter) {
if (target.kind() != FunctionLayout::kImplicitSetter) {
// Non-implicit setter are inlined like normal method calls.
return false;
}
@ -805,8 +805,8 @@ bool CallSpecializer::TryInlineInstanceSetter(InstanceCallInstr* instr) {
field = field.CloneFromOriginal();
}
switch (
flow_graph()->CheckForInstanceCall(instr, RawFunction::kImplicitSetter)) {
switch (flow_graph()->CheckForInstanceCall(instr,
FunctionLayout::kImplicitSetter)) {
case FlowGraph::ToCheck::kCheckNull:
AddCheckNull(instr->Receiver(), instr->function_name(), instr->deopt_id(),
instr->env(), instr);
@ -956,7 +956,7 @@ bool CallSpecializer::TryInlineInstanceGetter(InstanceCallInstr* call) {
return false;
}
const Function& target = targets.FirstTarget();
if (target.kind() != RawFunction::kImplicitGetter) {
if (target.kind() != FunctionLayout::kImplicitGetter) {
// Non-implicit getters are inlined like normal methods by conventional
// inlining in FlowGraphInliner.
return false;
@ -1063,7 +1063,7 @@ bool CallSpecializer::TryInlineInstanceMethod(InstanceCallInstr* call) {
// (ic_data.NumberOfChecks() * 2) entries
// An instance-of test returning all same results can be converted to a class
// check.
RawBool* CallSpecializer::InstanceOfAsBool(
BoolPtr CallSpecializer::InstanceOfAsBool(
const ICData& ic_data,
const AbstractType& type,
ZoneGrowableArray<intptr_t>* results) const {

View file

@ -159,9 +159,9 @@ class CallSpecializer : public FlowGraphVisitor {
bool TryInlineImplicitInstanceGetter(InstanceCallInstr* call);
RawBool* InstanceOfAsBool(const ICData& ic_data,
const AbstractType& type,
ZoneGrowableArray<intptr_t>* results) const;
BoolPtr InstanceOfAsBool(const ICData& ic_data,
const AbstractType& type,
ZoneGrowableArray<intptr_t>* results) const;
bool TryOptimizeInstanceOfUsingStaticTypes(InstanceCallInstr* call,
const AbstractType& type);
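
Editor's note: in declarations like the ones just above, only the spelling of the return type changes: RawBool* becomes BoolPtr, and later in this section RawFunction*, RawString*, and RawAbstractType* become FunctionPtr, StringPtr, and AbstractTypePtr. A word-sized, trivially copyable value type is returned by value just as the raw pointer was (in a register on the common ABIs), so function bodies and the handle-wrapping call sites keep their shape. A sketch under those assumptions; the class and function here are invented for illustration.

// Editor's sketch, not SDK code.
#include <cstdint>
#include <type_traits>

class BoolPtrSketch {
 public:
  explicit constexpr BoolPtrSketch(uintptr_t tagged) : tagged_(tagged) {}
  constexpr uintptr_t tagged() const { return tagged_; }

 private:
  uintptr_t tagged_;
};

static_assert(std::is_trivially_copyable<BoolPtrSketch>::value &&
                  sizeof(BoolPtrSketch) == sizeof(uintptr_t),
              "returned by value exactly like the old raw pointer");

// was:  RawBool* MakeTrueSketch();
inline BoolPtrSketch MakeTrueSketch() {
  return BoolPtrSketch(/*tagged=*/0x1);  // placeholder tagged word, not a real Bool
}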

View file

@ -13,15 +13,15 @@ namespace compiler {
namespace ffi {
// TODO(dartbug.com/36607): Cache the trampolines.
RawFunction* TrampolineFunction(const Function& dart_signature,
const Function& c_signature) {
FunctionPtr TrampolineFunction(const Function& dart_signature,
const Function& c_signature) {
Thread* thread = Thread::Current();
Zone* zone = thread->zone();
String& name = String::Handle(zone, Symbols::New(thread, "FfiTrampoline"));
const Library& lib = Library::Handle(zone, Library::FfiLibrary());
const Class& owner_class = Class::Handle(zone, lib.toplevel_class());
Function& function =
Function::Handle(zone, Function::New(name, RawFunction::kFfiTrampoline,
Function::Handle(zone, Function::New(name, FunctionLayout::kFfiTrampoline,
/*is_static=*/true,
/*is_const=*/false,
/*is_abstract=*/false,

View file

@ -19,8 +19,8 @@ namespace compiler {
namespace ffi {
RawFunction* TrampolineFunction(const Function& dart_signature,
const Function& c_signature);
FunctionPtr TrampolineFunction(const Function& dart_signature,
const Function& c_signature);
} // namespace ffi

View file

@ -12,9 +12,9 @@ namespace compiler {
namespace ffi {
RawFunction* NativeCallbackFunction(const Function& c_signature,
const Function& dart_target,
const Instance& exceptional_return) {
FunctionPtr NativeCallbackFunction(const Function& c_signature,
const Function& dart_target,
const Instance& exceptional_return) {
Thread* const thread = Thread::Current();
const int32_t callback_id = thread->AllocateFfiCallbackId();
@ -28,7 +28,7 @@ RawFunction* NativeCallbackFunction(const Function& c_signature,
const Library& lib = Library::Handle(zone, Library::FfiLibrary());
const Class& owner_class = Class::Handle(zone, lib.toplevel_class());
const Function& function =
Function::Handle(zone, Function::New(name, RawFunction::kFfiTrampoline,
Function::Handle(zone, Function::New(name, FunctionLayout::kFfiTrampoline,
/*is_static=*/true,
/*is_const=*/false,
/*is_abstract=*/false,

View file

@ -19,9 +19,9 @@ namespace compiler {
namespace ffi {
RawFunction* NativeCallbackFunction(const Function& c_signature,
const Function& dart_target,
const Instance& exceptional_return);
FunctionPtr NativeCallbackFunction(const Function& c_signature,
const Function& dart_target,
const Instance& exceptional_return);
} // namespace ffi

View file

@ -74,7 +74,7 @@ class BaseMarshaller : public NativeCallingConvention {
kFfiVoidCid;
}
RawString* function_name() const { return dart_signature_.name(); }
StringPtr function_name() const { return dart_signature_.name(); }
protected:
BaseMarshaller(Zone* zone, const Function& dart_signature)

View file

@ -279,7 +279,7 @@ intptr_t NativeCallingConvention::num_args() const {
return c_signature_.num_fixed_parameters() - kNativeParamsStartAt;
}
RawAbstractType* NativeCallingConvention::CType(intptr_t arg_index) const {
AbstractTypePtr NativeCallingConvention::CType(intptr_t arg_index) const {
if (arg_index == kResultIndex) {
return c_signature_.result_type();
}

View file

@ -42,7 +42,7 @@ class NativeCallingConvention : public ZoneAllocated {
// The C Type (expressed in a Dart Type) of the argument at `arg_index`.
//
// Excluding the #0 argument which is the function pointer.
RawAbstractType* CType(intptr_t arg_index) const;
AbstractTypePtr CType(intptr_t arg_index) const;
// The location of the argument at `arg_index`.
const NativeLocation& Location(intptr_t arg_index) const {

View file

@ -971,7 +971,7 @@ Fragment BaseFlowGraphBuilder::DebugStepCheck(TokenPosition position) {
return Fragment();
#else
return Fragment(new (Z) DebugStepCheckInstr(
position, RawPcDescriptors::kRuntimeCall, GetNextDeoptId()));
position, PcDescriptorsLayout::kRuntimeCall, GetNextDeoptId()));
#endif
}
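
Editor's note: across all of these hunks, constants that used to be scoped inside the Raw* classes (RawPcDescriptors::kOther, RawFunction::kConstructor, and so on) are now scoped inside the corresponding *Layout classes, with the enumerators themselves unchanged in the visible diff. For orientation only, the sketch below gathers the descriptor kinds referenced in this section into one illustrative declaration; the real PcDescriptorsLayout declares more kinds, and the values shown here (including the sentinel) are assumptions.

// Editor's sketch: not the SDK's declaration, just the kinds referenced above.
#include <cstdint>

class PcDescriptorsLayoutSketch {
 public:
  enum Kind {
    kDeopt,        // deopt descriptors added for GotoInstr / CatchBlockEntryInstr
    kOther,        // stub calls: allocation, context allocation and cloning
    kOsrEntry,     // loop stack checks recorded as possible OSR entries
    kRuntimeCall,  // DebugStepCheckInstr
  };
  static constexpr intptr_t kInvalidYieldIndex = -1;  // assumed sentinel value
};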

Some files were not shown because too many files have changed in this diff.