[vm] Make naming more consistent when converting between handles, tagged and untagged pointers.

Currently we have types named XPtr that are not what you get from ptr().

Old world:
handle->raw() returns RawObject* (tagged)
raw_obj->ptr() returns RawObject* (untagged)

After 6fe15f6df9:
handle->raw() returns ObjectPtr
obj_ptr->ptr() returns ObjectLayout*

New world:
handle->ptr() returns ObjectPtr
obj_ptr->untag() returns UntaggedObject*
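
For illustration, a minimal sketch of how the three representations relate
under the new names. These are simplified stand-in classes, not the VM's real
definitions; the tag value, the members, and the operator-> modeling are
assumptions made only for this example.

#include <cstdint>

struct UntaggedObject {          // the raw, untagged heap layout
  uint32_t tags_ = 0;
};

class ObjectPtr {                // a tagged pointer to a heap object
 public:
  explicit ObjectPtr(uintptr_t tagged) : tagged_(tagged) {}

  // obj_ptr->untag(): strip the tag to reach the untagged layout.
  UntaggedObject* untag() const {
    return reinterpret_cast<UntaggedObject*>(tagged_ - kHeapObjectTag);
  }

  // Lets call sites write obj_ptr->untag(), matching the spelling above.
  const ObjectPtr* operator->() const { return this; }

 private:
  static constexpr uintptr_t kHeapObjectTag = 1;  // assumed tag value
  uintptr_t tagged_;
};

class Object {                   // a handle wrapping a tagged pointer
 public:
  explicit Object(ObjectPtr ptr) : ptr_(ptr) {}

  // handle->ptr(): a handle hands out the tagged pointer, never the
  // untagged layout directly.
  ObjectPtr ptr() const { return ptr_; }

 private:
  ObjectPtr ptr_;
};

In the diff below this shows up as mechanical renames, e.g.
obj.raw()->ptr()->IsCanonical() becomes obj.ptr()->untag()->IsCanonical().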

TEST=ci
Change-Id: I6c7f34014cf20737607caaf84979838300d12df2
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/149367
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Martin Kustermann <kustermann@google.com>
Reviewed-by: Siva Annamalai <asiva@google.com>
Reviewed-by: Vyacheslav Egorov <vegorov@google.com>
Authored by Ryan Macnak on 2021-01-15 23:32:02 +00:00; committed by commit-bot@chromium.org
parent 06fc3fa21c
commit b1c09ecd8f
259 changed files with 6763 additions and 6699 deletions


@ -85,7 +85,7 @@ DEFINE_NATIVE_ENTRY(ImmutableList_from, 0, 4) {
result.SetAt(i, temp);
}
result.MakeImmutable();
return result.raw();
return result.ptr();
}
} // namespace dart


@ -24,13 +24,13 @@ DEFINE_NATIVE_ENTRY(Bool_fromEnvironment, 0, 3) {
String::Handle(Api::GetEnvironmentValue(thread, name));
if (!env_value.IsNull()) {
if (Symbols::True().Equals(env_value)) {
return Bool::True().raw();
return Bool::True().ptr();
}
if (Symbols::False().Equals(env_value)) {
return Bool::False().raw();
return Bool::False().ptr();
}
}
return default_value.raw();
return default_value.ptr();
}
DEFINE_NATIVE_ENTRY(Bool_hasEnvironment, 0, 2) {
@ -39,9 +39,9 @@ DEFINE_NATIVE_ENTRY(Bool_hasEnvironment, 0, 2) {
const String& env_value =
String::Handle(Api::GetEnvironmentValue(thread, name));
if (!env_value.IsNull()) {
return Bool::True().raw();
return Bool::True().ptr();
}
return Bool::False().raw();
return Bool::False().ptr();
}
} // namespace dart


@ -25,13 +25,13 @@ DEFINE_NATIVE_ENTRY(Developer_debugger, 0, 2) {
GET_NATIVE_ARGUMENT(String, msg, arguments->NativeArgAt(1));
Debugger* debugger = isolate->debugger();
if (debugger == nullptr) {
return when.raw();
return when.ptr();
}
if (when.value()) {
debugger->PauseDeveloper(msg);
}
#endif
return when.raw();
return when.ptr();
}
DEFINE_NATIVE_ENTRY(Developer_inspect, 0, 1) {
@ -39,7 +39,7 @@ DEFINE_NATIVE_ENTRY(Developer_inspect, 0, 1) {
#ifndef PRODUCT
Service::SendInspectEvent(isolate, inspectee);
#endif // !PRODUCT
return inspectee.raw();
return inspectee.ptr();
}
DEFINE_NATIVE_ENTRY(Developer_log, 0, 8) {


@ -134,13 +134,13 @@ DEFINE_NATIVE_ENTRY(Double_greaterThan, 0, 2) {
OS::PrintErr("Double_greaterThan %s > %s\n", left.ToCString(),
right.ToCString());
}
return Bool::Get(result).raw();
return Bool::Get(result).ptr();
}
DEFINE_NATIVE_ENTRY(Double_greaterThanFromInteger, 0, 2) {
const Double& right = Double::CheckedHandle(zone, arguments->NativeArgAt(0));
GET_NON_NULL_NATIVE_ARGUMENT(Integer, left, arguments->NativeArgAt(1));
return Bool::Get(left.AsDoubleValue() > right.value()).raw();
return Bool::Get(left.AsDoubleValue() > right.value()).ptr();
}
DEFINE_NATIVE_ENTRY(Double_equal, 0, 2) {
@ -151,13 +151,13 @@ DEFINE_NATIVE_ENTRY(Double_equal, 0, 2) {
OS::PrintErr("Double_equal %s == %s\n", left.ToCString(),
right.ToCString());
}
return Bool::Get(result).raw();
return Bool::Get(result).ptr();
}
DEFINE_NATIVE_ENTRY(Double_equalToInteger, 0, 2) {
const Double& left = Double::CheckedHandle(zone, arguments->NativeArgAt(0));
GET_NON_NULL_NATIVE_ARGUMENT(Integer, right, arguments->NativeArgAt(1));
return Bool::Get(left.value() == right.AsDoubleValue()).raw();
return Bool::Get(left.value() == right.AsDoubleValue()).ptr();
}
DEFINE_NATIVE_ENTRY(Double_round, 0, 1) {
@ -264,19 +264,19 @@ DEFINE_NATIVE_ENTRY(Double_toStringAsPrecision, 0, 2) {
DEFINE_NATIVE_ENTRY(Double_getIsInfinite, 0, 1) {
const Double& arg = Double::CheckedHandle(zone, arguments->NativeArgAt(0));
return Bool::Get(isinf(arg.value())).raw();
return Bool::Get(isinf(arg.value())).ptr();
}
DEFINE_NATIVE_ENTRY(Double_getIsNaN, 0, 1) {
const Double& arg = Double::CheckedHandle(zone, arguments->NativeArgAt(0));
return Bool::Get(isnan(arg.value())).raw();
return Bool::Get(isnan(arg.value())).ptr();
}
DEFINE_NATIVE_ENTRY(Double_getIsNegative, 0, 1) {
const Double& arg = Double::CheckedHandle(zone, arguments->NativeArgAt(0));
// Include negative zero, infinity.
double dval = arg.value();
return Bool::Get(signbit(dval) && !isnan(dval)).raw();
return Bool::Get(signbit(dval) && !isnan(dval)).ptr();
}
DEFINE_NATIVE_ENTRY(Double_flipSignBit, 0, 1) {


@ -40,7 +40,7 @@ static ScriptPtr FindScript(DartFrameIterator* iterator) {
return func.script();
}
ASSERT(!hit_assertion_error);
hit_assertion_error = (func.Owner() == assert_error_class.raw());
hit_assertion_error = (func.Owner() == assert_error_class.ptr());
inlined_iterator.Advance();
}
continue;
@ -52,7 +52,7 @@ static ScriptPtr FindScript(DartFrameIterator* iterator) {
return func.script();
}
ASSERT(!hit_assertion_error);
hit_assertion_error = (func.Owner() == assert_error_class.raw());
hit_assertion_error = (func.Owner() == assert_error_class.ptr());
}
UNREACHABLE();
return Script::null();
@ -93,7 +93,7 @@ DEFINE_NATIVE_ENTRY(AssertionError_throwNew, 0, 3) {
script.GetSnippet(from_line, from_column, to_line, to_column);
}
if (condition_text.IsNull()) {
condition_text = Symbols::OptimizedOut().raw();
condition_text = Symbols::OptimizedOut().ptr();
}
args.SetAt(0, condition_text);


@ -110,7 +110,7 @@ static ObjectPtr LoadValueStruct(Zone* zone,
const Object& constructorResult =
Object::Handle(DartEntry::InvokeFunction(constructor, args));
ASSERT(!constructorResult.IsError());
return new_object.raw();
return new_object.ptr();
}
DEFINE_NATIVE_ENTRY(Ffi_loadStruct, 0, 2) {
@ -269,7 +269,7 @@ DEFINE_NATIVE_ENTRY(Ffi_nativeCallbackFunction, 1, 2) {
native_signature, func, exceptional_return)));
// Because we have already set the return value.
return Object::sentinel().raw();
return Object::sentinel().ptr();
#endif
}
@ -292,7 +292,7 @@ DEFINE_NATIVE_ENTRY(Ffi_pointerFromFunction, 1, 1) {
Exceptions::PropagateError(Error::Cast(result));
}
ASSERT(result.IsCode());
code ^= result.raw();
code ^= result.ptr();
#endif
ASSERT(!code.IsNull());


@ -27,7 +27,7 @@ DEFINE_NATIVE_ENTRY(Function_apply, 0, 2) {
if (result.IsError()) {
Exceptions::PropagateError(Error::Cast(result));
}
return result.raw();
return result.ptr();
}
DEFINE_NATIVE_ENTRY(Closure_equals, 0, 2) {
@ -39,8 +39,8 @@ DEFINE_NATIVE_ENTRY(Closure_equals, 0, 2) {
// name and owner (multiple function objects could exist for the same
// function due to hot reload).
// Objects of other closure kinds are unique, so use identity comparison.
if (receiver.raw() == other.raw()) {
return Bool::True().raw();
if (receiver.ptr() == other.ptr()) {
return Bool::True().ptr();
}
if (other.IsClosure()) {
const Function& func_a = Function::Handle(zone, receiver.function());
@ -54,15 +54,15 @@ DEFINE_NATIVE_ENTRY(Closure_equals, 0, 2) {
ObjectPtr receiver_a = context_a.At(0);
ObjectPtr receiver_b = context_b.At(0);
if ((receiver_a == receiver_b) &&
((func_a.raw() == func_b.raw()) ||
((func_a.ptr() == func_b.ptr()) ||
((func_a.name() == func_b.name()) &&
(func_a.Owner() == func_b.Owner())))) {
return Bool::True().raw();
return Bool::True().ptr();
}
}
}
}
return Bool::False().raw();
return Bool::False().ptr();
}
DEFINE_NATIVE_ENTRY(Closure_computeHash, 0, 1) {


@ -24,7 +24,7 @@ DEFINE_NATIVE_ENTRY(GrowableList_allocate, 0, 2) {
const GrowableObjectArray& new_array =
GrowableObjectArray::Handle(GrowableObjectArray::New(data));
new_array.SetTypeArguments(type_arguments);
return new_array.raw();
return new_array.ptr();
}
DEFINE_NATIVE_ENTRY(GrowableList_getIndexed, 0, 2) {
@ -35,7 +35,7 @@ DEFINE_NATIVE_ENTRY(GrowableList_getIndexed, 0, 2) {
Exceptions::ThrowRangeError("index", index, 0, array.Length() - 1);
}
const Instance& obj = Instance::CheckedHandle(zone, array.At(index.Value()));
return obj.raw();
return obj.ptr();
}
DEFINE_NATIVE_ENTRY(GrowableList_setIndexed, 0, 3) {
@ -89,7 +89,7 @@ DEFINE_NATIVE_ENTRY(Internal_makeListFixedLength, 0, 1) {
DEFINE_NATIVE_ENTRY(Internal_makeFixedListUnmodifiable, 0, 1) {
GET_NON_NULL_NATIVE_ARGUMENT(Array, array, arguments->NativeArgAt(0));
array.MakeImmutable();
return array.raw();
return array.ptr();
}
} // namespace dart


@ -12,7 +12,7 @@ DEFINE_NATIVE_ENTRY(Identical_comparison, 0, 2) {
GET_NATIVE_ARGUMENT(Instance, a, arguments->NativeArgAt(0));
GET_NATIVE_ARGUMENT(Instance, b, arguments->NativeArgAt(1));
const bool is_identical = a.IsIdenticalTo(b);
return Bool::Get(is_identical).raw();
return Bool::Get(is_identical).ptr();
}
} // namespace dart


@ -143,7 +143,7 @@ DEFINE_NATIVE_ENTRY(Integer_greaterThanFromInteger, 0, 2) {
OS::PrintErr("Integer_greaterThanFromInteger %s > %s\n", left.ToCString(),
right.ToCString());
}
return Bool::Get(left.CompareWith(right) == 1).raw();
return Bool::Get(left.CompareWith(right) == 1).ptr();
}
DEFINE_NATIVE_ENTRY(Integer_equalToInteger, 0, 2) {
@ -155,7 +155,7 @@ DEFINE_NATIVE_ENTRY(Integer_equalToInteger, 0, 2) {
OS::PrintErr("Integer_equalToInteger %s == %s\n", left.ToCString(),
right.ToCString());
}
return Bool::Get(left.CompareWith(right) == 0).raw();
return Bool::Get(left.CompareWith(right) == 0).ptr();
}
static IntegerPtr ParseInteger(const String& value) {
@ -194,12 +194,12 @@ DEFINE_NATIVE_ENTRY(Integer_fromEnvironment, 0, 3) {
const Integer& result = Integer::Handle(ParseInteger(env_value));
if (!result.IsNull()) {
if (result.IsSmi()) {
return result.raw();
return result.ptr();
}
return result.Canonicalize(thread);
}
}
return default_value.raw();
return default_value.ptr();
}
static IntegerPtr ShiftOperationHelper(Token::Kind kind,


@ -39,7 +39,7 @@ DEFINE_NATIVE_ENTRY(CapabilityImpl_factory, 0, 1) {
DEFINE_NATIVE_ENTRY(CapabilityImpl_equals, 0, 2) {
GET_NON_NULL_NATIVE_ARGUMENT(Capability, recv, arguments->NativeArgAt(0));
GET_NON_NULL_NATIVE_ARGUMENT(Capability, other, arguments->NativeArgAt(1));
return (recv.Id() == other.Id()) ? Bool::True().raw() : Bool::False().raw();
return (recv.Id() == other.Id()) ? Bool::True().ptr() : Bool::False().ptr();
}
DEFINE_NATIVE_ENTRY(CapabilityImpl_get_hashcode, 0, 1) {
@ -107,9 +107,9 @@ DEFINE_NATIVE_ENTRY(SendPortImpl_sendInternal_, 0, 2) {
const Dart_Port destination_port_id = port.Id();
const bool can_send_any_object = isolate->origin_id() == port.origin_id();
if (ApiObjectConverter::CanConvert(obj.raw())) {
if (ApiObjectConverter::CanConvert(obj.ptr())) {
PortMap::PostMessage(
Message::New(destination_port_id, obj.raw(), Message::kNormalPriority));
Message::New(destination_port_id, obj.ptr(), Message::kNormalPriority));
} else {
MessageWriter writer(can_send_any_object);
// TODO(turnidge): Throw an exception when the return value is false?
@ -119,7 +119,7 @@ DEFINE_NATIVE_ENTRY(SendPortImpl_sendInternal_, 0, 2) {
return Object::null();
}
class ObjectPtrSetTraitsLayout {
class UntaggedObjectPtrSetTraits {
public:
static bool ReportStats() { return false; }
static const char* Name() { return "RawObjectPtrSetTraits"; }
@ -146,7 +146,7 @@ static ObjectPtr ValidateMessageObject(Zone* zone,
private:
void VisitPointers(ObjectPtr* from, ObjectPtr* to) {
for (ObjectPtr* raw = from; raw <= to; raw++) {
if (!(*raw)->IsHeapObject() || (*raw)->ptr()->IsCanonical()) {
if (!(*raw)->IsHeapObject() || (*raw)->untag()->IsCanonical()) {
continue;
}
if (visited_->GetValueExclusive(*raw) == 1) {
@ -160,8 +160,8 @@ static ObjectPtr ValidateMessageObject(Zone* zone,
WeakTable* visited_;
MallocGrowableArray<ObjectPtr>* const working_set_;
};
if (!obj.raw()->IsHeapObject() || obj.raw()->ptr()->IsCanonical()) {
return obj.raw();
if (!obj.ptr()->IsHeapObject() || obj.ptr()->untag()->IsCanonical()) {
return obj.ptr();
}
ClassTable* class_table = isolate->group()->class_table();
@ -184,8 +184,8 @@ static ObjectPtr ValidateMessageObject(Zone* zone,
SendMessageValidator visitor(isolate->group(), visited.get(), &working_set);
visited->SetValueExclusive(obj.raw(), 1);
working_set.Add(obj.raw());
visited->SetValueExclusive(obj.ptr(), 1);
working_set.Add(obj.ptr());
while (!working_set.is_empty() && !error_found) {
ObjectPtr raw = working_set.RemoveLast();
@ -225,13 +225,13 @@ static ObjectPtr ValidateMessageObject(Zone* zone,
if (cid >= kNumPredefinedCids) {
klass = class_table->At(cid);
if (klass.num_native_fields() != 0) {
erroneous_nativewrapper_class = klass.raw();
erroneous_nativewrapper_class = klass.ptr();
error_found = true;
break;
}
}
}
raw->ptr()->VisitPointers(&visitor);
raw->untag()->VisitPointers(&visitor);
}
}
if (error_found) {
@ -255,7 +255,7 @@ static ObjectPtr ValidateMessageObject(Zone* zone,
zone, Exceptions::kArgumentValue, exception_message);
}
isolate->set_forward_table_new(nullptr);
return obj.raw();
return obj.ptr();
}
DEFINE_NATIVE_ENTRY(SendPortImpl_sendAndExitInternal_, 0, 2) {
@ -271,7 +271,7 @@ DEFINE_NATIVE_ENTRY(SendPortImpl_sendAndExitInternal_, 0, 2) {
GET_NON_NULL_NATIVE_ARGUMENT(Instance, obj, arguments->NativeArgAt(1));
Object& validated_result = Object::Handle(zone);
Object& msg_obj = Object::Handle(zone, obj.raw());
Object& msg_obj = Object::Handle(zone, obj.ptr());
validated_result = ValidateMessageObject(zone, isolate, msg_obj);
if (validated_result.IsUnhandledException()) {
Exceptions::PropagateError(Error::Cast(validated_result));
@ -279,7 +279,7 @@ DEFINE_NATIVE_ENTRY(SendPortImpl_sendAndExitInternal_, 0, 2) {
}
PersistentHandle* handle =
isolate->group()->api_state()->AllocatePersistentHandle();
handle->set_raw(msg_obj);
handle->set_ptr(msg_obj);
isolate->bequeath(std::unique_ptr<Bequest>(new Bequest(handle, port.Id())));
// TODO(aam): Ensure there are no dart api calls after this point as we want
// to ensure that validated message won't get tampered with.
@ -472,7 +472,7 @@ ObjectPtr IsolateSpawnState::ResolveFunction() {
// Check whether main is reexported from the root library.
const Object& obj = Object::Handle(zone, lib.LookupReExport(func_name));
if (obj.IsFunction()) {
func ^= obj.raw();
func ^= obj.ptr();
}
}
if (func.IsNull()) {
@ -482,7 +482,7 @@ ObjectPtr IsolateSpawnState::ResolveFunction() {
function_name(), script_url()));
return LanguageError::New(msg);
}
return func.raw();
return func.ptr();
}
// Lookup the to be spawned function for the Isolate.spawn implementation.
@ -508,7 +508,7 @@ ObjectPtr IsolateSpawnState::ResolveFunction() {
function_name(), library_url()));
return LanguageError::New(msg);
}
return func.raw();
return func.ptr();
}
const String& cls_name = String::Handle(zone, String::New(class_name()));
@ -533,7 +533,7 @@ ObjectPtr IsolateSpawnState::ResolveFunction() {
(library_url() != nullptr ? library_url() : script_url())));
return LanguageError::New(msg);
}
return func.raw();
return func.ptr();
}
static InstancePtr DeserializeMessage(Thread* thread, Message* message) {
@ -547,7 +547,7 @@ static InstancePtr DeserializeMessage(Thread* thread, Message* message) {
MessageSnapshotReader reader(message, thread);
const Object& obj = Object::Handle(zone, reader.ReadObject());
ASSERT(!obj.IsError());
return Instance::RawCast(obj.raw());
return Instance::RawCast(obj.ptr());
}
}
@ -720,7 +720,7 @@ class SpawnIsolateTask : public ThreadPool::Task {
return false;
}
ASSERT(result.IsFunction());
auto& func = Function::Handle(zone, Function::Cast(result).raw());
auto& func = Function::Handle(zone, Function::Cast(result).ptr());
func = func.ImplicitClosureFunction();
const auto& entrypoint_closure =
Object::Handle(zone, func.ImplicitStaticClosure());
@ -882,7 +882,7 @@ static const char* CanonicalizeUri(Thread* thread,
result = String2UTF8(String::Cast(obj));
} else if (obj.IsError()) {
Error& error_obj = Error::Handle();
error_obj ^= obj.raw();
error_obj ^= obj.ptr();
*error = zone->PrintToString("Unable to canonicalize uri '%s': %s",
uri.ToCString(), error_obj.ToErrorCString());
} else {
@ -983,7 +983,7 @@ DEFINE_NATIVE_ENTRY(Isolate_getPortAndCapabilitiesOfCurrentIsolate, 0, 0) {
1, Capability::Handle(Capability::New(isolate->pause_capability())));
result.SetAt(
2, Capability::Handle(Capability::New(isolate->terminate_capability())));
return result.raw();
return result.ptr();
}
DEFINE_NATIVE_ENTRY(Isolate_getCurrentRootUriStr, 0, 0) {
@ -1045,7 +1045,7 @@ DEFINE_NATIVE_ENTRY(TransferableTypedData_factory, 0, 2) {
array ^= growable_array.data();
array_length = growable_array.Length();
} else if (array_instance.IsArray()) {
array ^= Array::Cast(array_instance).raw();
array ^= Array::Cast(array_instance).ptr();
array_length = array.Length();
} else {
Exceptions::ThrowArgumentError(array_instance);
@ -1102,7 +1102,7 @@ DEFINE_NATIVE_ENTRY(TransferableTypedData_materialize, 0, 1) {
void* peer;
{
NoSafepointScope no_safepoint;
peer = thread->heap()->GetPeer(t.raw());
peer = thread->heap()->GetPeer(t.ptr());
// Assume that object's Peer is only used to track transferrability state.
ASSERT(peer != nullptr);
}
@ -1126,7 +1126,7 @@ DEFINE_NATIVE_ENTRY(TransferableTypedData_materialize, 0, 1) {
/* peer= */ data,
&ExternalTypedDataFinalizer, length,
/*auto_delete=*/true);
return typed_data.raw();
return typed_data.ptr();
}
} // namespace dart


@ -86,7 +86,7 @@ static TypedDataPtr GetRandomStateArray(const Instance& receiver) {
const TypedData& array = TypedData::Cast(state_field_value);
ASSERT(array.Length() == 2);
ASSERT(array.ElementType() == kUint32ArrayElement);
return array.raw();
return array.ptr();
}
// Implements:
@ -113,7 +113,7 @@ TypedDataPtr CreateRandomState(Zone* zone, uint64_t seed) {
result.SetUint32(0, static_cast<uint32_t>(seed));
result.SetUint32(result.ElementSizeInBytes(),
static_cast<uint32_t>(seed >> 32));
return result.raw();
return result.ptr();
}
uint64_t mix64(uint64_t n) {


@ -38,7 +38,7 @@ static InstancePtr CreateMirror(const String& mirror_class_name,
if (result.IsError()) {
Exceptions::PropagateError(Error::Cast(result));
}
return Instance::Cast(result).raw();
return Instance::Cast(result).ptr();
}
// Conventions:
@ -140,7 +140,7 @@ static InstancePtr CreateParameterMirrorList(const Function& func,
Exceptions::PropagateError(Error::Cast(result));
UNREACHABLE();
}
param_descriptor ^= result.raw();
param_descriptor ^= result.ptr();
ASSERT(param_descriptor.Length() ==
(Parser::kParameterEntrySize * non_implicit_param_count));
}
@ -149,7 +149,7 @@ static InstancePtr CreateParameterMirrorList(const Function& func,
args.SetAt(2, owner_mirror);
if (!has_extra_parameter_info) {
is_final = Bool::True().raw();
is_final = Bool::True().ptr();
default_value = Object::null();
metadata = Object::null();
}
@ -184,7 +184,7 @@ static InstancePtr CreateParameterMirrorList(const Function& func,
results.SetAt(i, param);
}
results.MakeImmutable();
return results.raw();
return results.ptr();
}
static InstancePtr CreateTypeVariableMirror(const TypeParameter& param,
@ -201,7 +201,7 @@ static InstancePtr CreateTypeVariableMirror(const TypeParameter& param,
static InstancePtr CreateTypeVariableList(const Class& cls) {
const TypeArguments& args = TypeArguments::Handle(cls.type_parameters());
if (args.IsNull()) {
return Object::empty_array().raw();
return Object::empty_array().ptr();
}
const Array& result = Array::Handle(Array::New(args.Length() * 2));
TypeParameter& type = TypeParameter::Handle();
@ -214,7 +214,7 @@ static InstancePtr CreateTypeVariableList(const Class& cls) {
result.SetAt(2 * i, name);
result.SetAt(2 * i + 1, type);
}
return result.raw();
return result.ptr();
}
static InstancePtr CreateFunctionTypeMirror(const AbstractType& type) {
@ -249,7 +249,7 @@ static InstancePtr CreateMethodMirror(const Function& func,
(static_cast<intptr_t>(func.IsGetterFunction()) << Mirrors::kGetter);
kind_flags |=
(static_cast<intptr_t>(func.IsSetterFunction()) << Mirrors::kSetter);
bool is_ctor = (func.kind() == FunctionLayout::kConstructor);
bool is_ctor = (func.kind() == UntaggedFunction::kConstructor);
kind_flags |= (static_cast<intptr_t>(is_ctor) << Mirrors::kConstructor);
kind_flags |= (static_cast<intptr_t>(is_ctor && func.is_const())
<< Mirrors::kConstCtor);
@ -491,7 +491,7 @@ DEFINE_NATIVE_ENTRY(LibraryMirror_libraryDependencies, 0, 2) {
while (entries.HasNext()) {
entry = entries.GetNext();
if (entry.IsLibraryPrefix()) {
prefix ^= entry.raw();
prefix ^= entry.ptr();
ports = prefix.imports();
for (intptr_t i = 0; i < ports.Length(); i++) {
ns ^= ports.At(i);
@ -506,7 +506,7 @@ DEFINE_NATIVE_ENTRY(LibraryMirror_libraryDependencies, 0, 2) {
}
}
return deps.raw();
return deps.ptr();
}
static InstancePtr CreateTypeMirror(const AbstractType& type) {
@ -644,7 +644,7 @@ DEFINE_NATIVE_ENTRY(MirrorSystem_libraries, 0, 0) {
library_mirrors.Add(library_mirror);
}
}
return library_mirrors.raw();
return library_mirrors.ptr();
}
DEFINE_NATIVE_ENTRY(MirrorSystem_isolate, 0, 0) {
@ -671,7 +671,7 @@ DEFINE_NATIVE_ENTRY(IsolateMirror_loadUri, 0, 1) {
// Canonicalize library URI.
String& canonical_uri = String::Handle(zone);
if (uri.StartsWith(Symbols::DartScheme())) {
canonical_uri = uri.raw();
canonical_uri = uri.ptr();
} else {
isolate->BlockClassFinalization();
const Object& result = Object::Handle(
@ -690,7 +690,7 @@ DEFINE_NATIVE_ENTRY(IsolateMirror_loadUri, 0, 1) {
ThrowLanguageError("library handler failed URI canonicalization");
}
canonical_uri ^= result.raw();
canonical_uri ^= result.ptr();
}
// Return the existing library if it has already been loaded.
@ -810,20 +810,20 @@ DEFINE_NATIVE_ENTRY(Mirrors_instantiateGenericType, 0, 2) {
Type& instantiated_type = Type::Handle(Type::New(clz, type_args_obj));
instantiated_type ^= ClassFinalizer::FinalizeType(instantiated_type);
return instantiated_type.raw();
return instantiated_type.ptr();
}
DEFINE_NATIVE_ENTRY(Mirrors_mangleName, 0, 2) {
GET_NON_NULL_NATIVE_ARGUMENT(String, name, arguments->NativeArgAt(0));
GET_NON_NULL_NATIVE_ARGUMENT(MirrorReference, ref, arguments->NativeArgAt(1));
const Library& lib = Library::Handle(ref.GetLibraryReferent());
return lib.IsPrivate(name) ? lib.PrivateName(name) : name.raw();
return lib.IsPrivate(name) ? lib.PrivateName(name) : name.ptr();
}
DEFINE_NATIVE_ENTRY(MirrorReference_equals, 0, 2) {
GET_NON_NULL_NATIVE_ARGUMENT(MirrorReference, a, arguments->NativeArgAt(0));
GET_NON_NULL_NATIVE_ARGUMENT(MirrorReference, b, arguments->NativeArgAt(1));
return Bool::Get(a.referent() == b.referent()).raw();
return Bool::Get(a.referent() == b.referent()).ptr();
}
DEFINE_NATIVE_ENTRY(DeclarationMirror_metadata, 0, 1) {
@ -833,7 +833,7 @@ DEFINE_NATIVE_ENTRY(DeclarationMirror_metadata, 0, 1) {
const MirrorReference& decl_ref = MirrorReference::Cast(reflectee);
decl = decl_ref.referent();
} else if (reflectee.IsTypeParameter()) {
decl = reflectee.raw();
decl = reflectee.ptr();
} else {
UNREACHABLE();
}
@ -842,7 +842,7 @@ DEFINE_NATIVE_ENTRY(DeclarationMirror_metadata, 0, 1) {
Library& library = Library::Handle();
if (decl.IsClass()) {
klass ^= decl.raw();
klass ^= decl.ptr();
library = klass.library();
} else if (decl.IsFunction()) {
klass = Function::Cast(decl).origin();
@ -851,19 +851,19 @@ DEFINE_NATIVE_ENTRY(DeclarationMirror_metadata, 0, 1) {
klass = Field::Cast(decl).Origin();
library = klass.library();
} else if (decl.IsLibrary()) {
library ^= decl.raw();
library ^= decl.ptr();
} else if (decl.IsTypeParameter()) {
// There is no reference from a canonical type parameter to its declaration.
return Object::empty_array().raw();
return Object::empty_array().ptr();
} else {
return Object::empty_array().raw();
return Object::empty_array().ptr();
}
const Object& metadata = Object::Handle(library.GetMetadata(decl));
if (metadata.IsError()) {
Exceptions::PropagateError(Error::Cast(metadata));
}
return metadata.raw();
return metadata.ptr();
}
DEFINE_NATIVE_ENTRY(FunctionTypeMirror_call_method, 0, 2) {
@ -913,7 +913,7 @@ DEFINE_NATIVE_ENTRY(ClassMirror_supertype, 0, 1) {
: type.type_class());
const AbstractType& super_type = AbstractType::Handle(cls.super_type());
ASSERT(super_type.IsNull() || super_type.IsFinalized());
return super_type.raw();
return super_type.ptr();
}
DEFINE_NATIVE_ENTRY(ClassMirror_supertype_instantiated, 0, 1) {
@ -964,7 +964,7 @@ DEFINE_NATIVE_ENTRY(ClassMirror_interfaces_instantiated, 0, 1) {
interfaces_inst.SetAt(i, interface);
}
return interfaces_inst.raw();
return interfaces_inst.ptr();
}
DEFINE_NATIVE_ENTRY(ClassMirror_mixin, 0, 1) {
@ -980,7 +980,7 @@ DEFINE_NATIVE_ENTRY(ClassMirror_mixin, 0, 1) {
mixin_type ^= interfaces.At(interfaces.Length() - 1);
}
ASSERT(mixin_type.IsNull() || mixin_type.IsFinalized());
return mixin_type.raw();
return mixin_type.ptr();
}
DEFINE_NATIVE_ENTRY(ClassMirror_mixin_instantiated, 0, 2) {
@ -998,7 +998,7 @@ DEFINE_NATIVE_ENTRY(ClassMirror_mixin_instantiated, 0, 2) {
mixin_type ^= interfaces.At(interfaces.Length() - 1);
}
if (mixin_type.IsNull()) {
return mixin_type.raw();
return mixin_type.ptr();
}
return InstantiateType(mixin_type, instantiator);
@ -1040,16 +1040,16 @@ DEFINE_NATIVE_ENTRY(ClassMirror_members, 0, 3) {
for (intptr_t i = 0; i < num_functions; i++) {
func ^= functions.At(i);
if (func.is_reflectable() &&
(func.kind() == FunctionLayout::kRegularFunction ||
func.kind() == FunctionLayout::kGetterFunction ||
func.kind() == FunctionLayout::kSetterFunction)) {
(func.kind() == UntaggedFunction::kRegularFunction ||
func.kind() == UntaggedFunction::kGetterFunction ||
func.kind() == UntaggedFunction::kSetterFunction)) {
member_mirror =
CreateMethodMirror(func, owner_mirror, owner_instantiator);
member_mirrors.Add(member_mirror);
}
}
return member_mirrors.raw();
return member_mirrors.ptr();
}
DEFINE_NATIVE_ENTRY(ClassMirror_constructors, 0, 3) {
@ -1075,14 +1075,15 @@ DEFINE_NATIVE_ENTRY(ClassMirror_constructors, 0, 3) {
Function& func = Function::Handle();
for (intptr_t i = 0; i < num_functions; i++) {
func ^= functions.At(i);
if (func.is_reflectable() && func.kind() == FunctionLayout::kConstructor) {
if (func.is_reflectable() &&
func.kind() == UntaggedFunction::kConstructor) {
constructor_mirror =
CreateMethodMirror(func, owner_mirror, owner_instantiator);
constructor_mirrors.Add(constructor_mirror);
}
}
return constructor_mirrors.raw();
return constructor_mirrors.ptr();
}
DEFINE_NATIVE_ENTRY(LibraryMirror_members, 0, 2) {
@ -1128,9 +1129,9 @@ DEFINE_NATIVE_ENTRY(LibraryMirror_members, 0, 2) {
} else if (entry.IsFunction()) {
const Function& func = Function::Cast(entry);
if (func.is_reflectable() &&
(func.kind() == FunctionLayout::kRegularFunction ||
func.kind() == FunctionLayout::kGetterFunction ||
func.kind() == FunctionLayout::kSetterFunction)) {
(func.kind() == UntaggedFunction::kRegularFunction ||
func.kind() == UntaggedFunction::kGetterFunction ||
func.kind() == UntaggedFunction::kSetterFunction)) {
member_mirror =
CreateMethodMirror(func, owner_mirror, AbstractType::Handle());
member_mirrors.Add(member_mirror);
@ -1138,7 +1139,7 @@ DEFINE_NATIVE_ENTRY(LibraryMirror_members, 0, 2) {
}
}
return member_mirrors.raw();
return member_mirrors.ptr();
}
DEFINE_NATIVE_ENTRY(ClassMirror_type_variables, 0, 1) {
@ -1162,7 +1163,7 @@ DEFINE_NATIVE_ENTRY(ClassMirror_type_arguments, 0, 1) {
const intptr_t num_params = cls.NumTypeParameters();
if (num_params == 0) {
return Object::empty_array().raw();
return Object::empty_array().ptr();
}
const Array& result = Array::Handle(Array::New(num_params));
@ -1174,12 +1175,12 @@ DEFINE_NATIVE_ENTRY(ClassMirror_type_arguments, 0, 1) {
// arguments have been provided, or all arguments are dynamic. Return a list
// of typemirrors on dynamic in this case.
if (args.IsNull()) {
arg_type = Object::dynamic_type().raw();
arg_type = Object::dynamic_type().ptr();
type_mirror = CreateTypeMirror(arg_type);
for (intptr_t i = 0; i < num_params; i++) {
result.SetAt(i, type_mirror);
}
return result.raw();
return result.ptr();
}
ASSERT(args.Length() >= num_params);
@ -1189,7 +1190,7 @@ DEFINE_NATIVE_ENTRY(ClassMirror_type_arguments, 0, 1) {
type_mirror = CreateTypeMirror(arg_type);
result.SetAt(i, type_mirror);
}
return result.raw();
return result.ptr();
}
DEFINE_NATIVE_ENTRY(TypeVariableMirror_owner, 0, 1) {
@ -1261,7 +1262,7 @@ DEFINE_NATIVE_ENTRY(ClosureMirror_function, 0, 1) {
// In the case of extension methods also we avoid handing out a reference
// to the tear-off and instead get the parent function of the
// anonymous closure.
function = parent.raw();
function = parent.ptr();
}
Type& instantiator = Type::Handle();
@ -1343,20 +1344,20 @@ DEFINE_NATIVE_ENTRY(ClassMirror_invokeConstructor, 0, 5) {
// unnamed constructor for class 'A' is labeled 'A.'.
// This convention prevents users from explicitly calling constructors.
const String& klass_name = String::Handle(klass.Name());
String& external_constructor_name = String::Handle(klass_name.raw());
String& external_constructor_name = String::Handle(klass_name.ptr());
String& internal_constructor_name =
String::Handle(String::Concat(klass_name, Symbols::Dot()));
if (!constructor_name.IsNull() && constructor_name.Length() > 0) {
internal_constructor_name =
String::Concat(internal_constructor_name, constructor_name);
external_constructor_name = internal_constructor_name.raw();
external_constructor_name = internal_constructor_name.ptr();
}
Function& lookup_constructor = Function::Handle(
Resolver::ResolveFunction(zone, klass, internal_constructor_name));
if (lookup_constructor.IsNull() ||
(lookup_constructor.kind() != FunctionLayout::kConstructor) ||
(lookup_constructor.kind() != UntaggedFunction::kConstructor) ||
!lookup_constructor.is_reflectable()) {
ThrowNoSuchMethod(AbstractType::Handle(klass.RareType()),
external_constructor_name, explicit_args, arg_names,
@ -1384,7 +1385,7 @@ DEFINE_NATIVE_ENTRY(ClassMirror_invokeConstructor, 0, 5) {
type_arguments = rare_type.arguments();
}
Class& redirected_klass = Class::Handle(klass.raw());
Class& redirected_klass = Class::Handle(klass.ptr());
const intptr_t num_explicit_args = explicit_args.Length();
const intptr_t num_implicit_args = 1;
const Array& args =
@ -1454,9 +1455,9 @@ DEFINE_NATIVE_ENTRY(ClassMirror_invokeConstructor, 0, 5) {
ASSERT(result.IsInstance() || result.IsNull());
if (lookup_constructor.IsGenerativeConstructor()) {
return new_object.raw();
return new_object.ptr();
} else {
return result.raw();
return result.ptr();
}
}
@ -1554,7 +1555,7 @@ DEFINE_NATIVE_ENTRY(DeclarationMirror_location, 0, 1) {
const MirrorReference& decl_ref = MirrorReference::Cast(reflectee);
decl = decl_ref.referent();
} else if (reflectee.IsTypeParameter()) {
decl = reflectee.raw();
decl = reflectee.ptr();
} else {
UNREACHABLE();
}
@ -1585,7 +1586,7 @@ DEFINE_NATIVE_ENTRY(DeclarationMirror_location, 0, 1) {
return Instance::null();
} else if (decl.IsLibrary()) {
const Library& lib = Library::Cast(decl);
if (lib.raw() == Library::NativeWrappersLibrary()) {
if (lib.ptr() == Library::NativeWrappersLibrary()) {
return Instance::null(); // No source.
}
const Array& scripts = Array::Handle(zone, lib.LoadedScripts());
@ -1633,7 +1634,7 @@ DEFINE_NATIVE_ENTRY(VariableMirror_type, 0, 2) {
DEFINE_NATIVE_ENTRY(TypeMirror_subtypeTest, 0, 2) {
GET_NON_NULL_NATIVE_ARGUMENT(AbstractType, a, arguments->NativeArgAt(0));
GET_NON_NULL_NATIVE_ARGUMENT(AbstractType, b, arguments->NativeArgAt(1));
return Bool::Get(a.IsSubtypeOf(b, Heap::kNew)).raw();
return Bool::Get(a.IsSubtypeOf(b, Heap::kNew)).ptr();
}
#endif // !DART_PRECOMPILED_RUNTIME


@ -54,7 +54,7 @@ DEFINE_NATIVE_ENTRY(Object_setHash, 0, 2) {
const Instance& instance =
Instance::CheckedHandle(zone, arguments->NativeArgAt(0));
Heap* heap = isolate->group()->heap();
heap->SetHash(instance.raw(), hash.Value());
heap->SetHash(instance.ptr(), hash.Value());
#endif
return Object::null();
}
@ -63,7 +63,7 @@ DEFINE_NATIVE_ENTRY(Object_toString, 0, 1) {
const Instance& instance =
Instance::CheckedHandle(zone, arguments->NativeArgAt(0));
if (instance.IsString()) {
return instance.raw();
return instance.ptr();
}
if (instance.IsAbstractType()) {
return AbstractType::Cast(instance).UserVisibleName();
@ -98,13 +98,13 @@ DEFINE_NATIVE_ENTRY(Object_haveSameRuntimeType, 0, 2) {
if (left_cid != right_cid) {
if (IsIntegerClassId(left_cid)) {
return Bool::Get(IsIntegerClassId(right_cid)).raw();
return Bool::Get(IsIntegerClassId(right_cid)).ptr();
} else if (IsStringClassId(left_cid)) {
return Bool::Get(IsStringClassId(right_cid)).raw();
return Bool::Get(IsStringClassId(right_cid)).ptr();
} else if (IsTypeClassId(left_cid)) {
return Bool::Get(IsTypeClassId(right_cid)).raw();
return Bool::Get(IsTypeClassId(right_cid)).ptr();
} else {
return Bool::False().raw();
return Bool::False().ptr();
}
}
@ -117,15 +117,15 @@ DEFINE_NATIVE_ENTRY(Object_haveSameRuntimeType, 0, 2) {
AbstractType::Handle(right.GetType(Heap::kNew));
return Bool::Get(
left_type.IsEquivalent(right_type, TypeEquality::kSyntactical))
.raw();
.ptr();
}
if (!cls.IsGeneric()) {
return Bool::True().raw();
return Bool::True().ptr();
}
if (left.GetTypeArguments() == right.GetTypeArguments()) {
return Bool::True().raw();
return Bool::True().ptr();
}
const TypeArguments& left_type_arguments =
TypeArguments::Handle(left.GetTypeArguments());
@ -136,7 +136,7 @@ DEFINE_NATIVE_ENTRY(Object_haveSameRuntimeType, 0, 2) {
return Bool::Get(left_type_arguments.IsSubvectorEquivalent(
right_type_arguments, num_type_args - num_type_params,
num_type_params, TypeEquality::kSyntactical))
.raw();
.ptr();
}
DEFINE_NATIVE_ENTRY(Object_instanceOf, 0, 4) {
@ -161,7 +161,7 @@ DEFINE_NATIVE_ENTRY(Object_instanceOf, 0, 4) {
OS::PrintErr(" test type: %s\n",
String::Handle(zone, type.Name()).ToCString());
}
return Bool::Get(is_instance_of).raw();
return Bool::Get(is_instance_of).ptr();
}
DEFINE_NATIVE_ENTRY(Object_simpleInstanceOf, 0, 2) {
@ -175,7 +175,7 @@ DEFINE_NATIVE_ENTRY(Object_simpleInstanceOf, 0, 2) {
ASSERT(type.IsInstantiated());
const bool is_instance_of = instance.IsInstanceOf(
type, Object::null_type_arguments(), Object::null_type_arguments());
return Bool::Get(is_instance_of).raw();
return Bool::Get(is_instance_of).ptr();
}
DEFINE_NATIVE_ENTRY(AbstractType_toString, 0, 1) {
@ -196,10 +196,10 @@ DEFINE_NATIVE_ENTRY(Type_equality, 0, 2) {
const Type& type = Type::CheckedHandle(zone, arguments->NativeArgAt(0));
const Instance& other =
Instance::CheckedHandle(zone, arguments->NativeArgAt(1));
if (type.raw() == other.raw()) {
return Bool::True().raw();
if (type.ptr() == other.ptr()) {
return Bool::True().ptr();
}
return Bool::Get(type.IsEquivalent(other, TypeEquality::kSyntactical)).raw();
return Bool::Get(type.IsEquivalent(other, TypeEquality::kSyntactical)).ptr();
}
DEFINE_NATIVE_ENTRY(FunctionType_getHashCode, 0, 1) {
@ -216,16 +216,16 @@ DEFINE_NATIVE_ENTRY(FunctionType_equality, 0, 2) {
FunctionType::CheckedHandle(zone, arguments->NativeArgAt(0));
const Instance& other =
Instance::CheckedHandle(zone, arguments->NativeArgAt(1));
if (type.raw() == other.raw()) {
return Bool::True().raw();
if (type.ptr() == other.ptr()) {
return Bool::True().ptr();
}
return Bool::Get(type.IsEquivalent(other, TypeEquality::kSyntactical)).raw();
return Bool::Get(type.IsEquivalent(other, TypeEquality::kSyntactical)).ptr();
}
DEFINE_NATIVE_ENTRY(LibraryPrefix_isLoaded, 0, 1) {
const LibraryPrefix& prefix =
LibraryPrefix::CheckedHandle(zone, arguments->NativeArgAt(0));
return Bool::Get(prefix.is_loaded()).raw();
return Bool::Get(prefix.is_loaded()).ptr();
}
DEFINE_NATIVE_ENTRY(LibraryPrefix_setLoaded, 0, 1) {
@ -268,9 +268,9 @@ DEFINE_NATIVE_ENTRY(LibraryPrefix_issueLoad, 0, 1) {
DEFINE_NATIVE_ENTRY(Internal_inquireIs64Bit, 0, 0) {
#if defined(ARCH_IS_64_BIT)
return Bool::True().raw();
return Bool::True().ptr();
#else
return Bool::False().raw();
return Bool::False().ptr();
#endif // defined(ARCH_IS_64_BIT)
}
@ -293,7 +293,7 @@ static bool ExtractInterfaceTypeArgs(Zone* zone,
const TypeArguments& instance_type_args,
const Class& interface_cls,
TypeArguments* interface_type_args) {
Class& cur_cls = Class::Handle(zone, instance_cls.raw());
Class& cur_cls = Class::Handle(zone, instance_cls.ptr());
// The following code is a specialization of Class::IsSubtypeOf().
Array& interfaces = Array::Handle(zone);
AbstractType& interface = AbstractType::Handle(zone);
@ -301,8 +301,8 @@ static bool ExtractInterfaceTypeArgs(Zone* zone,
TypeArguments& cur_interface_type_args = TypeArguments::Handle(zone);
while (true) {
// Additional subtyping rules related to 'FutureOr' are not applied.
if (cur_cls.raw() == interface_cls.raw()) {
*interface_type_args = instance_type_args.raw();
if (cur_cls.ptr() == interface_cls.ptr()) {
*interface_type_args = instance_type_args.ptr();
return true;
}
interfaces = cur_cls.interfaces();
@ -415,7 +415,7 @@ DEFINE_NATIVE_ENTRY(Internal_extractTypeArguments, 0, 2) {
Exceptions::PropagateError(Error::Cast(result));
UNREACHABLE();
}
return result.raw();
return result.ptr();
}
DEFINE_NATIVE_ENTRY(Internal_prependTypeArguments, 0, 4) {
@ -462,7 +462,7 @@ DEFINE_NATIVE_ENTRY(Internal_boundsCheckForPartialInstantiation, 0, 2) {
for (intptr_t i = 0; i < bounds.Length(); ++i) {
parameter ^= bounds.TypeAt(i);
supertype = parameter.bound();
subtype = type_args_to_check.IsNull() ? Object::dynamic_type().raw()
subtype = type_args_to_check.IsNull() ? Object::dynamic_type().ptr()
: type_args_to_check.TypeAt(i);
ASSERT(!subtype.IsNull());
@ -510,7 +510,7 @@ DEFINE_NATIVE_ENTRY(InvocationMirror_unpackTypeArguments, 0, 2) {
}
}
type_list.MakeImmutable();
return type_list.raw();
return type_list.ptr();
}
DEFINE_NATIVE_ENTRY(NoSuchMethodError_existingMethodSignature, 0, 3) {


@ -34,7 +34,7 @@ DEFINE_NATIVE_ENTRY(UserTag_makeCurrent, 0, 1) {
}
const UserTag& old = UserTag::Handle(zone, isolate->current_tag());
self.MakeActive();
return old.raw();
return old.ptr();
}
DEFINE_NATIVE_ENTRY(UserTag_defaultTag, 0, 0) {


@ -29,10 +29,10 @@ DEFINE_NATIVE_ENTRY(RegExp_factory, 0, 6) {
arguments->NativeArgAt(4));
GET_NON_NULL_NATIVE_ARGUMENT(Instance, handle_dot_all,
arguments->NativeArgAt(5));
bool ignore_case = handle_case_sensitive.raw() != Bool::True().raw();
bool multi_line = handle_multi_line.raw() == Bool::True().raw();
bool unicode = handle_unicode.raw() == Bool::True().raw();
bool dot_all = handle_dot_all.raw() == Bool::True().raw();
bool ignore_case = handle_case_sensitive.ptr() != Bool::True().ptr();
bool multi_line = handle_multi_line.ptr() == Bool::True().ptr();
bool unicode = handle_unicode.ptr() == Bool::True().ptr();
bool dot_all = handle_dot_all.ptr() == Bool::True().ptr();
RegExpFlags flags;
@ -60,25 +60,25 @@ DEFINE_NATIVE_ENTRY(RegExp_getPattern, 0, 1) {
DEFINE_NATIVE_ENTRY(RegExp_getIsMultiLine, 0, 1) {
const RegExp& regexp = RegExp::CheckedHandle(zone, arguments->NativeArgAt(0));
ASSERT(!regexp.IsNull());
return Bool::Get(regexp.flags().IsMultiLine()).raw();
return Bool::Get(regexp.flags().IsMultiLine()).ptr();
}
DEFINE_NATIVE_ENTRY(RegExp_getIsUnicode, 0, 1) {
const RegExp& regexp = RegExp::CheckedHandle(zone, arguments->NativeArgAt(0));
ASSERT(!regexp.IsNull());
return Bool::Get(regexp.flags().IsUnicode()).raw();
return Bool::Get(regexp.flags().IsUnicode()).ptr();
}
DEFINE_NATIVE_ENTRY(RegExp_getIsDotAll, 0, 1) {
const RegExp& regexp = RegExp::CheckedHandle(zone, arguments->NativeArgAt(0));
ASSERT(!regexp.IsNull());
return Bool::Get(regexp.flags().IsDotAll()).raw();
return Bool::Get(regexp.flags().IsDotAll()).ptr();
}
DEFINE_NATIVE_ENTRY(RegExp_getIsCaseSensitive, 0, 1) {
const RegExp& regexp = RegExp::CheckedHandle(zone, arguments->NativeArgAt(0));
ASSERT(!regexp.IsNull());
return Bool::Get(!regexp.flags().IgnoreCase()).raw();
return Bool::Get(!regexp.flags().IgnoreCase()).ptr();
}
DEFINE_NATIVE_ENTRY(RegExp_getGroupCount, 0, 1) {

View file

@ -550,25 +550,25 @@ DEFINE_NATIVE_ENTRY(Int32x4_setW, 0, 2) {
DEFINE_NATIVE_ENTRY(Int32x4_getFlagX, 0, 1) {
GET_NON_NULL_NATIVE_ARGUMENT(Int32x4, self, arguments->NativeArgAt(0));
int32_t value = self.x();
return Bool::Get(value != 0).raw();
return Bool::Get(value != 0).ptr();
}
DEFINE_NATIVE_ENTRY(Int32x4_getFlagY, 0, 1) {
GET_NON_NULL_NATIVE_ARGUMENT(Int32x4, self, arguments->NativeArgAt(0));
int32_t value = self.y();
return Bool::Get(value != 0).raw();
return Bool::Get(value != 0).ptr();
}
DEFINE_NATIVE_ENTRY(Int32x4_getFlagZ, 0, 1) {
GET_NON_NULL_NATIVE_ARGUMENT(Int32x4, self, arguments->NativeArgAt(0));
int32_t value = self.z();
return Bool::Get(value != 0).raw();
return Bool::Get(value != 0).ptr();
}
DEFINE_NATIVE_ENTRY(Int32x4_getFlagW, 0, 1) {
GET_NON_NULL_NATIVE_ARGUMENT(Int32x4, self, arguments->NativeArgAt(0));
int32_t value = self.w();
return Bool::Get(value != 0).raw();
return Bool::Get(value != 0).ptr();
}
DEFINE_NATIVE_ENTRY(Int32x4_setFlagX, 0, 2) {
@ -578,7 +578,7 @@ DEFINE_NATIVE_ENTRY(Int32x4_setFlagX, 0, 2) {
int32_t _y = self.y();
int32_t _z = self.z();
int32_t _w = self.w();
_x = flagX.raw() == Bool::True().raw() ? 0xFFFFFFFF : 0x0;
_x = flagX.ptr() == Bool::True().ptr() ? 0xFFFFFFFF : 0x0;
return Int32x4::New(_x, _y, _z, _w);
}
@ -589,7 +589,7 @@ DEFINE_NATIVE_ENTRY(Int32x4_setFlagY, 0, 2) {
int32_t _y = self.y();
int32_t _z = self.z();
int32_t _w = self.w();
_y = flagY.raw() == Bool::True().raw() ? 0xFFFFFFFF : 0x0;
_y = flagY.ptr() == Bool::True().ptr() ? 0xFFFFFFFF : 0x0;
return Int32x4::New(_x, _y, _z, _w);
}
@ -600,7 +600,7 @@ DEFINE_NATIVE_ENTRY(Int32x4_setFlagZ, 0, 2) {
int32_t _y = self.y();
int32_t _z = self.z();
int32_t _w = self.w();
_z = flagZ.raw() == Bool::True().raw() ? 0xFFFFFFFF : 0x0;
_z = flagZ.ptr() == Bool::True().ptr() ? 0xFFFFFFFF : 0x0;
return Int32x4::New(_x, _y, _z, _w);
}
@ -611,7 +611,7 @@ DEFINE_NATIVE_ENTRY(Int32x4_setFlagW, 0, 2) {
int32_t _y = self.y();
int32_t _z = self.z();
int32_t _w = self.w();
_w = flagW.raw() == Bool::True().raw() ? 0xFFFFFFFF : 0x0;
_w = flagW.ptr() == Bool::True().ptr() ? 0xFFFFFFFF : 0x0;
return Int32x4::New(_x, _y, _z, _w);
}


@ -25,7 +25,7 @@ DEFINE_NATIVE_ENTRY(String_fromEnvironment, 0, 3) {
if (!env_value.IsNull()) {
return Symbols::New(thread, env_value);
}
return default_value.raw();
return default_value.ptr();
}
DEFINE_NATIVE_ENTRY(StringBase_createFromCodePoints, 0, 3) {
@ -40,7 +40,7 @@ DEFINE_NATIVE_ENTRY(StringBase_createFromCodePoints, 0, 3) {
a = growableArray.data();
length = growableArray.Length();
} else if (list.IsArray()) {
a = Array::Cast(list).raw();
a = Array::Cast(list).ptr();
length = a.Length();
} else {
Exceptions::ThrowArgumentError(list);
@ -242,7 +242,7 @@ DEFINE_NATIVE_ENTRY(StringBase_joinReplaceAllResult, 0, 4) {
if (write_index < length) {
Exceptions::ThrowArgumentError(matches_growable);
}
return result.raw();
return result.ptr();
}
DEFINE_NATIVE_ENTRY(OneByteString_substringUnchecked, 0, 3) {
@ -283,7 +283,7 @@ DEFINE_NATIVE_ENTRY(OneByteString_splitWithCharCode, 0, 2) {
result.Add(str);
result.SetTypeArguments(TypeArguments::Handle(
zone, isolate->group()->object_store()->type_argument_string()));
return result.raw();
return result.ptr();
}
DEFINE_NATIVE_ENTRY(Internal_allocateOneByteString, 0, 1) {
@ -379,7 +379,7 @@ DEFINE_NATIVE_ENTRY(OneByteString_allocateFromOneByteList, 0, 3) {
intptr_t value = Smi::Value(static_cast<SmiPtr>(array.At(start + i)));
OneByteString::SetCharAt(string, i, value);
}
return string.raw();
return string.ptr();
} else if (list.IsGrowableObjectArray()) {
const GrowableObjectArray& array = GrowableObjectArray::Cast(list);
if (end > array.Length()) {
@ -392,7 +392,7 @@ DEFINE_NATIVE_ENTRY(OneByteString_allocateFromOneByteList, 0, 3) {
intptr_t value = Smi::Value(static_cast<SmiPtr>(array.At(start + i)));
OneByteString::SetCharAt(string, i, value);
}
return string.raw();
return string.ptr();
}
UNREACHABLE();
return Object::null();
@ -483,7 +483,7 @@ DEFINE_NATIVE_ENTRY(TwoByteString_allocateFromTwoByteList, 0, 3) {
intptr_t value = Smi::Value(static_cast<SmiPtr>(array.At(start + i)));
TwoByteString::SetCharAt(string, i, value);
}
return string.raw();
return string.ptr();
} else if (list.IsGrowableObjectArray()) {
const GrowableObjectArray& array = GrowableObjectArray::Cast(list);
if (end > array.Length()) {
@ -495,7 +495,7 @@ DEFINE_NATIVE_ENTRY(TwoByteString_allocateFromTwoByteList, 0, 3) {
intptr_t value = Smi::Value(static_cast<SmiPtr>(array.At(start + i)));
TwoByteString::SetCharAt(string, i, value);
}
return string.raw();
return string.ptr();
}
UNREACHABLE();
return Object::null();
@ -579,7 +579,7 @@ DEFINE_NATIVE_ENTRY(String_concatRange, 0, 3) {
Array& strings = Array::Handle();
intptr_t length = -1;
if (argument.IsArray()) {
strings ^= argument.raw();
strings ^= argument.ptr();
length = strings.Length();
} else if (argument.IsGrowableObjectArray()) {
const GrowableObjectArray& g_array = GrowableObjectArray::Cast(argument);
@ -619,7 +619,7 @@ DEFINE_NATIVE_ENTRY(StringBuffer_createStringFromUint16Array, 0, 3) {
uint16_t* data_position = reinterpret_cast<uint16_t*>(codeUnits.DataAddr(0));
String::Copy(result, 0, data_position, length_value);
return result.raw();
return result.ptr();
}
} // namespace dart


@ -18,10 +18,10 @@ namespace dart {
DEFINE_NATIVE_ENTRY(Timeline_isDartStreamEnabled, 0, 0) {
#if defined(SUPPORT_TIMELINE)
if (Timeline::GetDartStream()->enabled()) {
return Bool::True().raw();
return Bool::True().ptr();
}
#endif
return Bool::False().raw();
return Bool::False().ptr();
}
DEFINE_NATIVE_ENTRY(Timeline_getNextAsyncId, 0, 0) {


@ -108,7 +108,7 @@ static BoolPtr CopyData(const Instance& dst,
TypedData::Copy<DstType, SrcType>(dst_array, dst_offset_in_bytes, src_array,
src_offset_in_bytes, length_in_bytes);
}
return Bool::True().raw();
return Bool::True().ptr();
}
static bool IsClamped(intptr_t cid) {
@ -174,7 +174,7 @@ DEFINE_NATIVE_ENTRY(TypedData_setRange, 0, 7) {
}
}
UNREACHABLE();
return Bool::False().raw();
return Bool::False().ptr();
}
// Native methods for typed data allocation are recognized and implemented


@ -10,9 +10,9 @@ namespace dart {
DEFINE_NATIVE_ENTRY(Uri_isWindowsPlatform, 0, 0) {
#if defined(HOST_OS_WINDOWS)
return Bool::True().raw();
return Bool::True().ptr();
#else
return Bool::False().raw();
return Bool::False().ptr();
#endif
}


@ -55,7 +55,7 @@ DEFINE_NATIVE_ENTRY(VMService_SendIsolateServiceMessage, 0, 2) {
// TODO(turnidge): Throw an exception when the return value is false?
bool result = PortMap::PostMessage(
writer.WriteMessage(message, sp.Id(), Message::kOOBPriority));
return Bool::Get(result).raw();
return Bool::Get(result).ptr();
#else
return Object::null();
#endif
@ -122,7 +122,7 @@ DEFINE_NATIVE_ENTRY(VMService_ListenStream, 0, 1) {
#ifndef PRODUCT
GET_NON_NULL_NATIVE_ARGUMENT(String, stream_id, arguments->NativeArgAt(0));
bool result = Service::ListenStream(stream_id.ToCString());
return Bool::Get(result).raw();
return Bool::Get(result).ptr();
#else
return Object::null();
#endif
@ -323,7 +323,7 @@ DEFINE_NATIVE_ENTRY(VMService_DecodeAssets, 0, 1) {
#ifndef PRODUCT
GET_NON_NULL_NATIVE_ARGUMENT(TypedData, data, arguments->NativeArgAt(0));
Api::Scope scope(thread);
Dart_Handle data_handle = Api::NewHandle(thread, data.raw());
Dart_Handle data_handle = Api::NewHandle(thread, data.ptr());
Dart_Handle result_list;
{
TransitionVMToNative transition(thread);
@ -368,7 +368,7 @@ DEFINE_NATIVE_ENTRY(VMService_DecodeAssets, 0, 1) {
idx += 2;
}
}
return Api::UnwrapArrayHandle(thread->zone(), result_list).raw();
return Api::UnwrapArrayHandle(thread->zone(), result_list).ptr();
#else
return Object::null();
#endif

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long


@ -133,7 +133,7 @@ static ErrorPtr BootstrapFromKernel(Thread* thread,
const Object& result = Object::Handle(zone, loader.LoadProgram());
program.reset();
if (result.IsError()) {
return Error::Cast(result).raw();
return Error::Cast(result).ptr();
}
// The builtin library should be registered with the VM.
@ -169,7 +169,7 @@ ErrorPtr Bootstrap::DoBootstrapping(const uint8_t* kernel_buffer,
ObjectStore::BootstrapLibraryId id = bootstrap_libraries[i].index;
uri = Symbols::New(thread, bootstrap_libraries[i].uri);
lib = isolate_group->object_store()->bootstrap_library(id);
ASSERT(lib.raw() == Library::LookupLibrary(thread, uri));
ASSERT(lib.ptr() == Library::LookupLibrary(thread, uri));
if (lib.IsNull()) {
lib = Library::NewLibraryHelper(uri, false);
lib.SetLoadRequested();


@ -52,7 +52,7 @@ bool MetadataMapTraits::IsMatch(const Object& a, const Object& b) {
return TypeParameter::Cast(a).parameterized_class_id() ==
TypeParameter::Cast(b).parameterized_class_id();
}
return a.raw() == b.raw();
return a.ptr() == b.ptr();
}
uword MetadataMapTraits::Hash(const Object& key) {


@ -21,7 +21,7 @@ class CharArray {
String& result = String::Handle(StringFrom(data_, len_, Heap::kOld));
result.SetCanonical();
result.SetHash(hash_);
return result.raw();
return result.ptr();
}
bool Equals(const String& other) const {
ASSERT(other.HasHash());
@ -167,7 +167,7 @@ class CanonicalTypeTraits {
}
static uword Hash(const CanonicalTypeKey& key) { return key.Hash(); }
static ObjectPtr NewKey(const CanonicalTypeKey& obj) {
return obj.key_.raw();
return obj.key_.ptr();
}
};
typedef UnorderedHashSet<CanonicalTypeTraits> CanonicalTypeSet;
@ -206,7 +206,7 @@ class CanonicalFunctionTypeTraits {
}
static uword Hash(const CanonicalFunctionTypeKey& key) { return key.Hash(); }
static ObjectPtr NewKey(const CanonicalFunctionTypeKey& obj) {
return obj.key_.raw();
return obj.key_.ptr();
}
};
typedef UnorderedHashSet<CanonicalFunctionTypeTraits> CanonicalFunctionTypeSet;
@ -245,7 +245,7 @@ class CanonicalTypeParameterTraits {
}
static uword Hash(const CanonicalTypeParameterKey& key) { return key.Hash(); }
static ObjectPtr NewKey(const CanonicalTypeParameterKey& obj) {
return obj.key_.raw();
return obj.key_.ptr();
}
};
typedef UnorderedHashSet<CanonicalTypeParameterTraits>
@ -287,7 +287,7 @@ class CanonicalTypeArgumentsTraits {
}
static uword Hash(const CanonicalTypeArgumentsKey& key) { return key.Hash(); }
static ObjectPtr NewKey(const CanonicalTypeArgumentsKey& obj) {
return obj.key_.raw();
return obj.key_.ptr();
}
};
typedef UnorderedHashSet<CanonicalTypeArgumentsTraits>


@ -84,7 +84,7 @@ static void AddSuperType(const AbstractType& type,
static void CollectFinalizedSuperClasses(
const Class& cls_,
GrowableArray<intptr_t>* finalized_super_classes) {
Class& cls = Class::Handle(cls_.raw());
Class& cls = Class::Handle(cls_.ptr());
AbstractType& super_type = Type::Handle();
super_type = cls.super_type();
if (!super_type.IsNull()) {
@ -117,7 +117,7 @@ class InterfaceFinder {
ScopedHandle<Class> current_class(&class_handles_);
ScopedHandle<AbstractType> type(&type_handles_);
*current_class = klass.raw();
*current_class = klass.ptr();
while (true) {
// We don't care about top types.
const intptr_t cid = current_class->id();
@ -361,8 +361,8 @@ void ClassFinalizer::CheckRecursiveType(const AbstractType& type,
String::Handle(pending_type.Name()).ToCString(),
pending_type.ToCString());
}
if ((pending_type.raw() != type.raw()) && pending_type.IsType() &&
(pending_type.type_class() == type_cls.raw())) {
if ((pending_type.ptr() != type.ptr()) && pending_type.IsType() &&
(pending_type.type_class() == type_cls.ptr())) {
pending_arguments = pending_type.arguments();
// By using TypeEquality::kInSubtypeTest, we throw a wider net than
// using canonical or syntactical equality and may reject more
@ -589,7 +589,7 @@ void ClassFinalizer::FinalizeTypeArguments(const Class& cls,
// While finalizing D<T>, the super type arg D<T> (a typeref) gets
// instantiated from vector [T], yielding itself.
if (super_type_arg.IsTypeRef() &&
(super_type_arg.arguments() == arguments.raw())) {
(super_type_arg.arguments() == arguments.ptr())) {
ASSERT(super_type_arg.IsBeingFinalized());
arguments.SetTypeAt(i, super_type_arg);
continue;
@ -607,7 +607,7 @@ void ClassFinalizer::FinalizeTypeArguments(const Class& cls,
unfinalized_type = TypeRef::Cast(super_type_arg).type();
} else {
ASSERT(super_type_arg.IsType());
unfinalized_type = super_type_arg.raw();
unfinalized_type = super_type_arg.ptr();
}
if (FLAG_trace_type_finalization) {
THR_Print("Instantiated unfinalized '%s': '%s'\n",
@ -660,19 +660,19 @@ AbstractTypePtr ClassFinalizer::FinalizeType(const AbstractType& type,
!type.IsBeingFinalized()) {
return type.Canonicalize(Thread::Current(), nullptr);
}
return type.raw();
return type.ptr();
}
if (type.IsTypeRef()) {
// The referenced type will be finalized later by the code that set the
// is_being_finalized mark bit.
return type.raw();
return type.ptr();
}
if (type.IsTypeParameter() && type.IsBeingFinalized()) {
// The base and index have already been adjusted, but the bound referring
// back to the type parameter is still being finalized.
return type.raw();
return type.ptr();
}
// Recursive types must be processed in FinalizeTypeArguments() and cannot be
@ -732,7 +732,7 @@ AbstractTypePtr ClassFinalizer::FinalizeType(const AbstractType& type,
if (finalization >= kCanonicalize) {
return type_parameter.Canonicalize(thread, nullptr);
}
return type_parameter.raw();
return type_parameter.ptr();
}
// If the type is a function type, we also need to finalize the types in its
@ -781,11 +781,11 @@ AbstractTypePtr ClassFinalizer::FinalizeType(const AbstractType& type,
AbstractType::Handle(zone, type.Canonicalize(thread, nullptr));
THR_Print("Done canonicalizing type '%s'\n",
String::Handle(zone, canonical_type.Name()).ToCString());
return canonical_type.raw();
return canonical_type.ptr();
}
return type.Canonicalize(thread, nullptr);
} else {
return type.raw();
return type.ptr();
}
}
@ -804,7 +804,7 @@ AbstractTypePtr ClassFinalizer::FinalizeSignature(Zone* zone,
for (intptr_t i = 0; i < num_type_params; i++) {
type_param ^= type_params.TypeAt(i);
finalized_type ^= FinalizeType(type_param, kFinalize, pending_types);
if (type_param.raw() != finalized_type.raw()) {
if (type_param.ptr() != finalized_type.ptr()) {
type_params.SetTypeAt(i, TypeParameter::Cast(finalized_type));
}
}
@ -813,7 +813,7 @@ AbstractTypePtr ClassFinalizer::FinalizeSignature(Zone* zone,
// Finalize result type.
type = signature.result_type();
finalized_type = FinalizeType(type, kFinalize, pending_types);
if (finalized_type.raw() != type.raw()) {
if (finalized_type.ptr() != type.ptr()) {
signature.set_result_type(finalized_type);
}
// Finalize formal parameter types.
@ -821,7 +821,7 @@ AbstractTypePtr ClassFinalizer::FinalizeSignature(Zone* zone,
for (intptr_t i = 0; i < num_parameters; i++) {
type = signature.ParameterTypeAt(i);
finalized_type = FinalizeType(type, kFinalize, pending_types);
if (type.raw() != finalized_type.raw()) {
if (type.ptr() != finalized_type.ptr()) {
signature.SetParameterTypeAt(i, finalized_type);
}
}
@ -835,7 +835,7 @@ AbstractTypePtr ClassFinalizer::FinalizeSignature(Zone* zone,
if (finalization >= kCanonicalize) {
return signature.Canonicalize(Thread::Current(), nullptr);
}
return signature.raw();
return signature.ptr();
}
#if defined(TARGET_ARCH_X64)
@ -930,7 +930,7 @@ static void MarkImplemented(Zone* zone, const Class& iface) {
return;
}
Class& cls = Class::Handle(zone, iface.raw());
Class& cls = Class::Handle(zone, iface.ptr());
AbstractType& type = AbstractType::Handle(zone);
while (!cls.is_implemented()) {
@ -967,7 +967,7 @@ void ClassFinalizer::FinalizeTypesInClass(const Class& cls) {
}
// Finalize type parameters before finalizing the super type.
FinalizeTypeParameters(cls, kFinalize);
ASSERT(super_class.raw() == cls.SuperClass()); // Not modified.
ASSERT(super_class.ptr() == cls.SuperClass()); // Not modified.
ASSERT(super_class.IsNull() || super_class.is_type_finalized());
FinalizeTypeParameters(cls, kCanonicalize);
// Finalize super type.
@ -1189,7 +1189,7 @@ void ClassFinalizer::AllocateEnumValues(const Class& enum_cls) {
for (intptr_t i = 0; i < fields.Length(); i++) {
field = Field::RawCast(fields.At(i));
if (!field.is_static() || !field.is_const() ||
(sentinel.raw() == field.raw())) {
(sentinel.ptr() == field.ptr())) {
continue;
}
// Hot-reload expects the static const fields to be evaluated when
@ -1405,25 +1405,25 @@ class CidRewriteVisitor : public ObjectVisitor {
void VisitObject(ObjectPtr obj) {
if (obj->IsClass()) {
ClassPtr cls = Class::RawCast(obj);
const classid_t old_cid = cls->ptr()->id_;
const classid_t old_cid = cls->untag()->id_;
if (ClassTable::IsTopLevelCid(old_cid)) {
// We don't remap cids of top level classes.
return;
}
cls->ptr()->id_ = Map(old_cid);
cls->untag()->id_ = Map(old_cid);
} else if (obj->IsField()) {
FieldPtr field = Field::RawCast(obj);
field->ptr()->guarded_cid_ = Map(field->ptr()->guarded_cid_);
field->ptr()->is_nullable_ = Map(field->ptr()->is_nullable_);
field->untag()->guarded_cid_ = Map(field->untag()->guarded_cid_);
field->untag()->is_nullable_ = Map(field->untag()->is_nullable_);
} else if (obj->IsTypeParameter()) {
TypeParameterPtr param = TypeParameter::RawCast(obj);
param->ptr()->parameterized_class_id_ =
Map(param->ptr()->parameterized_class_id_);
param->untag()->parameterized_class_id_ =
Map(param->untag()->parameterized_class_id_);
} else if (obj->IsType()) {
TypePtr type = Type::RawCast(obj);
ObjectPtr id = type->ptr()->type_class_id_;
ObjectPtr id = type->untag()->type_class_id_;
if (!id->IsHeapObject()) {
type->ptr()->type_class_id_ =
type->untag()->type_class_id_ =
Smi::New(Map(Smi::Value(Smi::RawCast(id))));
}
} else {
@ -1432,7 +1432,7 @@ class CidRewriteVisitor : public ObjectVisitor {
if (old_cid != new_cid) {
// Don't touch objects that are unchanged. In particular, Instructions,
// which are write-protected.
obj->ptr()->SetClassIdUnsynchronized(new_cid);
obj->untag()->SetClassIdUnsynchronized(new_cid);
}
}
}
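The visitor above rewrites every class id stored in classes, fields, type parameters and types through a single old-to-new table. A minimal sketch of that idea (not VM code), assuming a dense id space; FakeObject and RemapIds are hypothetical stand-ins:

#include <cstdint>
#include <vector>

struct FakeObject {
  uint32_t class_id;  // stands in for the id kept in the object header
};

// Rewrite every stored id through the old->new table; ids that did not move
// simply map to themselves, mirroring what CidRewriteVisitor does above.
void RemapIds(std::vector<FakeObject*>& heap,
              const std::vector<uint32_t>& old_to_new_cid) {
  for (FakeObject* obj : heap) {
    obj->class_id = old_to_new_cid[obj->class_id];
  }
}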
@ -1490,13 +1490,13 @@ void ClassFinalizer::RemapClassIds(intptr_t* old_to_new_cid) {
// In the Dart VM heap the following instances directly use cids for the
// computation of canonical hash codes:
//
// * TypePtr (due to TypeLayout::type_class_id_)
// * TypeParameterPtr (due to TypeParameterLayout::parameterized_class_id_)
// * TypePtr (due to UntaggedType::type_class_id_)
// * TypeParameterPtr (due to UntaggedTypeParameter::parameterized_class_id_)
//
// The following instances use cids for the computation of canonical hash codes
// indirectly:
//
// * TypeRefPtr (due to TypeRefLayout::type_->type_class_id)
// * TypeRefPtr (due to UntaggedTypeRef::type_->type_class_id)
// * TypePtr (due to type arguments)
// * FunctionTypePtr (due to the result and parameter types)
// * TypeArgumentsPtr (due to type references)
@ -1505,17 +1505,17 @@ void ClassFinalizer::RemapClassIds(intptr_t* old_to_new_cid) {
//
// Caching of the canonical hash codes happens for:
//
// * TypeLayout::hash_
// * FunctionTypeLayout::hash_
// * TypeParameterLayout::hash_
// * TypeArgumentsLayout::hash_
// * RawInstance (weak table)
// * RawArray (weak table)
// * UntaggedType::hash_
// * UntaggedFunctionType::hash_
// * UntaggedTypeParameter::hash_
// * UntaggedTypeArguments::hash_
// * InstancePtr (weak table)
// * ArrayPtr (weak table)
//
// No caching of canonical hash codes (i.e. it gets re-computed every time)
// happens for:
//
// * TypeRefPtr (computed via TypeRefLayout::type_->type_class_id)
// * TypeRefPtr (computed via UntaggedTypeRef::type_->type_class_id)
//
// Usages of canonical hash codes are:
//

View file

@ -20,7 +20,7 @@ static ClassPtr CreateTestClass(const char* name) {
SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
cls.SetFunctions(Object::empty_array());
cls.SetFields(Object::empty_array());
return cls.raw();
return cls.ptr();
}
ISOLATE_UNIT_TEST_CASE(ClassFinalizer) {

View file

@ -14,7 +14,7 @@
namespace dart {
// Size of the class-id part of the object header. See ObjectLayout.
// Size of the class-id part of the object header. See UntaggedObject.
typedef uint16_t ClassIdTagType;
#define CLASS_LIST_NO_OBJECT_NOR_STRING_NOR_ARRAY(V) \

View file

@ -160,7 +160,7 @@ void ClassTable::Register(const Class& cls) {
// parallel to [ClassTable].
const intptr_t instance_size =
cls.is_abstract() ? 0 : Class::host_instance_size(cls.raw());
cls.is_abstract() ? 0 : Class::host_instance_size(cls.ptr());
const intptr_t expected_cid =
shared_class_table_->Register(cid, instance_size);
@ -168,7 +168,7 @@ void ClassTable::Register(const Class& cls) {
if (cid != kIllegalCid) {
ASSERT(cid > 0 && cid < kNumPredefinedCids && cid < top_);
ASSERT(table_.load()[cid] == nullptr);
table_.load()[cid] = cls.raw();
table_.load()[cid] = cls.ptr();
} else {
if (top_ == capacity_) {
const intptr_t new_capacity = capacity_ + kCapacityIncrement;
@ -176,7 +176,7 @@ void ClassTable::Register(const Class& cls) {
}
ASSERT(top_ < capacity_);
cls.set_id(top_);
table_.load()[top_] = cls.raw();
table_.load()[top_] = cls.ptr();
top_++; // Increment next index.
}
ASSERT(expected_cid == cls.id());
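Register() above has two paths: predefined cids land in their fixed slots, and everything else takes the next free index, growing the table by a fixed increment when full. A minimal sketch of the grow-and-append path only, using a hypothetical SimpleClassTable rather than the VM's ClassTable:

#include <cstddef>
#include <vector>

class SimpleClassTable {
 public:
  // Returns the id assigned to the newly registered class.
  std::size_t Register(const void* cls) {
    if (top_ == table_.size()) {
      table_.resize(table_.size() + kCapacityIncrement, nullptr);  // grow
    }
    table_[top_] = cls;
    return top_++;
  }

 private:
  static constexpr std::size_t kCapacityIncrement = 256;  // arbitrary here
  std::size_t top_ = 0;
  std::vector<const void*> table_;
};

The real table also maintains a shared size table and a separate top-level-class table, which the sketch omits.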
@ -200,7 +200,7 @@ void ClassTable::RegisterTopLevel(const Class& cls) {
}
ASSERT(tlc_top_ < tlc_capacity_);
cls.set_id(ClassTable::CidFromTopLevelIndex(tlc_top_));
tlc_table_.load()[tlc_top_] = cls.raw();
tlc_table_.load()[tlc_top_] = cls.ptr();
tlc_top_++; // Increment next index.
}
@ -496,7 +496,7 @@ void ClassTable::Print() {
continue;
}
cls = At(i);
if (cls.raw() != nullptr) {
if (cls.ptr() != nullptr) {
name = cls.Name();
OS::PrintErr("%" Pd ": %s\n", i, name.ToCString());
}

View file

@ -34,8 +34,8 @@ FunctionPtr ClosureFunctionsCache::LookupClosureFunctionLocked(
intptr_t num_closures = closures.Length();
for (intptr_t i = 0; i < num_closures; i++) {
closure ^= closures.At(i);
if (closure.token_pos() == token_pos && closure.Owner() == owner.raw()) {
return closure.raw();
if (closure.token_pos() == token_pos && closure.Owner() == owner.ptr()) {
return closure.ptr();
}
}
return Function::null();
@ -66,8 +66,8 @@ FunctionPtr ClosureFunctionsCache::LookupClosureFunctionLocked(
for (intptr_t i = 0; i < num_closures; i++) {
closure ^= closures.At(i);
if (closure.token_pos() == token_pos &&
closure.parent_function() == parent.raw()) {
return closure.raw();
closure.parent_function() == parent.ptr()) {
return closure.ptr();
}
}
return Function::null();
@ -101,7 +101,7 @@ intptr_t ClosureFunctionsCache::FindClosureIndex(const Function& needle) {
GrowableObjectArray::Handle(zone, object_store->closure_functions());
intptr_t num_closures = closures_array.Length();
for (intptr_t i = 0; i < num_closures; i++) {
if (closures_array.At(i) == needle.raw()) {
if (closures_array.At(i) == needle.ptr()) {
return i;
}
}
@ -136,15 +136,15 @@ FunctionPtr ClosureFunctionsCache::GetUniqueInnerClosure(
auto& entry = Function::Handle(zone);
for (intptr_t i = (closures.Length() - 1); i >= 0; i--) {
entry ^= closures.At(i);
if (entry.parent_function() == outer.raw()) {
if (entry.parent_function() == outer.ptr()) {
#if defined(DEBUG)
auto& other = Function::Handle(zone);
for (intptr_t j = i - 1; j >= 0; j--) {
other ^= closures.At(j);
ASSERT(other.parent_function() != outer.raw());
ASSERT(other.parent_function() != outer.ptr());
}
#endif
return entry.raw();
return entry.ptr();
}
}
return Function::null();

File diff suppressed because it is too large


View file

@ -327,18 +327,18 @@ class Serializer : public ThreadStackResource {
template <typename T, typename... P>
void WriteFromTo(T obj, P&&... args) {
ObjectPtr* from = obj->ptr()->from();
ObjectPtr* to = obj->ptr()->to_snapshot(kind(), args...);
ObjectPtr* from = obj->untag()->from();
ObjectPtr* to = obj->untag()->to_snapshot(kind(), args...);
for (ObjectPtr* p = from; p <= to; p++) {
WriteOffsetRef(*p, (p - reinterpret_cast<ObjectPtr*>(obj->ptr())) *
WriteOffsetRef(*p, (p - reinterpret_cast<ObjectPtr*>(obj->untag())) *
sizeof(ObjectPtr));
}
}
template <typename T, typename... P>
void PushFromTo(T obj, P&&... args) {
ObjectPtr* from = obj->ptr()->from();
ObjectPtr* to = obj->ptr()->to_snapshot(kind(), args...);
ObjectPtr* from = obj->untag()->from();
ObjectPtr* to = obj->untag()->to_snapshot(kind(), args...);
for (ObjectPtr* p = from; p <= to; p++) {
Push(*p);
}
@ -347,7 +347,7 @@ class Serializer : public ThreadStackResource {
void WriteTokenPosition(TokenPosition pos) { Write(pos.Serialize()); }
void WriteCid(intptr_t cid) {
COMPILE_ASSERT(ObjectLayout::kClassIdTagSize <= 32);
COMPILE_ASSERT(UntaggedObject::kClassIdTagSize <= 32);
Write<int32_t>(cid);
}
@ -487,7 +487,7 @@ class Serializer : public ThreadStackResource {
#define PushFromTo(obj, ...) s->PushFromTo(obj, ##__VA_ARGS__);
#define WriteField(obj, field) s->WritePropertyRef(obj->ptr()->field, #field)
#define WriteField(obj, field) s->WritePropertyRef(obj->untag()->field, #field)
class SerializerWritingObjectScope {
public:
@ -598,23 +598,23 @@ class Deserializer : public ThreadStackResource {
void AssignRef(ObjectPtr object) {
ASSERT(next_ref_index_ <= num_objects_);
refs_->ptr()->data()[next_ref_index_] = object;
refs_->untag()->data()[next_ref_index_] = object;
next_ref_index_++;
}
ObjectPtr Ref(intptr_t index) const {
ASSERT(index > 0);
ASSERT(index <= num_objects_);
return refs_->ptr()->data()[index];
return refs_->untag()->data()[index];
}
ObjectPtr ReadRef() { return Ref(ReadUnsigned()); }
template <typename T, typename... P>
void ReadFromTo(T obj, P&&... params) {
ObjectPtr* from = obj->ptr()->from();
ObjectPtr* to_snapshot = obj->ptr()->to_snapshot(kind(), params...);
ObjectPtr* to = obj->ptr()->to(params...);
ObjectPtr* from = obj->untag()->from();
ObjectPtr* to_snapshot = obj->untag()->to_snapshot(kind(), params...);
ObjectPtr* to = obj->untag()->to(params...);
for (ObjectPtr* p = from; p <= to_snapshot; p++) {
*p = ReadRef();
}
@ -632,7 +632,7 @@ class Deserializer : public ThreadStackResource {
}
intptr_t ReadCid() {
COMPILE_ASSERT(ObjectLayout::kClassIdTagSize <= 32);
COMPILE_ASSERT(UntaggedObject::kClassIdTagSize <= 32);
return Read<int32_t>();
}
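WriteFromTo, PushFromTo and ReadFromTo above all rely on the same idiom: an untagged object exposes its first and last pointer slots, and the (de)serializer walks them as one contiguous range. A minimal sketch of that walk under the same layout assumption; FakeUntagged and VisitSlots are made-up names, not VM types:

struct FakeUntagged {
  void* slots_[3];
  void** from() { return &slots_[0]; }  // first pointer slot
  void** to() { return &slots_[2]; }    // last pointer slot
};

// Visit every slot between from() and to(), inclusive: the serializer writes
// a ref per slot, the deserializer reads one back into the same position.
template <typename Visitor>
void VisitSlots(FakeUntagged* obj, Visitor&& visit) {
  for (void** p = obj->from(); p <= obj->to(); ++p) {
    visit(p);
  }
}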

View file

@ -20,7 +20,7 @@ DescriptorList::DescriptorList(
: function_(Function::Handle(
zone,
FLAG_check_token_positions && (inline_id_to_function != nullptr)
? inline_id_to_function->At(0)->raw()
? inline_id_to_function->At(0)->ptr()
: Function::null())),
script_(Script::Handle(
zone,
@ -30,7 +30,7 @@ DescriptorList::DescriptorList(
prev_deopt_id(0),
prev_token_pos(0) {}
void DescriptorList::AddDescriptor(PcDescriptorsLayout::Kind kind,
void DescriptorList::AddDescriptor(UntaggedPcDescriptors::Kind kind,
intptr_t pc_offset,
intptr_t deopt_id,
const TokenPosition token_pos,
@ -39,20 +39,20 @@ void DescriptorList::AddDescriptor(PcDescriptorsLayout::Kind kind,
// yield index 0 is reserved for normal entry.
RELEASE_ASSERT(yield_index != 0);
ASSERT((kind == PcDescriptorsLayout::kRuntimeCall) ||
(kind == PcDescriptorsLayout::kBSSRelocation) ||
(kind == PcDescriptorsLayout::kOther) ||
(yield_index != PcDescriptorsLayout::kInvalidYieldIndex) ||
ASSERT((kind == UntaggedPcDescriptors::kRuntimeCall) ||
(kind == UntaggedPcDescriptors::kBSSRelocation) ||
(kind == UntaggedPcDescriptors::kOther) ||
(yield_index != UntaggedPcDescriptors::kInvalidYieldIndex) ||
(deopt_id != DeoptId::kNone));
// When precompiling, we only use pc descriptors for exceptions,
// relocations and yield indices.
if (!FLAG_precompiled_mode || try_index != -1 ||
yield_index != PcDescriptorsLayout::kInvalidYieldIndex ||
kind == PcDescriptorsLayout::kBSSRelocation) {
yield_index != UntaggedPcDescriptors::kInvalidYieldIndex ||
kind == UntaggedPcDescriptors::kBSSRelocation) {
const int32_t kind_and_metadata =
PcDescriptorsLayout::KindAndMetadata::Encode(kind, try_index,
yield_index);
UntaggedPcDescriptors::KindAndMetadata::Encode(kind, try_index,
yield_index);
encoded_data_.WriteSLEB128(kind_and_metadata);
encoded_data_.WriteSLEB128(pc_offset - prev_pc_offset);
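The two WriteSLEB128 calls above capture the encoding: a kind/metadata word plus a pc value written as a delta against the previous descriptor, each emitted as a signed LEB128. A minimal sketch of that encoding, writing into a plain byte vector as a stand-in for the VM's buffered stream:

#include <cstdint>
#include <vector>

// Signed LEB128: emit 7 bits at a time, low bits first, setting the top bit
// on every byte except the last. Small deltas fit in a single byte.
void WriteSLEB128(std::vector<uint8_t>* out, int32_t value) {
  bool more = true;
  while (more) {
    uint8_t byte = value & 0x7f;
    value >>= 7;  // arithmetic shift keeps the sign bits
    more = !((value == 0 && (byte & 0x40) == 0) ||
             (value == -1 && (byte & 0x40) != 0));
    if (more) byte |= 0x80;
    out->push_back(byte);
  }
}

Writing pc_offset - prev_pc_offset rather than the absolute offset is what keeps most entries down to one or two bytes.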
@ -65,17 +65,18 @@ void DescriptorList::AddDescriptor(PcDescriptorsLayout::Kind kind,
function_.end_token_pos())) {
FATAL("Token position %s for PC descriptor %s at offset 0x%" Px
" invalid for function %s (%s, %s)",
token_pos.ToCString(), PcDescriptorsLayout::KindToCString(kind),
pc_offset, function_.ToFullyQualifiedCString(),
token_pos.ToCString(),
UntaggedPcDescriptors::KindToCString(kind), pc_offset,
function_.ToFullyQualifiedCString(),
function_.token_pos().ToCString(),
function_.end_token_pos().ToCString());
}
if (!script_.IsNull() && !script_.IsValidTokenPosition(token_pos)) {
FATAL("Token position %s for PC descriptor %s at offset 0x%" Px
" invalid for script %s of function %s",
token_pos.ToCString(), PcDescriptorsLayout::KindToCString(kind),
pc_offset, script_.ToCString(),
function_.ToFullyQualifiedCString());
token_pos.ToCString(),
UntaggedPcDescriptors::KindToCString(kind), pc_offset,
script_.ToCString(), function_.ToFullyQualifiedCString());
}
}
const int32_t encoded_pos = token_pos.Serialize();
@ -90,7 +91,7 @@ void DescriptorList::AddDescriptor(PcDescriptorsLayout::Kind kind,
PcDescriptorsPtr DescriptorList::FinalizePcDescriptors(uword entry_point) {
if (encoded_data_.bytes_written() == 0) {
return Object::empty_descriptors().raw();
return Object::empty_descriptors().ptr();
}
return PcDescriptors::New(encoded_data_.buffer(),
encoded_data_.bytes_written());
@ -114,7 +115,7 @@ void CompressedStackMapsBuilder::AddEntry(intptr_t pc_offset,
CompressedStackMapsPtr CompressedStackMapsBuilder::Finalize() const {
if (encoded_bytes_.bytes_written() == 0) {
return Object::empty_compressed_stackmaps().raw();
return Object::empty_compressed_stackmaps().ptr();
}
return CompressedStackMaps::NewInlined(encoded_bytes_.buffer(),
encoded_bytes_.bytes_written());
@ -124,7 +125,7 @@ ExceptionHandlersPtr ExceptionHandlerList::FinalizeExceptionHandlers(
uword entry_point) const {
intptr_t num_handlers = Length();
if (num_handlers == 0) {
return Object::empty_exception_handlers().raw();
return Object::empty_exception_handlers().ptr();
}
const ExceptionHandlers& handlers =
ExceptionHandlers::Handle(ExceptionHandlers::New(num_handlers));
@ -149,7 +150,7 @@ ExceptionHandlersPtr ExceptionHandlerList::FinalizeExceptionHandlers(
handlers.SetHandledTypes(i, *list_[i].handler_types);
}
}
return handlers.raw();
return handlers.ptr();
}
#if !defined(DART_PRECOMPILED_RUNTIME)
@ -233,7 +234,7 @@ TypedDataPtr CatchEntryMovesMapBuilder::FinalizeCatchEntryMovesMap() {
for (intptr_t i = 0; i < stream_.bytes_written(); i++) {
dest[i] = src[i];
}
return td.raw();
return td.ptr();
}
#endif // !defined(DART_PRECOMPILED_RUNTIME)
@ -437,12 +438,12 @@ void CodeSourceMapBuilder::EndCodeSourceRange(int32_t pc_offset,
BufferAdvancePC(pc_offset - buffered_pc_offset_);
}
void CodeSourceMapBuilder::NoteDescriptor(PcDescriptorsLayout::Kind kind,
void CodeSourceMapBuilder::NoteDescriptor(UntaggedPcDescriptors::Kind kind,
int32_t pc_offset,
const InstructionSource& source) {
const uint8_t kCanThrow =
PcDescriptorsLayout::kIcCall | PcDescriptorsLayout::kUnoptStaticCall |
PcDescriptorsLayout::kRuntimeCall | PcDescriptorsLayout::kOther;
UntaggedPcDescriptors::kIcCall | UntaggedPcDescriptors::kUnoptStaticCall |
UntaggedPcDescriptors::kRuntimeCall | UntaggedPcDescriptors::kOther;
if ((kind & kCanThrow) != 0) {
StartInliningInterval(pc_offset, source);
BufferChangePosition(source.token_pos);
@ -464,7 +465,7 @@ void CodeSourceMapBuilder::NoteNullCheck(int32_t pc_offset,
intptr_t CodeSourceMapBuilder::GetFunctionId(intptr_t inline_id) {
const Function& function = *inline_id_to_function_[inline_id];
for (intptr_t i = 0; i < inlined_functions_.Length(); i++) {
if (inlined_functions_.At(i) == function.raw()) {
if (inlined_functions_.At(i) == function.ptr()) {
return i;
}
}
@ -486,7 +487,7 @@ TokenPosition CodeSourceMapBuilder::RootPosition(
ArrayPtr CodeSourceMapBuilder::InliningIdToFunction() {
if (inlined_functions_.Length() == 0) {
return Object::empty_array().raw();
return Object::empty_array().ptr();
}
return Array::MakeFixedLength(inlined_functions_);
}
@ -499,7 +500,7 @@ CodeSourceMapPtr CodeSourceMapBuilder::Finalize() {
const auto& map = CodeSourceMap::Handle(zone_, CodeSourceMap::New(length));
NoSafepointScope no_safepoint;
memmove(map.Data(), stream_.buffer(), length);
return map.raw();
return map.ptr();
}
void CodeSourceMapBuilder::BufferChangePosition(TokenPosition pos) {

View file

@ -24,7 +24,7 @@ class DescriptorList : public ZoneAllocated {
~DescriptorList() {}
void AddDescriptor(PcDescriptorsLayout::Kind kind,
void AddDescriptor(UntaggedPcDescriptors::Kind kind,
intptr_t pc_offset,
intptr_t deopt_id,
TokenPosition token_pos,
@ -240,7 +240,7 @@ class CodeSourceMapBuilder : public ZoneAllocated {
void BeginCodeSourceRange(int32_t pc_offset, const InstructionSource& source);
void EndCodeSourceRange(int32_t pc_offset, const InstructionSource& source);
void NoteDescriptor(PcDescriptorsLayout::Kind kind,
void NoteDescriptor(UntaggedPcDescriptors::Kind kind,
int32_t pc_offset,
const InstructionSource& source);
void NoteNullCheck(int32_t pc_offset,

View file

@ -99,7 +99,7 @@ TEST_CASE(StackMapGC) {
PcDescriptors::Handle(code.pc_descriptors());
int call_count = 0;
PcDescriptors::Iterator iter(descriptors,
PcDescriptorsLayout::kUnoptStaticCall);
UntaggedPcDescriptors::kUnoptStaticCall);
CompressedStackMapsBuilder compressed_maps_builder(thread->zone());
while (iter.MoveNext()) {
compressed_maps_builder.AddEntry(iter.PcOffset(), stack_bitmap, 0);
@ -145,7 +145,7 @@ ISOLATE_UNIT_TEST_CASE(DescriptorList_TokenPositions) {
for (intptr_t i = 0; i < num_token_positions; i++) {
const TokenPosition& tp = TokenPosition::Deserialize(token_positions[i]);
descriptors->AddDescriptor(PcDescriptorsLayout::kRuntimeCall, 0, 0, tp, 0,
descriptors->AddDescriptor(UntaggedPcDescriptors::kRuntimeCall, 0, 0, tp, 0,
1);
}
@ -154,7 +154,7 @@ ISOLATE_UNIT_TEST_CASE(DescriptorList_TokenPositions) {
ASSERT(!finalized_descriptors.IsNull());
PcDescriptors::Iterator it(finalized_descriptors,
PcDescriptorsLayout::kRuntimeCall);
UntaggedPcDescriptors::kRuntimeCall);
intptr_t i = 0;
while (it.MoveNext()) {

View file

@ -73,7 +73,7 @@ FunctionPtr CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
ICData& ic_data = ICData::Handle();
ic_data ^= static_call.Data();
if (ic_data_result != NULL) {
*ic_data_result = ic_data.raw();
*ic_data_result = ic_data.ptr();
}
return ic_data.GetTargetAt(0);
}

View file

@ -109,7 +109,7 @@ FunctionPtr CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
ICData& ic_data = ICData::Handle();
ic_data ^= static_call.Data();
if (ic_data_result != NULL) {
*ic_data_result = ic_data.raw();
*ic_data_result = ic_data.ptr();
}
return ic_data.GetTargetAt(0);
}

View file

@ -30,7 +30,7 @@ ASSEMBLER_TEST_GENERATE(IcDataAccess, assembler) {
String::Handle(Symbols::New(thread, "callerFunction"));
const FunctionType& signature = FunctionType::ZoneHandle(FunctionType::New());
const Function& function = Function::Handle(Function::New(
signature, function_name, FunctionLayout::kRegularFunction, true, false,
signature, function_name, UntaggedFunction::kRegularFunction, true, false,
false, false, false, owner_class, TokenPosition::kNoSource));
const String& target_name = String::Handle(String::New("targetFunction"));

View file

@ -30,7 +30,7 @@ ASSEMBLER_TEST_GENERATE(IcDataAccess, assembler) {
String::Handle(Symbols::New(thread, "callerFunction"));
const FunctionType& signature = FunctionType::ZoneHandle(FunctionType::New());
const Function& function = Function::Handle(Function::New(
signature, function_name, FunctionLayout::kRegularFunction, true, false,
signature, function_name, UntaggedFunction::kRegularFunction, true, false,
false, false, false, owner_class, TokenPosition::kNoSource));
const String& target_name = String::Handle(String::New("targetFunction"));

View file

@ -94,14 +94,14 @@ class InstanceCall : public UnoptimizedCall {
return LoadUnaligned(reinterpret_cast<ObjectPtr*>(start_ + 1));
}
void set_data(const Object& data) const {
StoreUnaligned(reinterpret_cast<ObjectPtr*>(start_ + 1), data.raw());
StoreUnaligned(reinterpret_cast<ObjectPtr*>(start_ + 1), data.ptr());
}
CodePtr target() const {
return LoadUnaligned(reinterpret_cast<CodePtr*>(start_ + 6));
}
void set_target(const Code& target) const {
StoreUnaligned(reinterpret_cast<CodePtr*>(start_ + 6), target.raw());
StoreUnaligned(reinterpret_cast<CodePtr*>(start_ + 6), target.ptr());
}
private:
@ -147,7 +147,7 @@ class StaticCall : public ValueObject {
void set_target(const Code& target) const {
uword* target_addr = reinterpret_cast<uword*>(start_ + 1);
uword imm = static_cast<uword>(target.raw());
uword imm = static_cast<uword>(target.ptr());
*target_addr = imm;
CPU::FlushICache(start_ + 1, sizeof(imm));
}
@ -238,7 +238,7 @@ FunctionPtr CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
ICData& ic_data = ICData::Handle();
ic_data ^= static_call.ic_data();
if (ic_data_result != NULL) {
*ic_data_result = ic_data.raw();
*ic_data_result = ic_data.ptr();
}
return ic_data.GetTargetAt(0);
}

View file

@ -30,7 +30,7 @@ ASSEMBLER_TEST_GENERATE(IcDataAccess, assembler) {
String::Handle(Symbols::New(thread, "callerFunction"));
const FunctionType& signature = FunctionType::ZoneHandle(FunctionType::New());
const Function& function = Function::Handle(Function::New(
signature, function_name, FunctionLayout::kRegularFunction, true, false,
signature, function_name, UntaggedFunction::kRegularFunction, true, false,
false, false, false, owner_class, TokenPosition::kNoSource));
const String& target_name = String::Handle(String::New("targetFunction"));

View file

@ -78,7 +78,7 @@ class UnoptimizedCall : public ValueObject {
CodePtr target() const {
Code& code = Code::Handle();
code ^= object_pool_.ObjectAt(code_index_);
return code.raw();
return code.ptr();
}
void set_target(const Code& target) const {
@ -197,7 +197,7 @@ class PoolPointerCall : public ValueObject {
CodePtr Target() const {
Code& code = Code::Handle();
code ^= object_pool_.ObjectAt(code_index_);
return code.raw();
return code.ptr();
}
void SetTarget(const Code& target) const {
@ -476,7 +476,7 @@ FunctionPtr CodePatcher::GetUnoptimizedStaticCallAt(uword return_address,
ICData& ic_data = ICData::Handle();
ic_data ^= static_call.ic_data();
if (ic_data_result != NULL) {
*ic_data_result = ic_data.raw();
*ic_data_result = ic_data.ptr();
}
return ic_data.GetTargetAt(0);
}

View file

@ -30,7 +30,7 @@ ASSEMBLER_TEST_GENERATE(IcDataAccess, assembler) {
String::Handle(Symbols::New(thread, "callerFunction"));
const FunctionType& signature = FunctionType::ZoneHandle(FunctionType::New());
const Function& function = Function::Handle(Function::New(
signature, function_name, FunctionLayout::kRegularFunction, true, false,
signature, function_name, UntaggedFunction::kRegularFunction, true, false,
false, false, false, owner_class, TokenPosition::kNoSource));
const String& target_name = String::Handle(String::New("targetFunction"));

View file

@ -105,7 +105,7 @@ ObjectPtr CompilationTraceLoader::CompileTrace(uint8_t* buffer, intptr_t size) {
*newline = 0;
error_ = CompileTriple(uri, cls_name, func_name);
if (error_.IsError()) {
return error_.raw();
return error_.ptr();
}
cursor = newline + 1;
}
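The loop above consumes the trace one newline-terminated line at a time and hands each line to CompileTriple as a uri/class/function triple. A minimal sketch of that parsing, assuming a comma-separated "uri,class,function" layout per line (the field splitting itself is not shown in this hunk); ParseTrace and Triple are hypothetical names:

#include <sstream>
#include <string>
#include <vector>

struct Triple {
  std::string uri, cls, func;
};

// Split the buffer into lines, then each line into its three fields.
std::vector<Triple> ParseTrace(const std::string& text) {
  std::vector<Triple> result;
  std::istringstream lines(text);
  std::string line;
  while (std::getline(lines, line)) {
    if (line.empty()) continue;
    std::istringstream fields(line);
    Triple t;
    std::getline(fields, t.uri, ',');
    std::getline(fields, t.cls, ',');
    std::getline(fields, t.func, ',');
    result.push_back(t);
  }
  return result;
}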
@ -125,10 +125,10 @@ ObjectPtr CompilationTraceLoader::CompileTrace(uint8_t* buffer, intptr_t size) {
arguments_descriptor = ArgumentsDescriptor::NewBoxed(kTypeArgsLen, argc);
dispatcher = closure_class.GetInvocationDispatcher(
Symbols::Call(), arguments_descriptor,
FunctionLayout::kInvokeFieldDispatcher, true /* create_if_absent */);
UntaggedFunction::kInvokeFieldDispatcher, true /* create_if_absent */);
error_ = CompileFunction(dispatcher);
if (error_.IsError()) {
return error_.raw();
return error_.ptr();
}
}
@ -141,7 +141,7 @@ ObjectPtr CompilationTraceLoader::CompileTrace(uint8_t* buffer, intptr_t size) {
if (function2_.HasCode()) {
result = CompileFunction(function_);
if (result.IsError()) {
error_ = result.raw();
error_ = result.ptr();
return false; // Stop iteration.
}
}
@ -239,7 +239,7 @@ ObjectPtr CompilationTraceLoader::CompileTriple(const char* uri_cstr,
function_name_.ToCString(),
Error::Cast(error_).ToErrorCString());
}
return error_.raw();
return error_.ptr();
}
function_ = cls_.LookupFunctionAllowPrivate(function_name_);
@ -265,7 +265,7 @@ ObjectPtr CompilationTraceLoader::CompileTriple(const char* uri_cstr,
class_name_.ToCString(), function_name_.ToCString(),
Error::Cast(error_).ToErrorCString());
}
return error_.raw();
return error_.ptr();
}
}
}
@ -281,7 +281,7 @@ ObjectPtr CompilationTraceLoader::CompileTriple(const char* uri_cstr,
}
if (!field_.IsNull() && field_.is_const() && field_.is_static() &&
(field_.StaticValue() == Object::sentinel().raw())) {
(field_.StaticValue() == Object::sentinel().ptr())) {
processed = true;
error_ = field_.InitializeStatic();
if (error_.IsError()) {
@ -292,7 +292,7 @@ ObjectPtr CompilationTraceLoader::CompileTriple(const char* uri_cstr,
field_.ToCString(), uri_.ToCString(), class_name_.ToCString(),
function_name_.ToCString(), Error::Cast(error_).ToErrorCString());
}
return error_.raw();
return error_.ptr();
}
}
@ -306,7 +306,7 @@ ObjectPtr CompilationTraceLoader::CompileTriple(const char* uri_cstr,
function_name_.ToCString(),
Error::Cast(error_).ToErrorCString());
}
return error_.raw();
return error_.ptr();
}
if (add_closure) {
function_ = function_.ImplicitClosureFunction();
@ -318,7 +318,7 @@ ObjectPtr CompilationTraceLoader::CompileTriple(const char* uri_cstr,
uri_.ToCString(), class_name_.ToCString(),
function_name_.ToCString(), Error::Cast(error_).ToErrorCString());
}
return error_.raw();
return error_.ptr();
}
} else if (is_dyn) {
function_name_ = function_.name(); // With private mangling.
@ -334,7 +334,7 @@ ObjectPtr CompilationTraceLoader::CompileTriple(const char* uri_cstr,
uri_.ToCString(), class_name_.ToCString(),
function_name_.ToCString(), Error::Cast(error_).ToErrorCString());
}
return error_.raw();
return error_.ptr();
}
}
}
@ -351,7 +351,7 @@ ObjectPtr CompilationTraceLoader::CompileTriple(const char* uri_cstr,
uri_.ToCString(), class_name_.ToCString(),
function_name_.ToCString(), Error::Cast(error_).ToErrorCString());
}
return error_.raw();
return error_.ptr();
}
}
@ -371,12 +371,12 @@ ObjectPtr CompilationTraceLoader::CompileFunction(const Function& function) {
error_ = Compiler::CompileFunction(thread_, function);
if (error_.IsError()) {
return error_.raw();
return error_.ptr();
}
SpeculateInstanceCallTargets(function);
return error_.raw();
return error_.ptr();
}
// For instance calls, if the receiver's static type has one concrete
@ -536,7 +536,7 @@ void TypeFeedbackSaver::VisitFunction(const Function& function) {
call_sites_ = function.ic_data_array();
if (call_sites_.IsNull()) {
call_sites_ = Object::empty_array().raw(); // Remove edge case.
call_sites_ = Object::empty_array().ptr(); // Remove edge case.
}
// First element is edge counters.
@ -618,23 +618,23 @@ ObjectPtr TypeFeedbackLoader::LoadFeedback(ReadStream* stream) {
error_ = CheckHeader();
if (error_.IsError()) {
return error_.raw();
return error_.ptr();
}
error_ = LoadClasses();
if (error_.IsError()) {
return error_.raw();
return error_.ptr();
}
error_ = LoadFields();
if (error_.IsError()) {
return error_.raw();
return error_.ptr();
}
while (stream_->PendingBytes() > 0) {
error_ = LoadFunction();
if (error_.IsError()) {
return error_.raw();
return error_.ptr();
}
}
@ -645,7 +645,7 @@ ObjectPtr TypeFeedbackLoader::LoadFeedback(ReadStream* stream) {
(func_.usage_counter() >= FLAG_optimization_counter_threshold)) {
error_ = Compiler::CompileOptimizedFunction(thread_, func_);
if (error_.IsError()) {
return error_.raw();
return error_.ptr();
}
}
}
@ -741,7 +741,7 @@ ObjectPtr TypeFeedbackLoader::LoadFields() {
if (!skip && (num_fields > 0)) {
error_ = cls_.EnsureIsFinalized(thread_);
if (error_.IsError()) {
return error_.raw();
return error_.ptr();
}
fields_ = cls_.fields();
}
@ -809,14 +809,14 @@ ObjectPtr TypeFeedbackLoader::LoadFunction() {
if (!cls_.IsNull()) {
error_ = cls_.EnsureIsFinalized(thread_);
if (error_.IsError()) {
return error_.raw();
return error_.ptr();
}
} else {
skip = true;
}
func_name_ = ReadString(); // Without private mangling.
FunctionLayout::Kind kind = static_cast<FunctionLayout::Kind>(ReadInt());
UntaggedFunction::Kind kind = static_cast<UntaggedFunction::Kind>(ReadInt());
const TokenPosition& token_pos = TokenPosition::Deserialize(ReadInt());
intptr_t usage = ReadInt();
intptr_t inlining_depth = ReadInt();
@ -836,11 +836,11 @@ ObjectPtr TypeFeedbackLoader::LoadFunction() {
if (!skip) {
error_ = Compiler::CompileFunction(thread_, func_);
if (error_.IsError()) {
return error_.raw();
return error_.ptr();
}
call_sites_ = func_.ic_data_array();
if (call_sites_.IsNull()) {
call_sites_ = Object::empty_array().raw(); // Remove edge case.
call_sites_ = Object::empty_array().ptr(); // Remove edge case.
}
if (call_sites_.Length() != num_call_sites + 1) {
skip = true;
@ -928,7 +928,7 @@ ObjectPtr TypeFeedbackLoader::LoadFunction() {
return Error::null();
}
FunctionPtr TypeFeedbackLoader::FindFunction(FunctionLayout::Kind kind,
FunctionPtr TypeFeedbackLoader::FindFunction(UntaggedFunction::Kind kind,
const TokenPosition& token_pos) {
if (cls_name_.Equals(Symbols::TopLevel())) {
func_ = lib_.LookupFunctionAllowPrivate(func_name_);
@ -938,7 +938,7 @@ FunctionPtr TypeFeedbackLoader::FindFunction(FunctionLayout::Kind kind,
if (!func_.IsNull()) {
// Found regular method.
} else if (kind == FunctionLayout::kMethodExtractor) {
} else if (kind == UntaggedFunction::kMethodExtractor) {
ASSERT(Field::IsGetterName(func_name_));
// Without private mangling:
String& name = String::Handle(zone_, Field::NameFromGetter(func_name_));
@ -950,7 +950,7 @@ FunctionPtr TypeFeedbackLoader::FindFunction(FunctionLayout::Kind kind,
} else {
func_ = Function::null();
}
} else if (kind == FunctionLayout::kDynamicInvocationForwarder) {
} else if (kind == UntaggedFunction::kDynamicInvocationForwarder) {
// Without private mangling:
String& name = String::Handle(
zone_, Function::DemangleDynamicInvocationForwarderName(func_name_));
@ -962,7 +962,7 @@ FunctionPtr TypeFeedbackLoader::FindFunction(FunctionLayout::Kind kind,
} else {
func_ = Function::null();
}
} else if (kind == FunctionLayout::kClosureFunction) {
} else if (kind == UntaggedFunction::kClosureFunction) {
// Note this lookup relies on parent functions appearing before child
// functions in the serialized feedback, so the parent will have already
// been compiled unoptimized and the child function created and added to
@ -976,7 +976,7 @@ FunctionPtr TypeFeedbackLoader::FindFunction(FunctionLayout::Kind kind,
}
if (!func_.IsNull()) {
if (kind == FunctionLayout::kImplicitClosureFunction) {
if (kind == UntaggedFunction::kImplicitClosureFunction) {
func_ = func_.ImplicitClosureFunction();
}
if (func_.is_abstract() || (func_.kind() != kind)) {
@ -984,7 +984,7 @@ FunctionPtr TypeFeedbackLoader::FindFunction(FunctionLayout::Kind kind,
}
}
return func_.raw();
return func_.ptr();
}
ClassPtr TypeFeedbackLoader::ReadClassByName() {
@ -1010,7 +1010,7 @@ ClassPtr TypeFeedbackLoader::ReadClassByName() {
}
}
}
return cls_.raw();
return cls_.ptr();
}
StringPtr TypeFeedbackLoader::ReadString() {

View file

@ -102,7 +102,7 @@ class TypeFeedbackLoader : public ValueObject {
ObjectPtr LoadClasses();
ObjectPtr LoadFields();
ObjectPtr LoadFunction();
FunctionPtr FindFunction(FunctionLayout::Kind kind,
FunctionPtr FindFunction(UntaggedFunction::Kind kind,
const TokenPosition& token_pos);
ClassPtr ReadClassByName();

View file

@ -56,7 +56,7 @@ static void GetUniqueDynamicTarget(IsolateGroup* isolate_group,
isolate_group->object_store()->unique_dynamic_targets());
ASSERT(fname.IsSymbol());
*function = functions_map.GetOrNull(fname);
ASSERT(functions_map.Release().raw() ==
ASSERT(functions_map.Release().ptr() ==
isolate_group->object_store()->unique_dynamic_targets());
}
@ -155,7 +155,7 @@ bool AotCallSpecializer::RecognizeRuntimeTypeGetter(InstanceCallInstr* call) {
return false;
}
if (call->function_name().raw() != Symbols::GetRuntimeType().raw()) {
if (call->function_name().ptr() != Symbols::GetRuntimeType().ptr()) {
return false;
}
@ -165,7 +165,7 @@ bool AotCallSpecializer::RecognizeRuntimeTypeGetter(InstanceCallInstr* call) {
const Function& function =
Function::Handle(Z, call->ResolveForReceiverClass(cls));
ASSERT(!function.IsNull());
const Function& target = Function::ZoneHandle(Z, function.raw());
const Function& target = Function::ZoneHandle(Z, function.ptr());
StaticCallInstr* static_call =
StaticCallInstr::FromCall(Z, call, target, call->CallCount());
// Since the result is either a Type or a FunctionType, we cannot pin it.
@ -812,7 +812,7 @@ void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
if (has_one_target) {
const Function& target = targets.FirstTarget();
FunctionLayout::Kind function_kind = target.kind();
UntaggedFunction::Kind function_kind = target.kind();
if (flow_graph()->CheckForInstanceCall(instr, function_kind) ==
FlowGraph::ToCheck::kNoCheck) {
StaticCallInstr* call = StaticCallInstr::FromCall(
@ -877,7 +877,7 @@ void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
const Function& function =
Function::Handle(Z, instr->ResolveForReceiverClass(receiver_class));
if (!function.IsNull()) {
const Function& target = Function::ZoneHandle(Z, function.raw());
const Function& target = Function::ZoneHandle(Z, function.ptr());
StaticCallInstr* call =
StaticCallInstr::FromCall(Z, instr, target, instr->CallCount());
instr->ReplaceWith(call, current_iterator());
@ -967,9 +967,9 @@ void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
// First we are trying to compute a single target for all subclasses.
if (single_target.IsNull()) {
ASSERT(i == 0);
single_target = target.raw();
single_target = target.ptr();
continue;
} else if (single_target.raw() == target.raw()) {
} else if (single_target.ptr() == target.ptr()) {
continue;
}
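The loop above is the devirtualization test: walk every receiver class id, and if all of them resolve to one and the same target the instance call can be rewritten into a static call; the first mismatch gives up and falls back to the IC-based path. A minimal sketch of that decision over a hypothetical cid-to-target map (not the VM's CallTargets/ICData types):

#include <map>
#include <optional>
#include <string>

// Returns the target shared by every receiver class id, or nullopt when the
// call site is genuinely polymorphic and must stay a dynamic call.
std::optional<std::string> SingleTargetFor(
    const std::map<int, std::string>& cid_to_target) {
  std::optional<std::string> single;
  for (const auto& entry : cid_to_target) {
    const std::string& target = entry.second;
    if (!single) {
      single = target;       // first class id seen
    } else if (*single != target) {
      return std::nullopt;   // two different targets: give up
    }
  }
  return single;
}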
@ -992,12 +992,12 @@ void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
single_target = Function::null();
}
ASSERT(ic_data.raw() != ICData::null());
ASSERT(single_target.raw() == Function::null());
ASSERT(ic_data.ptr() != ICData::null());
ASSERT(single_target.ptr() == Function::null());
ic_data.AddReceiverCheck(cid, target);
}
if (single_target.raw() != Function::null()) {
if (single_target.ptr() != Function::null()) {
// If this is a getter or setter invocation try inlining it right away
// instead of replacing it with a static call.
if ((op_kind == Token::kGET) || (op_kind == Token::kSET)) {
@ -1017,12 +1017,12 @@ void AotCallSpecializer::VisitInstanceCall(InstanceCallInstr* instr) {
// We have computed that there is only a single target for this call
// within the whole hierarchy. Replace InstanceCall with StaticCall.
const Function& target = Function::ZoneHandle(Z, single_target.raw());
const Function& target = Function::ZoneHandle(Z, single_target.ptr());
StaticCallInstr* call =
StaticCallInstr::FromCall(Z, instr, target, instr->CallCount());
instr->ReplaceWith(call, current_iterator());
return;
} else if ((ic_data.raw() != ICData::null()) &&
} else if ((ic_data.ptr() != ICData::null()) &&
!ic_data.NumberOfChecksIs(0)) {
const CallTargets* targets = CallTargets::Create(Z, ic_data);
ASSERT(!targets->is_empty());
@ -1071,7 +1071,7 @@ bool AotCallSpecializer::TryExpandCallThroughGetter(const Class& receiver_class,
// Ignore callsites like f.call() for now. Those need to be handled
// specially if f is a closure.
if (call->function_name().raw() == Symbols::Call().raw()) {
if (call->function_name().ptr() == Symbols::Call().ptr()) {
return false;
}
@ -1089,7 +1089,7 @@ bool AotCallSpecializer::TryExpandCallThroughGetter(const Class& receiver_class,
ArgumentsDescriptor args_desc(args_desc_array);
target = Resolver::ResolveDynamicForReceiverClass(
receiver_class, getter_name, args_desc, /*allow_add=*/false);
if (target.raw() == Function::null() || target.IsMethodExtractor()) {
if (target.ptr() == Function::null() || target.IsMethodExtractor()) {
return false;
}

View file

@ -444,7 +444,7 @@ void DispatchTableGenerator::NumberSelectors() {
for (classid_t cid = kIllegalCid + 1; cid < num_classes_; cid++) {
obj = classes_->At(cid);
if (obj.IsClass()) {
klass = Class::RawCast(obj.raw());
klass = Class::RawCast(obj.ptr());
functions = klass.current_functions();
if (!functions.IsNull()) {
for (intptr_t j = 0; j < functions.Length(); j++) {
@ -489,7 +489,7 @@ void DispatchTableGenerator::SetupSelectorRows() {
if (cid > kIllegalCid) {
obj = classes_->At(cid);
if (obj.IsClass()) {
klass = Class::RawCast(obj.raw());
klass = Class::RawCast(obj.ptr());
concrete = !klass.is_abstract();
klass = klass.SuperClass();
if (!klass.IsNull()) {
@ -558,7 +558,7 @@ void DispatchTableGenerator::SetupSelectorRows() {
for (classid_t cid = kIllegalCid + 1; cid < num_classes_; cid++) {
obj = classes_->At(cid);
if (obj.IsClass()) {
klass = Class::RawCast(obj.raw());
klass = Class::RawCast(obj.ptr());
GrowableArray<Interval>& subclasss_cid_ranges = cid_subclass_ranges[cid];
functions = klass.current_functions();
@ -572,7 +572,7 @@ void DispatchTableGenerator::SetupSelectorRows() {
if (sid != SelectorMap::kInvalidSelectorId) {
auto MakeIntervals = [&](const Function& function, int32_t sid) {
// A function handle that survives until the table is built.
auto& function_handle = Function::ZoneHandle(Z, function.raw());
auto& function_handle = Function::ZoneHandle(Z, function.ptr());
for (intptr_t i = 0; i < subclasss_cid_ranges.length(); i++) {
Interval& subclass_cid_range = subclasss_cid_ranges[i];
@ -667,7 +667,7 @@ ArrayPtr DispatchTableGenerator::BuildCodeArray() {
table_rows_[i]->FillTable(classes_, entries);
}
entries.MakeImmutable();
return entries.raw();
return entries.ptr();
}
} // namespace compiler
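Conceptually, BuildCodeArray above fills a table addressed by class id and selector id. A deliberately simplified sketch of that shape, assuming dense integer ids and ignoring the interval bookkeeping and row compression the real generator performs; SimpleDispatchTable is a made-up name:

#include <cstddef>
#include <vector>

using Target = void (*)();

class SimpleDispatchTable {
 public:
  SimpleDispatchTable(int num_cids, int num_selectors)
      : num_selectors_(num_selectors),
        entries_(static_cast<std::size_t>(num_cids) * num_selectors, nullptr) {}

  void Set(int cid, int selector, Target target) {
    entries_[Index(cid, selector)] = target;
  }
  Target Get(int cid, int selector) const {
    return entries_[Index(cid, selector)];
  }

 private:
  std::size_t Index(int cid, int selector) const {
    return static_cast<std::size_t>(cid) * num_selectors_ + selector;
  }
  int num_selectors_;
  std::vector<Target> entries_;
};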

View file

@ -590,7 +590,7 @@ void Precompiler::CollectCallbackFields() {
if (!IsSent(field_name)) continue;
// Create arguments descriptor with fixed parameters from
// signature of field_type.
signature ^= field_type.raw();
signature ^= field_type.ptr();
if (signature.IsGeneric()) continue;
if (signature.HasOptionalParameters()) continue;
if (FLAG_trace_precompiler) {
@ -610,7 +610,8 @@ void Precompiler::CollectCallbackFields() {
if (subcls.is_allocated()) {
// Add dispatcher to cls.
dispatcher = subcls.GetInvocationDispatcher(
field_name, args_desc, FunctionLayout::kInvokeFieldDispatcher,
field_name, args_desc,
UntaggedFunction::kInvokeFieldDispatcher,
/* create_if_absent = */ true);
if (FLAG_trace_precompiler) {
THR_Print("Added invoke-field-dispatcher for %s to %s\n",
@ -631,7 +632,7 @@ void Precompiler::ProcessFunction(const Function& function) {
: 0;
RELEASE_ASSERT(!function.HasCode());
// Ffi trampoline functions have no signature.
ASSERT(function.kind() == FunctionLayout::kFfiTrampoline ||
ASSERT(function.kind() == UntaggedFunction::kFfiTrampoline ||
FunctionType::Handle(Z, function.signature()).IsFinalized());
TracingScope tracing_scope(this);
@ -719,8 +720,8 @@ void Precompiler::AddCalleesOf(const Function& function, intptr_t gop_offset) {
}
static bool IsPotentialClosureCall(const String& selector) {
return selector.raw() == Symbols::Call().raw() ||
selector.raw() == Symbols::DynamicCall().raw();
return selector.ptr() == Symbols::Call().ptr() ||
selector.ptr() == Symbols::DynamicCall().ptr();
}
void Precompiler::AddCalleesOfHelper(const Object& entry,
@ -770,7 +771,7 @@ void Precompiler::AddCalleesOfHelper(const Object& entry,
void Precompiler::AddTypesOf(const Class& cls) {
if (cls.IsNull()) return;
if (classes_to_retain_.HasKey(&cls)) return;
classes_to_retain_.Insert(&Class::ZoneHandle(Z, cls.raw()));
classes_to_retain_.Insert(&Class::ZoneHandle(Z, cls.ptr()));
Array& interfaces = Array::Handle(Z, cls.interfaces());
AbstractType& type = AbstractType::Handle(Z);
@ -833,7 +834,7 @@ void Precompiler::AddType(const AbstractType& abstype) {
if (abstype.IsTypeParameter()) {
const auto& param = TypeParameter::Cast(abstype);
if (typeparams_to_retain_.HasKey(&param)) return;
typeparams_to_retain_.Insert(&TypeParameter::ZoneHandle(Z, param.raw()));
typeparams_to_retain_.Insert(&TypeParameter::ZoneHandle(Z, param.ptr()));
auto& type = AbstractType::Handle(Z, param.bound());
AddType(type);
@ -845,7 +846,7 @@ void Precompiler::AddType(const AbstractType& abstype) {
if (abstype.IsFunctionType()) {
if (functiontypes_to_retain_.HasKey(&FunctionType::Cast(abstype))) return;
const FunctionType& signature =
FunctionType::ZoneHandle(Z, FunctionType::Cast(abstype).raw());
FunctionType::ZoneHandle(Z, FunctionType::Cast(abstype).ptr());
functiontypes_to_retain_.Insert(&signature);
AddTypeArguments(TypeArguments::Handle(Z, signature.type_parameters()));
@ -861,7 +862,7 @@ void Precompiler::AddType(const AbstractType& abstype) {
}
if (types_to_retain_.HasKey(&abstype)) return;
types_to_retain_.Insert(&AbstractType::ZoneHandle(Z, abstype.raw()));
types_to_retain_.Insert(&AbstractType::ZoneHandle(Z, abstype.ptr()));
if (abstype.IsType()) {
const Type& type = Type::Cast(abstype);
@ -880,7 +881,7 @@ void Precompiler::AddTypeArguments(const TypeArguments& args) {
if (args.IsNull()) return;
if (typeargs_to_retain_.HasKey(&args)) return;
typeargs_to_retain_.Insert(&TypeArguments::ZoneHandle(Z, args.raw()));
typeargs_to_retain_.Insert(&TypeArguments::ZoneHandle(Z, args.ptr()));
AbstractType& arg = AbstractType::Handle(Z);
for (intptr_t i = 0; i < args.Length(); i++) {
@ -899,8 +900,8 @@ void Precompiler::AddConstObject(const class Instance& instance) {
return;
}
if (instance.raw() == Object::sentinel().raw() ||
instance.raw() == Object::transition_sentinel().raw()) {
if (instance.ptr() == Object::sentinel().ptr() ||
instance.ptr() == Object::transition_sentinel().ptr()) {
return;
}
@ -928,7 +929,7 @@ void Precompiler::AddConstObject(const class Instance& instance) {
const Library& target = Library::Handle(Z, prefix.GetLibrary(0));
cls = target.toplevel_class();
if (!classes_to_retain_.HasKey(&cls)) {
classes_to_retain_.Insert(&Class::ZoneHandle(Z, cls.raw()));
classes_to_retain_.Insert(&Class::ZoneHandle(Z, cls.ptr()));
}
return;
}
@ -943,7 +944,7 @@ void Precompiler::AddConstObject(const class Instance& instance) {
// Constants are canonicalized and we avoid repeated processing of them.
if (consts_to_retain_.HasKey(&instance)) return;
consts_to_retain_.Insert(&Instance::ZoneHandle(Z, instance.raw()));
consts_to_retain_.Insert(&Instance::ZoneHandle(Z, instance.ptr()));
if (cls.NumTypeArguments() > 0) {
AddTypeArguments(TypeArguments::Handle(Z, instance.GetTypeArguments()));
@ -972,7 +973,7 @@ void Precompiler::AddConstObject(const class Instance& instance) {
};
ConstObjectVisitor visitor(this, IG);
instance.raw()->ptr()->VisitPointers(&visitor);
instance.ptr()->untag()->VisitPointers(&visitor);
}
void Precompiler::AddClosureCall(const String& call_selector,
@ -982,7 +983,7 @@ void Precompiler::AddClosureCall(const String& call_selector,
const Function& dispatcher =
Function::Handle(Z, cache_class.GetInvocationDispatcher(
call_selector, arguments_descriptor,
FunctionLayout::kInvokeFieldDispatcher,
UntaggedFunction::kInvokeFieldDispatcher,
true /* create_if_absent */));
AddFunction(dispatcher);
}
@ -994,15 +995,15 @@ void Precompiler::AddField(const Field& field) {
if (fields_to_retain_.HasKey(&field)) return;
fields_to_retain_.Insert(&Field::ZoneHandle(Z, field.raw()));
fields_to_retain_.Insert(&Field::ZoneHandle(Z, field.ptr()));
if (field.is_static()) {
const Object& value = Object::Handle(Z, field.StaticValue());
// Should not be in the middle of initialization while precompiling.
ASSERT(value.raw() != Object::transition_sentinel().raw());
ASSERT(value.ptr() != Object::transition_sentinel().ptr());
if (value.raw() != Object::sentinel().raw() &&
value.raw() != Object::null()) {
if (value.ptr() != Object::sentinel().ptr() &&
value.ptr() != Object::null()) {
ASSERT(value.IsInstance());
AddConstObject(Instance::Cast(value));
}
@ -1030,15 +1031,15 @@ bool Precompiler::MustRetainFunction(const Function& function) {
// Resolver::ResolveDynamic uses.
const auto& selector = String::Handle(Z, function.name());
if (selector.raw() == Symbols::toString().raw()) return true;
if (selector.raw() == Symbols::AssignIndexToken().raw()) return true;
if (selector.raw() == Symbols::IndexToken().raw()) return true;
if (selector.raw() == Symbols::hashCode().raw()) return true;
if (selector.raw() == Symbols::NoSuchMethod().raw()) return true;
if (selector.raw() == Symbols::EqualOperator().raw()) return true;
if (selector.ptr() == Symbols::toString().ptr()) return true;
if (selector.ptr() == Symbols::AssignIndexToken().ptr()) return true;
if (selector.ptr() == Symbols::IndexToken().ptr()) return true;
if (selector.ptr() == Symbols::hashCode().ptr()) return true;
if (selector.ptr() == Symbols::NoSuchMethod().ptr()) return true;
if (selector.ptr() == Symbols::EqualOperator().ptr()) return true;
// Use the same check for _Closure.call as in stack_trace.{h|cc}.
if (selector.raw() == Symbols::Call().raw()) {
if (selector.ptr() == Symbols::Call().ptr()) {
const auto& name = String::Handle(Z, function.QualifiedScrubbedName());
if (name.Equals(Symbols::_ClosureCall())) return true;
}
@ -1084,7 +1085,7 @@ void Precompiler::AddSelector(const String& selector) {
ASSERT(!selector.IsNull());
if (!IsSent(selector)) {
sent_selectors_.Insert(&String::ZoneHandle(Z, selector.raw()));
sent_selectors_.Insert(&String::ZoneHandle(Z, selector.ptr()));
selector_count_++;
changed_ = true;
@ -1224,7 +1225,7 @@ void Precompiler::AddAnnotatedRoots() {
if ((type == EntryPointPragma::kAlways ||
type == EntryPointPragma::kGetterOnly) &&
function.kind() != FunctionLayout::kConstructor &&
function.kind() != UntaggedFunction::kConstructor &&
!function.IsSetterFunction()) {
function2 = function.ImplicitClosureFunction();
AddFunction(function2);
@ -1234,29 +1235,29 @@ void Precompiler::AddAnnotatedRoots() {
AddInstantiatedClass(cls);
}
}
if (function.kind() == FunctionLayout::kImplicitGetter &&
if (function.kind() == UntaggedFunction::kImplicitGetter &&
!implicit_getters.IsNull()) {
for (intptr_t i = 0; i < implicit_getters.Length(); ++i) {
field ^= implicit_getters.At(i);
if (function.accessor_field() == field.raw()) {
if (function.accessor_field() == field.ptr()) {
AddFunction(function);
}
}
}
if (function.kind() == FunctionLayout::kImplicitSetter &&
if (function.kind() == UntaggedFunction::kImplicitSetter &&
!implicit_setters.IsNull()) {
for (intptr_t i = 0; i < implicit_setters.Length(); ++i) {
field ^= implicit_setters.At(i);
if (function.accessor_field() == field.raw()) {
if (function.accessor_field() == field.ptr()) {
AddFunction(function);
}
}
}
if (function.kind() == FunctionLayout::kImplicitStaticGetter &&
if (function.kind() == UntaggedFunction::kImplicitStaticGetter &&
!implicit_static_getters.IsNull()) {
for (intptr_t i = 0; i < implicit_static_getters.Length(); ++i) {
field ^= implicit_static_getters.At(i);
if (function.accessor_field() == field.raw()) {
if (function.accessor_field() == field.ptr()) {
AddFunction(function);
}
}
@ -1312,7 +1313,7 @@ void Precompiler::CheckForNewDynamicFunctions() {
// Handle the implicit call type conversions.
if (Field::IsGetterName(selector) &&
(function.kind() != FunctionLayout::kMethodExtractor)) {
(function.kind() != UntaggedFunction::kMethodExtractor)) {
// Call-through-getter.
// Function is get:foo and somewhere foo (or dyn:foo) is called.
// Note that we need to skip method extractors (which were potentially
@ -1330,7 +1331,7 @@ void Precompiler::CheckForNewDynamicFunctions() {
function2 = function.GetDynamicInvocationForwarder(selector2);
AddFunction(function2);
}
} else if (function.kind() == FunctionLayout::kRegularFunction) {
} else if (function.kind() == UntaggedFunction::kRegularFunction) {
selector2 = Field::LookupGetterSymbol(selector);
selector3 = String::null();
if (!selector2.IsNull()) {
@ -1355,18 +1356,18 @@ void Precompiler::CheckForNewDynamicFunctions() {
}
const bool is_getter =
function.kind() == FunctionLayout::kImplicitGetter ||
function.kind() == FunctionLayout::kGetterFunction;
function.kind() == UntaggedFunction::kImplicitGetter ||
function.kind() == UntaggedFunction::kGetterFunction;
const bool is_setter =
function.kind() == FunctionLayout::kImplicitSetter ||
function.kind() == FunctionLayout::kSetterFunction;
function.kind() == UntaggedFunction::kImplicitSetter ||
function.kind() == UntaggedFunction::kSetterFunction;
const bool is_regular =
function.kind() == FunctionLayout::kRegularFunction;
function.kind() == UntaggedFunction::kRegularFunction;
if (is_getter || is_setter || is_regular) {
selector2 = Function::CreateDynamicInvocationForwarderName(selector);
if (IsSent(selector2)) {
if (function.kind() == FunctionLayout::kImplicitGetter ||
function.kind() == FunctionLayout::kImplicitSetter) {
if (function.kind() == UntaggedFunction::kImplicitGetter ||
function.kind() == UntaggedFunction::kImplicitSetter) {
field = function.accessor_field();
metadata = kernel::ProcedureAttributesOf(field, Z);
} else if (!found_metadata) {
@ -1401,7 +1402,7 @@ class NameFunctionsTraits {
String::Cast(a).Equals(String::Cast(b));
}
static uword Hash(const Object& obj) { return String::Cast(obj).Hash(); }
static ObjectPtr NewKey(const String& str) { return str.raw(); }
static ObjectPtr NewKey(const String& str) { return str.ptr(); }
};
typedef UnorderedHashMap<NameFunctionsTraits> Table;
@ -1425,7 +1426,7 @@ static void AddNamesToFunctionsTable(Zone* zone,
Function* dyn_function) {
AddNameToFunctionsTable(zone, table, fname, function);
*dyn_function = function.raw();
*dyn_function = function.ptr();
if (kernel::NeedsDynamicInvocationForwarder(function)) {
*mangled_name = function.name();
*mangled_name =
@ -1513,12 +1514,12 @@ void Precompiler::CollectDynamicFunctionNames() {
// create lazily.
// => We disable unique target optimization if the target belongs to the
// lazily created functions.
key_demangled = key.raw();
key_demangled = key.ptr();
if (Function::IsDynamicInvocationForwarderName(key)) {
key_demangled = Function::DemangleDynamicInvocationForwarderName(key);
}
if (function.name() != key.raw() &&
function.name() != key_demangled.raw()) {
if (function.name() != key.ptr() &&
function.name() != key_demangled.ptr()) {
continue;
}
functions_map.UpdateOrInsert(key, function);
@ -1698,7 +1699,7 @@ void Precompiler::ReplaceFunctionStaticCallEntries() {
const uword pc = pc_offset + code.PayloadStart();
CodePatcher::PatchStaticCallAt(pc, code, target_code_);
if (append_to_pool) {
builder.AddObject(Object::ZoneHandle(target_code_.raw()));
builder.AddObject(Object::ZoneHandle(target_code_.ptr()));
}
}
if (FLAG_trace_precompiler) {
@ -1805,7 +1806,7 @@ void Precompiler::DropFunctions() {
retained_functions.Add(Object::null_object());
functions = Array::MakeFixedLength(retained_functions);
} else {
functions = Object::empty_array().raw();
functions = Object::empty_array().ptr();
}
cls.set_invocation_dispatcher_cache(functions);
}
@ -1969,7 +1970,7 @@ void Precompiler::DropTypes() {
const intptr_t dict_size =
Utils::RoundUpToPowerOfTwo(retained_types.Length() * 4 / 3);
types_array = HashTables::New<CanonicalTypeSet>(dict_size, Heap::kOld);
CanonicalTypeSet types_table(Z, types_array.raw());
CanonicalTypeSet types_table(Z, types_array.ptr());
bool present;
for (intptr_t i = 0; i < retained_types.Length(); i++) {
type ^= retained_types.At(i);
@ -2008,7 +2009,7 @@ void Precompiler::DropFunctionTypes() {
Utils::RoundUpToPowerOfTwo(retained_types.Length() * 4 / 3);
types_array =
HashTables::New<CanonicalFunctionTypeSet>(dict_size, Heap::kOld);
CanonicalFunctionTypeSet types_table(Z, types_array.raw());
CanonicalFunctionTypeSet types_table(Z, types_array.ptr());
bool present;
for (intptr_t i = 0; i < retained_types.Length(); i++) {
type ^= retained_types.At(i);
@ -2050,7 +2051,7 @@ void Precompiler::DropTypeParameters() {
Utils::RoundUpToPowerOfTwo(retained_typeparams.Length() * 4 / 3);
typeparams_array =
HashTables::New<CanonicalTypeParameterSet>(dict_size, Heap::kOld);
CanonicalTypeParameterSet typeparams_table(Z, typeparams_array.raw());
CanonicalTypeParameterSet typeparams_table(Z, typeparams_array.ptr());
bool present;
for (intptr_t i = 0; i < retained_typeparams.Length(); i++) {
typeparam ^= retained_typeparams.At(i);
@ -2089,7 +2090,7 @@ void Precompiler::DropTypeArguments() {
Utils::RoundUpToPowerOfTwo(retained_typeargs.Length() * 4 / 3);
typeargs_array =
HashTables::New<CanonicalTypeArgumentsSet>(dict_size, Heap::kOld);
CanonicalTypeArgumentsSet typeargs_table(Z, typeargs_array.raw());
CanonicalTypeArgumentsSet typeargs_table(Z, typeargs_array.ptr());
bool present;
for (intptr_t i = 0; i < retained_typeargs.Length(); i++) {
typeargs ^= retained_typeargs.At(i);
@ -2259,7 +2260,7 @@ void Precompiler::DropLibraryEntries() {
lib.RehashDictionary(dict, used * 4 / 3 + 1);
if (!(retain_root_library_caches_ &&
(lib.raw() == IG->object_store()->root_library()))) {
(lib.ptr() == IG->object_store()->root_library()))) {
lib.DropDependenciesAndCaches();
}
}
@ -2335,7 +2336,7 @@ void Precompiler::DropLibraries() {
} else if (lib.is_dart_scheme()) {
// The core libraries are referenced from the object store.
retain = true;
} else if (lib.raw() == root_lib.raw()) {
} else if (lib.ptr() == root_lib.ptr()) {
// The root library might have no surviving members if it only exports
// main from another library. It will still be referenced from the object
// store, so retain it.
@ -2367,7 +2368,7 @@ void Precompiler::DropLibraries() {
}
Library::RegisterLibraries(T, retained_libraries);
libraries_ = retained_libraries.raw();
libraries_ = retained_libraries.ptr();
}
// Traits for the HashTable template.
@ -2375,7 +2376,7 @@ struct CodeKeyTraits {
static uint32_t Hash(const Object& key) { return Code::Cast(key).Size(); }
static const char* Name() { return "CodeKeyTraits"; }
static bool IsMatch(const Object& x, const Object& y) {
return x.raw() == y.raw();
return x.ptr() == y.ptr();
}
static bool ReportStats() { return false; }
};
@ -2409,7 +2410,7 @@ FunctionPtr Precompiler::FindUnvisitedRetainedFunction() {
function ^= functions_to_retain_.GetKey(it.Current());
if (!function.HasCode()) continue;
code = function.CurrentCode();
if (!visited.ContainsKey(code)) return function.raw();
if (!visited.ContainsKey(code)) return function.ptr();
}
return Function::null();
}
@ -2739,13 +2740,13 @@ bool PrecompileParsedFunctionHelper::Compile(CompilationPipeline* pipeline) {
// We bailed out or we encountered an error.
const Error& error = Error::Handle(thread()->StealStickyError());
if (error.raw() == Object::branch_offset_error().raw()) {
if (error.ptr() == Object::branch_offset_error().ptr()) {
// Compilation failed due to an out of range branch offset in the
// assembler. We try again (done = false) with far branches enabled.
done = false;
ASSERT(!use_far_branches);
use_far_branches = true;
} else if (error.raw() == Object::speculative_inlining_error().raw()) {
} else if (error.ptr() == Object::speculative_inlining_error().ptr()) {
// The return value of setjmp is the deopt id of the check instruction
// that caused the bailout.
done = false;
@ -2801,7 +2802,7 @@ static ErrorPtr PrecompileFunctionHelper(Precompiler* precompiler,
per_compile_timer.Start();
ParsedFunction* parsed_function = new (zone)
ParsedFunction(thread, Function::ZoneHandle(zone, function.raw()));
ParsedFunction(thread, Function::ZoneHandle(zone, function.ptr()));
if (trace_compiler) {
THR_Print("Precompiling %sfunction: '%s' @ token %" Pd ", size %" Pd "\n",
(optimized ? "optimized " : ""),
@ -2821,7 +2822,7 @@ static ErrorPtr PrecompileFunctionHelper(Precompiler* precompiler,
const Error& error = Error::Handle(thread->StealStickyError());
ASSERT(error.IsLanguageError() &&
LanguageError::Cast(error).kind() != Report::kBailout);
return error.raw();
return error.ptr();
}
per_compile_timer.Stop();
@ -2851,7 +2852,7 @@ static ErrorPtr PrecompileFunctionHelper(Precompiler* precompiler,
// Precompilation may encounter compile-time errors.
// Do not attempt to optimize functions that can cause errors.
function.set_is_optimizable(false);
return error.raw();
return error.ptr();
}
UNREACHABLE();
return Error::null();
@ -3012,7 +3013,7 @@ StringPtr Obfuscator::ObfuscationState::RenameImpl(const String& name,
renamed_ = BuildRename(name, atomic);
renames_.UpdateOrInsert(name, renamed_);
}
return renamed_.raw();
return renamed_.ptr();
}
static const char* const kGetterPrefix = "get:";
@ -3088,8 +3089,8 @@ StringPtr Obfuscator::ObfuscationState::NewAtomicRename(
should_be_private ? "_" : "", name_);
// Must check if our generated name clashes with something that will
// have an identity renaming.
} while (renames_.GetOrNull(renamed_) == renamed_.raw());
return renamed_.raw();
} while (renames_.GetOrNull(renamed_) == renamed_.ptr());
return renamed_.ptr();
}
StringPtr Obfuscator::ObfuscationState::BuildRename(const String& name,
@ -3143,7 +3144,7 @@ StringPtr Obfuscator::ObfuscationState::BuildRename(const String& name,
} else if (is_setter) {
return Symbols::FromSet(thread_, string_);
}
return string_.raw();
return string_.ptr();
} else {
return NewAtomicRename(is_private);
}
@ -3161,19 +3162,19 @@ void Obfuscator::Deobfuscate(Thread* thread,
const Array& renames = Array::Handle(
thread->zone(), GetRenamesFromSavedState(obfuscation_state));
ObfuscationMap renames_map(renames.raw());
ObfuscationMap renames_map(renames.ptr());
String& piece = String::Handle();
for (intptr_t i = 0; i < pieces.Length(); i++) {
piece ^= pieces.At(i);
ASSERT(piece.IsSymbol());
// Fast path: skip '.'
if (piece.raw() == Symbols::Dot().raw()) {
if (piece.ptr() == Symbols::Dot().ptr()) {
continue;
}
// Fast path: check if piece has an identity obfuscation.
if (renames_map.GetOrNull(piece) == piece.raw()) {
if (renames_map.GetOrNull(piece) == piece.ptr()) {
continue;
}
@ -3183,7 +3184,7 @@ void Obfuscator::Deobfuscate(Thread* thread,
ObfuscationMap::Iterator it(&renames_map);
while (it.MoveNext()) {
const intptr_t entry = it.Current();
if (renames_map.GetPayload(entry, 0) == piece.raw()) {
if (renames_map.GetPayload(entry, 0) == piece.ptr()) {
piece ^= renames_map.GetKey(entry);
pieces.SetAt(i, piece);
break;
@ -3211,7 +3212,7 @@ const char** Obfuscator::SerializeMap(Thread* thread) {
const Array& renames = Array::Handle(
thread->zone(), GetRenamesFromSavedState(obfuscation_state));
ObfuscationMap renames_map(renames.raw());
ObfuscationMap renames_map(renames.ptr());
const char** result = new const char*[renames_map.NumOccupied() * 2 + 1];
intptr_t idx = 0;

View file

@ -62,7 +62,7 @@ class SymbolKeyValueTrait {
static inline intptr_t Hashcode(Key key) { return key->Hash(); }
static inline bool IsKeyEqual(Pair pair, Key key) {
return pair->raw() == key->raw();
return pair->ptr() == key->ptr();
}
};
@ -73,7 +73,7 @@ struct FunctionKeyTraits {
static uint32_t Hash(const Object& key) { return Function::Cast(key).Hash(); }
static const char* Name() { return "FunctionKeyTraits"; }
static bool IsMatch(const Object& x, const Object& y) {
return x.raw() == y.raw();
return x.ptr() == y.ptr();
}
static bool ReportStats() { return false; }
};
@ -100,7 +100,7 @@ class FieldKeyValueTrait {
}
static inline bool IsKeyEqual(Pair pair, Key key) {
return pair->raw() == key->raw();
return pair->ptr() == key->ptr();
}
};
@ -120,7 +120,7 @@ class ClassKeyValueTrait {
static inline intptr_t Hashcode(Key key) { return key->token_pos().Hash(); }
static inline bool IsKeyEqual(Pair pair, Key key) {
return pair->raw() == key->raw();
return pair->ptr() == key->ptr();
}
};
@ -140,7 +140,7 @@ class AbstractTypeKeyValueTrait {
static inline intptr_t Hashcode(Key key) { return key->Hash(); }
static inline bool IsKeyEqual(Pair pair, Key key) {
return pair->raw() == key->raw();
return pair->ptr() == key->ptr();
}
};
@ -160,7 +160,7 @@ class FunctionTypeKeyValueTrait {
static inline intptr_t Hashcode(Key key) { return key->Hash(); }
static inline bool IsKeyEqual(Pair pair, Key key) {
return pair->raw() == key->raw();
return pair->ptr() == key->ptr();
}
};
@ -180,7 +180,7 @@ class TypeParameterKeyValueTrait {
static inline intptr_t Hashcode(Key key) { return key->Hash(); }
static inline bool IsKeyEqual(Pair pair, Key key) {
return pair->raw() == key->raw();
return pair->ptr() == key->ptr();
}
};
@ -200,7 +200,7 @@ class TypeArgumentsKeyValueTrait {
static inline intptr_t Hashcode(Key key) { return key->Hash(); }
static inline bool IsKeyEqual(Pair pair, Key key) {
return pair->raw() == key->raw();
return pair->ptr() == key->ptr();
}
};
@ -220,7 +220,7 @@ class InstanceKeyValueTrait {
static inline intptr_t Hashcode(Key key) { return key->GetClassId(); }
static inline bool IsKeyEqual(Pair pair, Key key) {
return pair->raw() == key->raw();
return pair->ptr() == key->ptr();
}
};
@ -405,7 +405,7 @@ class FunctionsTraits {
static bool ReportStats() { return false; }
static bool IsMatch(const Object& a, const Object& b) {
return String::Cast(a).raw() == String::Cast(b).raw();
return String::Cast(a).ptr() == String::Cast(b).ptr();
}
static uword Hash(const Object& obj) { return String::Cast(obj).Hash(); }
};
@ -421,7 +421,7 @@ class ObfuscationMapTraits {
// Only for non-descriptor lookup and table expansion.
static bool IsMatch(const Object& a, const Object& b) {
return a.raw() == b.raw();
return a.ptr() == b.ptr();
}
static uword Hash(const Object& key) { return String::Cast(key).Hash(); }
@ -467,7 +467,7 @@ class Obfuscator : public ValueObject {
// input and it always preserves leading '_' even for atomic renames.
StringPtr Rename(const String& name, bool atomic = false) {
if (state_ == NULL) {
return name.raw();
return name.ptr();
}
return state_->RenameImpl(name, atomic);
@ -502,13 +502,13 @@ class Obfuscator : public ValueObject {
static ArrayPtr GetRenamesFromSavedState(const Array& saved_state) {
Array& renames = Array::Handle();
renames ^= saved_state.At(kSavedStateRenamesIndex);
return renames.raw();
return renames.ptr();
}
static StringPtr GetNameFromSavedState(const Array& saved_state) {
String& name = String::Handle();
name ^= saved_state.At(kSavedStateNameIndex);
return name.raw();
return name.ptr();
}
class ObfuscationState : public ZoneAllocated {
@ -605,7 +605,7 @@ class Obfuscator {
~Obfuscator() {}
StringPtr Rename(const String& name, bool atomic = false) {
return name.raw();
return name.ptr();
}
void PreventRenaming(const String& name) {}

View file

@ -153,7 +153,7 @@ intptr_t PrecompilerTracer::InternEntity(const Object& obj) {
} else if (obj.IsField()) {
cls_ = Field::Cast(obj).Owner();
}
if (cls_.raw() != Class::null()) {
if (cls_.ptr() != Class::null()) {
InternEntity(cls_);
}
}

View file

@ -92,7 +92,7 @@ class PrecompilerTracer : public ZoneAllocated {
static const char* Name() { return "EntityTableTraits"; }
static bool IsMatch(const Object& a, const Object& b) {
return a.raw() == b.raw();
return a.ptr() == b.ptr();
}
static uword Hash(const Object& obj) {

View file

@ -1973,10 +1973,10 @@ static void TryAllocateString(Assembler* assembler,
// R1: new object end address.
// R2: allocation size.
{
const intptr_t shift = target::ObjectLayout::kTagBitsSizeTagPos -
const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2;
__ CompareImmediate(R2, target::ObjectLayout::kSizeTagMaxSizeTag);
__ CompareImmediate(R2, target::UntaggedObject::kSizeTagMaxSizeTag);
__ mov(R3, Operand(R2, LSL, shift), LS);
__ mov(R3, Operand(0), HI);
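
The shift above does two things at once: it divides the byte size by the object alignment (so the field stores the size in allocation units) and moves the result up to the size-tag bit position, with sizes beyond kSizeTagMaxSizeTag encoded as 0 so the size must be recovered from the class instead. A small standalone sketch of that encoding; the 8-bit position and width match the ASSERTs elsewhere in this diff, while the alignment value is an assumption for illustration only:

#include <cstdint>
#include <cstdio>

constexpr uint32_t kSizeTagPos = 8;           // matches the ASSERTs in this diff
constexpr uint32_t kSizeTagSize = 8;          // matches the ASSERTs in this diff
constexpr uint32_t kObjectAlignmentLog2 = 4;  // assumed 16-byte alignment
constexpr uint32_t kSizeTagMaxSizeTag =
    ((1u << kSizeTagSize) - 1) << kObjectAlignmentLog2;

// size << (pos - log2) == (size / alignment) << pos, i.e. the size in
// allocation units placed into the tag field; oversized objects store 0.
uint32_t EncodeSizeTag(uint32_t size_in_bytes) {
  const uint32_t shift = kSizeTagPos - kObjectAlignmentLog2;
  return size_in_bytes <= kSizeTagMaxSizeTag ? (size_in_bytes << shift) : 0;
}

int main() {
  std::printf("%u\n", EncodeSizeTag(64) >> kSizeTagPos);  // 64 bytes -> 4 units
  return 0;
}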

View file

@ -1776,7 +1776,7 @@ void AsmIntrinsifier::Object_setHash(Assembler* assembler,
// R0: Untagged address of header word (ldxr/stxr do not support offsets).
__ sub(R0, R0, Operand(kHeapObjectTag));
__ SmiUntag(R1);
__ LslImmediate(R1, R1, target::ObjectLayout::kHashTagPos);
__ LslImmediate(R1, R1, target::UntaggedObject::kHashTagPos);
Label retry;
__ Bind(&retry);
__ ldxr(R2, R0, kEightBytes);
@ -2008,7 +2008,7 @@ void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
// R1: Untagged address of header word (ldxr/stxr do not support offsets).
__ sub(R1, R1, Operand(kHeapObjectTag));
__ LslImmediate(R0, R0, target::ObjectLayout::kHashTagPos);
__ LslImmediate(R0, R0, target::UntaggedObject::kHashTagPos);
Label retry;
__ Bind(&retry);
__ ldxr(R2, R1, kEightBytes);
@ -2016,7 +2016,7 @@ void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
__ stxr(R4, R2, R1, kEightBytes);
__ cbnz(&retry, R4);
__ LsrImmediate(R0, R0, target::ObjectLayout::kHashTagPos);
__ LsrImmediate(R0, R0, target::UntaggedObject::kHashTagPos);
__ SmiTag(R0);
__ ret();
}
@ -2075,10 +2075,10 @@ static void TryAllocateString(Assembler* assembler,
// R1: new object end address.
// R2: allocation size.
{
const intptr_t shift = target::ObjectLayout::kTagBitsSizeTagPos -
const intptr_t shift = target::UntaggedObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2;
__ CompareImmediate(R2, target::ObjectLayout::kSizeTagMaxSizeTag);
__ CompareImmediate(R2, target::UntaggedObject::kSizeTagMaxSizeTag);
__ LslImmediate(R2, R2, shift);
__ csel(R2, R2, ZR, LS);

View file

@ -1992,9 +1992,9 @@ static void TryAllocateString(Assembler* assembler,
// EDI: allocation size.
{
Label size_tag_overflow, done;
__ cmpl(EDI, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag));
__ cmpl(EDI, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
__ shll(EDI, Immediate(target::ObjectLayout::kTagBitsSizeTagPos -
__ shll(EDI, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done, Assembler::kNearJump);

View file

@ -1715,7 +1715,7 @@ void AsmIntrinsifier::Object_setHash(Assembler* assembler,
__ movq(RAX, Address(RSP, +2 * target::kWordSize)); // Object.
__ movq(RDX, Address(RSP, +1 * target::kWordSize)); // Value.
__ SmiUntag(RDX);
__ shlq(RDX, Immediate(target::ObjectLayout::kHashTagPos));
__ shlq(RDX, Immediate(target::UntaggedObject::kHashTagPos));
// lock+orq is an atomic read-modify-write.
__ lock();
__ orq(FieldAddress(RAX, target::Object::tags_offset()), RDX);
@ -1953,11 +1953,11 @@ void AsmIntrinsifier::OneByteString_getHashCode(Assembler* assembler,
__ j(NOT_EQUAL, &set_hash_code, Assembler::kNearJump);
__ incq(RAX);
__ Bind(&set_hash_code);
__ shlq(RAX, Immediate(target::ObjectLayout::kHashTagPos));
__ shlq(RAX, Immediate(target::UntaggedObject::kHashTagPos));
// lock+orq is an atomic read-modify-write.
__ lock();
__ orq(FieldAddress(RBX, target::Object::tags_offset()), RAX);
__ sarq(RAX, Immediate(target::ObjectLayout::kHashTagPos));
__ sarq(RAX, Immediate(target::UntaggedObject::kHashTagPos));
__ SmiTag(RAX);
__ ret();
}
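
Both intrinsics above treat the identity hash as a bit field in the upper part of the object's header word: the hash is shifted up to kHashTagPos, or-ed in with a single atomic read-modify-write (lock orq here, an ldxr/stxr loop on ARM64), and recovered by shifting back down. For the string case every racing writer computes the same hash from the same contents, so the lock-free or is harmless. A rough model in portable C++; the field position is an assumed value, only the shift/or/shift-back pattern is taken from the code above:

#include <atomic>
#include <cstdint>

constexpr uint64_t kHashTagPos = 32;  // assumed position of the hash field

// Publish a hash into the header word with one atomic or, mirroring the
// shlq / lock orq sequence above.
void SetHeaderHash(std::atomic<uint64_t>* tags, uint32_t hash) {
  tags->fetch_or(static_cast<uint64_t>(hash) << kHashTagPos,
                 std::memory_order_relaxed);
}

// Recover the hash by shifting the field back down.
uint32_t GetHeaderHash(const std::atomic<uint64_t>* tags) {
  return static_cast<uint32_t>(tags->load(std::memory_order_relaxed) >>
                               kHashTagPos);
}
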
@ -2020,9 +2020,9 @@ static void TryAllocateString(Assembler* assembler,
// RDI: allocation size.
{
Label size_tag_overflow, done;
__ cmpq(RDI, Immediate(target::ObjectLayout::kSizeTagMaxSizeTag));
__ cmpq(RDI, Immediate(target::UntaggedObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
__ shlq(RDI, Immediate(target::ObjectLayout::kTagBitsSizeTagPos -
__ shlq(RDI, Immediate(target::UntaggedObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done, Assembler::kNearJump);

View file

@ -1739,7 +1739,7 @@ void Assembler::StoreIntoObject(Register object,
// in progress
// If so, call the WriteBarrier stub, which will either add object to the
// store buffer (case 1) or add value to the marking stack (case 2).
// Compare ObjectLayout::StorePointer.
// Compare UntaggedObject::StorePointer.
Label done;
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done);
@ -1752,7 +1752,7 @@ void Assembler::StoreIntoObject(Register object,
ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
and_(TMP, LR,
Operand(TMP, LSR, target::ObjectLayout::kBarrierOverlapShift));
Operand(TMP, LSR, target::UntaggedObject::kBarrierOverlapShift));
ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
tst(TMP, Operand(LR));
});
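
The ldrb/and/tst sequence above is the combined fast-path filter for both barrier cases named in the comment: the object's header byte, shifted right by kBarrierOverlapShift, is and-ed with the value's header byte and with the per-thread write_barrier_mask, and only if a bit survives does the WriteBarrier stub run (remembering the object, or pushing the value onto the marking stack). The same predicate written out as a plain C++ sketch; the byte-sized view mirrors the ldrb loads and the names come from the code above:

#include <cstdint>

// Filter evaluated before calling the WriteBarrier stub: non-zero means the
// store needs either the generational or the incremental-marking barrier.
bool NeedsWriteBarrier(uint8_t object_tags,   // low byte of object's header
                       uint8_t value_tags,    // low byte of value's header
                       uint8_t barrier_mask,  // Thread::write_barrier_mask
                       int barrier_overlap_shift) {
  return ((value_tags & (object_tags >> barrier_overlap_shift)) &
          barrier_mask) != 0;
}

When no marking is in progress the relevant bits can simply be cleared from the mask, which is presumably why the mask is loaded from the Thread rather than baked into the generated code as a constant.
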
@ -1810,7 +1810,7 @@ void Assembler::StoreIntoArray(Register object,
// in progress
// If so, call the WriteBarrier stub, which will either add object to the
// store buffer (case 1) or add value to the marking stack (case 2).
// Compare ObjectLayout::StorePointer.
// Compare UntaggedObject::StorePointer.
Label done;
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done);
@ -1824,7 +1824,7 @@ void Assembler::StoreIntoArray(Register object,
ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
ldrb(LR, FieldAddress(value, target::Object::tags_offset()));
and_(TMP, LR,
Operand(TMP, LSR, target::ObjectLayout::kBarrierOverlapShift));
Operand(TMP, LSR, target::UntaggedObject::kBarrierOverlapShift));
ldr(LR, Address(THR, target::Thread::write_barrier_mask_offset()));
tst(TMP, Operand(LR));
});
@ -1867,7 +1867,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
StoreIntoObjectFilter(object, value, &done, kValueCanBeSmi, kJumpToNoUpdate);
ldrb(TMP, FieldAddress(object, target::Object::tags_offset()));
tst(TMP, Operand(1 << target::ObjectLayout::kOldAndNotRememberedBit));
tst(TMP, Operand(1 << target::UntaggedObject::kOldAndNotRememberedBit));
b(&done, ZERO);
Stop("Store buffer update is required");
@ -1990,29 +1990,29 @@ void Assembler::StoreIntoSmiField(const Address& dest, Register value) {
}
void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
Lsr(result, tags, Operand(target::ObjectLayout::kClassIdTagPos), AL);
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
Lsr(result, tags, Operand(target::UntaggedObject::kClassIdTagPos), AL);
}
void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
ASSERT(target::ObjectLayout::kSizeTagPos == 8);
ASSERT(target::ObjectLayout::kSizeTagSize == 8);
ASSERT(target::UntaggedObject::kSizeTagPos == 8);
ASSERT(target::UntaggedObject::kSizeTagSize == 8);
Lsr(result, tags,
Operand(target::ObjectLayout::kSizeTagPos -
Operand(target::UntaggedObject::kSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2),
AL);
AndImmediate(result, result,
(Utils::NBitMask(target::ObjectLayout::kSizeTagSize)
(Utils::NBitMask(target::UntaggedObject::kSizeTagSize)
<< target::ObjectAlignment::kObjectAlignmentLog2));
}
void Assembler::LoadClassId(Register result, Register object, Condition cond) {
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
ldrh(result, FieldAddress(object, class_id_offset), cond);
}
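
The two ASSERTs are what make this shortcut legal: with a 16-bit class-id field starting exactly 16 bits into the header, the id occupies its own halfword, so on the little-endian targets here a single halfword load at tags_offset + 2 reads it with no shifting or masking. The same address arithmetic as an ordinary C++ sketch:

#include <cstdint>
#include <cstring>

constexpr intptr_t kClassIdTagPos = 16;   // asserted above
constexpr intptr_t kClassIdTagSize = 16;  // asserted above
constexpr intptr_t kBitsPerByte = 8;
static_assert(kClassIdTagSize == kBitsPerByte * sizeof(uint16_t),
              "class id field is exactly one halfword");

// Equivalent of the ldrh/movzxw load: read the 16-bit class id field directly
// out of the header, assuming a little-endian layout.
uint16_t LoadClassIdSketch(const uint8_t* object_start, intptr_t tags_offset) {
  const intptr_t class_id_offset = tags_offset + kClassIdTagPos / kBitsPerByte;
  uint16_t cid;
  std::memcpy(&cid, object_start + class_id_offset, sizeof(cid));
  return cid;
}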

View file

@ -1254,7 +1254,7 @@ class Assembler : public AssemblerBase {
// before the code can be used.
//
// The necessary information for the "linker" (i.e. the relocation
// information) is stored in [CodeLayout::static_calls_target_table_]: an
// information) is stored in [UntaggedCode::static_calls_target_table_]: an
// entry of the form
//
// (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)

View file

@ -996,7 +996,7 @@ void Assembler::StoreIntoObject(Register object,
// in progress
// If so, call the WriteBarrier stub, which will either add object to the
// store buffer (case 1) or add value to the marking stack (case 2).
// Compare ObjectLayout::StorePointer.
// Compare UntaggedObject::StorePointer.
Label done;
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done);
@ -1006,7 +1006,7 @@ void Assembler::StoreIntoObject(Register object,
ldr(TMP2, FieldAddress(value, target::Object::tags_offset(), kByte),
kUnsignedByte);
and_(TMP, TMP2,
Operand(TMP, LSR, target::ObjectLayout::kBarrierOverlapShift));
Operand(TMP, LSR, target::UntaggedObject::kBarrierOverlapShift));
tst(TMP, Operand(BARRIER_MASK));
b(&done, ZERO);
@ -1064,7 +1064,7 @@ void Assembler::StoreIntoArray(Register object,
// in progress
// If so, call the WriteBarrier stub, which will either add object to the
// store buffer (case 1) or add value to the marking stack (case 2).
// Compare ObjectLayout::StorePointer.
// Compare UntaggedObject::StorePointer.
Label done;
if (can_be_smi == kValueCanBeSmi) {
BranchIfSmi(value, &done);
@ -1074,7 +1074,7 @@ void Assembler::StoreIntoArray(Register object,
ldr(TMP2, FieldAddress(value, target::Object::tags_offset(), kByte),
kUnsignedByte);
and_(TMP, TMP2,
Operand(TMP, LSR, target::ObjectLayout::kBarrierOverlapShift));
Operand(TMP, LSR, target::UntaggedObject::kBarrierOverlapShift));
tst(TMP, Operand(BARRIER_MASK));
b(&done, ZERO);
if (spill_lr) {
@ -1104,7 +1104,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
ldr(TMP, FieldAddress(object, target::Object::tags_offset(), kByte),
kUnsignedByte);
tsti(TMP, Immediate(1 << target::ObjectLayout::kOldAndNotRememberedBit));
tsti(TMP, Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
b(&done, ZERO);
Stop("Store buffer update is required");
@ -1156,25 +1156,26 @@ void Assembler::StoreInternalPointer(Register object,
}
void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
LsrImmediate(result, tags, target::ObjectLayout::kClassIdTagPos, kFourBytes);
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
LsrImmediate(result, tags, target::UntaggedObject::kClassIdTagPos,
kFourBytes);
}
void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
ASSERT(target::ObjectLayout::kSizeTagPos == 8);
ASSERT(target::ObjectLayout::kSizeTagSize == 8);
ubfx(result, tags, target::ObjectLayout::kSizeTagPos,
target::ObjectLayout::kSizeTagSize);
ASSERT(target::UntaggedObject::kSizeTagPos == 8);
ASSERT(target::UntaggedObject::kSizeTagSize == 8);
ubfx(result, tags, target::UntaggedObject::kSizeTagPos,
target::UntaggedObject::kSizeTagSize);
LslImmediate(result, result, target::ObjectAlignment::kObjectAlignmentLog2);
}
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
LoadFromOffset(result, object, class_id_offset - kHeapObjectTag,
kUnsignedTwoBytes);
}

View file

@ -1870,7 +1870,7 @@ class Assembler : public AssemblerBase {
// the code can be used.
//
// The necessary information for the "linker" (i.e. the relocation
// information) is stored in [CodeLayout::static_calls_target_table_]: an
// information) is stored in [UntaggedCode::static_calls_target_table_]: an
// entry of the form
//
// (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)

View file

@ -2643,7 +2643,7 @@ ASSEMBLER_TEST_GENERATE(CompareObjectNull, assembler) {
}
ASSEMBLER_TEST_RUN(CompareObjectNull, test) {
EXPECT_EQ(static_cast<uword>(Bool::True().raw()),
EXPECT_EQ(static_cast<uword>(Bool::True().ptr()),
test->InvokeWithCodeAndThread<uword>());
}
@ -2657,7 +2657,7 @@ ASSEMBLER_TEST_GENERATE(LoadObjectTrue, assembler) {
}
ASSEMBLER_TEST_RUN(LoadObjectTrue, test) {
EXPECT_EQ(static_cast<uword>(Bool::True().raw()),
EXPECT_EQ(static_cast<uword>(Bool::True().ptr()),
test->InvokeWithCodeAndThread<uword>());
}
@ -2671,7 +2671,7 @@ ASSEMBLER_TEST_GENERATE(LoadObjectFalse, assembler) {
}
ASSEMBLER_TEST_RUN(LoadObjectFalse, test) {
EXPECT_EQ(static_cast<uword>(Bool::False().raw()),
EXPECT_EQ(static_cast<uword>(Bool::False().ptr()),
test->InvokeWithCodeAndThread<uword>());
}

View file

@ -1990,7 +1990,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
StoreIntoObjectFilter(object, value, &done, kValueCanBeSmi, kJumpToNoUpdate);
testb(FieldAddress(object, target::Object::tags_offset()),
Immediate(1 << target::ObjectLayout::kOldAndNotRememberedBit));
Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
j(ZERO, &done, Assembler::kNearJump);
Stop("Store buffer update is required");
@ -2693,11 +2693,11 @@ void Assembler::EmitGenericShift(int rm,
}
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
movzxw(result, FieldAddress(object, class_id_offset));
}
@ -2723,11 +2723,11 @@ void Assembler::SmiUntagOrCheckClass(Register object,
Register scratch,
Label* is_smi) {
ASSERT(kSmiTagShift == 1);
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
// Untag optimistically. Tag bit is shifted into the CARRY.
SmiUntag(object);
@ -2754,7 +2754,7 @@ void Assembler::LoadClassIdMayBeSmi(Register result, Register object) {
} else {
ASSERT(result != object);
static const intptr_t kSmiCidSource =
kSmiCid << target::ObjectLayout::kClassIdTagPos;
kSmiCid << target::UntaggedObject::kClassIdTagPos;
// Make a dummy "Object" whose cid is kSmiCid.
movl(result, Immediate(reinterpret_cast<int32_t>(&kSmiCidSource) + 1));
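
The "+ 1" in the last line is the heap-object tag bit: kSmiCidSource is a static word whose class-id field already reads kSmiCid, so tagging its address produces something that the regular class-id load treats like a pointer to a real object header, letting the Smi case fall through the same path as heap objects instead of branching. A hedged illustration of the trick; the class-id position matches the ASSERTs in this diff, while the cid value and tag bit are stand-ins:

#include <cstdint>

constexpr intptr_t kClassIdTagPos = 16;  // matches the ASSERTs in this diff
constexpr intptr_t kSmiCid = 59;         // illustrative value only
constexpr intptr_t kHeapObjectTag = 1;   // assumed tag bit for heap pointers

// A fake header word whose class-id field contains kSmiCid.
static const intptr_t kSmiCidSource = kSmiCid << kClassIdTagPos;

// Tagging the address of that word yields a "pointer" which, once untagged
// and read as an object header, reports class id kSmiCid.
inline uintptr_t FakeSmiObjectPointer() {
  return reinterpret_cast<uintptr_t>(&kSmiCidSource) + kHeapObjectTag;
}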

View file

@ -29,38 +29,38 @@ ASSEMBLER_TEST_RUN(StoreIntoObject, test) {
Smi& smi = Smi::Handle();
Thread* thread = Thread::Current();
EXPECT(old_array.raw() == grow_old_array.data());
EXPECT(!thread->StoreBufferContains(grow_old_array.raw()));
EXPECT(old_array.raw() == grow_new_array.data());
EXPECT(!thread->StoreBufferContains(grow_new_array.raw()));
EXPECT(old_array.ptr() == grow_old_array.data());
EXPECT(!thread->StoreBufferContains(grow_old_array.ptr()));
EXPECT(old_array.ptr() == grow_new_array.data());
EXPECT(!thread->StoreBufferContains(grow_new_array.ptr()));
// Store Smis into the old object.
for (int i = -128; i < 128; i++) {
smi = Smi::New(i);
TEST_CODE(smi.raw(), grow_old_array.raw(), thread);
EXPECT(static_cast<ArrayPtr>(smi.raw()) == grow_old_array.data());
EXPECT(!thread->StoreBufferContains(grow_old_array.raw()));
TEST_CODE(smi.ptr(), grow_old_array.ptr(), thread);
EXPECT(static_cast<ArrayPtr>(smi.ptr()) == grow_old_array.data());
EXPECT(!thread->StoreBufferContains(grow_old_array.ptr()));
}
// Store an old object into the old object.
TEST_CODE(old_array.raw(), grow_old_array.raw(), thread);
EXPECT(old_array.raw() == grow_old_array.data());
EXPECT(!thread->StoreBufferContains(grow_old_array.raw()));
TEST_CODE(old_array.ptr(), grow_old_array.ptr(), thread);
EXPECT(old_array.ptr() == grow_old_array.data());
EXPECT(!thread->StoreBufferContains(grow_old_array.ptr()));
// Store a new object into the old object.
TEST_CODE(new_array.raw(), grow_old_array.raw(), thread);
EXPECT(new_array.raw() == grow_old_array.data());
EXPECT(thread->StoreBufferContains(grow_old_array.raw()));
TEST_CODE(new_array.ptr(), grow_old_array.ptr(), thread);
EXPECT(new_array.ptr() == grow_old_array.data());
EXPECT(thread->StoreBufferContains(grow_old_array.ptr()));
// Store a new object into the new object.
TEST_CODE(new_array.raw(), grow_new_array.raw(), thread);
EXPECT(new_array.raw() == grow_new_array.data());
EXPECT(!thread->StoreBufferContains(grow_new_array.raw()));
TEST_CODE(new_array.ptr(), grow_new_array.ptr(), thread);
EXPECT(new_array.ptr() == grow_new_array.data());
EXPECT(!thread->StoreBufferContains(grow_new_array.ptr()));
// Store an old object into the new object.
TEST_CODE(old_array.raw(), grow_new_array.raw(), thread);
EXPECT(old_array.raw() == grow_new_array.data());
EXPECT(!thread->StoreBufferContains(grow_new_array.raw()));
TEST_CODE(old_array.ptr(), grow_new_array.ptr(), thread);
EXPECT(old_array.ptr() == grow_new_array.data());
EXPECT(!thread->StoreBufferContains(grow_new_array.ptr()));
}
} // namespace dart

View file

@ -1390,14 +1390,14 @@ void Assembler::StoreIntoObject(Register object,
// in progress
// If so, call the WriteBarrier stub, which will either add object to the
// store buffer (case 1) or add value to the marking stack (case 2).
// Compare ObjectLayout::StorePointer.
// Compare UntaggedObject::StorePointer.
Label done;
if (can_be_smi == kValueCanBeSmi) {
testq(value, Immediate(kSmiTagMask));
j(ZERO, &done, kNearJump);
}
movb(TMP, FieldAddress(object, target::Object::tags_offset()));
shrl(TMP, Immediate(target::ObjectLayout::kBarrierOverlapShift));
shrl(TMP, Immediate(target::UntaggedObject::kBarrierOverlapShift));
andl(TMP, Address(THR, target::Thread::write_barrier_mask_offset()));
testb(FieldAddress(value, target::Object::tags_offset()), TMP);
j(ZERO, &done, kNearJump);
@ -1442,14 +1442,14 @@ void Assembler::StoreIntoArray(Register object,
// in progress
// If so, call the WriteBarrier stub, which will either add object to the
// store buffer (case 1) or add value to the marking stack (case 2).
// Compare ObjectLayout::StorePointer.
// Compare UntaggedObject::StorePointer.
Label done;
if (can_be_smi == kValueCanBeSmi) {
testq(value, Immediate(kSmiTagMask));
j(ZERO, &done, kNearJump);
}
movb(TMP, FieldAddress(object, target::Object::tags_offset()));
shrl(TMP, Immediate(target::ObjectLayout::kBarrierOverlapShift));
shrl(TMP, Immediate(target::UntaggedObject::kBarrierOverlapShift));
andl(TMP, Address(THR, target::Thread::write_barrier_mask_offset()));
testb(FieldAddress(value, target::Object::tags_offset()), TMP);
j(ZERO, &done, kNearJump);
@ -1477,7 +1477,7 @@ void Assembler::StoreIntoObjectNoBarrier(Register object,
StoreIntoObjectFilter(object, value, &done, kValueCanBeSmi, kJumpToNoUpdate);
testb(FieldAddress(object, target::Object::tags_offset()),
Immediate(1 << target::ObjectLayout::kOldAndNotRememberedBit));
Immediate(1 << target::UntaggedObject::kOldAndNotRememberedBit));
j(ZERO, &done, Assembler::kNearJump);
Stop("Store buffer update is required");
@ -2160,29 +2160,29 @@ void Assembler::EmitGenericShift(bool wide,
}
void Assembler::ExtractClassIdFromTags(Register result, Register tags) {
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
movl(result, tags);
shrl(result, Immediate(target::ObjectLayout::kClassIdTagPos));
shrl(result, Immediate(target::UntaggedObject::kClassIdTagPos));
}
void Assembler::ExtractInstanceSizeFromTags(Register result, Register tags) {
ASSERT(target::ObjectLayout::kSizeTagPos == 8);
ASSERT(target::ObjectLayout::kSizeTagSize == 8);
ASSERT(target::UntaggedObject::kSizeTagPos == 8);
ASSERT(target::UntaggedObject::kSizeTagSize == 8);
movzxw(result, tags);
shrl(result, Immediate(target::ObjectLayout::kSizeTagPos -
shrl(result, Immediate(target::UntaggedObject::kSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2));
AndImmediate(result,
Immediate(Utils::NBitMask(target::ObjectLayout::kSizeTagSize)
Immediate(Utils::NBitMask(target::UntaggedObject::kSizeTagSize)
<< target::ObjectAlignment::kObjectAlignmentLog2));
}
void Assembler::LoadClassId(Register result, Register object) {
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
movzxw(result, FieldAddress(object, class_id_offset));
}
@ -2207,11 +2207,11 @@ void Assembler::SmiUntagOrCheckClass(Register object,
intptr_t class_id,
Label* is_smi) {
ASSERT(kSmiTagShift == 1);
ASSERT(target::ObjectLayout::kClassIdTagPos == 16);
ASSERT(target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(target::UntaggedObject::kClassIdTagPos == 16);
ASSERT(target::UntaggedObject::kClassIdTagSize == 16);
const intptr_t class_id_offset =
target::Object::tags_offset() +
target::ObjectLayout::kClassIdTagPos / kBitsPerByte;
target::UntaggedObject::kClassIdTagPos / kBitsPerByte;
// Untag optimistically. Tag bit is shifted into the CARRY.
SmiUntag(object);

View file

@ -1037,7 +1037,7 @@ class Assembler : public AssemblerBase {
// before the code can be used.
//
// The necessary information for the "linker" (i.e. the relocation
// information) is stored in [CodeLayout::static_calls_target_table_]: an
// information) is stored in [UntaggedCode::static_calls_target_table_]: an
// entry of the form
//
// (Code::kPcRelativeCall & pc_offset, <target-code>, <target-function>)

View file

@ -309,20 +309,20 @@ void Disassembler::DisassembleCodeHelper(const char* function_fullname,
String& var_name = String::Handle(zone);
for (intptr_t i = 0; i < var_desc_length; i++) {
var_name = var_descriptors.GetName(i);
LocalVarDescriptorsLayout::VarInfo var_info;
UntaggedLocalVarDescriptors::VarInfo var_info;
var_descriptors.GetInfo(i, &var_info);
const int8_t kind = var_info.kind();
if (kind == LocalVarDescriptorsLayout::kSavedCurrentContext) {
if (kind == UntaggedLocalVarDescriptors::kSavedCurrentContext) {
THR_Print(" saved current CTX reg offset %d\n", var_info.index());
} else {
if (kind == LocalVarDescriptorsLayout::kContextLevel) {
if (kind == UntaggedLocalVarDescriptors::kContextLevel) {
THR_Print(" context level %d scope %d", var_info.index(),
var_info.scope_id);
} else if (kind == LocalVarDescriptorsLayout::kStackVar) {
} else if (kind == UntaggedLocalVarDescriptors::kStackVar) {
THR_Print(" stack var '%s' offset %d", var_name.ToCString(),
var_info.index());
} else {
ASSERT(kind == LocalVarDescriptorsLayout::kContextVar);
ASSERT(kind == UntaggedLocalVarDescriptors::kContextVar);
THR_Print(" context var '%s' level %d offset %d",
var_name.ToCString(), var_info.scope_id, var_info.index());
}
@ -390,9 +390,9 @@ void Disassembler::DisassembleCodeHelper(const char* function_fullname,
dst_type = AbstractType::null();
if (object.IsAbstractType()) {
dst_type = AbstractType::Cast(object).raw();
dst_type = AbstractType::Cast(object).ptr();
} else if (object.IsCode()) {
code = Code::Cast(object).raw();
code = Code::Cast(object).ptr();
}
auto kind = Code::KindField::decode(kind_type_and_offset.Value());

View file

@ -66,7 +66,7 @@ class BlockBuilder : public ValueObject {
const auto representation = FlowGraph::ReturnRepresentationOf(function);
ReturnInstr* instr = new ReturnInstr(
Source(), value, CompilerState::Current().GetNextDeoptId(),
PcDescriptorsLayout::kInvalidYieldIndex, representation);
UntaggedPcDescriptors::kInvalidYieldIndex, representation);
AddInstruction(instr);
entry_->set_last_instruction(instr);
return instr;

View file

@ -68,12 +68,12 @@ bool ConstantPropagator::SetValue(Definition* definition, const Object& value) {
//
// ASSERT(IsUnknown(definition->constant_value()) ||
// IsNonConstant(value) ||
// (definition->constant_value().raw() == value.raw()));
// (definition->constant_value().ptr() == value.ptr()));
//
// But the final disjunct is not true (e.g., mint or double constants are
// heap-allocated and so not necessarily pointer-equal on each iteration).
if (definition->constant_value().raw() != value.raw()) {
definition->constant_value() = value.raw();
if (definition->constant_value().ptr() != value.ptr()) {
definition->constant_value() = value.ptr();
if (definition->input_use_list() != NULL) {
definition_worklist_.Add(definition);
}
@ -86,7 +86,7 @@ static bool IsIdenticalConstants(const Object& left, const Object& right) {
// This should be kept in line with Identical_comparison (identical.cc)
// (=> Instance::IsIdenticalTo in object.cc).
if (left.raw() == right.raw()) return true;
if (left.ptr() == right.ptr()) return true;
if (left.GetClassId() != right.GetClassId()) return false;
if (left.IsInteger()) {
return Integer::Cast(left).Equals(Integer::Cast(right));
@ -107,7 +107,7 @@ void ConstantPropagator::Join(Object* left, const Object& right) {
// Join(unknown, X) = X
// Join(X, non-constant) = non-constant
if (IsUnknown(*left) || IsNonConstant(right)) {
*left = right.raw();
*left = right.ptr();
return;
}
@ -115,7 +115,7 @@ void ConstantPropagator::Join(Object* left, const Object& right) {
if (IsIdenticalConstants(*left, right)) return;
// Join(X, Y) = non-constant
*left = non_constant_.raw();
*left = non_constant_.ptr();
}
// --------------------------------------------------------------------------
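
The comments above spell out the abstract-value lattice the propagator runs on: a distinguished unknown sentinel at the bottom, individual constants in the middle, and non-constant at the top, with Join only ever moving a value upward. A self-contained sketch of that join on a toy lattice; plain integers stand in for the sentinel Object handles, and the case analysis follows the visible code:

// Toy three-level lattice: Unknown <= Constant(v) <= NonConstant.
struct AbstractValue {
  enum Kind { kUnknown, kConstant, kNonConstant };
  Kind kind;
  int value;  // meaningful only when kind == kConstant

  static AbstractValue Unknown() { return {kUnknown, 0}; }
  static AbstractValue Constant(int v) { return {kConstant, v}; }
  static AbstractValue NonConstant() { return {kNonConstant, 0}; }
};

// Join(unknown, X) = X, Join(X, non-constant) = non-constant, identical
// constants are left alone, any other pair collapses to non-constant.
void Join(AbstractValue* left, const AbstractValue& right) {
  if (left->kind == AbstractValue::kUnknown ||
      right.kind == AbstractValue::kNonConstant) {
    *left = right;
    return;
  }
  if (left->kind == AbstractValue::kConstant &&
      right.kind == AbstractValue::kConstant && left->value == right.value) {
    return;
  }
  *left = AbstractValue::NonConstant();
}

Because each Join either leaves a value in place or moves it up this finite lattice, the fixed-point iteration over the worklists is guaranteed to terminate.
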
@ -254,7 +254,7 @@ void ConstantPropagator::VisitBranch(BranchInstr* instr) {
if (IsNonConstant(value)) {
SetReachable(instr->true_successor());
SetReachable(instr->false_successor());
} else if (value.raw() == Bool::True().raw()) {
} else if (value.ptr() == Bool::True().ptr()) {
SetReachable(instr->true_successor());
} else if (!IsUnknown(value)) { // Any other constant.
SetReachable(instr->false_successor());
@ -875,7 +875,7 @@ void ConstantPropagator::VisitBooleanNegate(BooleanNegateInstr* instr) {
return;
}
if (value.IsBool()) {
bool val = value.raw() != Bool::True().raw();
bool val = value.ptr() != Bool::True().ptr();
SetValue(instr, Bool::Get(val));
} else {
SetValue(instr, non_constant_);
@ -908,7 +908,7 @@ void ConstantPropagator::VisitInstanceOf(InstanceOfInstr* instr) {
SetValue(instr, non_constant_);
}
} else if (IsConstant(value)) {
if (value.IsInstance() && (value.raw() != Object::sentinel().raw())) {
if (value.IsInstance() && (value.ptr() != Object::sentinel().ptr())) {
const Instance& instance = Instance::Cast(value);
if (instr->instantiator_type_arguments()->BindsToConstantNull() &&
instr->function_type_arguments()->BindsToConstantNull()) {
@ -1001,7 +1001,7 @@ void ConstantPropagator::VisitLoadField(LoadFieldInstr* instr) {
} else {
Object& value = Object::Handle();
if (instr->Evaluate(constant, &value)) {
SetValue(instr, Object::ZoneHandle(Z, value.raw()));
SetValue(instr, Object::ZoneHandle(Z, value.ptr()));
return;
}
}
@ -1021,7 +1021,7 @@ void ConstantPropagator::VisitInstantiateType(InstantiateTypeInstr* instr) {
return;
}
if (instantiator_type_args_obj.IsTypeArguments()) {
instantiator_type_args ^= instantiator_type_args_obj.raw();
instantiator_type_args ^= instantiator_type_args_obj.ptr();
} else {
SetValue(instr, non_constant_);
return;
@ -1035,7 +1035,7 @@ void ConstantPropagator::VisitInstantiateType(InstantiateTypeInstr* instr) {
return;
}
if (function_type_args_obj.IsTypeArguments()) {
function_type_args ^= function_type_args_obj.raw();
function_type_args ^= function_type_args_obj.ptr();
} else {
SetValue(instr, non_constant_);
return;
@ -1086,7 +1086,7 @@ void ConstantPropagator::VisitInstantiateTypeArguments(
SetValue(instr, non_constant_);
return;
}
instantiator_type_args ^= instantiator_type_args_obj.raw();
instantiator_type_args ^= instantiator_type_args_obj.ptr();
if (instr->CanShareInstantiatorTypeArguments()) {
SetValue(instr, instantiator_type_args);
return;
@ -1105,7 +1105,7 @@ void ConstantPropagator::VisitInstantiateTypeArguments(
SetValue(instr, non_constant_);
return;
}
function_type_args ^= function_type_args_obj.raw();
function_type_args ^= function_type_args_obj.ptr();
if (instr->CanShareFunctionTypeArguments()) {
SetValue(instr, function_type_args);
return;
@ -1148,7 +1148,7 @@ void ConstantPropagator::VisitBinaryIntegerOp(BinaryIntegerOpInstr* binary_op) {
binary_op->is_truncating(),
binary_op->representation(), T));
if (!result.IsNull()) {
SetValue(binary_op, Integer::ZoneHandle(Z, result.raw()));
SetValue(binary_op, Integer::ZoneHandle(Z, result.ptr()));
return;
}
}
@ -1218,7 +1218,7 @@ void ConstantPropagator::VisitUnaryIntegerOp(UnaryIntegerOpInstr* unary_op) {
Z, Evaluator::UnaryIntegerEvaluate(value, unary_op->op_kind(),
unary_op->representation(), T));
if (!result.IsNull()) {
SetValue(unary_op, Integer::ZoneHandle(Z, result.raw()));
SetValue(unary_op, Integer::ZoneHandle(Z, result.ptr()));
return;
}
}
@ -1712,7 +1712,7 @@ bool ConstantPropagator::TransformDefinition(Definition* defn) {
THR_Print("Constant v%" Pd " = %s\n", defn->ssa_temp_index(),
defn->constant_value().ToCString());
}
constant_value_ = defn->constant_value().raw();
constant_value_ = defn->constant_value().ptr();
if ((constant_value_.IsString() || constant_value_.IsMint() ||
constant_value_.IsDouble()) &&
!constant_value_.IsCanonical()) {

View file

@ -36,7 +36,7 @@ class ConstantPropagator : public FlowGraphVisitor {
static void OptimizeBranches(FlowGraph* graph);
// Used to initialize the abstract value of definitions.
static ObjectPtr Unknown() { return Object::unknown_constant().raw(); }
static ObjectPtr Unknown() { return Object::unknown_constant().ptr(); }
private:
void Analyze();
@ -60,9 +60,9 @@ class ConstantPropagator : public FlowGraphVisitor {
// first one.
void Join(Object* left, const Object& right);
bool IsUnknown(const Object& value) { return value.raw() == unknown_.raw(); }
bool IsUnknown(const Object& value) { return value.ptr() == unknown_.ptr(); }
bool IsNonConstant(const Object& value) {
return value.raw() == non_constant_.raw();
return value.ptr() == non_constant_.ptr();
}
bool IsConstant(const Object& value) {
return !IsNonConstant(value) && !IsUnknown(value);

View file

@ -118,7 +118,7 @@ IntegerPtr Evaluator::BinaryIntegerEvaluate(const Object& left,
result ^= result.Canonicalize(thread);
}
return result.raw();
return result.ptr();
}
IntegerPtr Evaluator::UnaryIntegerEvaluate(const Object& value,
@ -147,7 +147,7 @@ IntegerPtr Evaluator::UnaryIntegerEvaluate(const Object& value,
result ^= result.Canonicalize(thread);
}
return result.raw();
return result.ptr();
}
double Evaluator::EvaluateDoubleOp(const double left,

View file

@ -192,7 +192,7 @@ ConstantInstr* FlowGraph::GetConstant(const Object& object) {
if (constant == nullptr) {
// Otherwise, allocate and add it to the pool.
constant =
new (zone()) ConstantInstr(Object::ZoneHandle(zone(), object.raw()));
new (zone()) ConstantInstr(Object::ZoneHandle(zone(), object.ptr()));
constant->set_ssa_temp_index(alloc_ssa_temp_index());
if (NeedsPairLocation(constant->representation())) {
alloc_ssa_temp_index();
@ -481,7 +481,7 @@ bool FlowGraph::IsReceiver(Definition* def) const {
FlowGraph::ToCheck FlowGraph::CheckForInstanceCall(
InstanceCallInstr* call,
FunctionLayout::Kind kind) const {
UntaggedFunction::Kind kind) const {
if (!FLAG_use_cha_deopt && !isolate()->all_classes_finalized()) {
// Even if class or function are private, lazy class finalization
// may later add overriding methods.
@ -543,7 +543,7 @@ FlowGraph::ToCheck FlowGraph::CheckForInstanceCall(
}
const String& method_name =
(kind == FunctionLayout::kMethodExtractor)
(kind == UntaggedFunction::kMethodExtractor)
? String::Handle(zone(), Field::NameFromGetter(call->function_name()))
: call->function_name();

View file

@ -76,7 +76,7 @@ struct ConstantPoolTrait {
}
static inline bool IsKeyEqual(Pair kv, Key key) {
return kv->value().raw() == key.raw();
return kv->value().ptr() == key.ptr();
}
};
@ -234,7 +234,7 @@ class FlowGraph : public ZoneAllocated {
// Return value indicates that the call needs no check at all,
// just a null check, or a full class check.
ToCheck CheckForInstanceCall(InstanceCallInstr* call,
FunctionLayout::Kind kind) const;
UntaggedFunction::Kind kind) const;
Thread* thread() const { return thread_; }
Zone* zone() const { return thread()->zone(); }

View file

@ -24,8 +24,8 @@ DEFINE_FLAG(int,
// succ/pred/block links are not maintained.
static bool IsSpecialConstant(Definition* def) {
if (auto c = def->AsConstant()) {
return c->value().raw() == Symbols::OptimizedOut().raw() ||
c->value().raw() == Object::ZoneHandle().raw();
return c->value().ptr() == Symbols::OptimizedOut().ptr() ||
c->value().ptr() == Object::ZoneHandle().ptr();
}
return false;
}
@ -141,8 +141,8 @@ static void AssertArgumentsInEnv(FlowGraph* flow_graph, Definition* call) {
->OriginalDefinitionIgnoreBoxingAndConstraints();
ASSERT((arg_def == env_def) ||
(arg_def->IsConstant() && env_def->IsConstant() &&
arg_def->AsConstant()->value().raw() ==
env_def->AsConstant()->value().raw()));
arg_def->AsConstant()->value().ptr() ==
env_def->AsConstant()->value().ptr()));
}
}
}

View file

@ -175,8 +175,8 @@ FlowGraphCompiler::FlowGraphCompiler(
pending_deoptimization_env_(NULL),
deopt_id_to_ic_data_(deopt_id_to_ic_data),
edge_counters_array_(Array::ZoneHandle()) {
ASSERT(flow_graph->parsed_function().function().raw() ==
parsed_function.function().raw());
ASSERT(flow_graph->parsed_function().function().ptr() ==
parsed_function.function().ptr());
if (is_optimizing) {
// No need to collect extra ICData objects created during compilation.
deopt_id_to_ic_data_ = nullptr;
@ -194,8 +194,8 @@ FlowGraphCompiler::FlowGraphCompiler(
#endif
// Make sure that the function is at the position for inline_id 0.
ASSERT(inline_id_to_function.length() >= 1);
ASSERT(inline_id_to_function[0]->raw() ==
flow_graph->parsed_function().function().raw());
ASSERT(inline_id_to_function[0]->ptr() ==
flow_graph->parsed_function().function().ptr());
code_source_map_builder_ = new (zone_)
CodeSourceMapBuilder(zone_, stack_traces_only, caller_inline_id,
inline_id_to_token_pos, inline_id_to_function);
@ -273,7 +273,7 @@ void FlowGraphCompiler::InitCompiler() {
for (intptr_t i = 0; i < num_counters; ++i) {
edge_counters.SetAt(i, Object::smi_zero());
}
edge_counters_array_ = edge_counters.raw();
edge_counters_array_ = edge_counters.ptr();
}
}
@ -292,7 +292,7 @@ bool FlowGraphCompiler::CanOSRFunction() const {
void FlowGraphCompiler::InsertBSSRelocation(BSS::Relocation reloc) {
const intptr_t offset = assembler()->InsertAlignedRelocation(reloc);
AddDescriptor(PcDescriptorsLayout::kBSSRelocation, /*pc_offset=*/offset,
AddDescriptor(UntaggedPcDescriptors::kBSSRelocation, /*pc_offset=*/offset,
/*deopt_id=*/DeoptId::kNone, InstructionSource(),
/*try_index=*/-1);
}
@ -389,7 +389,7 @@ static CatchEntryMove CatchEntryMoveFor(compiler::Assembler* assembler,
intptr_t dst_index) {
if (src.IsConstant()) {
// Skip dead locations.
if (src.constant().raw() == Symbols::OptimizedOut().raw()) {
if (src.constant().ptr() == Symbols::OptimizedOut().ptr()) {
return CatchEntryMove();
}
const intptr_t pool_index =
@ -496,7 +496,7 @@ void FlowGraphCompiler::RecordCatchEntryMoves(Environment* env,
void FlowGraphCompiler::EmitCallsiteMetadata(const InstructionSource& source,
intptr_t deopt_id,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
Environment* env) {
AddCurrentDescriptor(kind, deopt_id, source);
@ -511,7 +511,8 @@ void FlowGraphCompiler::EmitCallsiteMetadata(const InstructionSource& source,
} else {
// Add deoptimization continuation point after the call and before the
// arguments are removed.
AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id_after, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after,
source);
}
}
}
@ -519,7 +520,7 @@ void FlowGraphCompiler::EmitCallsiteMetadata(const InstructionSource& source,
void FlowGraphCompiler::EmitYieldPositionMetadata(
const InstructionSource& source,
intptr_t yield_index) {
AddDescriptor(PcDescriptorsLayout::kOther, assembler()->CodeSize(),
AddDescriptor(UntaggedPcDescriptors::kOther, assembler()->CodeSize(),
DeoptId::kNone, source, CurrentTryIndex(), yield_index);
}
@ -529,7 +530,7 @@ void FlowGraphCompiler::EmitInstructionPrologue(Instruction* instr) {
// Instructions that can be deoptimization targets need to record kDeopt
// PcDescriptor corresponding to their deopt id. GotoInstr records its
// own so that it can control the placement.
AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, instr->deopt_id(),
AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, instr->deopt_id(),
instr->source());
}
AllocateRegistersLocally(instr);
@ -545,7 +546,7 @@ void FlowGraphCompiler::EmitSourceLine(Instruction* instr) {
const Function& function =
*code_source_map_builder_->inline_id_to_function()[inlining_id];
ASSERT(instr->env() == nullptr ||
instr->env()->function().raw() == function.raw());
instr->env()->function().ptr() == function.ptr());
const auto& script = Script::Handle(zone(), function.script());
intptr_t line_nr;
if (script.GetTokenLocation(source.token_pos, &line_nr)) {
@ -802,7 +803,7 @@ void FlowGraphCompiler::SetNeedsStackTrace(intptr_t try_index) {
exception_handlers_list_->SetNeedsStackTrace(try_index);
}
void FlowGraphCompiler::AddDescriptor(PcDescriptorsLayout::Kind kind,
void FlowGraphCompiler::AddDescriptor(UntaggedPcDescriptors::Kind kind,
intptr_t pc_offset,
intptr_t deopt_id,
const InstructionSource& source,
@ -810,7 +811,7 @@ void FlowGraphCompiler::AddDescriptor(PcDescriptorsLayout::Kind kind,
intptr_t yield_index) {
code_source_map_builder_->NoteDescriptor(kind, pc_offset, source);
// Don't emit deopt-descriptors in AOT mode.
if (FLAG_precompiled_mode && (kind == PcDescriptorsLayout::kDeopt)) return;
if (FLAG_precompiled_mode && (kind == UntaggedPcDescriptors::kDeopt)) return;
// Use the token position of the original call in the root function if source
// has an inlining id.
const auto& root_pos = code_source_map_builder_->RootPosition(source);
@ -819,7 +820,7 @@ void FlowGraphCompiler::AddDescriptor(PcDescriptorsLayout::Kind kind,
}
// Uses current pc position and try-index.
void FlowGraphCompiler::AddCurrentDescriptor(PcDescriptorsLayout::Kind kind,
void FlowGraphCompiler::AddCurrentDescriptor(UntaggedPcDescriptors::Kind kind,
intptr_t deopt_id,
const InstructionSource& source) {
AddDescriptor(kind, assembler()->CodeSize(), deopt_id, source,
@ -1200,7 +1201,7 @@ void FlowGraphCompiler::FinalizePcDescriptors(const Code& code) {
ArrayPtr FlowGraphCompiler::CreateDeoptInfo(compiler::Assembler* assembler) {
// No deopt information if we precompile (no deoptimization allowed).
if (FLAG_precompiled_mode) {
return Array::empty_array().raw();
return Array::empty_array().ptr();
}
// For functions with optional arguments, all incoming arguments are copied
// to spill slots. The deoptimization environment does not track them.
@ -1211,7 +1212,7 @@ ArrayPtr FlowGraphCompiler::CreateDeoptInfo(compiler::Assembler* assembler) {
intptr_t deopt_info_table_size = DeoptTable::SizeFor(deopt_infos_.length());
if (deopt_info_table_size == 0) {
return Object::empty_array().raw();
return Object::empty_array().ptr();
} else {
const Array& array =
Array::Handle(Array::New(deopt_info_table_size, Heap::kOld));
@ -1225,7 +1226,7 @@ ArrayPtr FlowGraphCompiler::CreateDeoptInfo(compiler::Assembler* assembler) {
deopt_infos_[i]->reason(), deopt_infos_[i]->flags());
DeoptTable::SetEntry(array, i, offset, info, reason_and_flags);
}
return array.raw();
return array.ptr();
}
}
@ -1255,8 +1256,8 @@ void FlowGraphCompiler::FinalizeVarDescriptors(const Code& code) {
// descriptor for IrregexpFunction.
ASSERT(parsed_function().scope() == nullptr);
var_descs = LocalVarDescriptors::New(1);
LocalVarDescriptorsLayout::VarInfo info;
info.set_kind(LocalVarDescriptorsLayout::kSavedCurrentContext);
UntaggedLocalVarDescriptors::VarInfo info;
info.set_kind(UntaggedLocalVarDescriptors::kSavedCurrentContext);
info.scope_id = 0;
info.begin_pos = TokenPosition::kMinSource;
info.end_pos = TokenPosition::kMinSource;
@ -1391,7 +1392,7 @@ bool FlowGraphCompiler::TryIntrinsifyHelper() {
void FlowGraphCompiler::GenerateStubCall(const InstructionSource& source,
const Code& stub,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
intptr_t deopt_id,
Environment* env) {
@ -1485,16 +1486,16 @@ void FlowGraphCompiler::GenerateStaticCall(intptr_t deopt_id,
args_info.size_with_type_args, deopt_id, source,
locs, entry_kind);
} else {
ICData& call_ic_data = ICData::ZoneHandle(zone(), ic_data.raw());
ICData& call_ic_data = ICData::ZoneHandle(zone(), ic_data.ptr());
if (call_ic_data.IsNull()) {
const intptr_t kNumArgsChecked = 0;
call_ic_data =
GetOrAddStaticCallICData(deopt_id, function, arguments_descriptor,
kNumArgsChecked, rebind_rule)
->raw();
->ptr();
call_ic_data = call_ic_data.Original();
}
AddCurrentDescriptor(PcDescriptorsLayout::kRewind, deopt_id, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kRewind, deopt_id, source);
EmitUnoptimizedStaticCall(args_info.size_with_type_args, deopt_id, source,
locs, call_ic_data, entry_kind);
}
@ -1972,12 +1973,12 @@ const ICData* FlowGraphCompiler::GetOrAddInstanceCallICData(
((*deopt_id_to_ic_data_)[deopt_id] != NULL)) {
const ICData* res = (*deopt_id_to_ic_data_)[deopt_id];
ASSERT(res->deopt_id() == deopt_id);
ASSERT(res->target_name() == target_name.raw());
ASSERT(res->target_name() == target_name.ptr());
ASSERT(res->NumArgsTested() == num_args_tested);
ASSERT(res->TypeArgsLen() ==
ArgumentsDescriptor(arguments_descriptor).TypeArgsLen());
ASSERT(!res->is_static_call());
ASSERT(res->receivers_static_type() == receiver_type.raw());
ASSERT(res->receivers_static_type() == receiver_type.ptr());
return res;
}
@ -2141,7 +2142,7 @@ bool FlowGraphCompiler::LookupMethodFor(int class_id,
Function::Handle(zone, Resolver::ResolveDynamicForReceiverClass(
cls, name, args_desc, allow_add));
if (target_function.IsNull()) return false;
*fn_return = target_function.raw();
*fn_return = target_function.ptr();
return true;
}
@ -2255,8 +2256,9 @@ void FlowGraphCompiler::EmitTestAndCall(const CallTargets& targets,
// Do not use the code from the function, but let the code be patched so
// that we can record the outgoing edges to other code.
const Function& function = *targets.TargetAt(smi_case)->target;
GenerateStaticDartCall(deopt_id, source_index, PcDescriptorsLayout::kOther,
locs, function, entry_kind);
GenerateStaticDartCall(deopt_id, source_index,
UntaggedPcDescriptors::kOther, locs, function,
entry_kind);
__ Drop(args_info.size_with_type_args);
if (match_found != NULL) {
__ Jump(match_found);
@ -2305,8 +2307,9 @@ void FlowGraphCompiler::EmitTestAndCall(const CallTargets& targets,
// Do not use the code from the function, but let the code be patched so
// that we can record the outgoing edges to other code.
const Function& function = *targets.TargetAt(i)->target;
GenerateStaticDartCall(deopt_id, source_index, PcDescriptorsLayout::kOther,
locs, function, entry_kind);
GenerateStaticDartCall(deopt_id, source_index,
UntaggedPcDescriptors::kOther, locs, function,
entry_kind);
__ Drop(args_info.size_with_type_args);
if (!is_last_check || add_megamorphic_call) {
__ Jump(match_found);
@ -2793,7 +2796,7 @@ void FlowGraphCompiler::GenerateInstanceOf(const InstructionSource& source,
__ LoadUniqueObject(TypeTestABI::kDstTypeReg, type);
__ LoadUniqueObject(TypeTestABI::kSubtypeTestCacheReg, test_cache);
GenerateStubCall(source, StubCode::InstanceOf(),
/*kind=*/PcDescriptorsLayout::kOther, locs, deopt_id);
/*kind=*/UntaggedPcDescriptors::kOther, locs, deopt_id);
__ Jump(&done, compiler::Assembler::kNearJump);
}
__ Bind(&is_not_instance);
@ -2835,7 +2838,7 @@ SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
}
GenerateBoolToJump(TypeTestABI::kSubtypeTestCacheResultReg, is_instance_lbl,
is_not_instance_lbl);
return type_test_cache.raw();
return type_test_cache.ptr();
}
// Generates an assignable check for a given object. Emits no code if the
@ -2928,7 +2931,7 @@ void FlowGraphCompiler::GenerateTTSCall(const InstructionSource& source,
} else {
GenerateIndirectTTSCall(assembler(), reg_with_type, sub_type_cache_index);
}
EmitCallsiteMetadata(source, deopt_id, PcDescriptorsLayout::kOther, locs);
EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs);
}
// Optimize assignable type check by adding inlined tests for:
@ -3173,7 +3176,7 @@ void ThrowErrorSlowPathCode::EmitNativeCode(FlowGraphCompiler* compiler) {
__ CallRuntime(runtime_entry_, num_args);
}
const intptr_t deopt_id = instruction()->deopt_id();
compiler->AddDescriptor(PcDescriptorsLayout::kOther,
compiler->AddDescriptor(UntaggedPcDescriptors::kOther,
compiler->assembler()->CodeSize(), deopt_id,
instruction()->source(), try_index_);
AddMetadataForRuntimeCall(compiler);

View file

@ -619,27 +619,27 @@ class FlowGraphCompiler : public ValueObject {
void GenerateStubCall(const InstructionSource& source,
const Code& stub,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
intptr_t deopt_id = DeoptId::kNone,
Environment* env = nullptr);
void GeneratePatchableCall(const InstructionSource& source,
const Code& stub,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs);
void GenerateDartCall(intptr_t deopt_id,
const InstructionSource& source,
const Code& stub,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind = Code::EntryKind::kNormal);
void GenerateStaticDartCall(
intptr_t deopt_id,
const InstructionSource& source,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind = Code::EntryKind::kNormal);
@ -802,7 +802,7 @@ class FlowGraphCompiler : public ValueObject {
// `pending_deoptimization_env`.
void EmitCallsiteMetadata(const InstructionSource& source,
intptr_t deopt_id,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
Environment* env = nullptr);
@ -841,16 +841,16 @@ class FlowGraphCompiler : public ValueObject {
const Array& handler_types,
bool needs_stacktrace);
void SetNeedsStackTrace(intptr_t try_index);
void AddCurrentDescriptor(PcDescriptorsLayout::Kind kind,
void AddCurrentDescriptor(UntaggedPcDescriptors::Kind kind,
intptr_t deopt_id,
const InstructionSource& source);
void AddDescriptor(
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
intptr_t pc_offset,
intptr_t deopt_id,
const InstructionSource& source,
intptr_t try_index,
intptr_t yield_index = PcDescriptorsLayout::kInvalidYieldIndex);
intptr_t yield_index = UntaggedPcDescriptors::kInvalidYieldIndex);
// Add NullCheck information for the current PC.
void AddNullCheck(const InstructionSource& source, const String& name);
@ -952,7 +952,7 @@ class FlowGraphCompiler : public ValueObject {
void AddStubCallTarget(const Code& code);
void AddDispatchTableCallTarget(const compiler::TableSelector* selector);
ArrayPtr edge_counters_array() const { return edge_counters_array_.raw(); }
ArrayPtr edge_counters_array() const { return edge_counters_array_.ptr(); }
ArrayPtr InliningIdToFunction() const;
@ -1134,7 +1134,7 @@ class FlowGraphCompiler : public ValueObject {
void CompactBlocks();
bool IsListClass(const Class& cls) const {
return cls.raw() == list_class_.raw();
return cls.ptr() == list_class_.ptr();
}
void EmitSourceLine(Instruction* instr);

View file

@ -429,7 +429,7 @@ void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
void FlowGraphCompiler::GeneratePatchableCall(const InstructionSource& source,
const Code& stub,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs) {
__ BranchLinkPatchable(stub);
EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs);
@ -438,7 +438,7 @@ void FlowGraphCompiler::GeneratePatchableCall(const InstructionSource& source,
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
const InstructionSource& source,
const Code& stub,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
@ -448,7 +448,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
const InstructionSource& source,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
@ -476,7 +476,7 @@ void FlowGraphCompiler::GenerateRuntimeCall(const InstructionSource& source,
intptr_t argument_count,
LocationSummary* locs) {
__ CallRuntime(entry, argument_count);
EmitCallsiteMetadata(source, deopt_id, PcDescriptorsLayout::kOther, locs);
EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs);
}
void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
@ -522,7 +522,7 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(
__ LoadObject(R8, parsed_function().function());
__ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
__ LoadUniqueObject(R9, ic_data);
GenerateDartCall(deopt_id, source, stub, PcDescriptorsLayout::kIcCall, locs,
GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
entry_kind);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -545,7 +545,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
__ Call(compiler::FieldAddress(CODE_REG, entry_point_offset));
EmitCallsiteMetadata(source, deopt_id, PcDescriptorsLayout::kIcCall, locs);
EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -599,16 +599,16 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
if (try_index == kInvalidTryIndex) {
try_index = CurrentTryIndex();
}
AddDescriptor(PcDescriptorsLayout::kOther, assembler()->CodeSize(),
AddDescriptor(UntaggedPcDescriptors::kOther, assembler()->CodeSize(),
DeoptId::kNone, source, try_index);
} else if (is_optimizing()) {
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
AddDeoptIndexAtCall(deopt_id_after);
} else {
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
// Add deoptimization continuation point after the call and before the
// arguments are removed.
AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id_after, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
}
RecordCatchEntryMoves(pending_deoptimization_env_, try_index);
__ Drop(args_desc.SizeWithTypeArgs());
@ -655,7 +655,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
__ LoadUniqueObject(R9, data);
CLOBBERS_LR(__ blx(LR));
EmitCallsiteMetadata(source, DeoptId::kNone, PcDescriptorsLayout::kOther,
EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
locs);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -672,7 +672,7 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(R9, ic_data);
GenerateDartCall(deopt_id, source, stub,
PcDescriptorsLayout::kUnoptStaticCall, locs, entry_kind);
UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
__ Drop(size_with_type_args);
}
@ -695,7 +695,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
}
// Do not use the code from the function, but let the code be patched so that
// we can record the outgoing edges to other code.
GenerateStaticDartCall(deopt_id, source, PcDescriptorsLayout::kOther, locs,
GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
function, entry_kind);
__ Drop(size_with_type_args);
}
@ -744,7 +744,7 @@ Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
} else {
__ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
// Stub returns result in flags (result of a cmp, we need Z computed).
__ Drop(1); // Discard constant.
__ Pop(reg); // Restore 'reg'.
@ -768,7 +768,7 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
} else {
__ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
// Stub returns result in flags (result of a cmp, we need Z computed).
__ Pop(right);
__ Pop(left);
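For orientation, here is a minimal sketch of the call-site pattern these hunks keep repeating, written against the renamed enum. It is not part of the patch; the include path and the public visibility of assembler() and EmitCallsiteMetadata are assumptions based only on how the hunks themselves use them.

#include "vm/compiler/backend/flow_graph_compiler.h"  // assumed include path

namespace dart {

// Mirrors FlowGraphCompiler::GenerateRuntimeCall above: make the runtime
// call, then record call-site metadata tagged with the renamed
// UntaggedPcDescriptors kind (formerly PcDescriptorsLayout).
static void CallRuntimeAndRecord(FlowGraphCompiler* compiler,
                                 const RuntimeEntry& entry,
                                 const InstructionSource& source,
                                 intptr_t deopt_id,
                                 intptr_t argument_count,
                                 LocationSummary* locs) {
  compiler->assembler()->CallRuntime(entry, argument_count);
  compiler->EmitCallsiteMetadata(source, deopt_id,
                                 UntaggedPcDescriptors::kOther, locs);
}

}  // namespace dart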


@ -422,7 +422,7 @@ void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
void FlowGraphCompiler::GeneratePatchableCall(const InstructionSource& source,
const Code& stub,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs) {
__ BranchLinkPatchable(stub);
EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs);
@ -431,7 +431,7 @@ void FlowGraphCompiler::GeneratePatchableCall(const InstructionSource& source,
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
const InstructionSource& source,
const Code& stub,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
@ -441,7 +441,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
const InstructionSource& source,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
@ -469,7 +469,7 @@ void FlowGraphCompiler::GenerateRuntimeCall(const InstructionSource& source,
intptr_t argument_count,
LocationSummary* locs) {
__ CallRuntime(entry, argument_count);
EmitCallsiteMetadata(source, deopt_id, PcDescriptorsLayout::kOther, locs);
EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs);
}
void FlowGraphCompiler::EmitEdgeCounter(intptr_t edge_id) {
@ -506,7 +506,7 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(
__ LoadObject(R6, parsed_function().function());
__ LoadFromOffset(R0, SP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize);
__ LoadUniqueObject(R5, ic_data);
GenerateDartCall(deopt_id, source, stub, PcDescriptorsLayout::kIcCall, locs,
GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
entry_kind);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -535,7 +535,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
__ Call(compiler::FieldAddress(CODE_REG, entry_point_offset));
EmitCallsiteMetadata(source, deopt_id, PcDescriptorsLayout::kIcCall, locs);
EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -585,16 +585,16 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
if (try_index == kInvalidTryIndex) {
try_index = CurrentTryIndex();
}
AddDescriptor(PcDescriptorsLayout::kOther, assembler()->CodeSize(),
AddDescriptor(UntaggedPcDescriptors::kOther, assembler()->CodeSize(),
DeoptId::kNone, source, try_index);
} else if (is_optimizing()) {
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
AddDeoptIndexAtCall(deopt_id_after);
} else {
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
// Add deoptimization continuation point after the call and before the
// arguments are removed.
AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id_after, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
}
RecordCatchEntryMoves(pending_deoptimization_env_, try_index);
__ Drop(args_desc.SizeWithTypeArgs());
@ -648,7 +648,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
}
CLOBBERS_LR(__ blr(LR));
EmitCallsiteMetadata(source, DeoptId::kNone, PcDescriptorsLayout::kOther,
EmitCallsiteMetadata(source, DeoptId::kNone, UntaggedPcDescriptors::kOther,
locs);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -665,7 +665,7 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(R5, ic_data);
GenerateDartCall(deopt_id, source, stub,
PcDescriptorsLayout::kUnoptStaticCall, locs, entry_kind);
UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
__ Drop(size_with_type_args);
}
@ -688,7 +688,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
}
// Do not use the code from the function, but let the code be patched so that
// we can record the outgoing edges to other code.
GenerateStaticDartCall(deopt_id, source, PcDescriptorsLayout::kOther, locs,
GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
function, entry_kind);
__ Drop(size_with_type_args);
}
@ -723,7 +723,7 @@ Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
} else {
__ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
// Stub returns result in flags (result of a cmp, we need Z computed).
// Discard constant.
// Restore 'reg'.
@ -747,7 +747,7 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
} else {
__ BranchLinkPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
// Stub returns result in flags (result of a cmp, we need Z computed).
__ PopPair(right, left);
} else {


@ -249,7 +249,7 @@ SubtypeTestCachePtr FlowGraphCompiler::GenerateCallSubtypeTestStub(
__ Drop(1);
GenerateBoolToJump(TypeTestABI::kSubtypeTestCacheResultReg, is_instance_lbl,
is_not_instance_lbl);
return type_test_cache.raw();
return type_test_cache.ptr();
}
// If instanceof type test cannot be performed successfully at compile time and
@ -364,7 +364,7 @@ void FlowGraphCompiler::GenerateAssertAssignable(
GenerateStubCall(source,
null_safety ? StubCode::TypeIsTopTypeForSubtypingNullSafe()
: StubCode::TypeIsTopTypeForSubtyping(),
PcDescriptorsLayout::kOther, locs, deopt_id);
UntaggedPcDescriptors::kOther, locs, deopt_id);
// TypeTestABI::kSubtypeTestCacheReg is 0 if the type is a top type.
__ BranchIfZero(TypeTestABI::kSubtypeTestCacheReg, &is_assignable,
compiler::Assembler::kNearJump);
@ -372,7 +372,7 @@ void FlowGraphCompiler::GenerateAssertAssignable(
GenerateStubCall(source,
null_safety ? StubCode::NullIsAssignableToTypeNullSafe()
: StubCode::NullIsAssignableToType(),
PcDescriptorsLayout::kOther, locs, deopt_id);
UntaggedPcDescriptors::kOther, locs, deopt_id);
// TypeTestABI::kSubtypeTestCacheReg is 0 if the object is null and is
// assignable.
__ BranchIfZero(TypeTestABI::kSubtypeTestCacheReg, &is_assignable,
@ -539,7 +539,7 @@ void FlowGraphCompiler::EmitCallToStub(const Code& stub) {
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
const InstructionSource& source,
const Code& stub,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
@ -549,7 +549,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
const InstructionSource& source,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
@ -566,7 +566,7 @@ void FlowGraphCompiler::GenerateRuntimeCall(const InstructionSource& source,
intptr_t argument_count,
LocationSummary* locs) {
__ CallRuntime(entry, argument_count);
EmitCallsiteMetadata(source, deopt_id, PcDescriptorsLayout::kOther, locs);
EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs);
}
void FlowGraphCompiler::EmitUnoptimizedStaticCall(
@ -581,7 +581,7 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(ECX, ic_data);
GenerateDartCall(deopt_id, source, stub,
PcDescriptorsLayout::kUnoptStaticCall, locs, entry_kind);
UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
__ Drop(size_with_type_args);
}
@ -618,7 +618,7 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(
__ movl(EBX, compiler::Address(
ESP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
__ LoadObject(ECX, ic_data);
GenerateDartCall(deopt_id, source, stub, PcDescriptorsLayout::kIcCall, locs,
GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
entry_kind);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -643,7 +643,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
__ call(compiler::FieldAddress(CODE_REG, entry_point_offset));
EmitCallsiteMetadata(source, deopt_id, PcDescriptorsLayout::kIcCall, locs);
EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs);
__ Drop(ic_data.SizeWithTypeArgs());
}
@ -670,7 +670,7 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
__ call(compiler::FieldAddress(
CODE_REG, Code::entry_point_offset(Code::EntryKind::kMonomorphic)));
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
RecordSafepoint(locs, slow_path_argument_count);
const intptr_t deopt_id_after = DeoptId::ToDeoptAfter(deopt_id);
// Precompilation not implemented on ia32 platform.
@ -680,7 +680,7 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
} else {
// Add deoptimization continuation point after the call and before the
// arguments are removed.
AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id_after, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
}
RecordCatchEntryMoves(pending_deoptimization_env_, try_index);
__ Drop(args_desc.SizeWithTypeArgs());
@ -712,7 +712,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
}
// Do not use the code from the function, but let the code be patched so that
// we can record the outgoing edges to other code.
GenerateStaticDartCall(deopt_id, source, PcDescriptorsLayout::kOther, locs,
GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
function, entry_kind);
__ Drop(size_with_type_args);
}
@ -747,7 +747,7 @@ Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
} else {
__ Call(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
// Stub returns result in flags (result of a cmpl, we need ZF computed).
__ popl(reg); // Discard constant.
__ popl(reg); // Restore 'reg'.
@ -771,7 +771,7 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
} else {
__ Call(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
// Stub returns result in flags (result of a cmpl, we need ZF computed).
__ popl(right);
__ popl(left);


@ -425,7 +425,7 @@ void FlowGraphCompiler::EmitTailCallToStub(const Code& stub) {
void FlowGraphCompiler::GeneratePatchableCall(const InstructionSource& source,
const Code& stub,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs) {
__ CallPatchable(stub);
EmitCallsiteMetadata(source, DeoptId::kNone, kind, locs);
@ -434,7 +434,7 @@ void FlowGraphCompiler::GeneratePatchableCall(const InstructionSource& source,
void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
const InstructionSource& source,
const Code& stub,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
Code::EntryKind entry_kind) {
ASSERT(CanCallDart());
@ -444,7 +444,7 @@ void FlowGraphCompiler::GenerateDartCall(intptr_t deopt_id,
void FlowGraphCompiler::GenerateStaticDartCall(intptr_t deopt_id,
const InstructionSource& source,
PcDescriptorsLayout::Kind kind,
UntaggedPcDescriptors::Kind kind,
LocationSummary* locs,
const Function& target,
Code::EntryKind entry_kind) {
@ -472,7 +472,7 @@ void FlowGraphCompiler::GenerateRuntimeCall(const InstructionSource& source,
intptr_t argument_count,
LocationSummary* locs) {
__ CallRuntime(entry, argument_count);
EmitCallsiteMetadata(source, deopt_id, PcDescriptorsLayout::kOther, locs);
EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs);
}
void FlowGraphCompiler::EmitUnoptimizedStaticCall(
@ -487,7 +487,7 @@ void FlowGraphCompiler::EmitUnoptimizedStaticCall(
StubCode::UnoptimizedStaticCallEntry(ic_data.NumArgsTested());
__ LoadObject(RBX, ic_data);
GenerateDartCall(deopt_id, source, stub,
PcDescriptorsLayout::kUnoptStaticCall, locs, entry_kind);
UntaggedPcDescriptors::kUnoptStaticCall, locs, entry_kind);
__ Drop(size_with_type_args, RCX);
}
@ -525,7 +525,7 @@ void FlowGraphCompiler::EmitOptimizedInstanceCall(
__ movq(RDX, compiler::Address(
RSP, (ic_data.SizeWithoutTypeArgs() - 1) * kWordSize));
__ LoadUniqueObject(RBX, ic_data);
GenerateDartCall(deopt_id, source, stub, PcDescriptorsLayout::kIcCall, locs,
GenerateDartCall(deopt_id, source, stub, UntaggedPcDescriptors::kIcCall, locs,
entry_kind);
__ Drop(ic_data.SizeWithTypeArgs(), RCX);
}
@ -550,7 +550,7 @@ void FlowGraphCompiler::EmitInstanceCallJIT(const Code& stub,
? Code::entry_point_offset(Code::EntryKind::kMonomorphic)
: Code::entry_point_offset(Code::EntryKind::kMonomorphicUnchecked);
__ call(compiler::FieldAddress(CODE_REG, entry_point_offset));
EmitCallsiteMetadata(source, deopt_id, PcDescriptorsLayout::kIcCall, locs);
EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kIcCall, locs);
__ Drop(ic_data.SizeWithTypeArgs(), RCX);
}
@ -601,16 +601,16 @@ void FlowGraphCompiler::EmitMegamorphicInstanceCall(
if (try_index == kInvalidTryIndex) {
try_index = CurrentTryIndex();
}
AddDescriptor(PcDescriptorsLayout::kOther, assembler()->CodeSize(),
AddDescriptor(UntaggedPcDescriptors::kOther, assembler()->CodeSize(),
DeoptId::kNone, source, try_index);
} else if (is_optimizing()) {
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
AddDeoptIndexAtCall(deopt_id_after);
} else {
AddCurrentDescriptor(PcDescriptorsLayout::kOther, DeoptId::kNone, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kOther, DeoptId::kNone, source);
// Add deoptimization continuation point after the call and before the
// arguments are removed.
AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id_after, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id_after, source);
}
RecordCatchEntryMoves(pending_deoptimization_env_, try_index);
__ Drop(args_desc.SizeWithTypeArgs(), RCX);
@ -653,7 +653,7 @@ void FlowGraphCompiler::EmitInstanceCallAOT(const ICData& ic_data,
__ LoadUniqueObject(RBX, data);
__ call(RCX);
EmitCallsiteMetadata(source, deopt_id, PcDescriptorsLayout::kOther, locs);
EmitCallsiteMetadata(source, deopt_id, UntaggedPcDescriptors::kOther, locs);
__ Drop(ic_data.SizeWithTypeArgs(), RCX);
}
@ -676,7 +676,7 @@ void FlowGraphCompiler::EmitOptimizedStaticCall(
}
// Do not use the code from the function, but let the code be patched so that
// we can record the outgoing edges to other code.
GenerateStaticDartCall(deopt_id, source, PcDescriptorsLayout::kOther, locs,
GenerateStaticDartCall(deopt_id, source, UntaggedPcDescriptors::kOther, locs,
function, entry_kind);
__ Drop(size_with_type_args, RCX);
}
@ -720,7 +720,7 @@ Condition FlowGraphCompiler::EmitEqualityRegConstCompare(
} else {
__ CallPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
// Stub returns result in flags (result of a cmpq, we need ZF computed).
__ popq(reg); // Discard constant.
__ popq(reg); // Restore 'reg'.
@ -744,7 +744,7 @@ Condition FlowGraphCompiler::EmitEqualityRegRegCompare(
} else {
__ CallPatchable(StubCode::UnoptimizedIdenticalWithNumberCheck());
}
AddCurrentDescriptor(PcDescriptorsLayout::kRuntimeCall, deopt_id, source);
AddCurrentDescriptor(UntaggedPcDescriptors::kRuntimeCall, deopt_id, source);
// Stub returns result in flags (result of a cmpq, we need ZF computed).
__ popq(right);
__ popq(left);


@ -217,7 +217,7 @@ void HierarchyInfo::BuildRangesFor(ClassTable* table,
test_succeeded = cls_type.IsSubtypeOf(dst_type, Heap::kNew);
} else {
while (!cls.IsObjectClass()) {
if (cls.raw() == klass.raw()) {
if (cls.ptr() == klass.ptr()) {
test_succeeded = true;
break;
}
@ -1018,7 +1018,7 @@ LocationSummary* AllocateTypedDataInstr::MakeLocationSummary(Zone* zone,
void AllocateTypedDataInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Code& stub = Code::ZoneHandle(
compiler->zone(), StubCode::GetAllocationStubForTypedData(class_id()));
compiler->GenerateStubCall(source(), stub, PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
locs());
}
@ -1058,15 +1058,15 @@ Instruction* StoreInstanceFieldInstr::Canonicalize(FlowGraph* flow_graph) {
}
bool GuardFieldClassInstr::AttributesEqual(Instruction* other) const {
return field().raw() == other->AsGuardFieldClass()->field().raw();
return field().ptr() == other->AsGuardFieldClass()->field().ptr();
}
bool GuardFieldLengthInstr::AttributesEqual(Instruction* other) const {
return field().raw() == other->AsGuardFieldLength()->field().raw();
return field().ptr() == other->AsGuardFieldLength()->field().ptr();
}
bool GuardFieldTypeInstr::AttributesEqual(Instruction* other) const {
return field().raw() == other->AsGuardFieldType()->field().raw();
return field().ptr() == other->AsGuardFieldType()->field().ptr();
}
Instruction* AssertSubtypeInstr::Canonicalize(FlowGraph* flow_graph) {
@ -1086,9 +1086,9 @@ Instruction* AssertSubtypeInstr::Canonicalize(FlowGraph* flow_graph) {
? TypeArguments::null_type_arguments()
: TypeArguments::Cast(function_type_arguments()->BoundConstant());
auto& constant_sub_type = AbstractType::Handle(
Z, AbstractType::Cast(sub_type()->BoundConstant()).raw());
Z, AbstractType::Cast(sub_type()->BoundConstant()).ptr());
auto& constant_super_type = AbstractType::Handle(
Z, AbstractType::Cast(super_type()->BoundConstant()).raw());
Z, AbstractType::Cast(super_type()->BoundConstant()).ptr());
ASSERT(!constant_super_type.IsTypeRef());
ASSERT(!constant_sub_type.IsTypeRef());
@ -1132,13 +1132,13 @@ bool LoadFieldInstr::AttributesEqual(Instruction* other) const {
bool LoadStaticFieldInstr::AttributesEqual(Instruction* other) const {
ASSERT(IsFieldInitialized());
return field().raw() == other->AsLoadStaticField()->field().raw();
return field().ptr() == other->AsLoadStaticField()->field().ptr();
}
bool LoadStaticFieldInstr::IsFieldInitialized() const {
const Field& field = this->field();
return (field.StaticValue() != Object::sentinel().raw()) &&
(field.StaticValue() != Object::transition_sentinel().raw());
return (field.StaticValue() != Object::sentinel().ptr()) &&
(field.StaticValue() != Object::transition_sentinel().ptr());
}
Definition* LoadStaticFieldInstr::Canonicalize(FlowGraph* flow_graph) {
@ -1167,7 +1167,7 @@ ConstantInstr::ConstantInstr(const Object& value,
// values, and sentinel values are canonical by construction and so we skip
// them here.
if (!value.IsNull() && !value.IsSmi() && value.IsInstance() &&
!value.IsCanonical() && (value.raw() != Object::sentinel().raw())) {
!value.IsCanonical() && (value.ptr() != Object::sentinel().ptr())) {
// The only allowed type for which IsCanonical() never answers true is
// TypeParameter. (They are treated as canonical due to how they are
// created, but there is no way to canonicalize a new TypeParameter
@ -1200,7 +1200,7 @@ ConstantInstr::ConstantInstr(const Object& value,
bool ConstantInstr::AttributesEqual(Instruction* other) const {
ConstantInstr* other_constant = other->AsConstant();
ASSERT(other_constant != NULL);
return (value().raw() == other_constant->value().raw() &&
return (value().ptr() == other_constant->value().ptr() &&
representation() == other_constant->representation());
}
@ -2880,10 +2880,10 @@ bool LoadFieldInstr::TryEvaluateLoad(const Object& instance,
// Check that instance really has the field which we
// are trying to load from.
Class& cls = Class::Handle(instance.clazz());
while (cls.raw() != Class::null() && cls.raw() != field.Owner()) {
while (cls.ptr() != Class::null() && cls.ptr() != field.Owner()) {
cls = cls.SuperClass();
}
if (cls.raw() != field.Owner()) {
if (cls.ptr() != field.Owner()) {
// Failed to find the field in class or its superclasses.
return false;
}
@ -3070,7 +3070,7 @@ Definition* AssertAssignableInstr::Canonicalize(FlowGraph* flow_graph) {
if (instantiator_type_arguments()->BindsToConstant()) {
const Object& val = instantiator_type_arguments()->BoundConstant();
instantiator_type_args = (val.raw() == TypeArguments::null())
instantiator_type_args = (val.ptr() == TypeArguments::null())
? &TypeArguments::null_type_arguments()
: &TypeArguments::Cast(val);
}
@ -3078,7 +3078,7 @@ Definition* AssertAssignableInstr::Canonicalize(FlowGraph* flow_graph) {
if (function_type_arguments()->BindsToConstant()) {
const Object& val = function_type_arguments()->BoundConstant();
function_type_args =
(val.raw() == TypeArguments::null())
(val.ptr() == TypeArguments::null())
? &TypeArguments::null_type_arguments()
: &TypeArguments::Cast(function_type_arguments()->BoundConstant());
}
@ -3463,10 +3463,10 @@ static Definition* CanonicalizeStrictCompare(StrictCompareInstr* compare,
PassiveObject& constant = PassiveObject::Handle();
Value* other = NULL;
if (compare->right()->BindsToConstant()) {
constant = compare->right()->BoundConstant().raw();
constant = compare->right()->BoundConstant().ptr();
other = compare->left();
} else if (compare->left()->BindsToConstant()) {
constant = compare->left()->BoundConstant().raw();
constant = compare->left()->BoundConstant().ptr();
other = compare->right();
} else {
return compare;
@ -3476,17 +3476,17 @@ static Definition* CanonicalizeStrictCompare(StrictCompareInstr* compare,
Definition* other_defn = other->definition();
Token::Kind kind = compare->kind();
// Handle e === true.
if ((kind == Token::kEQ_STRICT) && (constant.raw() == Bool::True().raw()) &&
if ((kind == Token::kEQ_STRICT) && (constant.ptr() == Bool::True().ptr()) &&
can_merge) {
return other_defn;
}
// Handle e !== false.
if ((kind == Token::kNE_STRICT) && (constant.raw() == Bool::False().raw()) &&
if ((kind == Token::kNE_STRICT) && (constant.ptr() == Bool::False().ptr()) &&
can_merge) {
return other_defn;
}
// Handle e !== true.
if ((kind == Token::kNE_STRICT) && (constant.raw() == Bool::True().raw()) &&
if ((kind == Token::kNE_STRICT) && (constant.ptr() == Bool::True().ptr()) &&
other_defn->IsComparison() && can_merge &&
other_defn->HasOnlyUse(other)) {
ComparisonInstr* comp = other_defn->AsComparison();
@ -3496,7 +3496,7 @@ static Definition* CanonicalizeStrictCompare(StrictCompareInstr* compare,
}
}
// Handle e === false.
if ((kind == Token::kEQ_STRICT) && (constant.raw() == Bool::False().raw()) &&
if ((kind == Token::kEQ_STRICT) && (constant.ptr() == Bool::False().ptr()) &&
other_defn->IsComparison() && can_merge &&
other_defn->HasOnlyUse(other)) {
ComparisonInstr* comp = other_defn->AsComparison();
@ -3902,7 +3902,7 @@ const CallTargets* CallTargets::CreateMonomorphic(Zone* zone,
CallTargets* targets = new (zone) CallTargets(zone);
const intptr_t count = 1;
targets->cid_ranges_.Add(new (zone) TargetInfo(
receiver_cid, receiver_cid, &Function::ZoneHandle(zone, target.raw()),
receiver_cid, receiver_cid, &Function::ZoneHandle(zone, target.ptr()),
count, StaticTypeExactnessState::NotTracking()));
return targets;
}
@ -3950,7 +3950,7 @@ const CallTargets* CallTargets::CreateAndExpand(Zone* zone,
bool class_is_abstract = false;
if (FlowGraphCompiler::LookupMethodFor(i, name, args_desc, &fn,
&class_is_abstract) &&
fn.raw() == target.raw()) {
fn.ptr() == target.ptr()) {
if (!class_is_abstract) {
target_info->cid_start = i;
target_info->exactness = StaticTypeExactnessState::NotTracking();
@ -3980,7 +3980,7 @@ const CallTargets* CallTargets::CreateAndExpand(Zone* zone,
bool class_is_abstract = false;
if (FlowGraphCompiler::LookupMethodFor(i, name, args_desc, &fn,
&class_is_abstract) &&
fn.raw() == target.raw()) {
fn.ptr() == target.ptr()) {
cid_end_including_abstract = i;
if (!class_is_abstract) {
target_info->cid_end = i;
@ -3997,7 +3997,7 @@ const CallTargets* CallTargets::CreateAndExpand(Zone* zone,
if ((cid_end_including_abstract > target_info->cid_end) &&
(idx < length - 1) &&
((cid_end_including_abstract + 1) == targets[idx + 1].cid_start) &&
(target.raw() == targets.TargetAt(idx + 1)->target->raw())) {
(target.ptr() == targets.TargetAt(idx + 1)->target->ptr())) {
target_info->cid_end = cid_end_including_abstract;
target_info->exactness = StaticTypeExactnessState::NotTracking();
}
@ -4019,7 +4019,7 @@ void CallTargets::MergeIntoRanges() {
for (int src = 1; src < length(); src++) {
const Function& target = *TargetAt(dest)->target;
if (TargetAt(dest)->cid_end + 1 >= TargetAt(src)->cid_start &&
target.raw() == TargetAt(src)->target->raw() &&
target.ptr() == TargetAt(src)->target->ptr() &&
!target.is_polymorphic_target()) {
TargetAt(dest)->cid_end = TargetAt(src)->cid_end;
TargetAt(dest)->count += TargetAt(src)->count;
@ -4066,7 +4066,7 @@ LocationSummary* JoinEntryInstr::MakeLocationSummary(Zone* zone,
void JoinEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(compiler->GetJumpLabel(this));
if (!compiler->is_optimizing()) {
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
InstructionSource());
}
if (HasParallelMove()) {
@ -4093,7 +4093,7 @@ void TargetEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The deoptimization descriptor points after the edge counter code for
// uniformity with ARM, where we can reuse pattern matching code that
// matches backwards from the end of the pattern.
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
InstructionSource());
}
if (HasParallelMove()) {
@ -4167,7 +4167,7 @@ void FunctionEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// The deoptimization descriptor points after the edge counter code for
// uniformity with ARM, where we can reuse pattern matching code that
// matches backwards from the end of the pattern.
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
InstructionSource());
}
if (HasParallelMove()) {
@ -4340,7 +4340,7 @@ void LoadStaticFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& init_static_field_stub = Code::ZoneHandle(
compiler->zone(), object_store->init_static_field_stub());
compiler->GenerateStubCall(source(), init_static_field_stub,
/*kind=*/PcDescriptorsLayout::kOther, locs(),
/*kind=*/UntaggedPcDescriptors::kOther, locs(),
deopt_id());
__ Bind(&no_call);
}
@ -4400,7 +4400,7 @@ void LoadFieldInstr::EmitNativeCodeForInitializerCall(
// so deoptimization environment has to be adjusted.
// This adjustment is done in FlowGraph::AttachEnvironment.
compiler->GenerateStubCall(source(), stub,
/*kind=*/PcDescriptorsLayout::kOther, locs(),
/*kind=*/UntaggedPcDescriptors::kOther, locs(),
deopt_id());
__ Bind(&no_call);
}
@ -4420,7 +4420,7 @@ void ThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Code::ZoneHandle(compiler->zone(), object_store->throw_stub());
compiler->GenerateStubCall(source(), throw_stub,
/*kind=*/PcDescriptorsLayout::kOther, locs(),
/*kind=*/UntaggedPcDescriptors::kOther, locs(),
deopt_id());
// Issue(dartbug.com/41353): Right now we have to emit an extra breakpoint
// instruction: The ThrowInstr will terminate the current block. The very
@ -4448,7 +4448,7 @@ void ReThrowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler->SetNeedsStackTrace(catch_try_index());
compiler->GenerateStubCall(source(), re_throw_stub,
/*kind=*/PcDescriptorsLayout::kOther, locs(),
/*kind=*/UntaggedPcDescriptors::kOther, locs(),
deopt_id());
// Issue(dartbug.com/41353): Right now we have to emit an extra breakpoint
// instruction: The ThrowInstr will terminate the current block. The very
@ -4482,7 +4482,7 @@ void AssertBooleanInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ CompareObject(AssertBooleanABI::kObjectReg, Object::null_instance());
__ BranchIf(NOT_EQUAL, &done);
compiler->GenerateStubCall(source(), assert_boolean_stub,
/*kind=*/PcDescriptorsLayout::kOther, locs(),
/*kind=*/UntaggedPcDescriptors::kOther, locs(),
deopt_id());
__ Bind(&done);
}
@ -4781,11 +4781,11 @@ static CodePtr TwoArgsSmiOpInlineCacheEntry(Token::Kind kind) {
}
switch (kind) {
case Token::kADD:
return StubCode::SmiAddInlineCache().raw();
return StubCode::SmiAddInlineCache().ptr();
case Token::kLT:
return StubCode::SmiLessInlineCache().raw();
return StubCode::SmiLessInlineCache().ptr();
case Token::kEQ:
return StubCode::SmiEqualInlineCache().raw();
return StubCode::SmiEqualInlineCache().ptr();
default:
return Code::null();
}
@ -4857,7 +4857,7 @@ static FunctionPtr FindBinarySmiOp(Zone* zone, const String& name) {
smi_op_target = Resolver::ResolveDynamicAnyArgs(zone, smi_class, demangled);
}
#endif
return smi_op_target.raw();
return smi_op_target.ptr();
}
void InstanceCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
@ -4882,14 +4882,14 @@ void InstanceCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
AbstractType& receivers_static_type = AbstractType::Handle(zone);
if (receivers_static_type_ != nullptr) {
receivers_static_type = receivers_static_type_->raw();
receivers_static_type = receivers_static_type_->ptr();
}
call_ic_data = compiler->GetOrAddInstanceCallICData(
deopt_id(), function_name(), arguments_descriptor,
checked_argument_count(), receivers_static_type, binary_smi_op_target);
} else {
call_ic_data = &ICData::ZoneHandle(zone, ic_data()->raw());
call_ic_data = &ICData::ZoneHandle(zone, ic_data()->ptr());
}
if (compiler->is_optimizing() && HasICData()) {
@ -4907,7 +4907,7 @@ void InstanceCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
} else {
// Unoptimized code.
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kRewind, deopt_id(),
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kRewind, deopt_id(),
source());
// If the ICData contains a (Smi, Smi, <binary-smi-op-target>) stub already
@ -4920,7 +4920,7 @@ void InstanceCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
auto& target = Function::Handle();
call_ic_data->GetCheckAt(0, &class_ids, &target);
if (class_ids[0] == kSmiCid && class_ids[1] == kSmiCid &&
target.raw() == binary_smi_op_target.raw()) {
target.ptr() == binary_smi_op_target.ptr()) {
use_specialized_smi_ic_stub = true;
}
}
@ -5036,7 +5036,7 @@ void DispatchTableCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler->EmitDispatchTableCall(cid_reg, selector()->offset,
arguments_descriptor);
compiler->EmitCallsiteMetadata(source(), DeoptId::kNone,
PcDescriptorsLayout::kOther, locs());
UntaggedPcDescriptors::kOther, locs());
if (selector()->called_on_null && !selector()->on_null_interface) {
Value* receiver = ArgumentValueAt(FirstArgIndex());
if (receiver->Type()->is_nullable()) {
@ -5107,7 +5107,7 @@ bool CallTargets::HasSingleRecognizedTarget() const {
bool CallTargets::HasSingleTarget() const {
if (length() == 0) return false;
for (int i = 0; i < length(); i++) {
if (TargetAt(i)->target->raw() != TargetAt(0)->target->raw()) return false;
if (TargetAt(i)->target->ptr() != TargetAt(0)->target->ptr()) return false;
}
return true;
}
@ -5140,7 +5140,7 @@ bool PolymorphicInstanceCallInstr::HasOnlyDispatcherOrImplicitAccessorTargets()
const intptr_t len = targets_.length();
Function& target = Function::Handle();
for (intptr_t i = 0; i < len; i++) {
target = targets_.TargetAt(i)->target->raw();
target = targets_.TargetAt(i)->target->ptr();
if (!target.IsDispatcherOrImplicitAccessor()) {
return false;
}
@ -5176,8 +5176,8 @@ TypePtr PolymorphicInstanceCallInstr::ComputeRuntimeType(
const intptr_t num_checks = targets.length();
for (intptr_t i = 0; i < num_checks; i++) {
ASSERT(targets.TargetAt(i)->target->raw() ==
targets.TargetAt(0)->target->raw());
ASSERT(targets.TargetAt(i)->target->ptr() ==
targets.TargetAt(0)->target->ptr());
const intptr_t start = targets[i].cid_start;
const intptr_t end = targets[i].cid_end;
for (intptr_t cid = start; cid <= end; cid++) {
@ -5319,7 +5319,7 @@ void StaticCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
deopt_id(), function(), arguments_descriptor, num_args_checked,
rebind_rule_);
} else {
call_ic_data = &ICData::ZoneHandle(ic_data()->raw());
call_ic_data = &ICData::ZoneHandle(ic_data()->ptr());
}
ArgumentsInfo args_info(type_args_len(), ArgumentCount(), ArgumentsSize(),
argument_names());
@ -5391,7 +5391,7 @@ void AssertSubtypeInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Drop(5);
#else
compiler->GenerateStubCall(source(), StubCode::AssertSubtype(),
PcDescriptorsLayout::kOther, locs());
UntaggedPcDescriptors::kOther, locs());
#endif
}
@ -5902,7 +5902,7 @@ Definition* StringInterpolateInstr::Canonicalize(FlowGraph* flow_graph) {
// v8 <- StringInterpolate(v2)
// Don't compile-time fold when optimizing the interpolation function itself.
if (flow_graph->function().raw() == CallFunction().raw()) {
if (flow_graph->function().ptr() == CallFunction().ptr()) {
return this;
}
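The il.cc hunks above are dominated by one idiom: a handle's ptr() now yields the tagged pointer, so identity checks and assignments go through ptr() where they previously went through raw(). A minimal sketch of that idiom, not from the patch (the include path is an assumption):

#include "vm/object.h"  // assumed include path for Function/FunctionPtr

namespace dart {

// Identity of two handles is identity of their tagged pointers, as in
// "fn.ptr() == target.ptr()" in CallTargets::CreateAndExpand above.
static bool SameTarget(const Function& a, const Function& b) {
  return a.ptr() == b.ptr();
}

// Returning the tagged pointer out of a handle, as in
// "return smi_op_target.ptr()" in FindBinarySmiOp above.
static FunctionPtr TargetOf(const Function& target) {
  return target.ptr();
}

}  // namespace dart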


@ -2066,7 +2066,7 @@ class CatchBlockEntryInstr : public BlockEntryWithInitialDefs {
/*stack_depth=*/0),
graph_entry_(graph_entry),
predecessor_(NULL),
catch_handler_types_(Array::ZoneHandle(handler_types.raw())),
catch_handler_types_(Array::ZoneHandle(handler_types.ptr())),
catch_try_index_(catch_try_index),
exception_var_(exception_var),
stacktrace_var_(stacktrace_var),
@ -2972,7 +2972,7 @@ class ReturnInstr : public TemplateInstruction<1, NoThrow> {
ReturnInstr(const InstructionSource& source,
Value* value,
intptr_t deopt_id,
intptr_t yield_index = PcDescriptorsLayout::kInvalidYieldIndex,
intptr_t yield_index = UntaggedPcDescriptors::kInvalidYieldIndex,
Representation representation = kTagged)
: TemplateInstruction(source, deopt_id),
token_pos_(source.token_pos),
@ -5276,7 +5276,7 @@ class RawStoreFieldInstr : public TemplateInstruction<2, NoThrow> {
class DebugStepCheckInstr : public TemplateInstruction<0, NoThrow> {
public:
DebugStepCheckInstr(const InstructionSource& source,
PcDescriptorsLayout::Kind stub_kind,
UntaggedPcDescriptors::Kind stub_kind,
intptr_t deopt_id)
: TemplateInstruction(source, deopt_id),
token_pos_(source.token_pos),
@ -5293,7 +5293,7 @@ class DebugStepCheckInstr : public TemplateInstruction<0, NoThrow> {
private:
const TokenPosition token_pos_;
const PcDescriptorsLayout::Kind stub_kind_;
const UntaggedPcDescriptors::Kind stub_kind_;
DISALLOW_COPY_AND_ASSIGN(DebugStepCheckInstr);
};
@ -5506,7 +5506,7 @@ class GuardFieldLengthInstr : public GuardFieldInstr {
// For a field of static type G<T0, ..., Tn> and a stored value of runtime
// type T checks that type arguments of T at G exactly match <T0, ..., Tn>
// and updates guarded state (FieldLayout::static_type_exactness_state_)
// and updates guarded state (UntaggedField::static_type_exactness_state_)
// accordingly.
//
// See StaticTypeExactnessState for more information.
@ -6138,7 +6138,7 @@ class AllocateObjectInstr : public AllocationInstr {
const Function& closure_function() const { return closure_function_; }
void set_closure_function(const Function& function) {
closure_function_ = function.raw();
closure_function_ = function.ptr();
}
virtual intptr_t InputCount() const {
@ -9613,7 +9613,7 @@ StringPtr TemplateDartCall<kExtraInputs>::Selector() {
if (auto static_call = this->AsStaticCall()) {
return static_call->function().name();
} else if (auto instance_call = this->AsInstanceCall()) {
return instance_call->function_name().raw();
return instance_call->function_name().ptr();
} else {
UNREACHABLE();
}
@ -9621,7 +9621,7 @@ StringPtr TemplateDartCall<kExtraInputs>::Selector() {
inline bool Value::CanBe(const Object& value) {
ConstantInstr* constant = definition()->AsConstant();
return (constant == nullptr) || constant->value().raw() == value.raw();
return (constant == nullptr) || constant->value().ptr() == value.ptr();
}
class SuccessorsIterable {


@ -504,7 +504,7 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&stack_ok);
#endif
ASSERT(__ constant_pool_allowed());
if (yield_index() != PcDescriptorsLayout::kInvalidYieldIndex) {
if (yield_index() != UntaggedPcDescriptors::kInvalidYieldIndex) {
compiler->EmitYieldPositionMetadata(source(), yield_index());
}
__ LeaveDartFrameAndReturn(); // Disallows constant pool use.
@ -625,7 +625,7 @@ void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
__ blx(R2);
compiler->EmitCallsiteMetadata(source(), deopt_id(),
PcDescriptorsLayout::kOther, locs());
UntaggedPcDescriptors::kOther, locs());
__ Drop(argument_count);
}
@ -1270,9 +1270,9 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
: compiler::ObjectPoolBuilderEntry::kNotPatchable);
if (link_lazily()) {
compiler->GeneratePatchableCall(source(), *stub,
PcDescriptorsLayout::kOther, locs());
UntaggedPcDescriptors::kOther, locs());
} else {
compiler->GenerateStubCall(source(), *stub, PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), *stub, UntaggedPcDescriptors::kOther,
locs());
}
__ Pop(result);
@ -1315,7 +1315,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// instruction. Therefore we emit the metadata here, 8 bytes (2 instructions)
// after the original mov.
compiler->EmitCallsiteMetadata(InstructionSource(), deopt_id(),
PcDescriptorsLayout::Kind::kOther, locs());
UntaggedPcDescriptors::Kind::kOther, locs());
// Update information in the thread object and enter a safepoint.
if (CanExecuteGeneratedCodeInSafepoint()) {
@ -2359,9 +2359,9 @@ LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
}
void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t field_cid = field().guarded_cid();
@ -2620,7 +2620,7 @@ class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
compiler->SaveLiveRegisters(locs);
compiler->GenerateStubCall(InstructionSource(), // No token position.
stub, PcDescriptorsLayout::kOther, locs);
stub, UntaggedPcDescriptors::kOther, locs);
__ MoveRegister(result_, R0);
compiler->RestoreLiveRegisters(locs);
__ b(exit_label());
@ -2836,9 +2836,9 @@ static void EnsureMutableBox(FlowGraphCompiler* compiler,
}
void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
compiler::Label skip_store;
@ -3127,7 +3127,7 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
// data area to be initialized.
// R8: null
if (num_elements > 0) {
const intptr_t array_size = instance_size - sizeof(ArrayLayout);
const intptr_t array_size = instance_size - sizeof(UntaggedArray);
__ LoadObject(R8, Object::null_object());
if (num_elements >= 2) {
__ mov(R9, compiler::Operand(R8));
@ -3137,7 +3137,7 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
__ LoadImmediate(R9, 0x1);
#endif // DEBUG
}
__ AddImmediate(R6, R0, sizeof(ArrayLayout) - kHeapObjectTag);
__ AddImmediate(R6, R0, sizeof(UntaggedArray) - kHeapObjectTag);
if (array_size < (kInlineArraySize * compiler::target::kWordSize)) {
__ InitializeFieldsNoBarrierUnrolled(
R0, R6, 0, num_elements * compiler::target::kWordSize, R8, R9);
@ -3180,7 +3180,7 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& allocate_array_stub =
Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
compiler->GenerateStubCall(source(), allocate_array_stub,
PcDescriptorsLayout::kOther, locs(), deopt_id());
UntaggedPcDescriptors::kOther, locs(), deopt_id());
__ Bind(&done);
ASSERT(locs()->out(0).reg() == kResultReg);
}
@ -3266,9 +3266,9 @@ LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
}
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
const Register instance_reg = locs()->in(0).reg();
if (slot().representation() != kTagged) {
@ -3538,7 +3538,7 @@ void InstantiateTypeArgumentsInstr::EmitNativeCode(
__ b(&type_arguments_instantiated, EQ);
}
// Lookup cache in stub before calling runtime.
compiler->GenerateStubCall(source(), GetStub(), PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), GetStub(), UntaggedPcDescriptors::kOther,
locs());
__ Bind(&type_arguments_instantiated);
}
@ -3579,7 +3579,7 @@ class AllocateContextSlowPath
compiler->zone(), object_store->allocate_context_stub());
__ LoadImmediate(R1, instruction()->num_context_variables());
compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
PcDescriptorsLayout::kOther, locs);
UntaggedPcDescriptors::kOther, locs);
ASSERT(instruction()->locs()->out(0).reg() == R0);
compiler->RestoreLiveRegisters(instruction()->locs());
__ b(exit_label());
@ -3629,7 +3629,7 @@ void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub());
__ LoadImmediate(R1, num_context_variables());
compiler->GenerateStubCall(source(), allocate_context_stub,
PcDescriptorsLayout::kOther, locs());
UntaggedPcDescriptors::kOther, locs());
}
LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
@ -3651,7 +3651,7 @@ void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& clone_context_stub =
Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
compiler->GenerateStubCall(source(), clone_context_stub,
/*kind=*/PcDescriptorsLayout::kOther, locs());
/*kind=*/UntaggedPcDescriptors::kOther, locs());
}
LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
@ -3672,7 +3672,7 @@ void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (compiler->is_optimizing()) {
compiler->AddDeoptIndexAtCall(deopt_id);
} else {
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id,
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id,
InstructionSource());
}
}
@ -3757,7 +3757,7 @@ class CheckStackOverflowSlowPath
compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);
compiler->RecordCatchEntryMoves();
compiler->AddDescriptor(
PcDescriptorsLayout::kOther, compiler->assembler()->CodeSize(),
UntaggedPcDescriptors::kOther, compiler->assembler()->CodeSize(),
instruction()->deopt_id(), instruction()->source(),
compiler->CurrentTryIndex());
} else {
@ -3769,7 +3769,7 @@ class CheckStackOverflowSlowPath
if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
instruction()->in_loop()) {
// In unoptimized code, record loop stack checks as possible OSR entries.
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kOsrEntry,
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
instruction()->deopt_id(),
InstructionSource());
}
@ -3811,7 +3811,7 @@ void CheckStackOverflowInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// the stub above).
auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
compiler->EmitCallsiteMetadata(source(), deopt_id(),
PcDescriptorsLayout::kOther, locs(),
UntaggedPcDescriptors::kOther, locs(),
extended_env);
return;
}
@ -5010,10 +5010,10 @@ LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
auto object_store = IsolateGroup::Current()->object_store();
const bool stubs_in_vm_isolate =
object_store->allocate_mint_with_fpu_regs_stub()
->ptr()
->untag()
->InVMIsolateHeap() ||
object_store->allocate_mint_without_fpu_regs_stub()
->ptr()
->untag()
->InVMIsolateHeap();
const bool shared_slow_path_call = SlowPathSharingSupported(opt) &&
FLAG_use_bare_instructions &&
@ -5074,7 +5074,7 @@ void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(!locs()->live_registers()->ContainsRegister(
AllocateMintABI::kResultReg));
auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
compiler->GenerateStubCall(source(), stub, PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
locs(), DeoptId::kNone, extended_env);
} else {
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(),
@ -6631,7 +6631,7 @@ void CheckNullInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// the stub above).
auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
compiler->EmitCallsiteMetadata(source(), deopt_id(),
PcDescriptorsLayout::kOther, locs(),
UntaggedPcDescriptors::kOther, locs(),
extended_env);
CheckNullInstr::AddMetadataForRuntimeCall(this, compiler);
return;
@ -7529,7 +7529,7 @@ void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
// Add a deoptimization descriptor for deoptimizing instructions that
// may be inserted before this instruction.
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
InstructionSource());
}
if (HasParallelMove()) {
@ -7703,7 +7703,7 @@ void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
const Code& stub = Code::ZoneHandle(
compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
compiler->GenerateStubCall(source(), stub, PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
locs());
}
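One hunk above also crosses from the tagged pointer to the untagged layout via untag(). A small sketch of that step, not part of the patch (include path assumed; the call chain mirrors the BoxInt64Instr hunk):

#include "vm/object.h"  // assumed include path

namespace dart {

// handle.ptr() gives the tagged CodePtr; ->untag() crosses to the untagged
// layout, on which raw header queries like InVMIsolateHeap() live. Mirrors
// "allocate_mint_with_fpu_regs_stub()->untag()->InVMIsolateHeap()" above.
static bool StubIsInVMIsolateHeap(const Code& stub) {
  return stub.ptr()->untag()->InVMIsolateHeap();
}

}  // namespace dart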


@ -412,7 +412,7 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&stack_ok);
#endif
ASSERT(__ constant_pool_allowed());
if (yield_index() != PcDescriptorsLayout::kInvalidYieldIndex) {
if (yield_index() != UntaggedPcDescriptors::kInvalidYieldIndex) {
compiler->EmitYieldPositionMetadata(source(), yield_index());
}
__ LeaveDartFrame(); // Disallows constant pool use.
@ -529,7 +529,7 @@ void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
__ blr(R2);
compiler->EmitCallsiteMetadata(source(), deopt_id(),
PcDescriptorsLayout::kOther, locs());
UntaggedPcDescriptors::kOther, locs());
__ Drop(argument_count);
}
@ -960,7 +960,7 @@ Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
Location right = locs()->in(1);
if (right.IsConstant()) {
ASSERT(right.constant().IsSmi());
const int64_t imm = static_cast<int64_t>(right.constant().raw());
const int64_t imm = static_cast<int64_t>(right.constant().ptr());
__ TestImmediate(left, imm);
} else {
__ tst(left, compiler::Operand(right.reg()));
@ -1103,9 +1103,9 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
: ObjectPool::Patchability::kNotPatchable);
if (link_lazily()) {
compiler->GeneratePatchableCall(source(), *stub,
PcDescriptorsLayout::kOther, locs());
UntaggedPcDescriptors::kOther, locs());
} else {
compiler->GenerateStubCall(source(), *stub, PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), *stub, UntaggedPcDescriptors::kOther,
locs());
}
__ Pop(result);
@ -1143,7 +1143,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// instruction.
__ adr(temp, compiler::Immediate(Instr::kInstrSize));
compiler->EmitCallsiteMetadata(source(), deopt_id(),
PcDescriptorsLayout::Kind::kOther, locs());
UntaggedPcDescriptors::Kind::kOther, locs());
__ StoreToOffset(temp, FPREG, kSavedCallerPcSlotFromFp * kWordSize);
@ -2063,9 +2063,9 @@ LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
}
void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t field_cid = field().guarded_cid();
@ -2309,7 +2309,7 @@ class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
compiler->SaveLiveRegisters(locs);
compiler->GenerateStubCall(InstructionSource(), // No token position.
stub, PcDescriptorsLayout::kOther, locs);
stub, UntaggedPcDescriptors::kOther, locs);
__ MoveRegister(result_, R0);
compiler->RestoreLiveRegisters(locs);
__ b(exit_label());
@ -2395,9 +2395,9 @@ LocationSummary* StoreInstanceFieldInstr::MakeLocationSummary(Zone* zone,
}
void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
compiler::Label skip_store;
@ -2673,9 +2673,9 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
// data area to be initialized.
// R6: null
if (num_elements > 0) {
const intptr_t array_size = instance_size - sizeof(ArrayLayout);
const intptr_t array_size = instance_size - sizeof(UntaggedArray);
__ LoadObject(R6, Object::null_object());
__ AddImmediate(R8, R0, sizeof(ArrayLayout) - kHeapObjectTag);
__ AddImmediate(R8, R0, sizeof(UntaggedArray) - kHeapObjectTag);
if (array_size < (kInlineArraySize * kWordSize)) {
intptr_t current_offset = 0;
while (current_offset < array_size) {
@ -2727,7 +2727,7 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& allocate_array_stub =
Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
compiler->GenerateStubCall(source(), allocate_array_stub,
PcDescriptorsLayout::kOther, locs(), deopt_id());
UntaggedPcDescriptors::kOther, locs(), deopt_id());
ASSERT(locs()->out(0).reg() == kResultReg);
__ Bind(&done);
}
@ -2804,9 +2804,9 @@ LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
}
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
const Register instance_reg = locs()->in(0).reg();
if (slot().representation() != kTagged) {
@ -3051,7 +3051,7 @@ void InstantiateTypeArgumentsInstr::EmitNativeCode(
}
// Lookup cache in stub before calling runtime.
compiler->GenerateStubCall(source(), GetStub(), PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), GetStub(), UntaggedPcDescriptors::kOther,
locs());
__ Bind(&type_arguments_instantiated);
}
@ -3093,7 +3093,7 @@ class AllocateContextSlowPath
__ LoadImmediate(R1, instruction()->num_context_variables());
compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
PcDescriptorsLayout::kOther, locs);
UntaggedPcDescriptors::kOther, locs);
ASSERT(instruction()->locs()->out(0).reg() == R0);
compiler->RestoreLiveRegisters(instruction()->locs());
__ b(exit_label());
@ -3143,7 +3143,7 @@ void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Code::ZoneHandle(compiler->zone(), object_store->allocate_context_stub());
__ LoadImmediate(R1, num_context_variables());
compiler->GenerateStubCall(source(), allocate_context_stub,
PcDescriptorsLayout::kOther, locs());
UntaggedPcDescriptors::kOther, locs());
}
LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
@ -3165,7 +3165,7 @@ void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& clone_context_stub =
Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
compiler->GenerateStubCall(source(), clone_context_stub,
/*kind=*/PcDescriptorsLayout::kOther, locs());
/*kind=*/UntaggedPcDescriptors::kOther, locs());
}
LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
@ -3186,7 +3186,7 @@ void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (compiler->is_optimizing()) {
compiler->AddDeoptIndexAtCall(deopt_id);
} else {
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id,
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id,
InstructionSource());
}
}
@ -3282,7 +3282,7 @@ class CheckStackOverflowSlowPath
compiler->RecordSafepoint(locs, kNumSlowPathArgs);
compiler->RecordCatchEntryMoves();
compiler->AddDescriptor(
PcDescriptorsLayout::kOther, compiler->assembler()->CodeSize(),
UntaggedPcDescriptors::kOther, compiler->assembler()->CodeSize(),
instruction()->deopt_id(), instruction()->source(),
compiler->CurrentTryIndex());
} else {
@ -3294,7 +3294,7 @@ class CheckStackOverflowSlowPath
if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
instruction()->in_loop()) {
// In unoptimized code, record loop stack checks as possible OSR entries.
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kOsrEntry,
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
instruction()->deopt_id(),
InstructionSource());
}
@ -3777,7 +3777,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (locs()->in(1).IsConstant()) {
const Object& constant = locs()->in(1).constant();
ASSERT(constant.IsSmi());
const int64_t imm = static_cast<int64_t>(constant.raw());
const int64_t imm = static_cast<int64_t>(constant.ptr());
switch (op_kind()) {
case Token::kADD: {
if (deopt == NULL) {
@ -4236,10 +4236,10 @@ LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
auto object_store = IsolateGroup::Current()->object_store();
const bool stubs_in_vm_isolate =
object_store->allocate_mint_with_fpu_regs_stub()
->ptr()
->untag()
->InVMIsolateHeap() ||
object_store->allocate_mint_without_fpu_regs_stub()
->ptr()
->untag()
->InVMIsolateHeap();
const bool shared_slow_path_call = SlowPathSharingSupported(opt) &&
FLAG_use_bare_instructions &&
@ -4293,7 +4293,7 @@ void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(!locs()->live_registers()->ContainsRegister(
AllocateMintABI::kResultReg));
auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
compiler->GenerateStubCall(source(), stub, PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
locs(), DeoptId::kNone, extended_env);
} else {
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
@ -5667,7 +5667,7 @@ void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (index_loc.IsConstant()) {
const Register length = length_loc.reg();
const Smi& index = Smi::Cast(index_loc.constant());
__ CompareImmediate(length, static_cast<int64_t>(index.raw()));
__ CompareImmediate(length, static_cast<int64_t>(index.ptr()));
__ b(deopt, LS);
} else if (length_loc.IsConstant()) {
const Smi& length = Smi::Cast(length_loc.constant());
@ -5679,7 +5679,7 @@ void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ tst(index, compiler::Operand(index));
__ b(deopt, MI);
} else {
__ CompareImmediate(index, static_cast<int64_t>(length.raw()));
__ CompareImmediate(index, static_cast<int64_t>(length.ptr()));
__ b(deopt, CS);
}
} else {
@ -6567,7 +6567,7 @@ void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
// Add a deoptimization descriptor for deoptimizing instructions that
// may be inserted before this instruction.
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
InstructionSource());
}
if (HasParallelMove()) {
@ -6734,7 +6734,7 @@ void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
const Code& stub = Code::ZoneHandle(
compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
compiler->GenerateStubCall(source(), stub, PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
locs());
}


@ -1018,10 +1018,10 @@ ConstantInstr* FlowGraphDeserializer::DeserializeConstant(
DebugStepCheckInstr* FlowGraphDeserializer::DeserializeDebugStepCheck(
SExpList* sexp,
const InstrInfo& info) {
auto kind = PcDescriptorsLayout::kAnyKind;
auto kind = UntaggedPcDescriptors::kAnyKind;
if (auto const kind_sexp = CheckSymbol(Retrieve(sexp, "stub_kind"))) {
if (!PcDescriptorsLayout::ParseKind(kind_sexp->value(), &kind)) {
StoreError(kind_sexp, "not a valid PcDescriptorsLayout::Kind name");
if (!UntaggedPcDescriptors::ParseKind(kind_sexp->value(), &kind)) {
StoreError(kind_sexp, "not a valid UntaggedPcDescriptors::Kind name");
return nullptr;
}
}
@ -1466,7 +1466,7 @@ bool FlowGraphDeserializer::ParseDartValue(SExpression* sexp, Object* out) {
// early if we parse one.
if (sym->Equals("null")) return true;
if (sym->Equals("sentinel")) {
*out = Object::sentinel().raw();
*out = Object::sentinel().ptr();
return true;
}
@ -1478,7 +1478,7 @@ bool FlowGraphDeserializer::ParseDartValue(SExpression* sexp, Object* out) {
StoreError(sym, "not a reference to a constant definition");
return false;
}
*out = val->BoundConstant().raw();
*out = val->BoundConstant().ptr();
// Values used in constant definitions have already been canonicalized,
// so just exit.
return true;
@ -1487,7 +1487,7 @@ bool FlowGraphDeserializer::ParseDartValue(SExpression* sexp, Object* out) {
// Other instance values may need to be canonicalized, so do that before
// returning.
if (auto const b = sexp->AsBool()) {
*out = Bool::Get(b->value()).raw();
*out = Bool::Get(b->value()).ptr();
} else if (auto const str = sexp->AsString()) {
*out = String::New(str->value(), Heap::kOld);
} else if (auto const i = sexp->AsInteger()) {
@ -1651,13 +1651,13 @@ bool FlowGraphDeserializer::ParseFunction(SExpList* list, Object* out) {
auto& function = Function::Cast(*out);
// Check the kind expected by the S-expression if one was specified.
if (auto const kind_sexp = CheckSymbol(list->ExtraLookupValue("kind"))) {
FunctionLayout::Kind kind;
if (!FunctionLayout::ParseKind(kind_sexp->value(), &kind)) {
UntaggedFunction::Kind kind;
if (!UntaggedFunction::ParseKind(kind_sexp->value(), &kind)) {
StoreError(kind_sexp, "unexpected function kind");
return false;
}
if (function.kind() != kind) {
auto const kind_str = FunctionLayout::KindToCString(function.kind());
auto const kind_str = UntaggedFunction::KindToCString(function.kind());
StoreError(list, "retrieved function has kind %s", kind_str);
return false;
}
@ -1697,7 +1697,7 @@ bool FlowGraphDeserializer::ParseFunctionType(SExpList* list, Object* out) {
sig.set_parameter_types(parameter_types);
sig.set_parameter_names(parameter_names);
sig.set_packed_fields(packed_fields);
*out = sig.raw();
*out = sig.ptr();
return true;
}
@ -1770,7 +1770,7 @@ bool FlowGraphDeserializer::ParseInstance(SExpList* list, Object* out) {
StoreError(list, "class for instance has non-final instance fields");
return false;
}
auto& fresh_handle = Field::Handle(zone(), instance_field_.raw());
auto& fresh_handle = Field::Handle(zone(), instance_field_.ptr());
final_fields.Add(&fresh_handle);
}
@ -1850,7 +1850,7 @@ bool FlowGraphDeserializer::ParseType(SExpression* sexp, Object* out) {
StoreError(sexp, "reference to non-constant definition");
return false;
}
*out = val->BoundConstant().raw();
*out = val->BoundConstant().ptr();
if (!out->IsType()) {
StoreError(sexp, "expected Type constant");
return false;
@ -1940,7 +1940,7 @@ bool FlowGraphDeserializer::ParseTypeArguments(SExpression* sexp, Object* out) {
StoreError(sexp, "reference to non-constant definition");
return false;
}
*out = val->BoundConstant().raw();
*out = val->BoundConstant().ptr();
if (!out->IsTypeArguments()) {
StoreError(sexp, "expected TypeArguments constant");
return false;
@ -2052,7 +2052,7 @@ bool FlowGraphDeserializer::ParseCanonicalName(SExpSymbol* sym, Object* obj) {
String::FromUTF8(reinterpret_cast<const uint8_t*>(name), lib_end - name);
name_library_ = Library::LookupLibrary(thread(), tmp_string_);
if (*lib_end == '\0') {
*obj = name_library_.raw();
*obj = name_library_.ptr();
return true;
}
const char* const class_start = lib_end + 1;
@ -2081,7 +2081,7 @@ bool FlowGraphDeserializer::ParseCanonicalName(SExpSymbol* sym, Object* obj) {
return false;
}
if (*class_end == '\0') {
*obj = name_class_.raw();
*obj = name_class_.ptr();
return true;
}
if (*class_end == '.') {
@ -2100,7 +2100,7 @@ bool FlowGraphDeserializer::ParseCanonicalName(SExpSymbol* sym, Object* obj) {
empty_name ? "at top level" : name_class_.ToCString());
return false;
}
*obj = name_field_.raw();
*obj = name_field_.ptr();
return true;
}
if (class_end[1] == '\0') {
@ -2177,7 +2177,7 @@ bool FlowGraphDeserializer::ParseCanonicalName(SExpSymbol* sym, Object* obj) {
}
func_start = func_end + 1;
}
*obj = name_function_.raw();
*obj = name_function_.ptr();
return true;
}


@ -254,7 +254,7 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ int3();
__ Bind(&done);
#endif
if (yield_index() != PcDescriptorsLayout::kInvalidYieldIndex) {
if (yield_index() != UntaggedPcDescriptors::kInvalidYieldIndex) {
compiler->EmitYieldPositionMetadata(source(), yield_index());
}
__ LeaveFrame();
@ -448,7 +448,7 @@ void ConstantInstr::EmitMoveToLocation(FlowGraphCompiler* compiler,
} else {
if (compiler::Assembler::IsSafeSmi(value_) || value_.IsNull()) {
__ movl(LocationToStackSlotAddress(destination),
compiler::Immediate(static_cast<int32_t>(value_.raw())));
compiler::Immediate(static_cast<int32_t>(value_.ptr())));
} else {
__ pushl(EAX);
__ LoadObjectSafely(EAX, value_);
@ -834,7 +834,7 @@ Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
Location right = locs()->in(1);
if (right.IsConstant()) {
ASSERT(right.constant().IsSmi());
const int32_t imm = static_cast<int32_t>(right.constant().raw());
const int32_t imm = static_cast<int32_t>(right.constant().ptr());
__ testl(left, compiler::Immediate(imm));
} else {
__ testl(left, right.reg());
@ -977,7 +977,7 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const compiler::ExternalLabel label(
reinterpret_cast<uword>(native_c_function()));
__ movl(ECX, compiler::Immediate(label.address()));
compiler->GenerateStubCall(source(), *stub, PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), *stub, UntaggedPcDescriptors::kOther,
locs());
__ popl(result);
@ -1017,7 +1017,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
compiler::Label get_pc;
__ call(&get_pc);
compiler->EmitCallsiteMetadata(InstructionSource(), deopt_id(),
PcDescriptorsLayout::Kind::kOther, locs());
UntaggedPcDescriptors::Kind::kOther, locs());
__ Bind(&get_pc);
__ popl(temp);
__ movl(compiler::Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize), temp);
@ -1891,9 +1891,9 @@ LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
}
void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t field_cid = field().guarded_cid();
@ -2141,7 +2141,7 @@ class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
compiler->SaveLiveRegisters(locs);
compiler->GenerateStubCall(InstructionSource(), stub,
PcDescriptorsLayout::kOther, locs);
UntaggedPcDescriptors::kOther, locs);
__ MoveRegister(result_, EAX);
compiler->RestoreLiveRegisters(locs);
__ jmp(exit_label());
@ -2225,9 +2225,9 @@ static void EnsureMutableBox(FlowGraphCompiler* compiler,
}
void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
compiler::Label skip_store;
@ -2487,10 +2487,10 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
// EDI: iterator which initially points to the start of the variable
// data area to be initialized.
if (num_elements > 0) {
const intptr_t array_size = instance_size - sizeof(ArrayLayout);
const intptr_t array_size = instance_size - sizeof(UntaggedArray);
const compiler::Immediate& raw_null =
compiler::Immediate(static_cast<intptr_t>(Object::null()));
__ leal(EDI, compiler::FieldAddress(EAX, sizeof(ArrayLayout)));
__ leal(EDI, compiler::FieldAddress(EAX, sizeof(UntaggedArray)));
if (array_size < (kInlineArraySize * kWordSize)) {
intptr_t current_offset = 0;
__ movl(EBX, raw_null);
@ -2534,7 +2534,7 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& allocate_array_stub =
Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
compiler->GenerateStubCall(source(), allocate_array_stub,
PcDescriptorsLayout::kOther, locs(), deopt_id());
UntaggedPcDescriptors::kOther, locs(), deopt_id());
__ Bind(&done);
ASSERT(locs()->out(0).reg() == kResultReg);
}
@ -2609,9 +2609,9 @@ LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
}
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
const Register instance_reg = locs()->in(0).reg();
if (slot().representation() != kTagged) {
@ -2847,7 +2847,7 @@ void InstantiateTypeArgumentsInstr::EmitNativeCode(
__ Bind(&non_null_type_args);
}
// Lookup cache in stub before calling runtime.
compiler->GenerateStubCall(source(), GetStub(), PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), GetStub(), UntaggedPcDescriptors::kOther,
locs());
__ Bind(&type_arguments_instantiated);
}
@ -2885,7 +2885,7 @@ class AllocateContextSlowPath
__ movl(EDX, compiler::Immediate(instruction()->num_context_variables()));
compiler->GenerateStubCall(instruction()->source(),
StubCode::AllocateContext(),
PcDescriptorsLayout::kOther, locs);
UntaggedPcDescriptors::kOther, locs);
ASSERT(instruction()->locs()->out(0).reg() == EAX);
compiler->RestoreLiveRegisters(instruction()->locs());
__ jmp(exit_label());
@ -2933,7 +2933,7 @@ void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ movl(EDX, compiler::Immediate(num_context_variables()));
compiler->GenerateStubCall(source(), StubCode::AllocateContext(),
PcDescriptorsLayout::kOther, locs());
UntaggedPcDescriptors::kOther, locs());
}
LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
@ -2952,7 +2952,7 @@ void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(locs()->out(0).reg() == EAX);
compiler->GenerateStubCall(source(), StubCode::CloneContext(),
/*kind=*/PcDescriptorsLayout::kOther, locs());
/*kind=*/UntaggedPcDescriptors::kOther, locs());
}
LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
@ -2973,7 +2973,7 @@ void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (compiler->is_optimizing()) {
compiler->AddDeoptIndexAtCall(deopt_id);
} else {
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id,
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id,
InstructionSource());
}
}
@ -3047,7 +3047,7 @@ class CheckStackOverflowSlowPath
if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
instruction()->in_loop()) {
// In unoptimized code, record loop stack checks as possible OSR entries.
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kOsrEntry,
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
instruction()->deopt_id(),
InstructionSource());
}
@ -5698,17 +5698,17 @@ void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ testl(index, index);
__ j(NEGATIVE, deopt);
} else {
__ cmpl(index, compiler::Immediate(static_cast<int32_t>(length.raw())));
__ cmpl(index, compiler::Immediate(static_cast<int32_t>(length.ptr())));
__ j(ABOVE_EQUAL, deopt);
}
} else if (index_loc.IsConstant()) {
const Smi& index = Smi::Cast(index_loc.constant());
if (length_loc.IsStackSlot()) {
const compiler::Address& length = LocationToStackSlotAddress(length_loc);
__ cmpl(length, compiler::Immediate(static_cast<int32_t>(index.raw())));
__ cmpl(length, compiler::Immediate(static_cast<int32_t>(index.ptr())));
} else {
Register length = length_loc.reg();
__ cmpl(length, compiler::Immediate(static_cast<int32_t>(index.raw())));
__ cmpl(length, compiler::Immediate(static_cast<int32_t>(index.ptr())));
}
__ j(BELOW_EQUAL, deopt);
} else if (length_loc.IsStackSlot()) {
@ -6448,7 +6448,7 @@ void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
// Add a deoptimization descriptor for deoptimizing instructions that
// may be inserted before this instruction.
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
InstructionSource());
}
if (HasParallelMove()) {
@ -6636,7 +6636,7 @@ void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ xorl(ECX, ECX);
__ call(EBX);
compiler->EmitCallsiteMetadata(source(), deopt_id(),
PcDescriptorsLayout::kOther, locs());
UntaggedPcDescriptors::kOther, locs());
__ Drop(argument_count);
}
@ -6685,7 +6685,7 @@ LocationSummary* AllocateObjectInstr::MakeLocationSummary(Zone* zone,
void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const Code& stub = Code::ZoneHandle(
compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
compiler->GenerateStubCall(source(), stub, PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
locs());
}


@ -127,7 +127,7 @@ static void PrintTargetsHelper(BaseTextBuffer* f,
const CidRange& range = targets[i];
const auto target_info = targets.TargetAt(i);
const intptr_t count = target_info->count;
target = target_info->target->raw();
target = target_info->target->ptr();
if (i > 0) {
f->AddString(" | ");
}
@ -1059,7 +1059,7 @@ void NativeEntryInstr::PrintTo(BaseTextBuffer* f) const {
void ReturnInstr::PrintOperandsTo(BaseTextBuffer* f) const {
Instruction::PrintOperandsTo(f);
if (yield_index() != PcDescriptorsLayout::kInvalidYieldIndex) {
if (yield_index() != UntaggedPcDescriptors::kInvalidYieldIndex) {
f->Printf(", yield_index = %" Pd "", yield_index());
}
}
@ -1152,7 +1152,7 @@ void TailCallInstr::PrintOperandsTo(BaseTextBuffer* f) const {
} else {
const Object& owner = Object::Handle(code_.owner());
if (owner.IsFunction()) {
name = Function::Handle(Function::RawCast(owner.raw()))
name = Function::Handle(Function::RawCast(owner.ptr()))
.ToFullyQualifiedCString();
}
}


@ -435,7 +435,7 @@ SExpression* FlowGraphSerializer::AbstractTypeToSExp(const AbstractType& t) {
open_recursive_types_.Insert(hash, &t);
}
if (t.IsFunctionType()) {
const auto& sig = FunctionType::Handle(zone(), FunctionType::Cast(t).raw());
const auto& sig = FunctionType::Handle(zone(), FunctionType::Cast(t).ptr());
AddSymbol(sexp, "FunctionType");
function_type_args_ = sig.type_parameters();
if (auto const ta_sexp = NonEmptyTypeArgumentsToSExp(function_type_args_)) {
@ -584,9 +584,9 @@ SExpression* FlowGraphSerializer::FunctionToSExp(const Function& func) {
AddExtraSymbol(sexp, "native_name", tmp_string_.ToCString());
}
}
if (func.kind() != FunctionLayout::Kind::kRegularFunction ||
if (func.kind() != UntaggedFunction::Kind::kRegularFunction ||
FLAG_verbose_flow_graph_serialization) {
AddExtraSymbol(sexp, "kind", FunctionLayout::KindToCString(func.kind()));
AddExtraSymbol(sexp, "kind", UntaggedFunction::KindToCString(func.kind()));
}
function_type_args_ = func.type_parameters();
if (auto const ta_sexp = NonEmptyTypeArgumentsToSExp(function_type_args_)) {
@ -676,7 +676,7 @@ SExpression* FlowGraphSerializer::ObjectToSExp(const Object& dartval) {
if (dartval.IsNull()) {
return new (zone()) SExpSymbol("null");
}
if (dartval.raw() == Object::sentinel().raw()) {
if (dartval.ptr() == Object::sentinel().ptr()) {
return new (zone()) SExpSymbol("sentinel");
}
if (dartval.IsString()) {
@ -1079,9 +1079,10 @@ void DebugStepCheckInstr::AddExtraInfoToSExpression(
SExpList* sexp,
FlowGraphSerializer* s) const {
Instruction::AddExtraInfoToSExpression(sexp, s);
if (stub_kind_ != PcDescriptorsLayout::kAnyKind ||
if (stub_kind_ != UntaggedPcDescriptors::kAnyKind ||
FLAG_verbose_flow_graph_serialization) {
auto const stub_kind_name = PcDescriptorsLayout::KindToCString(stub_kind_);
auto const stub_kind_name =
UntaggedPcDescriptors::KindToCString(stub_kind_);
ASSERT(stub_kind_name != nullptr);
s->AddExtraSymbol(sexp, "stub_kind", stub_kind_name);
}
@ -1259,8 +1260,8 @@ void InstanceCallBaseInstr::AddExtraInfoToSExpression(
}
} else {
if (interface_target().IsNull() ||
(function_name().raw() != interface_target().name() &&
function_name().raw() != tearoff_interface_target().name())) {
(function_name().ptr() != interface_target().name() &&
function_name().ptr() != tearoff_interface_target().name())) {
s->AddExtraString(sexp, "function_name", function_name().ToCString());
}
}


@ -143,10 +143,10 @@ class FlowGraphSerializer : ValueObject {
static bool ReportStats() { return false; }
static bool IsMatch(const Object& a, const Object& b) {
return a.raw() == b.raw();
return a.ptr() == b.ptr();
}
static uword Hash(const Object& obj) {
if (obj.IsSmi()) return static_cast<uword>(obj.raw());
if (obj.IsSmi()) return static_cast<uword>(obj.ptr());
if (obj.IsInstance()) return Instance::Cast(obj).CanonicalizeHash();
return obj.GetClassId();
}


@ -35,7 +35,7 @@ LibraryPtr LoadTestScript(const char* script,
auto& lib = Library::Handle();
lib ^= Api::UnwrapHandle(api_lib);
EXPECT(!lib.IsNull());
return lib.raw();
return lib.ptr();
}
FunctionPtr GetFunction(const Library& lib, const char* name) {
@ -43,7 +43,7 @@ FunctionPtr GetFunction(const Library& lib, const char* name) {
const auto& func = Function::Handle(lib.LookupFunctionAllowPrivate(
String::Handle(Symbols::New(thread, name))));
EXPECT(!func.IsNull());
return func.raw();
return func.ptr();
}
ClassPtr GetClass(const Library& lib, const char* name) {
@ -51,14 +51,14 @@ ClassPtr GetClass(const Library& lib, const char* name) {
const auto& cls = Class::Handle(
lib.LookupClassAllowPrivate(String::Handle(Symbols::New(thread, name))));
EXPECT(!cls.IsNull());
return cls.raw();
return cls.ptr();
}
TypeParameterPtr GetClassTypeParameter(const Class& klass, const char* name) {
const auto& param = TypeParameter::Handle(
klass.LookupTypeParameter(String::Handle(String::New(name))));
EXPECT(!param.IsNull());
return param.raw();
return param.ptr();
}
TypeParameterPtr GetFunctionTypeParameter(const Function& fun,
@ -67,12 +67,12 @@ TypeParameterPtr GetFunctionTypeParameter(const Function& fun,
const auto& param = TypeParameter::Handle(
fun.LookupTypeParameter(String::Handle(String::New(name)), &fun_level));
EXPECT(!param.IsNull());
return param.raw();
return param.ptr();
}
ObjectPtr Invoke(const Library& lib, const char* name) {
Thread* thread = Thread::Current();
Dart_Handle api_lib = Api::NewHandle(thread, lib.raw());
Dart_Handle api_lib = Api::NewHandle(thread, lib.ptr());
Dart_Handle result;
{
TransitionVMToNative transition(thread);
@ -97,7 +97,7 @@ FlowGraph* TestPipeline::RunPasses(
auto pipeline = CompilationPipeline::New(zone, function_);
parsed_function_ = new (zone)
ParsedFunction(thread, Function::ZoneHandle(zone, function_.raw()));
ParsedFunction(thread, Function::ZoneHandle(zone, function_.ptr()));
pipeline->ParseFunction(parsed_function_);
// Extract type feedback before the graph is built, as the graph


@ -317,7 +317,7 @@ class FlowGraphBuilderHelper {
FunctionType::ZoneHandle(FunctionType::New());
const Function& func = Function::ZoneHandle(Function::New(
signature, String::Handle(Symbols::New(thread, "dummy")),
FunctionLayout::kRegularFunction,
UntaggedFunction::kRegularFunction,
/*is_static=*/true,
/*is_const=*/false,
/*is_abstract=*/false,


@ -326,7 +326,7 @@ void ReturnInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ Bind(&done);
#endif
ASSERT(__ constant_pool_allowed());
if (yield_index() != PcDescriptorsLayout::kInvalidYieldIndex) {
if (yield_index() != UntaggedPcDescriptors::kInvalidYieldIndex) {
compiler->EmitYieldPositionMetadata(source(), yield_index());
}
__ LeaveDartFrame(); // Disallows constant pool use.
@ -899,7 +899,7 @@ Condition TestSmiInstr::EmitComparisonCode(FlowGraphCompiler* compiler,
Location right = locs()->in(1);
if (right.IsConstant()) {
ASSERT(right.constant().IsSmi());
const int64_t imm = static_cast<int64_t>(right.constant().raw());
const int64_t imm = static_cast<int64_t>(right.constant().ptr());
__ TestImmediate(left_reg, compiler::Immediate(imm));
} else {
__ testq(left_reg, right.reg());
@ -1024,7 +1024,7 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LoadNativeEntry(RBX, &label,
compiler::ObjectPoolBuilderEntry::kPatchable);
compiler->GeneratePatchableCall(source(), *stub,
PcDescriptorsLayout::kOther, locs());
UntaggedPcDescriptors::kOther, locs());
} else {
if (is_bootstrap_native()) {
stub = &StubCode::CallBootstrapNative();
@ -1037,7 +1037,7 @@ void NativeCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
reinterpret_cast<uword>(native_c_function()));
__ LoadNativeEntry(RBX, &label,
compiler::ObjectPoolBuilderEntry::kNotPatchable);
compiler->GenerateStubCall(source(), *stub, PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), *stub, UntaggedPcDescriptors::kOther,
locs());
}
__ popq(result);
@ -1075,7 +1075,7 @@ void FfiCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
// 'movq'.
__ leaq(TMP, compiler::Address::AddressRIPRelative(0));
compiler->EmitCallsiteMetadata(InstructionSource(), deopt_id(),
PcDescriptorsLayout::Kind::kOther, locs());
UntaggedPcDescriptors::Kind::kOther, locs());
__ movq(compiler::Address(FPREG, kSavedCallerPcSlotFromFp * kWordSize), TMP);
if (CanExecuteGeneratedCodeInSafepoint()) {
@ -1519,7 +1519,7 @@ class BoxAllocationSlowPath : public TemplateSlowPathCode<Instruction> {
compiler->SaveLiveRegisters(locs);
compiler->GenerateStubCall(InstructionSource(), // No token position.
stub, PcDescriptorsLayout::kOther, locs);
stub, UntaggedPcDescriptors::kOther, locs);
__ MoveRegister(result_, RAX);
compiler->RestoreLiveRegisters(locs);
__ jmp(exit_label());
@ -2044,9 +2044,9 @@ LocationSummary* GuardFieldClassInstr::MakeLocationSummary(Zone* zone,
}
void GuardFieldClassInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
const intptr_t value_cid = value()->Type()->ToCid();
const intptr_t field_cid = field().guarded_cid();
@ -2404,9 +2404,9 @@ static void EnsureMutableBox(FlowGraphCompiler* compiler,
}
void StoreInstanceFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
compiler::Label skip_store;
@ -2694,9 +2694,9 @@ static void InlineArrayAllocation(FlowGraphCompiler* compiler,
// RDI: iterator which initially points to the start of the variable
// data area to be initialized.
if (num_elements > 0) {
const intptr_t array_size = instance_size - sizeof(ArrayLayout);
const intptr_t array_size = instance_size - sizeof(UntaggedArray);
__ LoadObject(R12, Object::null_object());
__ leaq(RDI, compiler::FieldAddress(RAX, sizeof(ArrayLayout)));
__ leaq(RDI, compiler::FieldAddress(RAX, sizeof(UntaggedArray)));
if (array_size < (kInlineArraySize * kWordSize)) {
intptr_t current_offset = 0;
while (current_offset < array_size) {
@ -2747,7 +2747,7 @@ void CreateArrayInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& allocate_array_stub =
Code::ZoneHandle(compiler->zone(), object_store->allocate_array_stub());
compiler->GenerateStubCall(source(), allocate_array_stub,
PcDescriptorsLayout::kOther, locs(), deopt_id());
UntaggedPcDescriptors::kOther, locs(), deopt_id());
__ Bind(&done);
ASSERT(locs()->out(0).reg() == kResultReg);
}
@ -2826,9 +2826,9 @@ LocationSummary* LoadFieldInstr::MakeLocationSummary(Zone* zone,
}
void LoadFieldInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(compiler::target::ObjectLayout::kClassIdTagSize == 16);
ASSERT(sizeof(FieldLayout::guarded_cid_) == 2);
ASSERT(sizeof(FieldLayout::is_nullable_) == 2);
ASSERT(compiler::target::UntaggedObject::kClassIdTagSize == 16);
ASSERT(sizeof(UntaggedField::guarded_cid_) == 2);
ASSERT(sizeof(UntaggedField::is_nullable_) == 2);
const Register instance_reg = locs()->in(0).reg();
if (slot().representation() != kTagged) {
@ -3080,7 +3080,7 @@ void InstantiateTypeArgumentsInstr::EmitNativeCode(
__ Bind(&non_null_type_args);
}
// Lookup cache in stub before calling runtime.
compiler->GenerateStubCall(source(), GetStub(), PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), GetStub(), UntaggedPcDescriptors::kOther,
locs());
__ Bind(&type_arguments_instantiated);
}
@ -3122,7 +3122,7 @@ class AllocateContextSlowPath
__ LoadImmediate(
R10, compiler::Immediate(instruction()->num_context_variables()));
compiler->GenerateStubCall(instruction()->source(), allocate_context_stub,
PcDescriptorsLayout::kOther, locs);
UntaggedPcDescriptors::kOther, locs);
ASSERT(instruction()->locs()->out(0).reg() == RAX);
compiler->RestoreLiveRegisters(instruction()->locs());
__ jmp(exit_label());
@ -3173,7 +3173,7 @@ void AllocateContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ LoadImmediate(R10, compiler::Immediate(num_context_variables()));
compiler->GenerateStubCall(source(), allocate_context_stub,
PcDescriptorsLayout::kOther, locs());
UntaggedPcDescriptors::kOther, locs());
}
LocationSummary* CloneContextInstr::MakeLocationSummary(Zone* zone,
@ -3195,7 +3195,7 @@ void CloneContextInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
const auto& clone_context_stub =
Code::ZoneHandle(compiler->zone(), object_store->clone_context_stub());
compiler->GenerateStubCall(source(), clone_context_stub,
/*kind=*/PcDescriptorsLayout::kOther, locs());
/*kind=*/UntaggedPcDescriptors::kOther, locs());
}
LocationSummary* CatchBlockEntryInstr::MakeLocationSummary(Zone* zone,
@ -3216,7 +3216,7 @@ void CatchBlockEntryInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (compiler->is_optimizing()) {
compiler->AddDeoptIndexAtCall(deopt_id);
} else {
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, deopt_id,
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, deopt_id,
InstructionSource());
}
}
@ -3299,7 +3299,7 @@ class CheckStackOverflowSlowPath
compiler->RecordSafepoint(instruction()->locs(), kNumSlowPathArgs);
compiler->RecordCatchEntryMoves();
compiler->AddDescriptor(
PcDescriptorsLayout::kOther, compiler->assembler()->CodeSize(),
UntaggedPcDescriptors::kOther, compiler->assembler()->CodeSize(),
instruction()->deopt_id(), instruction()->source(),
compiler->CurrentTryIndex());
} else {
@ -3311,7 +3311,7 @@ class CheckStackOverflowSlowPath
if (compiler->isolate_group()->use_osr() && !compiler->is_optimizing() &&
instruction()->in_loop()) {
// In unoptimized code, record loop stack checks as possible OSR entries.
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kOsrEntry,
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kOsrEntry,
instruction()->deopt_id(),
InstructionSource());
}
@ -3788,7 +3788,7 @@ void CheckedSmiComparisonInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
static bool CanBeImmediate(const Object& constant) {
return constant.IsSmi() &&
compiler::Immediate(static_cast<int64_t>(constant.raw())).is_int32();
compiler::Immediate(static_cast<int64_t>(constant.ptr())).is_int32();
}
static bool IsSmiValue(const Object& constant, intptr_t value) {
@ -3897,7 +3897,7 @@ void BinarySmiOpInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
if (locs()->in(1).IsConstant()) {
const Object& constant = locs()->in(1).constant();
ASSERT(constant.IsSmi());
const int64_t imm = static_cast<int64_t>(constant.raw());
const int64_t imm = static_cast<int64_t>(constant.ptr());
switch (op_kind()) {
case Token::kADD: {
__ AddImmediate(left, compiler::Immediate(imm));
@ -4507,10 +4507,10 @@ LocationSummary* BoxInt64Instr::MakeLocationSummary(Zone* zone,
auto object_store = IsolateGroup::Current()->object_store();
const bool stubs_in_vm_isolate =
object_store->allocate_mint_with_fpu_regs_stub()
->ptr()
->untag()
->InVMIsolateHeap() ||
object_store->allocate_mint_without_fpu_regs_stub()
->ptr()
->untag()
->InVMIsolateHeap();
const bool shared_slow_path_call = SlowPathSharingSupported(opt) &&
FLAG_use_bare_instructions &&
@ -4564,7 +4564,7 @@ void BoxInt64Instr::EmitNativeCode(FlowGraphCompiler* compiler) {
ASSERT(!locs()->live_registers()->ContainsRegister(
AllocateMintABI::kResultReg));
auto extended_env = compiler->SlowPathEnvironmentFor(this, 0);
compiler->GenerateStubCall(source(), stub, PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
locs(), DeoptId::kNone, extended_env);
} else {
BoxAllocationSlowPath::Allocate(compiler, this, compiler->mint_class(), out,
@ -5960,7 +5960,7 @@ void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
Register length = length_loc.reg();
const Smi& index = Smi::Cast(index_loc.constant());
__ CompareImmediate(length,
compiler::Immediate(static_cast<int64_t>(index.raw())));
compiler::Immediate(static_cast<int64_t>(index.ptr())));
__ j(BELOW_EQUAL, deopt);
} else if (length_loc.IsConstant()) {
const Smi& length = Smi::Cast(length_loc.constant());
@ -5973,7 +5973,7 @@ void CheckArrayBoundInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
__ j(NEGATIVE, deopt);
} else {
__ CompareImmediate(
index, compiler::Immediate(static_cast<int64_t>(length.raw())));
index, compiler::Immediate(static_cast<int64_t>(length.ptr())));
__ j(ABOVE_EQUAL, deopt);
}
} else {
@ -6848,7 +6848,7 @@ void GotoInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
// Add a deoptimization descriptor for deoptimizing instructions that
// may be inserted before this instruction.
compiler->AddCurrentDescriptor(PcDescriptorsLayout::kDeopt, GetDeoptId(),
compiler->AddCurrentDescriptor(UntaggedPcDescriptors::kDeopt, GetDeoptId(),
InstructionSource());
}
if (HasParallelMove()) {
@ -6979,7 +6979,7 @@ void ClosureCallInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
__ call(RCX);
compiler->EmitCallsiteMetadata(source(), deopt_id(),
PcDescriptorsLayout::kOther, locs());
UntaggedPcDescriptors::kOther, locs());
__ Drop(argument_count);
}
@ -7035,7 +7035,7 @@ void AllocateObjectInstr::EmitNativeCode(FlowGraphCompiler* compiler) {
}
const Code& stub = Code::ZoneHandle(
compiler->zone(), StubCode::GetAllocationStubForClass(cls()));
compiler->GenerateStubCall(source(), stub, PcDescriptorsLayout::kOther,
compiler->GenerateStubCall(source(), stub, UntaggedPcDescriptors::kOther,
locs());
}


@ -111,7 +111,7 @@ static bool IsSmiValue(Value* val, intptr_t* int_val) {
static bool IsCallRecursive(const Function& function, Definition* call) {
Environment* env = call->env();
while (env != NULL) {
if (function.raw() == env->function().raw()) {
if (function.ptr() == env->function().ptr()) {
return true;
}
env = env->outer();
@ -421,11 +421,11 @@ class CallSites : public ValueObject {
if (current->IsPolymorphicInstanceCall()) {
PolymorphicInstanceCallInstr* instance_call =
current->AsPolymorphicInstanceCall();
target = instance_call->targets().FirstTarget().raw();
target = instance_call->targets().FirstTarget().ptr();
call = instance_call;
} else if (current->IsStaticCall()) {
StaticCallInstr* static_call = current->AsStaticCall();
target = static_call->function().raw();
target = static_call->function().ptr();
call = static_call;
} else if (current->IsClosureCall()) {
// TODO(srdjan): Add data for closure calls.
@ -1281,7 +1281,7 @@ class CallSiteInliner : public ValueObject {
if (error.IsLanguageError() &&
(LanguageError::Cast(error).kind() == Report::kBailout)) {
if (error.raw() == Object::background_compilation_error().raw()) {
if (error.ptr() == Object::background_compilation_error().ptr()) {
// Fall through to exit the compilation, and retry it later.
} else {
TRACE_INLINING(
@ -1338,7 +1338,7 @@ class CallSiteInliner : public ValueObject {
continue;
}
if ((info.inlined_depth == depth) &&
(info.caller->raw() == caller.raw()) &&
(info.caller->ptr() == caller.ptr()) &&
!Contains(call_instructions_printed, info.call_instr->GetDeoptId())) {
for (int t = 0; t < depth; t++) {
THR_Print(" ");
@ -1357,7 +1357,7 @@ class CallSiteInliner : public ValueObject {
continue;
}
if ((info.inlined_depth == depth) &&
(info.caller->raw() == caller.raw()) &&
(info.caller->ptr() == caller.ptr()) &&
!Contains(call_instructions_printed, info.call_instr->GetDeoptId())) {
for (int t = 0; t < depth; t++) {
THR_Print(" ");
@ -1396,7 +1396,7 @@ class CallSiteInliner : public ValueObject {
// TODO(zerny): Use a hash map for the cache.
for (intptr_t i = 0; i < function_cache_.length(); ++i) {
ParsedFunction* parsed_function = function_cache_[i];
if (parsed_function->function().raw() == function.raw()) {
if (parsed_function->function().ptr() == function.ptr()) {
*in_cache = true;
return parsed_function;
}
@ -1480,7 +1480,7 @@ class CallSiteInliner : public ValueObject {
call->Receiver()->definition()->OriginalDefinition();
if (AllocateObjectInstr* alloc = receiver->AsAllocateObject()) {
if (!alloc->closure_function().IsNull()) {
target = alloc->closure_function().raw();
target = alloc->closure_function().ptr();
ASSERT(alloc->cls().IsClosureClass());
}
} else if (ConstantInstr* constant = receiver->AsConstant()) {
@ -1686,7 +1686,7 @@ intptr_t PolymorphicInliner::AllocateBlockId() const {
// * JoinEntry: the inlined body is shared and this is a subsequent variant.
bool PolymorphicInliner::CheckInlinedDuplicate(const Function& target) {
for (intptr_t i = 0; i < inlined_variants_.length(); ++i) {
if ((target.raw() == inlined_variants_.TargetAt(i)->target->raw()) &&
if ((target.ptr() == inlined_variants_.TargetAt(i)->target->ptr()) &&
!target.is_polymorphic_target()) {
// The call target is shared with a previous inlined variant. Share
// the graph. This requires a join block at the entry, and edge-split
@ -1743,7 +1743,7 @@ bool PolymorphicInliner::CheckInlinedDuplicate(const Function& target) {
bool PolymorphicInliner::CheckNonInlinedDuplicate(const Function& target) {
for (intptr_t i = 0; i < non_inlined_variants_->length(); ++i) {
if (target.raw() == non_inlined_variants_->TargetAt(i)->target->raw()) {
if (target.ptr() == non_inlined_variants_->TargetAt(i)->target->ptr()) {
return true;
}
}
@ -1768,7 +1768,7 @@ bool PolymorphicInliner::TryInliningPoly(const TargetInfo& target_info) {
Array::ZoneHandle(Z, call_->GetArgumentsDescriptor());
InlinedCallData call_data(call_, arguments_descriptor, call_->FirstArgIndex(),
&arguments, caller_function_);
Function& target = Function::ZoneHandle(zone(), target_info.target->raw());
Function& target = Function::ZoneHandle(zone(), target_info.target->ptr());
if (!owner_->TryInlining(target, call_->argument_names(), &call_data,
false)) {
return false;
@ -2257,10 +2257,10 @@ void FlowGraphInliner::SetInliningId(FlowGraph* flow_graph,
// Use function name to determine if inlineable operator.
// Add names as necessary.
static bool IsInlineableOperator(const Function& function) {
return (function.name() == Symbols::IndexToken().raw()) ||
(function.name() == Symbols::AssignIndexToken().raw()) ||
(function.name() == Symbols::Plus().raw()) ||
(function.name() == Symbols::Minus().raw());
return (function.name() == Symbols::IndexToken().ptr()) ||
(function.name() == Symbols::AssignIndexToken().ptr()) ||
(function.name() == Symbols::Plus().ptr()) ||
(function.name() == Symbols::Minus().ptr());
}
bool FlowGraphInliner::FunctionHasPreferInlinePragma(const Function& function) {
@ -2286,7 +2286,7 @@ bool FlowGraphInliner::AlwaysInline(const Function& function) {
// replace them with inline FG before inlining introduces any superfluous
// AssertAssignable instructions.
if (function.IsDispatcherOrImplicitAccessor() &&
!(function.kind() == FunctionLayout::kDynamicInvocationForwarder &&
!(function.kind() == UntaggedFunction::kDynamicInvocationForwarder &&
function.IsRecognized())) {
// Smaller or same size as the call.
return true;
@ -2299,7 +2299,7 @@ bool FlowGraphInliner::AlwaysInline(const Function& function) {
if (function.IsGetterFunction() || function.IsSetterFunction() ||
IsInlineableOperator(function) ||
(function.kind() == FunctionLayout::kConstructor)) {
(function.kind() == UntaggedFunction::kConstructor)) {
const intptr_t count = function.optimized_instruction_count();
if ((count != 0) && (count < FLAG_inline_getters_setters_smaller_than)) {
return true;


@ -1756,7 +1756,7 @@ class LoadOptimizer : public ValueObject {
value = store_static->value();
}
return value != nullptr && value->BindsToConstant() &&
(value->BoundConstant().raw() == Object::sentinel().raw());
(value->BoundConstant().ptr() == Object::sentinel().ptr());
}
// This optimization pass tries to get rid of lazy initializer calls in


@ -364,8 +364,8 @@ bool Slot::Equals(const Slot* other) const {
case Kind::kCapturedVariable:
return (offset_in_bytes_ == other->offset_in_bytes_) &&
(flags_ == other->flags_) &&
(DataAs<const String>()->raw() ==
other->DataAs<const String>()->raw());
(DataAs<const String>()->ptr() ==
other->DataAs<const String>()->ptr());
case Kind::kDartField:
return (offset_in_bytes_ == other->offset_in_bytes_) &&


@ -52,17 +52,18 @@ class ParsedFunction;
// (i.e. initialized once at construction time and does not change after
// that) or like a non-final field.
#define NULLABLE_BOXED_NATIVE_SLOTS_LIST(V) \
V(Function, FunctionLayout, signature, FunctionType, FINAL) \
V(Context, ContextLayout, parent, Context, FINAL) \
V(Closure, ClosureLayout, instantiator_type_arguments, TypeArguments, FINAL) \
V(Closure, ClosureLayout, delayed_type_arguments, TypeArguments, FINAL) \
V(Closure, ClosureLayout, function_type_arguments, TypeArguments, FINAL) \
V(ClosureData, ClosureDataLayout, default_type_arguments, TypeArguments, \
V(Function, UntaggedFunction, signature, FunctionType, FINAL) \
V(Context, UntaggedContext, parent, Context, FINAL) \
V(Closure, UntaggedClosure, instantiator_type_arguments, TypeArguments, \
FINAL) \
V(Type, TypeLayout, arguments, TypeArguments, FINAL) \
V(FunctionType, FunctionTypeLayout, type_parameters, TypeArguments, FINAL) \
V(WeakProperty, WeakPropertyLayout, key, Dynamic, VAR) \
V(WeakProperty, WeakPropertyLayout, value, Dynamic, VAR)
V(Closure, UntaggedClosure, delayed_type_arguments, TypeArguments, FINAL) \
V(Closure, UntaggedClosure, function_type_arguments, TypeArguments, FINAL) \
V(ClosureData, UntaggedClosureData, default_type_arguments, TypeArguments, \
FINAL) \
V(Type, UntaggedType, arguments, TypeArguments, FINAL) \
V(FunctionType, UntaggedFunctionType, type_parameters, TypeArguments, FINAL) \
V(WeakProperty, UntaggedWeakProperty, key, Dynamic, VAR) \
V(WeakProperty, UntaggedWeakProperty, value, Dynamic, VAR)
// The list of slots that correspond to non-nullable boxed fields of native
// objects in the following format:
@ -78,35 +79,35 @@ class ParsedFunction;
// (i.e. initialized once at construction time and does not change after
// that) or like a non-final field.
#define NONNULLABLE_BOXED_NATIVE_SLOTS_LIST(V) \
V(Array, ArrayLayout, length, Smi, FINAL) \
V(Closure, ClosureLayout, function, Function, FINAL) \
V(Closure, ClosureLayout, context, Context, FINAL) \
V(Closure, ClosureLayout, hash, Context, VAR) \
V(ClosureData, ClosureDataLayout, default_type_arguments_info, Smi, FINAL) \
V(Function, FunctionLayout, data, Dynamic, FINAL) \
V(Function, FunctionLayout, parameter_names, Array, FINAL) \
V(FunctionType, FunctionTypeLayout, parameter_types, Array, FINAL) \
V(GrowableObjectArray, GrowableObjectArrayLayout, length, Smi, VAR) \
V(GrowableObjectArray, GrowableObjectArrayLayout, data, Array, VAR) \
V(TypedDataBase, TypedDataBaseLayout, length, Smi, FINAL) \
V(TypedDataView, TypedDataViewLayout, offset_in_bytes, Smi, FINAL) \
V(TypedDataView, TypedDataViewLayout, data, Dynamic, FINAL) \
V(String, StringLayout, length, Smi, FINAL) \
V(LinkedHashMap, LinkedHashMapLayout, index, TypedDataUint32Array, VAR) \
V(LinkedHashMap, LinkedHashMapLayout, data, Array, VAR) \
V(LinkedHashMap, LinkedHashMapLayout, hash_mask, Smi, VAR) \
V(LinkedHashMap, LinkedHashMapLayout, used_data, Smi, VAR) \
V(LinkedHashMap, LinkedHashMapLayout, deleted_keys, Smi, VAR) \
V(ArgumentsDescriptor, ArrayLayout, type_args_len, Smi, FINAL) \
V(ArgumentsDescriptor, ArrayLayout, positional_count, Smi, FINAL) \
V(ArgumentsDescriptor, ArrayLayout, count, Smi, FINAL) \
V(ArgumentsDescriptor, ArrayLayout, size, Smi, FINAL) \
V(PointerBase, PointerBaseLayout, data_field, Dynamic, FINAL) \
V(TypeArguments, TypeArgumentsLayout, length, Smi, FINAL) \
V(TypeParameter, TypeParameterLayout, bound, Dynamic, FINAL) \
V(TypeParameter, TypeParameterLayout, name, Dynamic, FINAL) \
V(UnhandledException, UnhandledExceptionLayout, exception, Dynamic, FINAL) \
V(UnhandledException, UnhandledExceptionLayout, stacktrace, Dynamic, FINAL)
V(Array, UntaggedArray, length, Smi, FINAL) \
V(Closure, UntaggedClosure, function, Function, FINAL) \
V(Closure, UntaggedClosure, context, Context, FINAL) \
V(Closure, UntaggedClosure, hash, Context, VAR) \
V(ClosureData, UntaggedClosureData, default_type_arguments_info, Smi, FINAL) \
V(Function, UntaggedFunction, data, Dynamic, FINAL) \
V(Function, UntaggedFunction, parameter_names, Array, FINAL) \
V(FunctionType, UntaggedFunctionType, parameter_types, Array, FINAL) \
V(GrowableObjectArray, UntaggedGrowableObjectArray, length, Smi, VAR) \
V(GrowableObjectArray, UntaggedGrowableObjectArray, data, Array, VAR) \
V(TypedDataBase, UntaggedTypedDataBase, length, Smi, FINAL) \
V(TypedDataView, UntaggedTypedDataView, offset_in_bytes, Smi, FINAL) \
V(TypedDataView, UntaggedTypedDataView, data, Dynamic, FINAL) \
V(String, UntaggedString, length, Smi, FINAL) \
V(LinkedHashMap, UntaggedLinkedHashMap, index, TypedDataUint32Array, VAR) \
V(LinkedHashMap, UntaggedLinkedHashMap, data, Array, VAR) \
V(LinkedHashMap, UntaggedLinkedHashMap, hash_mask, Smi, VAR) \
V(LinkedHashMap, UntaggedLinkedHashMap, used_data, Smi, VAR) \
V(LinkedHashMap, UntaggedLinkedHashMap, deleted_keys, Smi, VAR) \
V(ArgumentsDescriptor, UntaggedArray, type_args_len, Smi, FINAL) \
V(ArgumentsDescriptor, UntaggedArray, positional_count, Smi, FINAL) \
V(ArgumentsDescriptor, UntaggedArray, count, Smi, FINAL) \
V(ArgumentsDescriptor, UntaggedArray, size, Smi, FINAL) \
V(PointerBase, UntaggedPointerBase, data_field, Dynamic, FINAL) \
V(TypeArguments, UntaggedTypeArguments, length, Smi, FINAL) \
V(TypeParameter, UntaggedTypeParameter, bound, Dynamic, FINAL) \
V(TypeParameter, UntaggedTypeParameter, name, Dynamic, FINAL) \
V(UnhandledException, UntaggedUnhandledException, exception, Dynamic, FINAL) \
V(UnhandledException, UntaggedUnhandledException, stacktrace, Dynamic, FINAL)
// List of slots that correspond to unboxed fields of native objects in the
// following format:
@ -124,10 +125,10 @@ class ParsedFunction;
//
// Note: As the underlying field is unboxed, these slots cannot be nullable.
#define UNBOXED_NATIVE_SLOTS_LIST(V) \
V(Function, FunctionLayout, kind_tag, Uint32, FINAL) \
V(Function, FunctionLayout, packed_fields, Uint32, FINAL) \
V(FunctionType, FunctionTypeLayout, packed_fields, Uint32, FINAL) \
V(TypeParameter, TypeParameterLayout, flags, Uint8, FINAL)
V(Function, UntaggedFunction, kind_tag, Uint32, FINAL) \
V(Function, UntaggedFunction, packed_fields, Uint32, FINAL) \
V(FunctionType, UntaggedFunctionType, packed_fields, Uint32, FINAL) \
V(TypeParameter, UntaggedTypeParameter, flags, Uint8, FINAL)
// For uses that do not need the exact_type (boxed) or representation (unboxed)
// or whether a boxed native slot is nullable. (Generally, such users only need


@ -49,7 +49,7 @@ TEST_CASE(SlotFromGuardedField) {
const FunctionType& signature = FunctionType::ZoneHandle(FunctionType::New());
const Function& dummy_function = Function::ZoneHandle(
Function::New(signature, String::Handle(Symbols::New(thread, "foo")),
FunctionLayout::kRegularFunction, false, false, false,
UntaggedFunction::kRegularFunction, false, false, false,
false, false, dummy_class, TokenPosition::kMinSource));
const Field& field = Field::Handle(
@ -83,8 +83,8 @@ TEST_CASE(SlotFromGuardedField) {
// Check that the field was added (once) to the list of guarded fields.
EXPECT_EQ(1, parsed_function->guarded_fields()->length());
EXPECT_EQ(parsed_function->guarded_fields()->At(0)->raw(),
field_clone_1.raw());
EXPECT_EQ(parsed_function->guarded_fields()->At(0)->ptr(),
field_clone_1.ptr());
// Change the guarded state of the field to "unknown" - emulating concurrent
// modification of the guarded state in mutator) and create a new clone of
@ -100,8 +100,8 @@ TEST_CASE(SlotFromGuardedField) {
const Slot& slot3 = Slot::Get(field_clone_3, parsed_function2);
EXPECT_EQ(&slot1, &slot3);
EXPECT_EQ(1, parsed_function2->guarded_fields()->length());
EXPECT_EQ(parsed_function2->guarded_fields()->At(0)->raw(),
field_clone_1.raw());
EXPECT_EQ(parsed_function2->guarded_fields()->At(0)->ptr(),
field_clone_1.ptr());
}
} // namespace dart


@ -393,7 +393,7 @@ void FlowGraphTypePropagator::VisitBranch(BranchInstr* instr) {
} else if ((is_simple_instance_of || (instance_of != NULL)) &&
comparison->InputAt(1)->BindsToConstant() &&
comparison->InputAt(1)->BoundConstant().IsBool()) {
if (comparison->InputAt(1)->BoundConstant().raw() == Bool::False().raw()) {
if (comparison->InputAt(1)->BoundConstant().ptr() == Bool::False().ptr()) {
negated = !negated;
}
BlockEntryInstr* true_successor =
@ -863,7 +863,7 @@ static bool CanPotentiallyBeSmi(const AbstractType& type, bool recurse) {
// *not* assignable to it (because int implements Comparable<num> and not
// Comparable<int>).
if (type.IsFutureOrType() ||
type.type_class() == CompilerState::Current().ComparableClass().raw()) {
type.type_class() == CompilerState::Current().ComparableClass().ptr()) {
const auto& args = TypeArguments::Handle(Type::Cast(type).arguments());
const auto& arg0 = AbstractType::Handle(args.TypeAt(0));
return !recurse || CanPotentiallyBeSmi(arg0, /*recurse=*/true);
@ -1146,7 +1146,7 @@ CompileType ParameterInstr::ComputeType() const {
// Do not trust static parameter type of 'operator ==' as it is a
// non-nullable Object but VM handles comparison with null in
// the callee, so 'operator ==' can take null as an argument.
if ((function.name() != Symbols::EqualOperator().raw()) &&
if ((function.name() != Symbols::EqualOperator().ptr()) &&
(param->was_type_checked_by_caller() ||
(is_unchecked_entry_param &&
!param->is_explicit_covariant_parameter()))) {
@ -1500,7 +1500,7 @@ CompileType LoadClassIdInstr::ComputeType() const {
CompileType LoadFieldInstr::ComputeType() const {
const AbstractType& field_type = slot().static_type();
CompileType compile_type_cid = slot().ComputeCompileType();
if (field_type.raw() == AbstractType::null()) {
if (field_type.ptr() == AbstractType::null()) {
return compile_type_cid;
}
@ -1749,7 +1749,7 @@ static AbstractTypePtr ExtractElementTypeFromArrayType(
AbstractType::Handle(TypeParameter::Cast(array_type).bound()));
}
if (!array_type.IsType()) {
return Object::dynamic_type().raw();
return Object::dynamic_type().ptr();
}
const intptr_t cid = array_type.type_class_id();
if (cid == kGrowableObjectArrayCid || cid == kArrayCid ||
@ -1759,7 +1759,7 @@ static AbstractTypePtr ExtractElementTypeFromArrayType(
const auto& type_args = TypeArguments::Handle(array_type.arguments());
return type_args.TypeAtNullSafe(Array::kElementTypeTypeArgPos);
}
return Object::dynamic_type().raw();
return Object::dynamic_type().ptr();
}
static AbstractTypePtr GetElementTypeFromArray(Value* array) {
@ -1770,7 +1770,7 @@ static AbstractTypePtr GetElementTypeFromArray(Value* array) {
auto& elem_type = AbstractType::Handle(ExtractElementTypeFromArrayType(
*(array->definition()->Type()->ToAbstractType())));
if (!elem_type.IsDynamicType()) {
return elem_type.raw();
return elem_type.ptr();
}
}
return ExtractElementTypeFromArrayType(*(array->Type()->ToAbstractType()));


@ -172,7 +172,7 @@ ISOLATE_UNIT_TEST_CASE(TypePropagator_Refinement) {
const FunctionType& signature = FunctionType::Handle(FunctionType::New());
const Function& target_func = Function::ZoneHandle(Function::New(
signature, String::Handle(Symbols::New(thread, "dummy2")),
FunctionLayout::kRegularFunction,
UntaggedFunction::kRegularFunction,
/*is_static=*/true,
/*is_const=*/false,
/*is_abstract=*/false,


@ -135,10 +135,10 @@ ISOLATE_UNIT_TEST_CASE(IRTest_TypedDataAOT_NotInlining) {
kMatchReturn,
}));
EXPECT(length_call->Selector() == Symbols::GetLength().raw());
EXPECT(length_call->Selector() == Symbols::GetLength().ptr());
EXPECT(pusharg1->InputAt(0)->definition()->IsParameter());
EXPECT(pusharg2->InputAt(0)->definition()->IsParameter());
EXPECT(index_get_call->Selector() == Symbols::IndexToken().raw());
EXPECT(index_get_call->Selector() == Symbols::IndexToken().ptr());
}
// This test asserts that we are inlining get:length, [] and []= for all typed

Some files were not shown because too many files have changed in this diff.