dart-sdk/runtime/vm/deferred_objects.cc
Tess Strickland 291051e02d [vm/compiler] Add an instruction to adjust unsafe untagged addresses.
ComputeElementAddress takes an untagged base address, an index, an
index scale, and an offset, and returns base + (index * scale) + offset
as an untagged address.
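
For example, computing the address of element i of a typed data array
with 8-byte elements and payload pointer data becomes a single
ComputeElementAddress(data, i, 8, 0), i.e. data + (i * 8). (The textual
operand form here is only illustrative; see the instruction's
definition for the exact encoding.)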

This removes the last conversions between untagged pointers and unboxed
integers for untagged GC-movable pointers. The only remaining
conversions are the following cases, all of which involve
non-GC-movable pointers:

1. Calls to the FFI resolver (on IA32 only).
2. Returning nullptr in exceptional returns from FFI callbacks when a
   pointer to memory is expected.
3. Converting user-provided integers to FFI pointer objects and back.

In addition, we no longer add the data field to materializations of
typed data views, but instead recompute it in DeferredObject::Fill().
This removes the last source of unsafe untagged pointers that may have
arbitrary lifetimes in the flow graph.

Thus, we can now verify in the FlowGraphChecker that there are no
GC-triggering instructions between the creation of an untagged
GC-movable pointer and its use (including the use itself). To do this,
this CL adds a predicate MayCreateUnsafeUntaggedPointer to definitions,
which by default returns true for kUntagged results and false
otherwise, and which should only be overridden in cases where the
result can be proven not to be an untagged GC-movable pointer.
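
A minimal sketch of what the default predicate could look like (the
actual declaration in il.h may differ in its details):

  // Conservative default: any untagged result may be an unsafe
  // untagged pointer unless an override proves otherwise.
  virtual bool MayCreateUnsafeUntaggedPointer() const {
    return representation() == kUntagged;
  }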

TEST=vm/dart/regress_54710_il_test
     vm/cc/AllocationSinking_NoViewDataMaterialization

Fixes: https://github.com/dart-lang/sdk/issues/54710
Cq-Include-Trybots: luci.dart.try:vm-aot-android-release-arm64c-try,vm-aot-linux-debug-x64-try,vm-aot-linux-debug-x64c-try,vm-aot-mac-release-arm64-try,vm-aot-mac-release-x64-try,vm-aot-obfuscate-linux-release-x64-try,vm-aot-optimization-level-linux-release-x64-try,vm-appjit-linux-debug-x64-try,vm-asan-linux-release-x64-try,vm-checked-mac-release-arm64-try,vm-eager-optimization-linux-release-ia32-try,vm-eager-optimization-linux-release-x64-try,vm-ffi-android-debug-arm-try,vm-ffi-android-debug-arm64c-try,vm-ffi-qemu-linux-release-arm-try,vm-ffi-qemu-linux-release-riscv64-try,vm-fuchsia-release-x64-try,vm-linux-debug-ia32-try,vm-linux-debug-x64c-try,vm-mac-debug-arm64-try,vm-mac-debug-x64-try,vm-msan-linux-release-x64-try,vm-reload-linux-debug-x64-try,vm-reload-rollback-linux-debug-x64-try,vm-ubsan-linux-release-x64-try,vm-win-release-ia32-try
Change-Id: Ie172a8bd0330a728a4f151478664a530f8d9b38a
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/354862
Commit-Queue: Tess Strickland <sstrickl@google.com>
Reviewed-by: Alexander Markov <alexmarkov@google.com>
2024-03-22 19:03:31 +00:00

// Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#if !defined(DART_PRECOMPILED_RUNTIME)

#include "vm/deferred_objects.h"

#include "vm/code_patcher.h"
#include "vm/compiler/jit/compiler.h"
#include "vm/deopt_instructions.h"
#include "vm/flags.h"
#include "vm/object.h"
#include "vm/object_store.h"

namespace dart {

DECLARE_FLAG(bool, trace_deoptimization);
DECLARE_FLAG(bool, trace_deoptimization_verbose);
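
// The Materialize methods below run during deoptimization. Each one boxes a
// raw value saved from the optimized frame and writes the resulting heap
// object into slot(), which points at the corresponding slot of the
// unoptimized frame being materialized.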
void DeferredDouble::Materialize(DeoptContext* deopt_context) {
  DoublePtr* double_slot = reinterpret_cast<DoublePtr*>(slot());
  *double_slot = Double::New(value());

  if (FLAG_trace_deoptimization_verbose) {
    OS::PrintErr("materializing double at %" Px ": %g\n",
                 reinterpret_cast<uword>(slot()), value());
  }
}

void DeferredMint::Materialize(DeoptContext* deopt_context) {
  MintPtr* mint_slot = reinterpret_cast<MintPtr*>(slot());
  ASSERT(!Smi::IsValid(value()));
  Mint& mint = Mint::Handle();
  mint ^= Integer::New(value());
  *mint_slot = mint.ptr();

  if (FLAG_trace_deoptimization_verbose) {
    OS::PrintErr("materializing mint at %" Px ": %" Pd64 "\n",
                 reinterpret_cast<uword>(slot()), value());
  }
}

void DeferredFloat32x4::Materialize(DeoptContext* deopt_context) {
  Float32x4Ptr* float32x4_slot = reinterpret_cast<Float32x4Ptr*>(slot());
  Float32x4Ptr raw_float32x4 = Float32x4::New(value());
  *float32x4_slot = raw_float32x4;

  if (FLAG_trace_deoptimization_verbose) {
    float x = raw_float32x4->untag()->x();
    float y = raw_float32x4->untag()->y();
    float z = raw_float32x4->untag()->z();
    float w = raw_float32x4->untag()->w();
    OS::PrintErr("materializing Float32x4 at %" Px ": %g,%g,%g,%g\n",
                 reinterpret_cast<uword>(slot()), x, y, z, w);
  }
}

void DeferredFloat64x2::Materialize(DeoptContext* deopt_context) {
  Float64x2Ptr* float64x2_slot = reinterpret_cast<Float64x2Ptr*>(slot());
  Float64x2Ptr raw_float64x2 = Float64x2::New(value());
  *float64x2_slot = raw_float64x2;

  if (FLAG_trace_deoptimization_verbose) {
    double x = raw_float64x2->untag()->x();
    double y = raw_float64x2->untag()->y();
    OS::PrintErr("materializing Float64x2 at %" Px ": %g,%g\n",
                 reinterpret_cast<uword>(slot()), x, y);
  }
}

void DeferredInt32x4::Materialize(DeoptContext* deopt_context) {
  Int32x4Ptr* int32x4_slot = reinterpret_cast<Int32x4Ptr*>(slot());
  Int32x4Ptr raw_int32x4 = Int32x4::New(value());
  *int32x4_slot = raw_int32x4;

  if (FLAG_trace_deoptimization_verbose) {
    uint32_t x = raw_int32x4->untag()->x();
    uint32_t y = raw_int32x4->untag()->y();
    uint32_t z = raw_int32x4->untag()->z();
    uint32_t w = raw_int32x4->untag()->w();
    OS::PrintErr("materializing Int32x4 at %" Px ": %x,%x,%x,%x\n",
                 reinterpret_cast<uword>(slot()), x, y, z, w);
  }
}

void DeferredObjectRef::Materialize(DeoptContext* deopt_context) {
  DeferredObject* obj = deopt_context->GetDeferredObject(index());
  *slot() = obj->object();
  if (FLAG_trace_deoptimization_verbose) {
    const Class& cls = Class::Handle(IsolateGroup::Current()->class_table()->At(
        Object::Handle(obj->object()).GetClassId()));
    OS::PrintErr("writing instance of class %s ref at %" Px ".\n",
                 cls.ToCString(), reinterpret_cast<uword>(slot()));
  }
}

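// Materializes the saved return address of an optimized frame as the
// matching continuation PC in the function's unoptimized code, compiling
// that code first if necessary.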
void DeferredRetAddr::Materialize(DeoptContext* deopt_context) {
  Thread* thread = deopt_context->thread();
  Zone* zone = deopt_context->zone();
  Function& function = Function::Handle(zone);
  function ^= deopt_context->ObjectAt(index_);
  const Error& error =
      Error::Handle(zone, Compiler::EnsureUnoptimizedCode(thread, function));
  if (!error.IsNull()) {
    Exceptions::PropagateError(error);
  }
  const Code& code = Code::Handle(zone, function.unoptimized_code());
  uword continue_at_pc =
      code.GetPcForDeoptId(deopt_id_, UntaggedPcDescriptors::kDeopt);
  if (continue_at_pc == 0) {
    FATAL("Can't locate continuation PC for deoptid %" Pd " within %s\n",
          deopt_id_, function.ToFullyQualifiedCString());
  }
  uword* dest_addr = reinterpret_cast<uword*>(slot());
  *dest_addr = continue_at_pc;

  if (FLAG_trace_deoptimization_verbose) {
    OS::PrintErr("materializing return addr at 0x%" Px ": 0x%" Px "\n",
                 reinterpret_cast<uword>(slot()), continue_at_pc);
  }

  uword pc = code.GetPcForDeoptId(deopt_id_, UntaggedPcDescriptors::kIcCall);
  if (pc != 0) {
    // If the deoptimization happened at an IC call, update the IC data
    // to avoid repeated deoptimization at the same site next time around.
    // We cannot use CodePatcher::GetInstanceCallAt because the call site
    // may have switched from referencing an ICData to a target Code or
    // MegamorphicCache.
    ICData& ic_data = ICData::Handle(zone, function.FindICData(deopt_id_));
    ic_data.AddDeoptReason(deopt_context->deopt_reason());
    // Propagate the reason to all ICData objects with the same deopt_id,
    // since only the reasons recorded on unoptimized-code ICData (IC calls)
    // are propagated.
    function.SetDeoptReasonForAll(ic_data.deopt_id(),
                                  deopt_context->deopt_reason());
  } else {
    if (deopt_context->HasDeoptFlag(ICData::kHoisted)) {
      // Prevent excessive deoptimization.
      function.SetProhibitsInstructionHoisting(true);
    }
    if (deopt_context->HasDeoptFlag(ICData::kGeneralized)) {
      function.SetProhibitsBoundsCheckGeneralization(true);
    }
  }
}

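// Materializes the PC marker slot with the function's unoptimized Code
// object, records the deoptimization, and switches the function back to
// unoptimized code so it is reoptimized only after fresh feedback.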
void DeferredPcMarker::Materialize(DeoptContext* deopt_context) {
  Thread* thread = deopt_context->thread();
  Zone* zone = deopt_context->zone();
  uword* dest_addr = reinterpret_cast<uword*>(slot());
  Function& function = Function::Handle(zone);
  function ^= deopt_context->ObjectAt(index_);
  ASSERT(!function.IsNull());
  SafepointWriteRwLocker ml(thread, thread->isolate_group()->program_lock());
  const Error& error =
      Error::Handle(zone, Compiler::EnsureUnoptimizedCode(thread, function));
  if (!error.IsNull()) {
    Exceptions::PropagateError(error);
  }
  const Code& code = Code::Handle(zone, function.unoptimized_code());
  ASSERT(!code.IsNull());
  ASSERT(function.HasCode());
  *reinterpret_cast<ObjectPtr*>(dest_addr) = code.ptr();

  if (FLAG_trace_deoptimization_verbose) {
    THR_Print("materializing pc marker at 0x%" Px ": %s, %s\n",
              reinterpret_cast<uword>(slot()), code.ToCString(),
              function.ToCString());
  }

  // Increment the deoptimization counter. This effectively increments the
  // counter of each function occurring in the optimized frame.
  if (deopt_context->deoptimizing_code()) {
    function.set_deoptimization_counter(function.deoptimization_counter() + 1);
  }
  if (FLAG_trace_deoptimization || FLAG_trace_deoptimization_verbose) {
    THR_Print("Deoptimizing '%s' (count %d)\n",
              function.ToFullyQualifiedCString(),
              function.deoptimization_counter());
  }
  // Clear the invocation counter so that the function is hopefully
  // reoptimized only after more feedback has been collected.
  function.SetUsageCounter(0);
  if (function.HasOptimizedCode()) {
    function.SwitchToUnoptimizedCode();
  }
}

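// Materializes the pool pointer (PP) slot with the object pool of the
// function's unoptimized code.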
void DeferredPp::Materialize(DeoptContext* deopt_context) {
  Thread* thread = deopt_context->thread();
  Zone* zone = deopt_context->zone();
  Function& function = Function::Handle(zone);
  function ^= deopt_context->ObjectAt(index_);
  ASSERT(!function.IsNull());
  const Error& error =
      Error::Handle(zone, Compiler::EnsureUnoptimizedCode(thread, function));
  if (!error.IsNull()) {
    Exceptions::PropagateError(error);
  }
  const Code& code = Code::Handle(zone, function.unoptimized_code());
  ASSERT(!code.IsNull());
  ASSERT(code.GetObjectPool() != Object::null());
  *slot() = code.GetObjectPool();

  if (FLAG_trace_deoptimization_verbose) {
    OS::PrintErr("materializing pp at 0x%" Px ": 0x%" Px "\n",
                 reinterpret_cast<uword>(slot()),
                 static_cast<uword>(code.GetObjectPool()));
  }
}

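// Returns the materialized object, allocating it on first use. Allocation
// (Create) and field initialization (Fill) are separate phases so that
// materialized objects can refer to one another.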
ObjectPtr DeferredObject::object() {
  if (object_ == nullptr) {
    Create();
  }
  return object_->ptr();
}

void DeferredObject::Create() {
  if (object_ != nullptr) {
    return;
  }

  Class& cls = Class::Handle();
  cls ^= GetClass();
  switch (cls.id()) {
    case kContextCid: {
      const intptr_t num_variables =
          Smi::Cast(Object::Handle(GetLengthOrShape())).Value();
      if (FLAG_trace_deoptimization_verbose) {
        OS::PrintErr(
            "materializing context of length %" Pd " (%" Px ", %" Pd " vars)\n",
            num_variables, reinterpret_cast<uword>(args_), field_count_);
      }
      object_ = &Context::ZoneHandle(Context::New(num_variables));
    } break;
    case kArrayCid: {
      const intptr_t num_elements =
          Smi::Cast(Object::Handle(GetLengthOrShape())).Value();
      if (FLAG_trace_deoptimization_verbose) {
        OS::PrintErr("materializing array of length %" Pd " (%" Px ", %" Pd
                     " elements)\n",
                     num_elements, reinterpret_cast<uword>(args_),
                     field_count_);
      }
      object_ = &Array::ZoneHandle(Array::New(num_elements));
    } break;
    case kRecordCid: {
      const RecordShape shape(Smi::RawCast(GetLengthOrShape()));
      if (FLAG_trace_deoptimization_verbose) {
        OS::PrintErr(
            "materializing record of shape %" Px " (%" Px ", %" Pd " fields)\n",
            shape.AsInt(), reinterpret_cast<uword>(args_), field_count_);
      }
      object_ = &Record::ZoneHandle(Record::New(shape));
    } break;
    default:
      if (IsTypedDataClassId(cls.id())) {
        const intptr_t num_elements =
            Smi::Cast(Object::Handle(GetLengthOrShape())).Value();
        if (FLAG_trace_deoptimization_verbose) {
          OS::PrintErr("materializing typed data cid %" Pd " of length %" Pd
                       " (%" Px ", %" Pd " elements)\n",
                       cls.id(), num_elements, reinterpret_cast<uword>(args_),
                       field_count_);
        }
        object_ =
            &TypedData::ZoneHandle(TypedData::New(cls.id(), num_elements));
      } else {
        if (FLAG_trace_deoptimization_verbose) {
          OS::PrintErr(
              "materializing instance of %s (%" Px ", %" Pd " fields)\n",
              cls.ToCString(), reinterpret_cast<uword>(args_), field_count_);
        }
        object_ = &Instance::ZoneHandle(Instance::New(cls));
      }
  }
}

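// Translates a byte offset within a Context into the index of the
// corresponding context variable.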
static intptr_t ToContextIndex(intptr_t offset_in_bytes) {
  intptr_t result = (offset_in_bytes - Context::variable_offset(0)) /
                    Context::kBytesPerElement;
  ASSERT(result >= 0);
  return result;
}

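// Writes the recorded (field offset, value) pairs into the materialized
// object, dispatching on its class id.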
void DeferredObject::Fill() {
  Create();  // Ensure instance is created.

  Class& cls = Class::Handle();
  cls ^= GetClass();
  switch (cls.id()) {
    case kContextCid: {
      const Context& context = Context::Cast(*object_);

      Smi& offset = Smi::Handle();
      Object& value = Object::Handle();

      for (intptr_t i = 0; i < field_count_; i++) {
        offset ^= GetFieldOffset(i);
        if (offset.Value() == Context::parent_offset()) {
          // Copy parent.
          Context& parent = Context::Handle();
          parent ^= GetValue(i);
          context.set_parent(parent);
          if (FLAG_trace_deoptimization_verbose) {
            OS::PrintErr(" ctx@parent (offset %" Pd ") <- %s\n",
                         offset.Value(), parent.ToCString());
          }
        } else {
          intptr_t context_index = ToContextIndex(offset.Value());
          value = GetValue(i);
          context.SetAt(context_index, value);
          if (FLAG_trace_deoptimization_verbose) {
            OS::PrintErr(" ctx@%" Pd " (offset %" Pd ") <- %s\n",
                         context_index, offset.Value(), value.ToCString());
          }
        }
      }
    } break;
    case kArrayCid: {
      const Array& array = Array::Cast(*object_);

      Smi& offset = Smi::Handle();
      Object& value = Object::Handle();

      for (intptr_t i = 0; i < field_count_; i++) {
        offset ^= GetFieldOffset(i);
        if (offset.Value() == Array::type_arguments_offset()) {
          TypeArguments& type_args = TypeArguments::Handle();
          type_args ^= GetValue(i);
          array.SetTypeArguments(type_args);
          if (FLAG_trace_deoptimization_verbose) {
            OS::PrintErr(" array@type_args (offset %" Pd ") <- %s\n",
                         offset.Value(), type_args.ToCString());
          }
        } else {
          const intptr_t index = Array::index_at_offset(offset.Value());
          value = GetValue(i);
          array.SetAt(index, value);
          if (FLAG_trace_deoptimization_verbose) {
            OS::PrintErr(" array@%" Pd " (offset %" Pd ") <- %s\n", index,
                         offset.Value(), value.ToCString());
          }
        }
      }
    } break;
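    // Pointer stores a raw native address rather than a GC-managed heap
    // pointer, so the saved integer value can be reattached directly.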
    case kPointerCid: {
      auto* const zone = Thread::Current()->zone();
      const int kDataIndex = 0;
      const int kTypeArgIndex = 1;
      ASSERT(field_count_ == 2);
      ASSERT(Smi::Cast(Object::Handle(zone, GetFieldOffset(kDataIndex)))
                 .AsInt64Value() == PointerBase::data_offset());
      ASSERT(Smi::Cast(Object::Handle(zone, GetFieldOffset(kTypeArgIndex)))
                 .AsInt64Value() == Pointer::type_arguments_offset());
      const auto& pointer = Pointer::Cast(*object_);
      const size_t address =
          Integer::Cast(Object::Handle(zone, GetValue(kDataIndex)))
              .AsInt64Value();
      pointer.SetNativeAddress(address);
      const auto& type_args = TypeArguments::Handle(
          zone, IsolateGroup::Current()->object_store()->type_argument_never());
      pointer.SetTypeArguments(type_args);
      if (FLAG_trace_deoptimization_verbose) {
        OS::PrintErr(" pointer@data <- 0x%" Px "\n", address);
        OS::PrintErr(" pointer@type_args <- %s\n", type_args.ToCString());
      }
    } break;
    case kRecordCid: {
      const Record& record = Record::Cast(*object_);

      Smi& offset = Smi::Handle();
      Object& value = Object::Handle();

      for (intptr_t i = 0; i < field_count_; i++) {
        offset ^= GetFieldOffset(i);
        const intptr_t index = Record::field_index_at_offset(offset.Value());
        value = GetValue(i);
        record.SetFieldAt(index, value);
        if (FLAG_trace_deoptimization_verbose) {
          OS::PrintErr(" record@%" Pd " (offset %" Pd ") <- %s\n", index,
                       offset.Value(), value.ToCString());
        }
      }
    } break;
    default:
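      // Typed data elements are written back with type-specific setters;
      // each recorded "field offset" is a byte offset into the payload.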
      if (IsTypedDataClassId(cls.id())) {
        const TypedData& typed_data = TypedData::Cast(*object_);

        Smi& offset = Smi::Handle();
        Object& value = Object::Handle();
        const auto cid = cls.id();

        for (intptr_t i = 0; i < field_count_; i++) {
          offset ^= GetFieldOffset(i);
          const intptr_t element_offset = offset.Value();
          value = GetValue(i);
          switch (cid) {
            case kTypedDataInt8ArrayCid:
              typed_data.SetInt8(
                  element_offset,
                  static_cast<int8_t>(Integer::Cast(value).AsInt64Value()));
              break;
            case kTypedDataUint8ArrayCid:
            case kTypedDataUint8ClampedArrayCid:
              typed_data.SetUint8(
                  element_offset,
                  static_cast<uint8_t>(Integer::Cast(value).AsInt64Value()));
              break;
            case kTypedDataInt16ArrayCid:
              typed_data.SetInt16(
                  element_offset,
                  static_cast<int16_t>(Integer::Cast(value).AsInt64Value()));
              break;
            case kTypedDataUint16ArrayCid:
              typed_data.SetUint16(
                  element_offset,
                  static_cast<uint16_t>(Integer::Cast(value).AsInt64Value()));
              break;
            case kTypedDataInt32ArrayCid:
              typed_data.SetInt32(
                  element_offset,
                  static_cast<int32_t>(Integer::Cast(value).AsInt64Value()));
              break;
            case kTypedDataUint32ArrayCid:
              typed_data.SetUint32(
                  element_offset,
                  static_cast<uint32_t>(Integer::Cast(value).AsInt64Value()));
              break;
            case kTypedDataInt64ArrayCid:
              typed_data.SetInt64(element_offset,
                                  Integer::Cast(value).AsInt64Value());
              break;
            case kTypedDataUint64ArrayCid:
              typed_data.SetUint64(
                  element_offset,
                  static_cast<uint64_t>(Integer::Cast(value).AsInt64Value()));
              break;
            case kTypedDataFloat32ArrayCid:
              typed_data.SetFloat32(
                  element_offset,
                  static_cast<float>(Double::Cast(value).value()));
              break;
            case kTypedDataFloat64ArrayCid:
              typed_data.SetFloat64(element_offset,
                                    Double::Cast(value).value());
              break;
            case kTypedDataFloat32x4ArrayCid:
              typed_data.SetFloat32x4(element_offset,
                                      Float32x4::Cast(value).value());
              break;
            case kTypedDataInt32x4ArrayCid:
              typed_data.SetInt32x4(element_offset,
                                    Int32x4::Cast(value).value());
              break;
            case kTypedDataFloat64x2ArrayCid:
              typed_data.SetFloat64x2(element_offset,
                                      Float64x2::Cast(value).value());
              break;
            default:
              UNREACHABLE();
          }
          if (FLAG_trace_deoptimization_verbose) {
            OS::PrintErr(" typed_data (offset %" Pd ") <- %s\n",
                         element_offset, value.ToCString());
          }
        }
      } else {
        const Instance& obj = Instance::Cast(*object_);

        Smi& offset = Smi::Handle();
        Field& field = Field::Handle();
        Object& value = Object::Handle();
        const Array& offset_map = Array::Handle(cls.OffsetToFieldMap());

        for (intptr_t i = 0; i < field_count_; i++) {
          offset ^= GetFieldOffset(i);
          field ^= offset_map.At(offset.Value() / kCompressedWordSize);
          value = GetValue(i);
          ASSERT((value.ptr() != Object::sentinel().ptr()) ||
                 (!field.IsNull() && field.is_late()));
          if (!field.IsNull() && (value.ptr() != Object::sentinel().ptr())) {
            obj.SetField(field, value);
            if (FLAG_trace_deoptimization_verbose) {
              OS::PrintErr(" %s <- %s\n",
                           String::Handle(field.name()).ToCString(),
                           value.ToCString());
            }
          } else {
            // In addition to the type arguments vector, we can also have lazy
            // materialization of, e.g., _ByteDataView objects, which don't
            // have explicit fields in Dart (all accesses to the fields are
            // done via recognized native methods).
            ASSERT(offset.Value() < cls.host_instance_size());
            obj.SetFieldAtOffset(offset.Value(), value);
            if (FLAG_trace_deoptimization_verbose) {
              OS::PrintErr(
                  " %s @ offset(%" Pd ") <- %s\n",
                  (field.IsNull() ? "null Field"
                                  : String::Handle(field.name()).ToCString()),
                  offset.Value(), value.ToCString());
            }
          }
        }

        if (obj.IsTypedDataView()) {
          // The data field does not get materialized for typed data views
          // because it is not a safe untagged pointer and must be recomputed.
          TypedDataView::Cast(obj).RecomputeDataField();
        }
      }
      break;
  }
}

}  // namespace dart

#endif  // !defined(DART_PRECOMPILED_RUNTIME)