[vm] Ensure the alignment gap is initialized when writing the minimal array truncation filler object.

TEST=ci
Bug: https://github.com/dart-lang/sdk/issues/54495
Change-Id: Iaa9c5ea6ba7312c7ded70eb1c938d8cbb2488e94
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/345362
Reviewed-by: Siva Annamalai <asiva@google.com>
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Author: Ryan Macnak
Date: 2024-01-10 17:51:35 +00:00 (committed by Commit Queue)
parent 6b6f333e00
commit 8938a7e499
3 changed files with 44 additions and 5 deletions
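
For context on why a gap exists at all: heap object sizes are rounded up to the allocation unit, while an array's payload ends at its exact element count, so an odd number of slots leaves an uninitialized word of padding between the payload end and the object end. The following is a standalone sketch of that arithmetic, not VM code; the 8-byte slot, 2-slot header, and 16-byte allocation unit are illustrative assumptions for a 64-bit build, and the names only mirror the VM's.

// Standalone model of the rounded-vs-unrounded size arithmetic (not VM code).
// Assumed: 8-byte slots, a 2-slot header, and a 16-byte allocation unit.
#include <cstdint>
#include <cstdio>

constexpr intptr_t kSlotSize = 8;
constexpr intptr_t kObjectAlignment = 16;
constexpr intptr_t kHeaderSize = 2 * kSlotSize;  // tags word + length word

constexpr intptr_t UnroundedSize(intptr_t len) {
  return kHeaderSize + len * kSlotSize;  // exact end of the payload
}
constexpr intptr_t InstanceSize(intptr_t len) {
  // Round up to the allocation unit, like RoundedAllocationSize.
  return (UnroundedSize(len) + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
}

int main() {
  for (intptr_t len = 0; len <= 4; len++) {
    std::printf("len=%ld unrounded=%ld rounded=%ld gap=%ld\n", (long)len,
                (long)UnroundedSize(len), (long)InstanceSize(len),
                (long)(InstanceSize(len) - UnroundedSize(len)));
  }
  // Odd lengths leave an 8-byte gap: a visitor that walks the rounded size
  // reads a word that the element-count-based visitor never wrote.
  return 0;
}

For len = 1, the payload ends at byte 24 but the object occupies 32 bytes, so [24, 32) is the alignment gap this patch ensures gets initialized.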

@@ -751,6 +751,31 @@ ISOLATE_UNIT_TEST_CASE(ArrayTruncationRaces) {
   }
 }
+
+// See https://github.com/dart-lang/sdk/issues/54495
+ISOLATE_UNIT_TEST_CASE(ArrayTruncationPadding) {
+  GrowableObjectArray& retain =
+      GrowableObjectArray::Handle(GrowableObjectArray::New());
+  Array& array = Array::Handle();
+  for (intptr_t big = 0; big < 256; big++) {
+    for (intptr_t small = 0; small < big; small++) {
+      array = Array::New(big);
+
+      // Fill the alignment gap with invalid pointers.
+      uword addr = UntaggedObject::ToAddr(array.ptr());
+      for (intptr_t offset = Array::UnroundedSize(big);
+           offset < Array::InstanceSize(big); offset += sizeof(uword)) {
+        *reinterpret_cast<uword*>(addr + offset) = kHeapObjectTag;
+      }
+
+      array.Truncate(small);
+
+      retain.Add(array);
+    }
+  }
+
+  IsolateGroup::Current()->heap()->Verify("truncation padding");
+}
 
 class ConcurrentForceGrowthScopeTask : public ThreadPool::Task {
  public:
   ConcurrentForceGrowthScopeTask(IsolateGroup* isolate_group,

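The test's trick is to stamp the padding words with kHeapObjectTag, a bit pattern that looks like a (bogus) heap pointer, so heap verification fails loudly if Truncate leaves the gap uninitialized. Below is a minimal model of that poison-and-verify pattern; FakeObject, VerifySlots, and the 4-slot size are invented for illustration, and the only borrowed idea is the invalid-pointer pattern in the padding.

// Minimal model of poison-and-verify (not the VM's heap verifier).
#include <cassert>
#include <cstdint>
#include <cstring>

constexpr uintptr_t kHeapObjectTag = 1;  // assumed: low bit marks a pointer

struct FakeObject {
  uintptr_t slots[4];  // rounded size is 4 slots; the payload may be shorter
};

// Like an Instance visitor, this walks the *rounded* size, so it sees the
// padding too, and rejects anything there that looks like a heap pointer.
bool VerifySlots(const FakeObject& obj) {
  for (uintptr_t slot : obj.slots) {
    if ((slot & kHeapObjectTag) != 0) return false;
  }
  return true;
}

int main() {
  FakeObject obj;
  std::memset(&obj, 0, sizeof(obj));
  obj.slots[3] = kHeapObjectTag;  // poison the alignment gap, as the test does
  assert(!VerifySlots(obj));      // an uninitialized gap is caught
  obj.slots[3] = 0;               // the fix: the gap gets initialized
  assert(VerifySlots(obj));
  return 0;
}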
@@ -1644,11 +1644,12 @@ void Object::MakeUnusedSpaceTraversable(const Object& obj,
     // new array length, and so treat it as a pointer. Ensure it is a Smi so
     // the marker won't dereference it.
     ASSERT((new_tags & kSmiTagMask) == kSmiTag);
-    raw->untag()->tags_ = new_tags;
     intptr_t leftover_len = (leftover_size - TypedData::InstanceSize(0));
     ASSERT(TypedData::InstanceSize(leftover_len) == leftover_size);
-    raw->untag()->set_length(Smi::New(leftover_len));
+    raw->untag()->set_length<std::memory_order_release>(
+        Smi::New(leftover_len));
+    raw->untag()->tags_ = new_tags;
     raw->untag()->RecomputeDataField();
   } else {
     // Update the leftover space as a basic object.
@@ -1667,6 +1668,16 @@ void Object::MakeUnusedSpaceTraversable(const Object& obj,
     // new array length, and so treat it as a pointer. Ensure it is a Smi so
     // the marker won't dereference it.
     ASSERT((new_tags & kSmiTagMask) == kSmiTag);
+
+    // The array might have an uninitialized alignment gap since the visitors
+    // for Arrays are precise based on element count, but the visitors for
+    // Instance are based on the size rounded to the allocation unit, so we
+    // need to ensure the alignment gap is initialized.
+    for (intptr_t offset = Instance::UnroundedSize();
+         offset < Instance::InstanceSize(); offset += sizeof(uword)) {
+      reinterpret_cast<std::atomic<uword>*>(addr + offset)
+          ->store(0, std::memory_order_release);
+    }
     raw->untag()->tags_ = new_tags;
   }
 }

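Note that the object.cc changes also reorder the stores: the filler's length (and the zeroed gap words) are written with release semantics before tags_ publishes the new header, so a concurrent marker that observes the new tags is guaranteed to also observe an initialized body. Here is a two-thread sketch of that publish pattern, using plain std::atomic variables rather than VM objects:

// Sketch of the release/acquire publish ordering (not VM code).
#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>

std::atomic<uintptr_t> length{0};
std::atomic<uintptr_t> tags{0};  // 0 = old header, 1 = filler published

void Truncator() {
  // Initialize the body first...
  length.store(42, std::memory_order_release);
  // ...then publish the new header last.
  tags.store(1, std::memory_order_release);
}

void Marker() {
  // If the marker observes the new header, release/acquire pairing
  // guarantees it also observes the initialized body.
  if (tags.load(std::memory_order_acquire) == 1) {
    assert(length.load(std::memory_order_relaxed) == 42);
  }
}

int main() {
  std::thread t1(Truncator);
  std::thread t2(Marker);
  t1.join();
  t2.join();
  return 0;
}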
@@ -8242,6 +8242,7 @@ class Instance : public Object {
   // Equivalent to invoking identityHashCode with this instance.
   IntegerPtr IdentityHashCode(Thread* thread) const;
 
+  static intptr_t UnroundedSize() { return sizeof(UntaggedInstance); }
   static intptr_t InstanceSize() {
     return RoundedAllocationSize(sizeof(UntaggedInstance));
   }
@@ -11044,13 +11045,15 @@ class Array : public Instance {
     return 0;
   }
 
-  static constexpr intptr_t InstanceSize(intptr_t len) {
+  static constexpr intptr_t UnroundedSize(intptr_t len) {
     // Ensure that variable length data is not adding to the object length.
     ASSERT(sizeof(UntaggedArray) ==
            (sizeof(UntaggedInstance) + (2 * kBytesPerElement)));
     ASSERT(IsValidLength(len));
-    return RoundedAllocationSize(sizeof(UntaggedArray) +
-                                 (len * kBytesPerElement));
+    return sizeof(UntaggedArray) + (len * kBytesPerElement);
+  }
+  static constexpr intptr_t InstanceSize(intptr_t len) {
+    return RoundedAllocationSize(UnroundedSize(len));
   }
 
   virtual void CanonicalizeFieldsLocked(Thread* thread) const;
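
With these header changes, the half-open byte range [UnroundedSize(len), InstanceSize(len)) is exactly the alignment gap, which is what both new loops in the patch iterate over. A hedged sketch of that usage, with illustrative constants (8-byte elements, 16-byte allocation unit) rather than the VM's real values, and a ZeroAlignmentGap helper invented here for illustration:

// Sketch of using the UnroundedSize/InstanceSize pair together (not VM
// code): the bytes in [UnroundedSize, InstanceSize) are the alignment gap.
#include <cstdint>

constexpr intptr_t kObjectAlignment = 16;  // assumed allocation unit

constexpr intptr_t RoundedAllocationSize(intptr_t size) {
  return (size + kObjectAlignment - 1) & ~(kObjectAlignment - 1);
}
constexpr intptr_t UnroundedSize(intptr_t len) {
  return 16 + len * 8;  // assumed 2-word header + 8-byte elements
}
constexpr intptr_t InstanceSize(intptr_t len) {
  return RoundedAllocationSize(UnroundedSize(len));
}

// Zero the gap between the precise payload end and the rounded object end,
// mirroring the loops added in the patch.
inline void ZeroAlignmentGap(uintptr_t addr, intptr_t len) {
  for (intptr_t offset = UnroundedSize(len); offset < InstanceSize(len);
       offset += sizeof(uintptr_t)) {
    *reinterpret_cast<uintptr_t*>(addr + offset) = 0;
  }
}

static_assert(InstanceSize(1) - UnroundedSize(1) == 8,
              "an odd slot count leaves one word of padding");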