Mirror of https://github.com/dart-lang/sdk, synced 2024-11-02 08:44:27 +00:00
[vm] Fix Zone corruption by GrowableArrayStorageTraits::Array.
HashTable<..., GrowableArrayStorageTraits> passes the zone to the placement-new of GrowableArrayStorageTraits::Array. Because this class did not extend ZoneAllocated, the default placement-new interpreted the zone as the allocation address and clobbered the beginning of the zone.

TEST=build
Change-Id: I95805e00b3012f5282f11cfb6da1567f00d56c46
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/208982
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Siva Annamalai <asiva@google.com>
parent e908c9c2e8
commit 479430235c
3 changed files with 43 additions and 37 deletions
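For context before the diff, the standalone sketch below shows the overload-resolution behavior behind the bug. The Zone, ZoneAllocatedLike, GoodArray, and BadArray types are illustrative stand-ins, not the VM's real declarations: a class-scope operator new(size_t, Zone*) (which is what ZoneAllocated provides in the VM) makes `new (zone) T(...)` allocate from the zone, whereas a class without one falls back to the default placement-new from <new>, which treats the Zone* as the construction address and writes the object over the start of the zone.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <new>

struct Zone {
  // Toy bump allocator over a small inline buffer.
  void* Alloc(std::size_t size) {
    void* result = &buffer_[used_];
    used_ += (size + 7) & ~std::size_t{7};  // keep 8-byte alignment
    return result;
  }
  std::size_t used_ = 0;
  alignas(8) std::uint8_t buffer_[1024];
};

// Stand-in for a zone-aware base class: placement-new routes through the zone.
struct ZoneAllocatedLike {
  void* operator new(std::size_t size, Zone* zone) { return zone->Alloc(size); }
  void operator delete(void* ptr, Zone* zone) {}  // only called if a ctor throws
};

struct GoodArray : ZoneAllocatedLike {  // like the fixed Array
  explicit GoodArray(std::intptr_t length) : length_(length) {}
  std::intptr_t length_;
};

struct BadArray {  // like the buggy Array: no zone-aware operator new
  explicit BadArray(std::intptr_t length) : length_(length) {}
  std::intptr_t length_;
};

int main() {
  Zone zone;

  // Resolves to ZoneAllocatedLike::operator new(size_t, Zone*), so the
  // object is carved out of zone.buffer_.
  GoodArray* good = new (&zone) GoodArray(3);

  // Resolves to the default ::operator new(size_t, void*) from <new>; the
  // Zone* converts to void*, so the object is constructed on top of `zone`
  // itself, overwriting its first bytes (the corruption fixed by this commit).
  BadArray* bad = new (&zone) BadArray(3);

  std::printf("good object at %p (inside the zone's buffer)\n",
              static_cast<void*>(good));
  std::printf("bad object at %p, &zone is %p (same address)\n",
              static_cast<void*>(bad), static_cast<void*>(&zone));
  return 0;
}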
@@ -59,7 +59,7 @@ namespace {
 // StorageTrait for HashTable which allows to create hash tables backed by
 // zone memory. Used to compute cluster order for canonical clusters.
 struct GrowableArrayStorageTraits {
-  class Array {
+  class Array : public ZoneAllocated {
    public:
     explicit Array(Zone* zone, intptr_t length)
         : length_(length), array_(zone->Alloc<ObjectPtr>(length)) {}
@@ -163,23 +163,23 @@ void Zone::Segment::DecrementMemoryCapacity(uintptr_t size) {
 // is created within a new thread or ApiNativeScope when calculating high
 // watermarks or memory consumption.
 Zone::Zone()
-    : initial_buffer_(buffer_, kInitialChunkSize),
-      position_(initial_buffer_.start()),
-      limit_(initial_buffer_.end()),
+    : canary_(kCanary),
+      position_(reinterpret_cast<uword>(&buffer_)),
+      limit_(position_ + kInitialChunkSize),
       head_(NULL),
       large_segments_(NULL),
-      handles_(),
-      previous_(NULL) {
+      previous_(NULL),
+      handles_() {
   ASSERT(Utils::IsAligned(position_, kAlignment));
   Segment::IncrementMemoryCapacity(kInitialChunkSize);
 #ifdef DEBUG
   // Zap the entire initial buffer.
-  memset(initial_buffer_.pointer(), kZapUninitializedByte,
-         initial_buffer_.size());
+  memset(&buffer_, kZapUninitializedByte, kInitialChunkSize);
 #endif
 }
 
 Zone::~Zone() {
+  ASSERT(canary_ == kCanary);
   if (FLAG_trace_zones) {
     DumpZoneSizes();
   }
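The destructor's new ASSERT(canary_ == kCanary) is the detection half of the fix. The sketch below (GuardedZone is an illustrative stand-in, not the VM's Zone) shows the idea: keep a known value in the first data member and assert on destruction that it is still intact, so anything constructed on top of the zone object fails loudly in a debug build.

#include <cassert>
#include <cstdint>

class GuardedZone {
 public:
  GuardedZone() : canary_(kCanary) {}
  ~GuardedZone() { assert(canary_ == kCanary); }  // trips if the zone was clobbered

 private:
  static constexpr std::uint64_t kCanary = 0x656e6f7a74726164ull;  // "dartzone"
  std::uint64_t canary_;  // deliberately the first data member
  // ... allocation bookkeeping and the inline buffer would follow here ...
};

int main() {
  GuardedZone zone;  // destructor runs at end of scope with the canary intact
  return 0;
}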
@@ -198,10 +198,10 @@ void Zone::DeleteAll() {
   }
   // Reset zone state.
 #ifdef DEBUG
-  memset(initial_buffer_.pointer(), kZapDeletedByte, initial_buffer_.size());
+  memset(&buffer_, kZapDeletedByte, kInitialChunkSize);
 #endif
-  position_ = initial_buffer_.start();
-  limit_ = initial_buffer_.end();
+  position_ = reinterpret_cast<uword>(&buffer_);
+  limit_ = position_ + kInitialChunkSize;
   small_segment_capacity_ = 0;
   head_ = NULL;
   large_segments_ = NULL;
@@ -215,9 +215,9 @@ uintptr_t Zone::SizeInBytes() const {
     size += s->size();
   }
   if (head_ == NULL) {
-    return size + (position_ - initial_buffer_.start());
+    return size + (position_ - reinterpret_cast<uword>(&buffer_));
   }
-  size += initial_buffer_.size();
+  size += kInitialChunkSize;
   for (Segment* s = head_->next(); s != NULL; s = s->next()) {
     size += s->size();
   }
@@ -230,9 +230,9 @@ uintptr_t Zone::CapacityInBytes() const {
     size += s->size();
   }
   if (head_ == NULL) {
-    return size + initial_buffer_.size();
+    return size + kInitialChunkSize;
   }
-  size += initial_buffer_.size();
+  size += kInitialChunkSize;
   for (Segment* s = head_; s != NULL; s = s->next()) {
     size += s->size();
   }
@@ -141,13 +141,9 @@ class Zone {
   template <class ElementType>
   static inline void CheckLength(intptr_t len);
 
-  // This buffer is used for allocation before any segments.
-  // This would act as the initial stack allocated chunk so that we don't
-  // end up calling malloc/free on zone scopes that allocate less than
-  // kChunkSize
-  COMPILE_ASSERT(kAlignment <= 8);
-  ALIGN8 uint8_t buffer_[kInitialChunkSize];
-  MemoryRegion initial_buffer_;
+  // Guard against `new (zone) DoesNotExtendZoneAllocated()`.
+  static constexpr uint64_t kCanary = 0x656e6f7a74726164ull;  // "dartzone"
+  uint64_t canary_;
 
   // The free region in the current (head) segment or the initial buffer is
   // represented as the half-open interval [position, limit). The 'position'
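The kCanary constant is just the ASCII bytes "dartzone" read as a 64-bit little-endian integer, which makes a clobbered zone easy to recognize in a memory dump. A quick standalone check of that encoding (assuming a little-endian host):

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const char bytes[8] = {'d', 'a', 'r', 't', 'z', 'o', 'n', 'e'};
  std::uint64_t value = 0;
  std::memcpy(&value, bytes, sizeof(value));
  // Holds on little-endian hosts: 'd' (0x64) ends up as the lowest byte.
  assert(value == 0x656e6f7a74726164ull);
  return 0;
}

The next hunk moves the inline buffer_ to the end of the class, presumably so that the canary and pointer fields, rather than the scratch buffer, occupy the first bytes of the object that a stray default placement-new would overwrite.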
@@ -169,11 +165,18 @@ class Zone {
   // List of large segments allocated in this zone; may be NULL.
   Segment* large_segments_;
 
-  // Used for chaining zones in order to allow unwinding of stacks.
-  Zone* previous_;
-
   // Structure for managing handles allocation.
   VMHandles handles_;
 
+  // Used for chaining zones in order to allow unwinding of stacks.
+  Zone* previous_;
+
+  // This buffer is used for allocation before any segments.
+  // This would act as the initial stack allocated chunk so that we don't
+  // end up calling malloc/free on zone scopes that allocate less than
+  // kChunkSize
+  COMPILE_ASSERT(kAlignment <= 8);
+  ALIGN8 uint8_t buffer_[kInitialChunkSize];
+
   friend class StackZone;
   friend class ApiZone;
@@ -274,23 +277,26 @@ inline ElementType* Zone::Realloc(ElementType* old_data,
                                   intptr_t new_len) {
   CheckLength<ElementType>(new_len);
   const intptr_t kElementSize = sizeof(ElementType);
-  uword old_end = reinterpret_cast<uword>(old_data) + (old_len * kElementSize);
-  // Resize existing allocation if nothing was allocated in between...
-  if (Utils::RoundUp(old_end, kAlignment) == position_) {
-    uword new_end =
-        reinterpret_cast<uword>(old_data) + (new_len * kElementSize);
-    // ...and there is sufficient space.
-    if (new_end <= limit_) {
-      ASSERT(new_len >= old_len);
-      position_ = Utils::RoundUp(new_end, kAlignment);
+  if (old_data != nullptr) {
+    uword old_end =
+        reinterpret_cast<uword>(old_data) + (old_len * kElementSize);
+    // Resize existing allocation if nothing was allocated in between...
+    if (Utils::RoundUp(old_end, kAlignment) == position_) {
+      uword new_end =
+          reinterpret_cast<uword>(old_data) + (new_len * kElementSize);
+      // ...and there is sufficient space.
+      if (new_end <= limit_) {
+        ASSERT(new_len >= old_len);
+        position_ = Utils::RoundUp(new_end, kAlignment);
+        return old_data;
+      }
+    }
+    if (new_len <= old_len) {
       return old_data;
     }
   }
-  if (new_len <= old_len) {
-    return old_data;
-  }
   ElementType* new_data = Alloc<ElementType>(new_len);
-  if (old_data != 0) {
+  if (old_data != nullptr) {
     memmove(reinterpret_cast<void*>(new_data),
             reinterpret_cast<void*>(old_data), old_len * kElementSize);
   }
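To make the restructured Realloc easier to follow, here is a self-contained sketch of the same grow-in-place strategy (MiniZone and its sizes are illustrative, not the VM's code): if the old block is the most recent allocation and the new end still fits under limit_, only position_ is bumped; otherwise a fresh block is allocated and the old contents are copied. Wrapping the fast paths in `old_data != nullptr` means a null old pointer simply falls through to a fresh allocation.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

class MiniZone {
 public:
  MiniZone()
      : position_(reinterpret_cast<std::uintptr_t>(&buffer_)),
        limit_(position_ + sizeof(buffer_)) {}

  std::uint8_t* Alloc(std::size_t len) {
    std::uintptr_t result = position_;
    position_ = RoundUp(result + len);
    assert(position_ <= limit_);  // a real zone would grow a new segment here
    return reinterpret_cast<std::uint8_t*>(result);
  }

  std::uint8_t* Realloc(std::uint8_t* old_data, std::size_t old_len,
                        std::size_t new_len) {
    if (old_data != nullptr) {
      std::uintptr_t old_end =
          reinterpret_cast<std::uintptr_t>(old_data) + old_len;
      // Resize in place if nothing was allocated after old_data...
      if (RoundUp(old_end) == position_) {
        std::uintptr_t new_end =
            reinterpret_cast<std::uintptr_t>(old_data) + new_len;
        // ...and the new end still fits in the current chunk.
        if (new_end <= limit_) {
          position_ = RoundUp(new_end);
          return old_data;
        }
      }
      if (new_len <= old_len) {
        return old_data;  // shrinking: keep the existing block
      }
    }
    // Slow path: fresh allocation, then copy the old contents (if any).
    std::uint8_t* new_data = Alloc(new_len);
    if (old_data != nullptr) {
      std::memcpy(new_data, old_data, old_len);
    }
    return new_data;
  }

 private:
  static std::uintptr_t RoundUp(std::uintptr_t value) {
    return (value + 7) & ~static_cast<std::uintptr_t>(7);
  }

  std::uintptr_t position_;
  std::uintptr_t limit_;
  alignas(8) std::uint8_t buffer_[256];
};

int main() {
  MiniZone zone;
  std::uint8_t* p = zone.Realloc(nullptr, 0, 16);  // no old block: plain Alloc
  std::uint8_t* q = zone.Realloc(p, 16, 64);       // most recent block: grows in place
  assert(p == q);
  return 0;
}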