[Cleanup] Simplifies use_slow_path handling to remove unreachable asm.

As a side-effect this should result in slightly less stub code when
--use_slow_path is passed.

Note:
Most of the delta is indentation of code now enclosed in:
if (!FLAG_use_slow_path) {...}
Functionally, this hoists the flag check out of the stub so that we simply do
not emit the ASM that would otherwise be unconditionally jumped over at runtime.

Cq-Include-Trybots: luci.dart.try:vm-ffi-android-debug-arm-try,vm-ffi-android-debug-arm64-try
Change-Id: I22dad5a33bbd66e0e5ab50517e9d96a278383479
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/137781
Commit-Queue: Clement Skau <cskau@google.com>
Reviewed-by: Daco Harkes <dacoharkes@google.com>
This commit is contained in:
Clement Skau 2020-03-06 12:44:44 +00:00 committed by commit-bot@chromium.org
parent b681bfd8d2
commit b0b8304b87
4 changed files with 449 additions and 477 deletions

View file

@ -1046,106 +1046,106 @@ void StubCodeCompiler::GenerateMegamorphicMissStub(Assembler* assembler) {
// R2: array length as Smi (must be preserved).
// The newly allocated object is returned in R0.
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
// and is computed as:
// RoundedAllocationSize(
// (array_length * kwordSize) + target::Array::header_size()).
__ mov(R3, Operand(R2)); // Array length.
// Check that length is a positive Smi.
__ tst(R3, Operand(kSmiTagMask));
if (FLAG_use_slow_path) {
__ b(&slow_case);
} else {
if (!FLAG_use_slow_path) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
// and is computed as:
// RoundedAllocationSize(
// (array_length * kwordSize) + target::Array::header_size()).
__ mov(R3, Operand(R2)); // Array length.
// Check that length is a positive Smi.
__ tst(R3, Operand(kSmiTagMask));
__ b(&slow_case, NE);
__ cmp(R3, Operand(0));
__ b(&slow_case, LT);
// Check for maximum allowed length.
const intptr_t max_len =
target::ToRawSmi(target::Array::kMaxNewSpaceElements);
__ CompareImmediate(R3, max_len);
__ b(&slow_case, GT);
const intptr_t cid = kArrayCid;
NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(R4, &slow_case));
const intptr_t fixed_size_plus_alignment_padding =
target::Array::header_size() +
target::ObjectAlignment::kObjectAlignment - 1;
__ LoadImmediate(R9, fixed_size_plus_alignment_padding);
__ add(R9, R9, Operand(R3, LSL, 1)); // R3 is a Smi.
ASSERT(kSmiTagShift == 1);
__ bic(R9, R9, Operand(target::ObjectAlignment::kObjectAlignment - 1));
// R9: Allocation size.
// Potential new object start.
__ ldr(R0, Address(THR, target::Thread::top_offset()));
__ adds(R3, R0, Operand(R9)); // Potential next object start.
__ b(&slow_case, CS); // Branch if unsigned overflow.
// Check if the allocation fits into the remaining space.
// R0: potential new object start.
// R3: potential next object start.
// R9: allocation size.
__ ldr(TMP, Address(THR, target::Thread::end_offset()));
__ cmp(R3, Operand(TMP));
__ b(&slow_case, CS);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
__ str(R3, Address(THR, target::Thread::top_offset()));
__ add(R0, R0, Operand(kHeapObjectTag));
// Initialize the tags.
// R0: new object start as a tagged pointer.
// R3: new object end address.
// R9: allocation size.
{
const intptr_t shift = target::RawObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2;
__ CompareImmediate(R9, target::RawObject::kSizeTagMaxSizeTag);
__ mov(R8, Operand(R9, LSL, shift), LS);
__ mov(R8, Operand(0), HI);
// Get the class index and insert it into the tags.
// R8: size and bit tags.
const uint32_t tags =
target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
__ LoadImmediate(TMP, tags);
__ orr(R8, R8, Operand(TMP));
__ str(R8,
FieldAddress(R0, target::Array::tags_offset())); // Store tags.
}
// R0: new object start as a tagged pointer.
// R3: new object end address.
// Store the type argument field.
__ StoreIntoObjectNoBarrier(
R0, FieldAddress(R0, target::Array::type_arguments_offset()), R1);
// Set the length field.
__ StoreIntoObjectNoBarrier(
R0, FieldAddress(R0, target::Array::length_offset()), R2);
// Initialize all array elements to raw_null.
// R0: new object start as a tagged pointer.
// R8, R9: null
// R4: iterator which initially points to the start of the variable
// data area to be initialized.
// R3: new object end address.
// R9: allocation size.
__ LoadObject(R8, NullObject());
__ mov(R9, Operand(R8));
__ AddImmediate(R4, R0, target::Array::header_size() - kHeapObjectTag);
__ InitializeFieldsNoBarrier(R0, R4, R3, R8, R9);
__ Ret(); // Returns the newly allocated object in R0.
// Unable to allocate the array using the fast inline code, just call
// into the runtime.
__ Bind(&slow_case);
}
__ cmp(R3, Operand(0));
__ b(&slow_case, LT);
// Check for maximum allowed length.
const intptr_t max_len =
target::ToRawSmi(target::Array::kMaxNewSpaceElements);
__ CompareImmediate(R3, max_len);
__ b(&slow_case, GT);
const intptr_t cid = kArrayCid;
NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(R4, &slow_case));
const intptr_t fixed_size_plus_alignment_padding =
target::Array::header_size() + target::ObjectAlignment::kObjectAlignment -
1;
__ LoadImmediate(R9, fixed_size_plus_alignment_padding);
__ add(R9, R9, Operand(R3, LSL, 1)); // R3 is a Smi.
ASSERT(kSmiTagShift == 1);
__ bic(R9, R9, Operand(target::ObjectAlignment::kObjectAlignment - 1));
// R9: Allocation size.
// Potential new object start.
__ ldr(R0, Address(THR, target::Thread::top_offset()));
__ adds(R3, R0, Operand(R9)); // Potential next object start.
__ b(&slow_case, CS); // Branch if unsigned overflow.
// Check if the allocation fits into the remaining space.
// R0: potential new object start.
// R3: potential next object start.
// R9: allocation size.
__ ldr(TMP, Address(THR, target::Thread::end_offset()));
__ cmp(R3, Operand(TMP));
__ b(&slow_case, CS);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
__ str(R3, Address(THR, target::Thread::top_offset()));
__ add(R0, R0, Operand(kHeapObjectTag));
// Initialize the tags.
// R0: new object start as a tagged pointer.
// R3: new object end address.
// R9: allocation size.
{
const intptr_t shift = target::RawObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2;
__ CompareImmediate(R9, target::RawObject::kSizeTagMaxSizeTag);
__ mov(R8, Operand(R9, LSL, shift), LS);
__ mov(R8, Operand(0), HI);
// Get the class index and insert it into the tags.
// R8: size and bit tags.
const uint32_t tags =
target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
__ LoadImmediate(TMP, tags);
__ orr(R8, R8, Operand(TMP));
__ str(R8, FieldAddress(R0, target::Array::tags_offset())); // Store tags.
}
// R0: new object start as a tagged pointer.
// R3: new object end address.
// Store the type argument field.
__ StoreIntoObjectNoBarrier(
R0, FieldAddress(R0, target::Array::type_arguments_offset()), R1);
// Set the length field.
__ StoreIntoObjectNoBarrier(
R0, FieldAddress(R0, target::Array::length_offset()), R2);
// Initialize all array elements to raw_null.
// R0: new object start as a tagged pointer.
// R8, R9: null
// R4: iterator which initially points to the start of the variable
// data area to be initialized.
// R3: new object end address.
// R9: allocation size.
__ LoadObject(R8, NullObject());
__ mov(R9, Operand(R8));
__ AddImmediate(R4, R0, target::Array::header_size() - kHeapObjectTag);
__ InitializeFieldsNoBarrier(R0, R4, R3, R8, R9);
__ Ret(); // Returns the newly allocated object in R0.
// Unable to allocate the array using the fast inline code, just call
// into the runtime.
__ Bind(&slow_case);
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
@ -1513,11 +1513,7 @@ static void GenerateAllocateContext(Assembler* assembler, Label* slow_case) {
// R3: potential next object start.
__ ldr(IP, Address(THR, target::Thread::end_offset()));
__ cmp(R3, Operand(IP));
if (FLAG_use_slow_path) {
__ b(slow_case);
} else {
__ b(slow_case, CS); // Branch if unsigned higher or equal.
}
__ b(slow_case, CS); // Branch if unsigned higher or equal.
// Successfully allocated the object, now update top to point to
// next object start and initialize the object.
@ -1565,7 +1561,7 @@ static void GenerateAllocateContext(Assembler* assembler, Label* slow_case) {
// Clobbered:
// Potentially any since is can go to runtime.
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
if (FLAG_inline_alloc) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
GenerateAllocateContext(assembler, &slow_case);
@ -1917,7 +1913,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
ASSERT(instance_size % target::ObjectAlignment::kObjectAlignment == 0);
__ LoadObject(kNullReg, NullObject());
if (FLAG_inline_alloc &&
if (!FLAG_use_slow_path && FLAG_inline_alloc &&
target::Heap::IsAllocatableInNewSpace(instance_size) &&
!target::Class::TraceAllocation(cls)) {
Label slow_case;
@ -1930,11 +1926,8 @@ void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
__ ldrd(kInstanceReg, kEndReg, THR, target::Thread::top_offset());
__ AddImmediate(kEndOfInstanceReg, kInstanceReg, instance_size);
__ cmp(kEndOfInstanceReg, Operand(kEndReg));
if (FLAG_use_slow_path) {
__ b(&slow_case);
} else {
__ b(&slow_case, CS); // Unsigned higher or equal.
}
__ b(&slow_case, CS); // Unsigned higher or equal.
__ str(kEndOfInstanceReg, Address(THR, target::Thread::top_offset()));
// Set the tags.

View file

@ -1116,124 +1116,124 @@ void StubCodeCompiler::GenerateMegamorphicMissStub(Assembler* assembler) {
// NOTE: R2 cannot be clobbered here as the caller relies on it being saved.
// The newly allocated object is returned in R0.
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
// and is computed as:
// RoundedAllocationSize(
// (array_length * kwordSize) + target::Array::header_size()).
// Assert that length is a Smi.
__ tsti(R2, Immediate(kSmiTagMask));
if (FLAG_use_slow_path) {
__ b(&slow_case);
} else {
if (!FLAG_use_slow_path) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
// and is computed as:
// RoundedAllocationSize(
// (array_length * kwordSize) + target::Array::header_size()).
// Assert that length is a Smi.
__ tsti(R2, Immediate(kSmiTagMask));
__ b(&slow_case, NE);
__ cmp(R2, Operand(0));
__ b(&slow_case, LT);
// Check for maximum allowed length.
const intptr_t max_len =
target::ToRawSmi(target::Array::kMaxNewSpaceElements);
__ CompareImmediate(R2, max_len);
__ b(&slow_case, GT);
const intptr_t cid = kArrayCid;
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, R4, &slow_case));
// Calculate and align allocation size.
// Load new object start and calculate next object start.
// R1: array element type.
// R2: array length as Smi.
__ ldr(R0, Address(THR, target::Thread::top_offset()));
intptr_t fixed_size_plus_alignment_padding =
target::Array::header_size() +
target::ObjectAlignment::kObjectAlignment - 1;
__ LoadImmediate(R3, fixed_size_plus_alignment_padding);
__ add(R3, R3, Operand(R2, LSL, 2)); // R2 is Smi.
ASSERT(kSmiTagShift == 1);
__ andi(R3, R3,
Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));
// R0: potential new object start.
// R3: object size in bytes.
__ adds(R7, R3, Operand(R0));
__ b(&slow_case, CS); // Branch if unsigned overflow.
// Check if the allocation fits into the remaining space.
// R0: potential new object start.
// R1: array element type.
// R2: array length as Smi.
// R3: array size.
// R7: potential next object start.
__ LoadFromOffset(TMP, THR, target::Thread::end_offset());
__ CompareRegisters(R7, TMP);
__ b(&slow_case, CS); // Branch if unsigned higher or equal.
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
// R0: potential new object start.
// R3: array size.
// R7: potential next object start.
__ str(R7, Address(THR, target::Thread::top_offset()));
__ add(R0, R0, Operand(kHeapObjectTag));
// R0: new object start as a tagged pointer.
// R1: array element type.
// R2: array length as Smi.
// R3: array size.
// R7: new object end address.
// Store the type argument field.
__ StoreIntoObjectOffsetNoBarrier(
R0, target::Array::type_arguments_offset(), R1);
// Set the length field.
__ StoreIntoObjectOffsetNoBarrier(R0, target::Array::length_offset(), R2);
// Calculate the size tag.
// R0: new object start as a tagged pointer.
// R2: array length as Smi.
// R3: array size.
// R7: new object end address.
const intptr_t shift = target::RawObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2;
__ CompareImmediate(R3, target::RawObject::kSizeTagMaxSizeTag);
// If no size tag overflow, shift R1 left, else set R1 to zero.
__ LslImmediate(TMP, R3, shift);
__ csel(R1, TMP, R1, LS);
__ csel(R1, ZR, R1, HI);
// Get the class index and insert it into the tags.
const uint32_t tags =
target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
__ LoadImmediate(TMP, tags);
__ orr(R1, R1, Operand(TMP));
__ StoreFieldToOffset(R1, R0, target::Array::tags_offset());
// Initialize all array elements to raw_null.
// R0: new object start as a tagged pointer.
// R7: new object end address.
// R2: array length as Smi.
__ AddImmediate(R1, R0, target::Array::data_offset() - kHeapObjectTag);
// R1: iterator which initially points to the start of the variable
// data area to be initialized.
Label loop, done;
__ Bind(&loop);
// TODO(cshapiro): StoreIntoObjectNoBarrier
__ CompareRegisters(R1, R7);
__ b(&done, CS);
__ str(NULL_REG, Address(R1)); // Store if unsigned lower.
__ AddImmediate(R1, target::kWordSize);
__ b(&loop); // Loop until R1 == R7.
__ Bind(&done);
// Done allocating and initializing the array.
// R0: new object.
// R2: array length as Smi (preserved for the caller.)
__ ret();
// Unable to allocate the array using the fast inline code, just call
// into the runtime.
__ Bind(&slow_case);
}
__ cmp(R2, Operand(0));
__ b(&slow_case, LT);
// Check for maximum allowed length.
const intptr_t max_len =
target::ToRawSmi(target::Array::kMaxNewSpaceElements);
__ CompareImmediate(R2, max_len);
__ b(&slow_case, GT);
const intptr_t cid = kArrayCid;
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, R4, &slow_case));
// Calculate and align allocation size.
// Load new object start and calculate next object start.
// R1: array element type.
// R2: array length as Smi.
__ ldr(R0, Address(THR, target::Thread::top_offset()));
intptr_t fixed_size_plus_alignment_padding =
target::Array::header_size() + target::ObjectAlignment::kObjectAlignment -
1;
__ LoadImmediate(R3, fixed_size_plus_alignment_padding);
__ add(R3, R3, Operand(R2, LSL, 2)); // R2 is Smi.
ASSERT(kSmiTagShift == 1);
__ andi(R3, R3, Immediate(~(target::ObjectAlignment::kObjectAlignment - 1)));
// R0: potential new object start.
// R3: object size in bytes.
__ adds(R7, R3, Operand(R0));
__ b(&slow_case, CS); // Branch if unsigned overflow.
// Check if the allocation fits into the remaining space.
// R0: potential new object start.
// R1: array element type.
// R2: array length as Smi.
// R3: array size.
// R7: potential next object start.
__ LoadFromOffset(TMP, THR, target::Thread::end_offset());
__ CompareRegisters(R7, TMP);
__ b(&slow_case, CS); // Branch if unsigned higher or equal.
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
// R0: potential new object start.
// R3: array size.
// R7: potential next object start.
__ str(R7, Address(THR, target::Thread::top_offset()));
__ add(R0, R0, Operand(kHeapObjectTag));
// R0: new object start as a tagged pointer.
// R1: array element type.
// R2: array length as Smi.
// R3: array size.
// R7: new object end address.
// Store the type argument field.
__ StoreIntoObjectOffsetNoBarrier(R0, target::Array::type_arguments_offset(),
R1);
// Set the length field.
__ StoreIntoObjectOffsetNoBarrier(R0, target::Array::length_offset(), R2);
// Calculate the size tag.
// R0: new object start as a tagged pointer.
// R2: array length as Smi.
// R3: array size.
// R7: new object end address.
const intptr_t shift = target::RawObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2;
__ CompareImmediate(R3, target::RawObject::kSizeTagMaxSizeTag);
// If no size tag overflow, shift R1 left, else set R1 to zero.
__ LslImmediate(TMP, R3, shift);
__ csel(R1, TMP, R1, LS);
__ csel(R1, ZR, R1, HI);
// Get the class index and insert it into the tags.
const uint32_t tags =
target::MakeTagWordForNewSpaceObject(cid, /*instance_size=*/0);
__ LoadImmediate(TMP, tags);
__ orr(R1, R1, Operand(TMP));
__ StoreFieldToOffset(R1, R0, target::Array::tags_offset());
// Initialize all array elements to raw_null.
// R0: new object start as a tagged pointer.
// R7: new object end address.
// R2: array length as Smi.
__ AddImmediate(R1, R0, target::Array::data_offset() - kHeapObjectTag);
// R1: iterator which initially points to the start of the variable
// data area to be initialized.
Label loop, done;
__ Bind(&loop);
// TODO(cshapiro): StoreIntoObjectNoBarrier
__ CompareRegisters(R1, R7);
__ b(&done, CS);
__ str(NULL_REG, Address(R1)); // Store if unsigned lower.
__ AddImmediate(R1, target::kWordSize);
__ b(&loop); // Loop until R1 == R7.
__ Bind(&done);
// Done allocating and initializing the array.
// R0: new object.
// R2: array length as Smi (preserved for the caller.)
__ ret();
// Unable to allocate the array using the fast inline code, just call
// into the runtime.
__ Bind(&slow_case);
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@ -1613,11 +1613,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// R3: potential next object start.
__ ldr(TMP, Address(THR, target::Thread::end_offset()));
__ CompareRegisters(R3, TMP);
if (FLAG_use_slow_path) {
__ b(slow_case);
} else {
__ b(slow_case, CS); // Branch if unsigned higher or equal.
}
__ b(slow_case, CS); // Branch if unsigned higher or equal.
// Successfully allocated the object, now update top to point to
// next object start and initialize the object.
@ -1663,7 +1659,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// Clobbered:
// R2, R3, R4, TMP
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
if (FLAG_inline_alloc) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
GenerateAllocateContextSpaceStub(assembler, &slow_case);

View file

@ -790,114 +790,113 @@ void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
// Assert that length is a Smi.
__ testl(EDX, Immediate(kSmiTagMask));
if (FLAG_use_slow_path) {
__ jmp(&slow_case);
} else {
if (!FLAG_use_slow_path) {
__ j(NOT_ZERO, &slow_case);
}
__ cmpl(EDX, Immediate(0));
__ j(LESS, &slow_case);
// Check for maximum allowed length.
const Immediate& max_len =
Immediate(target::ToRawSmi(target::Array::kMaxNewSpaceElements));
__ cmpl(EDX, max_len);
__ j(GREATER, &slow_case);
__ cmpl(EDX, Immediate(0));
__ j(LESS, &slow_case);
NOT_IN_PRODUCT(
__ MaybeTraceAllocation(kArrayCid, EAX, &slow_case, Assembler::kFarJump));
// Check for maximum allowed length.
const Immediate& max_len =
Immediate(target::ToRawSmi(target::Array::kMaxNewSpaceElements));
__ cmpl(EDX, max_len);
__ j(GREATER, &slow_case);
const intptr_t fixed_size_plus_alignment_padding =
target::Array::header_size() + target::ObjectAlignment::kObjectAlignment -
1;
// EDX is Smi.
__ leal(EBX, Address(EDX, TIMES_2, fixed_size_plus_alignment_padding));
ASSERT(kSmiTagShift == 1);
__ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, EAX, &slow_case,
Assembler::kFarJump));
// ECX: array element type.
// EDX: array length as Smi.
// EBX: allocation size.
const intptr_t fixed_size_plus_alignment_padding =
target::Array::header_size() +
target::ObjectAlignment::kObjectAlignment - 1;
// EDX is Smi.
__ leal(EBX, Address(EDX, TIMES_2, fixed_size_plus_alignment_padding));
ASSERT(kSmiTagShift == 1);
__ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
const intptr_t cid = kArrayCid;
__ movl(EAX, Address(THR, target::Thread::top_offset()));
__ addl(EBX, EAX);
__ j(CARRY, &slow_case);
// ECX: array element type.
// EDX: array length as Smi.
// EBX: allocation size.
// Check if the allocation fits into the remaining space.
// EAX: potential new object start.
// EBX: potential next object start.
// ECX: array element type.
// EDX: array length as Smi).
__ cmpl(EBX, Address(THR, target::Thread::end_offset()));
__ j(ABOVE_EQUAL, &slow_case);
const intptr_t cid = kArrayCid;
__ movl(EAX, Address(THR, target::Thread::top_offset()));
__ addl(EBX, EAX);
__ j(CARRY, &slow_case);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
__ movl(Address(THR, target::Thread::top_offset()), EBX);
__ subl(EBX, EAX);
__ addl(EAX, Immediate(kHeapObjectTag));
// Check if the allocation fits into the remaining space.
// EAX: potential new object start.
// EBX: potential next object start.
// ECX: array element type.
// EDX: array length as Smi).
__ cmpl(EBX, Address(THR, target::Thread::end_offset()));
__ j(ABOVE_EQUAL, &slow_case);
// Initialize the tags.
// EAX: new object start as a tagged pointer.
// EBX: allocation size.
// ECX: array element type.
// EDX: array length as Smi.
{
Label size_tag_overflow, done;
__ movl(EDI, EBX);
__ cmpl(EDI, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
__ shll(EDI, Immediate(target::RawObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done, Assembler::kNearJump);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
__ movl(Address(THR, target::Thread::top_offset()), EBX);
__ subl(EBX, EAX);
__ addl(EAX, Immediate(kHeapObjectTag));
__ Bind(&size_tag_overflow);
__ movl(EDI, Immediate(0));
// Initialize the tags.
// EAX: new object start as a tagged pointer.
// EBX: allocation size.
// ECX: array element type.
// EDX: array length as Smi.
{
Label size_tag_overflow, done;
__ movl(EDI, EBX);
__ cmpl(EDI, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
__ shll(EDI, Immediate(target::RawObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done, Assembler::kNearJump);
__ Bind(&size_tag_overflow);
__ movl(EDI, Immediate(0));
__ Bind(&done);
// Get the class index and insert it into the tags.
uint32_t tags = target::MakeTagWordForNewSpaceObject(cid, 0);
__ orl(EDI, Immediate(tags));
__ movl(FieldAddress(EAX, target::Object::tags_offset()), EDI); // Tags.
}
// EAX: new object start as a tagged pointer.
// EBX: allocation size.
// ECX: array element type.
// EDX: Array length as Smi (preserved).
// Store the type argument field.
// No generational barrier needed, since we store into a new object.
__ StoreIntoObjectNoBarrier(
EAX, FieldAddress(EAX, target::Array::type_arguments_offset()), ECX);
// Set the length field.
__ StoreIntoObjectNoBarrier(
EAX, FieldAddress(EAX, target::Array::length_offset()), EDX);
// Initialize all array elements to raw_null.
// EAX: new object start as a tagged pointer.
// EBX: allocation size.
// EDI: iterator which initially points to the start of the variable
// data area to be initialized.
// ECX: array element type.
// EDX: array length as Smi.
__ leal(EBX, FieldAddress(EAX, EBX, TIMES_1, 0));
__ leal(EDI, FieldAddress(EAX, target::Array::header_size()));
Label done;
Label init_loop;
__ Bind(&init_loop);
__ cmpl(EDI, EBX);
__ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
// No generational barrier needed, since we are storing null.
__ StoreIntoObjectNoBarrier(EAX, Address(EDI, 0), NullObject());
__ addl(EDI, Immediate(target::kWordSize));
__ jmp(&init_loop, Assembler::kNearJump);
__ Bind(&done);
__ ret(); // returns the newly allocated object in EAX.
// Get the class index and insert it into the tags.
uint32_t tags = target::MakeTagWordForNewSpaceObject(cid, 0);
__ orl(EDI, Immediate(tags));
__ movl(FieldAddress(EAX, target::Object::tags_offset()), EDI); // Tags.
// Unable to allocate the array using the fast inline code, just call
// into the runtime.
__ Bind(&slow_case);
}
// EAX: new object start as a tagged pointer.
// EBX: allocation size.
// ECX: array element type.
// EDX: Array length as Smi (preserved).
// Store the type argument field.
// No generational barrier needed, since we store into a new object.
__ StoreIntoObjectNoBarrier(
EAX, FieldAddress(EAX, target::Array::type_arguments_offset()), ECX);
// Set the length field.
__ StoreIntoObjectNoBarrier(
EAX, FieldAddress(EAX, target::Array::length_offset()), EDX);
// Initialize all array elements to raw_null.
// EAX: new object start as a tagged pointer.
// EBX: allocation size.
// EDI: iterator which initially points to the start of the variable
// data area to be initialized.
// ECX: array element type.
// EDX: array length as Smi.
__ leal(EBX, FieldAddress(EAX, EBX, TIMES_1, 0));
__ leal(EDI, FieldAddress(EAX, target::Array::header_size()));
Label done;
Label init_loop;
__ Bind(&init_loop);
__ cmpl(EDI, EBX);
__ j(ABOVE_EQUAL, &done, Assembler::kNearJump);
// No generational barrier needed, since we are storing null.
__ StoreIntoObjectNoBarrier(EAX, Address(EDI, 0), NullObject());
__ addl(EDI, Immediate(target::kWordSize));
__ jmp(&init_loop, Assembler::kNearJump);
__ Bind(&done);
__ ret(); // returns the newly allocated object in EAX.
// Unable to allocate the array using the fast inline code, just call
// into the runtime.
__ Bind(&slow_case);
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@ -1198,53 +1197,49 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// EBX: potential next object start.
// EDX: number of context variables.
__ cmpl(EBX, Address(THR, target::Thread::end_offset()));
if (FLAG_use_slow_path) {
__ jmp(slow_case);
} else {
#if defined(DEBUG)
static const bool kJumpLength = Assembler::kFarJump;
#else
static const bool kJumpLength = Assembler::kNearJump;
#endif // DEBUG
__ j(ABOVE_EQUAL, slow_case, kJumpLength);
}
// Successfully allocated the object, now update top to point to
// next object start and initialize the object.
// EAX: new object.
// EBX: next object start.
// EDX: number of context variables.
__ movl(Address(THR, target::Thread::top_offset()), EBX);
// EBX: Size of allocation in bytes.
__ subl(EBX, EAX);
__ addl(EAX, Immediate(kHeapObjectTag));
// Generate isolate-independent code to allow sharing between isolates.
// Successfully allocated the object, now update top to point to
// next object start and initialize the object.
// EAX: new object.
// EBX: next object start.
// EDX: number of context variables.
__ movl(Address(THR, target::Thread::top_offset()), EBX);
// EBX: Size of allocation in bytes.
__ subl(EBX, EAX);
__ addl(EAX, Immediate(kHeapObjectTag));
// Generate isolate-independent code to allow sharing between isolates.
// Calculate the size tag.
// EAX: new object.
// EDX: number of context variables.
{
Label size_tag_overflow, done;
__ leal(EBX, Address(EDX, TIMES_4, fixed_size_plus_alignment_padding));
__ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
__ cmpl(EBX, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
__ shll(EBX, Immediate(target::RawObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done);
__ Bind(&size_tag_overflow);
// Set overflow size tag value.
__ movl(EBX, Immediate(0));
__ Bind(&done);
// Calculate the size tag.
// EAX: new object.
// EDX: number of context variables.
// EBX: size and bit tags.
uint32_t tags = target::MakeTagWordForNewSpaceObject(kContextCid, 0);
__ orl(EBX, Immediate(tags));
__ movl(FieldAddress(EAX, target::Object::tags_offset()), EBX); // Tags.
}
{
Label size_tag_overflow, done;
__ leal(EBX, Address(EDX, TIMES_4, fixed_size_plus_alignment_padding));
__ andl(EBX, Immediate(-target::ObjectAlignment::kObjectAlignment));
__ cmpl(EBX, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
__ shll(EBX, Immediate(target::RawObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done);
__ Bind(&size_tag_overflow);
// Set overflow size tag value.
__ movl(EBX, Immediate(0));
__ Bind(&done);
// EAX: new object.
// EDX: number of context variables.
// EBX: size and bit tags.
uint32_t tags = target::MakeTagWordForNewSpaceObject(kContextCid, 0);
__ orl(EBX, Immediate(tags));
__ movl(FieldAddress(EAX, target::Object::tags_offset()), EBX); // Tags.
}
// Setup up number of context variables field.
// EAX: new object.
@ -1260,7 +1255,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// Clobbered:
// EBX, EDX
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
if (FLAG_inline_alloc) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
GenerateAllocateContextSpaceStub(assembler, &slow_case);
@ -1559,7 +1554,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
static_assert(kAllocationStubTypeArgumentsReg == EDX,
"Adjust register allocation in the AllocationStub");
if (FLAG_inline_alloc &&
if (!FLAG_use_slow_path && FLAG_inline_alloc &&
target::Heap::IsAllocatableInNewSpace(instance_size) &&
!target::Class::TraceAllocation(cls)) {
Label slow_case;
@ -1572,11 +1567,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
// EAX: potential new object start.
// EBX: potential next object start.
__ cmpl(EBX, Address(THR, target::Thread::end_offset()));
if (FLAG_use_slow_path) {
__ jmp(&slow_case);
} else {
__ j(ABOVE_EQUAL, &slow_case);
}
__ j(ABOVE_EQUAL, &slow_case);
__ movl(Address(THR, target::Thread::top_offset()), EBX);
// EAX: new object start (untagged).

View file

@ -1030,117 +1030,116 @@ void StubCodeCompiler::GenerateMegamorphicMissStub(Assembler* assembler) {
// NOTE: R10 cannot be clobbered here as the caller relies on it being saved.
// The newly allocated object is returned in RAX.
void StubCodeCompiler::GenerateAllocateArrayStub(Assembler* assembler) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
// and is computed as:
// RoundedAllocationSize(
// (array_length * target::kwordSize) + target::Array::header_size()).
__ movq(RDI, R10); // Array Length.
// Check that length is a positive Smi.
__ testq(RDI, Immediate(kSmiTagMask));
if (FLAG_use_slow_path) {
__ jmp(&slow_case);
} else {
if (!FLAG_use_slow_path) {
Label slow_case;
// Compute the size to be allocated, it is based on the array length
// and is computed as:
// RoundedAllocationSize(
// (array_length * target::kwordSize) + target::Array::header_size()).
__ movq(RDI, R10); // Array Length.
// Check that length is a positive Smi.
__ testq(RDI, Immediate(kSmiTagMask));
__ j(NOT_ZERO, &slow_case);
}
__ cmpq(RDI, Immediate(0));
__ j(LESS, &slow_case);
// Check for maximum allowed length.
const Immediate& max_len =
Immediate(target::ToRawSmi(target::Array::kMaxNewSpaceElements));
__ cmpq(RDI, max_len);
__ j(GREATER, &slow_case);
// Check for allocation tracing.
NOT_IN_PRODUCT(
__ MaybeTraceAllocation(kArrayCid, &slow_case, Assembler::kFarJump));
__ cmpq(RDI, Immediate(0));
__ j(LESS, &slow_case);
// Check for maximum allowed length.
const Immediate& max_len =
Immediate(target::ToRawSmi(target::Array::kMaxNewSpaceElements));
__ cmpq(RDI, max_len);
__ j(GREATER, &slow_case);
const intptr_t fixed_size_plus_alignment_padding =
target::Array::header_size() + target::ObjectAlignment::kObjectAlignment -
1;
// RDI is a Smi.
__ leaq(RDI, Address(RDI, TIMES_4, fixed_size_plus_alignment_padding));
ASSERT(kSmiTagShift == 1);
__ andq(RDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
// Check for allocation tracing.
NOT_IN_PRODUCT(
__ MaybeTraceAllocation(kArrayCid, &slow_case, Assembler::kFarJump));
const intptr_t cid = kArrayCid;
__ movq(RAX, Address(THR, target::Thread::top_offset()));
const intptr_t fixed_size_plus_alignment_padding =
target::Array::header_size() +
target::ObjectAlignment::kObjectAlignment - 1;
// RDI is a Smi.
__ leaq(RDI, Address(RDI, TIMES_4, fixed_size_plus_alignment_padding));
ASSERT(kSmiTagShift == 1);
__ andq(RDI, Immediate(-target::ObjectAlignment::kObjectAlignment));
// RDI: allocation size.
__ movq(RCX, RAX);
__ addq(RCX, RDI);
__ j(CARRY, &slow_case);
const intptr_t cid = kArrayCid;
__ movq(RAX, Address(THR, target::Thread::top_offset()));
// Check if the allocation fits into the remaining space.
// RAX: potential new object start.
// RCX: potential next object start.
// RDI: allocation size.
__ cmpq(RCX, Address(THR, target::Thread::end_offset()));
__ j(ABOVE_EQUAL, &slow_case);
// RDI: allocation size.
__ movq(RCX, RAX);
__ addq(RCX, RDI);
__ j(CARRY, &slow_case);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
__ movq(Address(THR, target::Thread::top_offset()), RCX);
__ addq(RAX, Immediate(kHeapObjectTag));
// Check if the allocation fits into the remaining space.
// RAX: potential new object start.
// RCX: potential next object start.
// RDI: allocation size.
__ cmpq(RCX, Address(THR, target::Thread::end_offset()));
__ j(ABOVE_EQUAL, &slow_case);
// Initialize the tags.
// RAX: new object start as a tagged pointer.
// RDI: allocation size.
{
Label size_tag_overflow, done;
__ cmpq(RDI, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
__ shlq(RDI, Immediate(target::RawObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done, Assembler::kNearJump);
// Successfully allocated the object(s), now update top to point to
// next object start and initialize the object.
__ movq(Address(THR, target::Thread::top_offset()), RCX);
__ addq(RAX, Immediate(kHeapObjectTag));
__ Bind(&size_tag_overflow);
__ LoadImmediate(RDI, Immediate(0));
__ Bind(&done);
// Initialize the tags.
// RAX: new object start as a tagged pointer.
// RDI: allocation size.
{
Label size_tag_overflow, done;
__ cmpq(RDI, Immediate(target::RawObject::kSizeTagMaxSizeTag));
__ j(ABOVE, &size_tag_overflow, Assembler::kNearJump);
__ shlq(RDI, Immediate(target::RawObject::kTagBitsSizeTagPos -
target::ObjectAlignment::kObjectAlignmentLog2));
__ jmp(&done, Assembler::kNearJump);
// Get the class index and insert it into the tags.
uint32_t tags = target::MakeTagWordForNewSpaceObject(cid, 0);
__ orq(RDI, Immediate(tags));
__ movq(FieldAddress(RAX, target::Array::tags_offset()), RDI); // Tags.
}
__ Bind(&size_tag_overflow);
__ LoadImmediate(RDI, Immediate(0));
__ Bind(&done);
// RAX: new object start as a tagged pointer.
// Store the type argument field.
// No generational barrier needed, since we store into a new object.
__ StoreIntoObjectNoBarrier(
RAX, FieldAddress(RAX, target::Array::type_arguments_offset()), RBX);
// Get the class index and insert it into the tags.
uint32_t tags = target::MakeTagWordForNewSpaceObject(cid, 0);
__ orq(RDI, Immediate(tags));
__ movq(FieldAddress(RAX, target::Array::tags_offset()), RDI); // Tags.
}
// Set the length field.
__ StoreIntoObjectNoBarrier(
RAX, FieldAddress(RAX, target::Array::length_offset()), R10);
// RAX: new object start as a tagged pointer.
// Store the type argument field.
// No generational barrier needed, since we store into a new object.
__ StoreIntoObjectNoBarrier(
RAX, FieldAddress(RAX, target::Array::type_arguments_offset()), RBX);
// Initialize all array elements to raw_null.
// RAX: new object start as a tagged pointer.
// RCX: new object end address.
// RDI: iterator which initially points to the start of the variable
// data area to be initialized.
__ LoadObject(R12, NullObject());
__ leaq(RDI, FieldAddress(RAX, target::Array::header_size()));
Label done;
Label init_loop;
__ Bind(&init_loop);
__ cmpq(RDI, RCX);
// Set the length field.
__ StoreIntoObjectNoBarrier(
RAX, FieldAddress(RAX, target::Array::length_offset()), R10);
// Initialize all array elements to raw_null.
// RAX: new object start as a tagged pointer.
// RCX: new object end address.
// RDI: iterator which initially points to the start of the variable
// data area to be initialized.
__ LoadObject(R12, NullObject());
__ leaq(RDI, FieldAddress(RAX, target::Array::header_size()));
Label done;
Label init_loop;
__ Bind(&init_loop);
__ cmpq(RDI, RCX);
#if defined(DEBUG)
static const bool kJumpLength = Assembler::kFarJump;
static const bool kJumpLength = Assembler::kFarJump;
#else
static const bool kJumpLength = Assembler::kNearJump;
static const bool kJumpLength = Assembler::kNearJump;
#endif // DEBUG
__ j(ABOVE_EQUAL, &done, kJumpLength);
// No generational barrier needed, since we are storing null.
__ StoreIntoObjectNoBarrier(RAX, Address(RDI, 0), R12);
__ addq(RDI, Immediate(target::kWordSize));
__ jmp(&init_loop, kJumpLength);
__ Bind(&done);
__ ret(); // returns the newly allocated object in RAX.
__ j(ABOVE_EQUAL, &done, kJumpLength);
// No generational barrier needed, since we are storing null.
__ StoreIntoObjectNoBarrier(RAX, Address(RDI, 0), R12);
__ addq(RDI, Immediate(target::kWordSize));
__ jmp(&init_loop, kJumpLength);
__ Bind(&done);
__ ret(); // returns the newly allocated object in RAX.
// Unable to allocate the array using the fast inline code, just call
// into the runtime.
__ Bind(&slow_case);
// Unable to allocate the array using the fast inline code, just call
// into the runtime.
__ Bind(&slow_case);
}
// Create a stub frame as we are pushing some objects on the stack before
// calling into the runtime.
__ EnterStubFrame();
@ -1506,11 +1505,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// R13: potential next object start.
// R10: number of context variables.
__ cmpq(R13, Address(THR, target::Thread::end_offset()));
if (FLAG_use_slow_path) {
__ jmp(slow_case);
} else {
__ j(ABOVE_EQUAL, slow_case);
}
__ j(ABOVE_EQUAL, slow_case);
// Successfully allocated the object, now update top to point to
// next object start and initialize the object.
@ -1564,7 +1559,7 @@ static void GenerateAllocateContextSpaceStub(Assembler* assembler,
// R9, R13
void StubCodeCompiler::GenerateAllocateContextStub(Assembler* assembler) {
__ LoadObject(R9, NullObject());
if (FLAG_inline_alloc) {
if (!FLAG_use_slow_path && FLAG_inline_alloc) {
Label slow_case;
GenerateAllocateContextSpaceStub(assembler, &slow_case);
@ -1908,7 +1903,7 @@ void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
static_assert(kAllocationStubTypeArgumentsReg == RDX,
"Adjust register allocation in the AllocationStub");
if (FLAG_inline_alloc &&
if (!FLAG_use_slow_path && FLAG_inline_alloc &&
target::Heap::IsAllocatableInNewSpace(instance_size) &&
!target::Class::TraceAllocation(cls)) {
Label slow_case;
@ -1921,11 +1916,8 @@ void StubCodeCompiler::GenerateAllocationStubForClass(Assembler* assembler,
// RAX: potential new object start.
// RBX: potential next object start.
__ cmpq(RBX, Address(THR, target::Thread::end_offset()));
if (FLAG_use_slow_path) {
__ jmp(&slow_case);
} else {
__ j(ABOVE_EQUAL, &slow_case);
}
__ j(ABOVE_EQUAL, &slow_case);
__ movq(Address(THR, target::Thread::top_offset()), RBX);
// RAX: new object start (untagged).