[vm] Fix gcc build.

TEST=local build
Change-Id: I572aba5798a07ce5cb0cbda83d6eb3e44811ba5b
Reviewed-on: https://dart-review.googlesource.com/c/sdk/+/203287
Commit-Queue: Ryan Macnak <rmacnak@google.com>
Reviewed-by: Siva Annamalai <asiva@google.com>
This commit is contained in:
Ryan Macnak 2021-06-17 16:05:02 +00:00 committed by commit-bot@chromium.org
parent 72d0d9981e
commit d2e460675b
20 changed files with 519 additions and 526 deletions

File diff suppressed because it is too large. Load diff

View file

@ -1265,9 +1265,6 @@ class MoveOperands : public ZoneAllocated {
public:
MoveOperands(Location dest, Location src) : dest_(dest), src_(src) {}
MoveOperands(const MoveOperands& other)
: dest_(other.dest_), src_(other.src_) {}
MoveOperands& operator=(const MoveOperands& other) {
dest_ = other.dest_;
src_ = other.src_;

View file

@ -1050,7 +1050,7 @@ class CallSiteInliner : public ValueObject {
} else if (PolymorphicInstanceCallInstr* instr =
call_data->call->AsPolymorphicInstanceCall()) {
entry_kind = instr->entry_kind();
} else if (ClosureCallInstr* instr = call_data->call->AsClosureCall()) {
} else if (call_data->call->IsClosureCall()) {
// Closure functions only have one entry point.
}
kernel::FlowGraphBuilder builder(

View file

@ -2739,7 +2739,7 @@ static bool AreLocationsAllTheSame(const GrowableArray<Location>& locs) {
// Emit move on the edge from |pred| to |succ|.
static void EmitMoveOnEdge(BlockEntryInstr* succ,
BlockEntryInstr* pred,
MoveOperands move) {
const MoveOperands& move) {
Instruction* last = pred->last_instruction();
if ((last->SuccessorCount() == 1) && !pred->IsGraphEntry()) {
ASSERT(last->IsGoto());

View file

@ -220,8 +220,8 @@ class Location : public ValueObject {
static Location Constant(const ConstantInstr* obj, int pair_index = 0) {
ASSERT((pair_index == 0) || (pair_index == 1));
Location loc(reinterpret_cast<uword>(obj) |
(pair_index != 0 ? kPairLocationTag : 0) |
kConstantTag);
(pair_index != 0 ? static_cast<uword>(kPairLocationTag) : 0) |
static_cast<uword>(kConstantTag));
ASSERT(obj == loc.constant_instruction());
ASSERT(loc.pair_index() == pair_index);
return loc;

View file

@ -3630,7 +3630,7 @@ void AllocationSinking::CreateMaterializationAt(
intptr_t num_elements = -1;
if (auto instr = alloc->AsAllocateObject()) {
cls = &(instr->cls());
} else if (auto instr = alloc->AsAllocateClosure()) {
} else if (alloc->IsAllocateClosure()) {
cls = &Class::ZoneHandle(
flow_graph_->isolate_group()->object_store()->closure_class());
} else if (auto instr = alloc->AsAllocateContext()) {

View file

@ -20,7 +20,7 @@ namespace ffi {
// Ranges are positive and non-empty.
//
// The end is exclusive.
class Range : public ValueObject {
class Range {
public:
// Constructs a Range from start (inclusive) and length.
//
@ -36,9 +36,6 @@ class Range : public ValueObject {
return Range(start_inclusive, end_exclusive);
}
Range(const Range& other)
: start_(other.start_), end_exclusive_(other.end_exclusive_) {}
intptr_t start() const { return start_; }
intptr_t end_exclusive() const { return end_exclusive_; }
intptr_t end_inclusive() const { return end_exclusive_ - 1; }
@ -84,10 +81,7 @@ class Range : public ValueObject {
private:
Range(intptr_t start_inclusive, intptr_t end_exclusive)
: start_(start_inclusive), end_exclusive_(end_exclusive) {
if (!(start_ >= 0 && end_exclusive_ > start_)) {
ASSERT(start_ >= 0);
ASSERT(end_exclusive_ > start_);
}
ASSERT(start_ < end_exclusive_);
}
const intptr_t start_;

View file

@ -1951,13 +1951,13 @@ UnboxingInfoMetadata* UnboxingInfoMetadataHelper::GetUnboxingInfoMetadata(
const auto info = new (helper_->zone_) UnboxingInfoMetadata();
info->SetArgsCount(num_args);
for (intptr_t i = 0; i < num_args; i++) {
const auto arg_info = helper_->ReadByte();
const intptr_t arg_info = helper_->ReadByte();
assert(arg_info >= UnboxingInfoMetadata::kBoxed &&
arg_info < UnboxingInfoMetadata::kUnboxingCandidate);
info->unboxed_args_info[i] =
static_cast<UnboxingInfoMetadata::UnboxingInfoTag>(arg_info);
}
const auto return_info = helper_->ReadByte();
const intptr_t return_info = helper_->ReadByte();
assert(return_info >= UnboxingInfoMetadata::kBoxed &&
return_info < UnboxingInfoMetadata::kUnboxingCandidate);
info->return_info =

View file

@ -430,7 +430,7 @@ ISOLATE_UNIT_TEST_CASE(CodeRelocator_OutOfRangeBackwardCall2) {
UNIT_TEST_CASE(PCRelativeCallPatterns) {
{
uint8_t instruction[PcRelativeCallPattern::kLengthInBytes];
uint8_t instruction[PcRelativeCallPattern::kLengthInBytes] = {};
PcRelativeCallPattern pattern(reinterpret_cast<uword>(&instruction));
@ -441,7 +441,7 @@ UNIT_TEST_CASE(PCRelativeCallPatterns) {
EXPECT_EQ(PcRelativeCallPattern::kUpperCallingRange, pattern.distance());
}
{
uint8_t instruction[PcRelativeTailCallPattern::kLengthInBytes];
uint8_t instruction[PcRelativeTailCallPattern::kLengthInBytes] = {};
PcRelativeTailCallPattern pattern(reinterpret_cast<uword>(&instruction));

View file

@ -71,15 +71,17 @@ ForceGrowthSafepointOperationScope::~ForceGrowthSafepointOperationScope() {
}
SafepointHandler::SafepointHandler(IsolateGroup* isolate_group)
: isolate_group_(isolate_group),
handlers_{
{isolate_group, SafepointLevel::kGC},
{isolate_group, SafepointLevel::kGCAndDeopt},
} {}
: isolate_group_(isolate_group) {
handlers_[SafepointLevel::kGC] =
new LevelHandler(isolate_group, SafepointLevel::kGC);
handlers_[SafepointLevel::kGCAndDeopt] =
new LevelHandler(isolate_group, SafepointLevel::kGCAndDeopt);
}
SafepointHandler::~SafepointHandler() {
for (intptr_t level = 0; level < SafepointLevel::kNumLevels; ++level) {
ASSERT(handlers_[level].owner_ == nullptr);
ASSERT(handlers_[level]->owner_ == nullptr);
delete handlers_[level];
}
}
@ -92,8 +94,8 @@ void SafepointHandler::SafepointThreads(Thread* T, SafepointLevel level) {
MonitorLocker tl(threads_lock());
// Allow recursive deopt safepoint operation.
if (handlers_[level].owner_ == T) {
handlers_[level].operation_count_++;
if (handlers_[level]->owner_ == T) {
handlers_[level]->operation_count_++;
// If we own this safepoint level already we have to own the lower levels
// as well.
AssertWeOwnLowerLevelSafepoints(T, level);
@ -112,17 +114,17 @@ void SafepointHandler::SafepointThreads(Thread* T, SafepointLevel level) {
// Wait until other safepoint operations are done & mark us as owning
// the safepoint - so no other thread can.
while (handlers_[level].SafepointInProgress()) {
while (handlers_[level]->SafepointInProgress()) {
tl.Wait();
}
handlers_[level].SetSafepointInProgress(T);
handlers_[level]->SetSafepointInProgress(T);
// Ensure a thread is at a safepoint or notify it to get to one.
handlers_[level].NotifyThreadsToGetToSafepointLevel(T);
handlers_[level]->NotifyThreadsToGetToSafepointLevel(T);
}
// Now wait for all threads that are not already at a safepoint to check-in.
handlers_[level].WaitUntilThreadsReachedSafepointLevel();
handlers_[level]->WaitUntilThreadsReachedSafepointLevel();
AcquireLowerLevelSafepoints(T, level);
}
@ -130,7 +132,7 @@ void SafepointHandler::SafepointThreads(Thread* T, SafepointLevel level) {
void SafepointHandler::AssertWeOwnLowerLevelSafepoints(Thread* T,
SafepointLevel level) {
for (intptr_t lower_level = level - 1; lower_level >= 0; --lower_level) {
RELEASE_ASSERT(handlers_[lower_level].owner_ == T);
RELEASE_ASSERT(handlers_[lower_level]->owner_ == T);
}
}
@ -138,7 +140,7 @@ void SafepointHandler::AssertWeDoNotOwnLowerLevelSafepoints(
Thread* T,
SafepointLevel level) {
for (intptr_t lower_level = level - 1; lower_level >= 0; --lower_level) {
RELEASE_ASSERT(handlers_[lower_level].owner_ != T);
RELEASE_ASSERT(handlers_[lower_level]->owner_ != T);
}
}
@ -166,19 +168,19 @@ void SafepointHandler::ResumeThreads(Thread* T, SafepointLevel level) {
{
MonitorLocker sl(threads_lock());
ASSERT(handlers_[level].SafepointInProgress());
ASSERT(handlers_[level].owner_ == T);
ASSERT(handlers_[level]->SafepointInProgress());
ASSERT(handlers_[level]->owner_ == T);
AssertWeOwnLowerLevelSafepoints(T, level);
// We allow recursive safepoints.
if (handlers_[level].operation_count_ > 1) {
handlers_[level].operation_count_--;
if (handlers_[level]->operation_count_ > 1) {
handlers_[level]->operation_count_--;
return;
}
ReleaseLowerLevelSafepoints(T, level);
handlers_[level].NotifyThreadsToContinue(T);
handlers_[level].ResetSafepointInProgress(T);
handlers_[level]->NotifyThreadsToContinue(T);
handlers_[level]->ResetSafepointInProgress(T);
sl.NotifyAll();
}
ExitSafepointUsingLock(T);
@ -207,20 +209,20 @@ void SafepointHandler::LevelHandler::WaitUntilThreadsReachedSafepointLevel() {
void SafepointHandler::AcquireLowerLevelSafepoints(Thread* T,
SafepointLevel level) {
MonitorLocker tl(threads_lock());
ASSERT(handlers_[level].owner_ == T);
ASSERT(handlers_[level]->owner_ == T);
for (intptr_t lower_level = level - 1; lower_level >= 0; --lower_level) {
while (handlers_[lower_level].SafepointInProgress()) {
while (handlers_[lower_level]->SafepointInProgress()) {
tl.Wait();
}
handlers_[lower_level].SetSafepointInProgress(T);
ASSERT(handlers_[lower_level].owner_ == T);
handlers_[lower_level]->SetSafepointInProgress(T);
ASSERT(handlers_[lower_level]->owner_ == T);
}
}
void SafepointHandler::ReleaseLowerLevelSafepoints(Thread* T,
SafepointLevel level) {
for (intptr_t lower_level = 0; lower_level < level; ++lower_level) {
handlers_[lower_level].ResetSafepointInProgress(T);
handlers_[lower_level]->ResetSafepointInProgress(T);
}
}
@ -272,7 +274,7 @@ void SafepointHandler::EnterSafepointLocked(Thread* T, MonitorLocker* tl) {
for (intptr_t level = T->current_safepoint_level(); level >= 0; --level) {
if (T->IsSafepointLevelRequestedLocked(
static_cast<SafepointLevel>(level))) {
handlers_[level].NotifyWeAreParked(T);
handlers_[level]->NotifyWeAreParked(T);
}
}
}

View file

@ -77,7 +77,7 @@ class SafepointHandler {
bool IsOwnedByTheThread(Thread* thread) {
for (intptr_t level = 0; level < SafepointLevel::kNumLevels; ++level) {
if (handlers_[level].owner_ == thread) {
if (handlers_[level]->owner_ == thread) {
return true;
}
}
@ -86,7 +86,7 @@ class SafepointHandler {
bool AnySafepointInProgress() {
for (intptr_t level = 0; level < SafepointLevel::kNumLevels; ++level) {
if (handlers_[level].SafepointInProgress()) {
if (handlers_[level]->SafepointInProgress()) {
return true;
}
}
@ -171,7 +171,7 @@ class SafepointHandler {
IsolateGroup* isolate_group_;
LevelHandler handlers_[SafepointLevel::kNumLevels];
LevelHandler* handlers_[SafepointLevel::kNumLevels];
friend class Isolate;
friend class IsolateGroup;

View file

@ -10247,8 +10247,7 @@ FunctionTypePtr FunctionType::New(intptr_t num_parent_type_arguments,
}
void FunctionType::set_type_state(uint8_t state) const {
ASSERT((state >= UntaggedFunctionType::kAllocated) &&
(state <= UntaggedFunctionType::kFinalizedUninstantiated));
ASSERT(state <= UntaggedFunctionType::kFinalizedUninstantiated);
StoreNonPointer(&untag()->type_state_, state);
}
@ -21054,8 +21053,7 @@ TypePtr Type::New(const Class& clazz,
}
void Type::set_type_state(uint8_t state) const {
ASSERT((state >= UntaggedType::kAllocated) &&
(state <= UntaggedType::kFinalizedUninstantiated));
ASSERT(state <= UntaggedType::kFinalizedUninstantiated);
StoreNonPointer(&untag()->type_state_, state);
}

View file

@ -5862,7 +5862,7 @@ class CompressedStackMaps : public Object {
/*uses_global_table=*/false);
}
class Iterator : public ValueObject {
class Iterator {
public:
Iterator(const CompressedStackMaps& maps,
const CompressedStackMaps& global_table);

View file

@ -4256,7 +4256,7 @@ static void AddVMMappings(JSONArray* rss_children) {
while (fgets(line, sizeof(line), fp) != nullptr) {
if (sscanf(line, "%zx-%zx", &start, &end) == 2) {
// Mapping line.
strncpy(path, strrchr(line, ' ') + 1, sizeof(path));
strncpy(path, strrchr(line, ' ') + 1, sizeof(path) - 1);
int len = strlen(path);
if ((len > 0) && path[len - 1] == '\n') {
path[len - 1] = 0;

View file

@ -210,7 +210,7 @@ class EntryFrame : public StackFrame {
// Windows- where it is needed for the profiler. It is the responsibility of
// users of StackFrameIterator to ensure that the thread given is not running
// concurrently.
class StackFrameIterator : public ValueObject {
class StackFrameIterator {
public:
enum CrossThreadPolicy {
kNoCrossThreadIteration = 0,
@ -219,9 +219,9 @@ class StackFrameIterator : public ValueObject {
// Iterators for iterating over all frames from the last ExitFrame to the
// first EntryFrame.
explicit StackFrameIterator(ValidationPolicy validation_policy,
Thread* thread,
CrossThreadPolicy cross_thread_policy);
StackFrameIterator(ValidationPolicy validation_policy,
Thread* thread,
CrossThreadPolicy cross_thread_policy);
StackFrameIterator(uword last_fp,
ValidationPolicy validation_policy,
Thread* thread,
@ -236,7 +236,7 @@ class StackFrameIterator : public ValueObject {
Thread* thread,
CrossThreadPolicy cross_thread_policy);
StackFrameIterator(const StackFrameIterator& orig);
explicit StackFrameIterator(const StackFrameIterator& orig);
// Checks if a next frame exists.
bool HasNextFrame() const { return frames_.fp_ != 0; }
@ -311,7 +311,7 @@ class StackFrameIterator : public ValueObject {
// it is only allowed on Windows- where it is needed for the profiler.
// It is the responsibility of users of DartFrameIterator to ensure that the
// isolate given is not running concurrently on another thread.
class DartFrameIterator : public ValueObject {
class DartFrameIterator {
public:
explicit DartFrameIterator(
Thread* thread,
@ -340,7 +340,8 @@ class DartFrameIterator : public ValueObject {
thread,
cross_thread_policy) {}
DartFrameIterator(const DartFrameIterator& orig) : frames_(orig.frames_) {}
explicit DartFrameIterator(const DartFrameIterator& orig)
: frames_(orig.frames_) {}
// Get next dart frame.
StackFrame* NextFrame() {

View file

@ -817,7 +817,7 @@ class Thread : public ThreadState {
return (state & SafepointRequestedField::mask_in_place()) != 0;
case SafepointLevel::kGCAndDeopt:
return (state & DeoptSafepointRequestedField::mask_in_place()) != 0;
case SafepointLevel::kNumLevels:
default:
UNREACHABLE();
}
}
@ -1169,7 +1169,7 @@ class Thread : public ThreadState {
case SafepointLevel::kGCAndDeopt:
return AtSafepointField::mask_in_place() |
AtDeoptSafepointField::mask_in_place();
case SafepointLevel::kNumLevels:
default:
UNREACHABLE();
}
}

View file

@ -207,7 +207,7 @@ CodePtr TypeTestingStubGenerator::OptimizedCodeForType(
static CodePtr RetryCompilationWithFarBranches(
Thread* thread,
std::function<CodePtr(compiler::Assembler&)> fun) {
bool use_far_branches = false;
volatile bool use_far_branches = false;
while (true) {
LongJumpScope jump;
if (setjmp(*jump.Set()) == 0) {

View file

@ -183,6 +183,8 @@ const char* V8SnapshotProfileWriter::ObjectId::IdSpaceToCString(IdSpace space) {
return "IsolateData";
case IdSpace::kArtificial:
return "Artificial";
default:
UNREACHABLE();
}
}

View file

@ -69,16 +69,14 @@ class V8SnapshotProfileWriter : public ZoneAllocated {
kElement,
kProperty,
} type;
union {
intptr_t offset; // kElement
const char* name; // kProperty
};
intptr_t offset; // kElement
const char* name; // kProperty
static Reference Element(intptr_t offset) {
return {Type::kElement, {.offset = offset}};
return {Type::kElement, offset, nullptr};
}
static Reference Property(const char* name) {
return {Type::kProperty, {.name = name}};
return {Type::kProperty, 0, name};
}
bool IsElement() const { return type == Type::kElement; }

View file

@ -318,9 +318,10 @@ extension on CType {
String cAllocateStatements(String variableName) {
switch (this.runtimeType) {
case FundamentalType:
return "${cType} ${variableName};\n";
case StructType:
case UnionType:
return "${cType} ${variableName};\n";
return "${cType} ${variableName} = {};\n";
}
throw Exception("Not implemented for ${this.runtimeType}");
@ -743,7 +744,7 @@ extension on FunctionType {
break;
case TestType.structReturn:
body = """
${returnValue.cType} result;
${returnValue.cType} result = {};
${arguments.copyValueStatements("", "result.")}
""";