disas/libvixl: Update to upstream VIXL 1.7

Update our copy of libvixl to upstream's 1.7 release.
This includes upstream's fix for the issue we had a local
patch for in commit 94cc44a9e.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Message-id: 1422274779-13359-2-git-send-email-peter.maydell@linaro.org
commit d4eba98df4
parent 87c3d48615
Peter Maydell, 2015-02-05 13:37:25 +00:00
11 changed files with 536 additions and 209 deletions

disas/libvixl/README

@@ -2,7 +2,7 @@
 The code in this directory is a subset of libvixl:
  https://github.com/armvixl/vixl
 (specifically, it is the set of files needed for disassembly only,
-taken from libvixl 1.6).
+taken from libvixl 1.7).
 Bugfixes should preferably be sent upstream initially.
 
 The disassembler does not currently support the entire A64 instruction

disas/libvixl/a64/assembler-a64.h

@@ -151,21 +151,21 @@ class CPURegister {
     return Aliases(other) && (size_ == other.size_);
   }
 
-  inline bool IsZero() const {
+  bool IsZero() const {
     VIXL_ASSERT(IsValid());
     return IsRegister() && (code_ == kZeroRegCode);
   }
 
-  inline bool IsSP() const {
+  bool IsSP() const {
     VIXL_ASSERT(IsValid());
     return IsRegister() && (code_ == kSPRegInternalCode);
   }
 
-  inline bool IsRegister() const {
+  bool IsRegister() const {
     return type_ == kRegister;
   }
 
-  inline bool IsFPRegister() const {
+  bool IsFPRegister() const {
     return type_ == kFPRegister;
   }
@@ -179,7 +179,7 @@ class CPURegister {
   const FPRegister& S() const;
   const FPRegister& D() const;
 
-  inline bool IsSameSizeAndType(const CPURegister& other) const {
+  bool IsSameSizeAndType(const CPURegister& other) const {
     return (size_ == other.size_) && (type_ == other.type_);
   }
@@ -198,7 +198,7 @@ class CPURegister {
 class Register : public CPURegister {
  public:
   Register() : CPURegister() {}
-  inline explicit Register(const CPURegister& other)
+  explicit Register(const CPURegister& other)
       : CPURegister(other.code(), other.size(), other.type()) {
     VIXL_ASSERT(IsValidRegister());
   }
@@ -213,10 +213,6 @@ class Register : public CPURegister {
   static const Register& WRegFromCode(unsigned code);
   static const Register& XRegFromCode(unsigned code);
 
-  // V8 compatibility.
-  static const int kNumRegisters = kNumberOfRegisters;
-  static const int kNumAllocatableRegisters = kNumberOfRegisters - 1;
-
  private:
   static const Register wregisters[];
   static const Register xregisters[];
@@ -225,12 +221,12 @@ class Register : public CPURegister {
 class FPRegister : public CPURegister {
  public:
-  inline FPRegister() : CPURegister() {}
-  inline explicit FPRegister(const CPURegister& other)
+  FPRegister() : CPURegister() {}
+  explicit FPRegister(const CPURegister& other)
       : CPURegister(other.code(), other.size(), other.type()) {
     VIXL_ASSERT(IsValidFPRegister());
   }
 
-  inline FPRegister(unsigned code, unsigned size)
+  FPRegister(unsigned code, unsigned size)
       : CPURegister(code, size, kFPRegister) {}
 
   bool IsValid() const {
@@ -241,10 +237,6 @@ class FPRegister : public CPURegister {
   static const FPRegister& SRegFromCode(unsigned code);
   static const FPRegister& DRegFromCode(unsigned code);
 
-  // V8 compatibility.
-  static const int kNumRegisters = kNumberOfFPRegisters;
-  static const int kNumAllocatableRegisters = kNumberOfFPRegisters - 1;
-
  private:
   static const FPRegister sregisters[];
   static const FPRegister dregisters[];
@@ -312,23 +304,23 @@ bool AreSameSizeAndType(const CPURegister& reg1,
 // Lists of registers.
 class CPURegList {
  public:
-  inline explicit CPURegList(CPURegister reg1,
-                             CPURegister reg2 = NoCPUReg,
-                             CPURegister reg3 = NoCPUReg,
-                             CPURegister reg4 = NoCPUReg)
+  explicit CPURegList(CPURegister reg1,
+                      CPURegister reg2 = NoCPUReg,
+                      CPURegister reg3 = NoCPUReg,
+                      CPURegister reg4 = NoCPUReg)
       : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
         size_(reg1.size()), type_(reg1.type()) {
     VIXL_ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
     VIXL_ASSERT(IsValid());
   }
 
-  inline CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
+  CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
       : list_(list), size_(size), type_(type) {
     VIXL_ASSERT(IsValid());
   }
 
-  inline CPURegList(CPURegister::RegisterType type, unsigned size,
-                    unsigned first_reg, unsigned last_reg)
+  CPURegList(CPURegister::RegisterType type, unsigned size,
+             unsigned first_reg, unsigned last_reg)
       : size_(size), type_(type) {
     VIXL_ASSERT(((type == CPURegister::kRegister) &&
                  (last_reg < kNumberOfRegisters)) ||
@@ -340,7 +332,7 @@ class CPURegList {
     VIXL_ASSERT(IsValid());
   }
 
-  inline CPURegister::RegisterType type() const {
+  CPURegister::RegisterType type() const {
     VIXL_ASSERT(IsValid());
     return type_;
   }
@@ -366,13 +358,13 @@ class CPURegList {
   }
 
   // Variants of Combine and Remove which take a single register.
-  inline void Combine(const CPURegister& other) {
+  void Combine(const CPURegister& other) {
     VIXL_ASSERT(other.type() == type_);
     VIXL_ASSERT(other.size() == size_);
     Combine(other.code());
   }
 
-  inline void Remove(const CPURegister& other) {
+  void Remove(const CPURegister& other) {
     VIXL_ASSERT(other.type() == type_);
     VIXL_ASSERT(other.size() == size_);
     Remove(other.code());
@@ -380,24 +372,51 @@ class CPURegList {
   // Variants of Combine and Remove which take a single register by its code;
   // the type and size of the register is inferred from this list.
-  inline void Combine(int code) {
+  void Combine(int code) {
     VIXL_ASSERT(IsValid());
     VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
     list_ |= (UINT64_C(1) << code);
   }
 
-  inline void Remove(int code) {
+  void Remove(int code) {
     VIXL_ASSERT(IsValid());
     VIXL_ASSERT(CPURegister(code, size_, type_).IsValid());
     list_ &= ~(UINT64_C(1) << code);
   }
 
-  inline RegList list() const {
+  static CPURegList Union(const CPURegList& list_1, const CPURegList& list_2) {
+    VIXL_ASSERT(list_1.type_ == list_2.type_);
+    VIXL_ASSERT(list_1.size_ == list_2.size_);
+    return CPURegList(list_1.type_, list_1.size_, list_1.list_ | list_2.list_);
+  }
+  static CPURegList Union(const CPURegList& list_1,
+                          const CPURegList& list_2,
+                          const CPURegList& list_3);
+  static CPURegList Union(const CPURegList& list_1,
+                          const CPURegList& list_2,
+                          const CPURegList& list_3,
+                          const CPURegList& list_4);
+
+  static CPURegList Intersection(const CPURegList& list_1,
+                                 const CPURegList& list_2) {
+    VIXL_ASSERT(list_1.type_ == list_2.type_);
+    VIXL_ASSERT(list_1.size_ == list_2.size_);
+    return CPURegList(list_1.type_, list_1.size_, list_1.list_ & list_2.list_);
+  }
+  static CPURegList Intersection(const CPURegList& list_1,
+                                 const CPURegList& list_2,
+                                 const CPURegList& list_3);
+  static CPURegList Intersection(const CPURegList& list_1,
+                                 const CPURegList& list_2,
+                                 const CPURegList& list_3,
+                                 const CPURegList& list_4);
+
+  RegList list() const {
     VIXL_ASSERT(IsValid());
     return list_;
   }
 
-  inline void set_list(RegList new_list) {
+  void set_list(RegList new_list) {
     VIXL_ASSERT(IsValid());
     list_ = new_list;
   }
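The new two-list Union and Intersection are plain bitwise combinations of the underlying RegList masks; the three- and four-list overloads are declared here and defined out of line. A minimal usage sketch (register ranges and the function name are illustrative, not from this patch):

    void RegListExample() {
      CPURegList gp_low(CPURegister::kRegister, kXRegSize, 0, 7);   // x0-x7
      CPURegList gp_mid(CPURegister::kRegister, kXRegSize, 4, 15);  // x4-x15
      CPURegList either = CPURegList::Union(gp_low, gp_mid);        // x0-x15
      CPURegList both = CPURegList::Intersection(gp_low, gp_mid);   // x4-x7
    }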
@@ -417,38 +436,38 @@ class CPURegList {
   static CPURegList GetCallerSaved(unsigned size = kXRegSize);
   static CPURegList GetCallerSavedFP(unsigned size = kDRegSize);
 
-  inline bool IsEmpty() const {
+  bool IsEmpty() const {
     VIXL_ASSERT(IsValid());
     return list_ == 0;
   }
 
-  inline bool IncludesAliasOf(const CPURegister& other) const {
+  bool IncludesAliasOf(const CPURegister& other) const {
     VIXL_ASSERT(IsValid());
     return (type_ == other.type()) && ((other.Bit() & list_) != 0);
   }
 
-  inline bool IncludesAliasOf(int code) const {
+  bool IncludesAliasOf(int code) const {
     VIXL_ASSERT(IsValid());
     return ((code & list_) != 0);
   }
 
-  inline int Count() const {
+  int Count() const {
     VIXL_ASSERT(IsValid());
     return CountSetBits(list_, kRegListSizeInBits);
   }
 
-  inline unsigned RegisterSizeInBits() const {
+  unsigned RegisterSizeInBits() const {
     VIXL_ASSERT(IsValid());
     return size_;
   }
 
-  inline unsigned RegisterSizeInBytes() const {
+  unsigned RegisterSizeInBytes() const {
     int size_in_bits = RegisterSizeInBits();
     VIXL_ASSERT((size_in_bits % 8) == 0);
     return size_in_bits / 8;
   }
 
-  inline unsigned TotalSizeInBytes() const {
+  unsigned TotalSizeInBytes() const {
     VIXL_ASSERT(IsValid());
     return RegisterSizeInBytes() * Count();
   }
@@ -587,8 +606,10 @@ class Label {
     VIXL_ASSERT(!IsLinked() || IsBound());
   }
 
-  inline bool IsBound() const { return location_ >= 0; }
-  inline bool IsLinked() const { return !links_.empty(); }
+  bool IsBound() const { return location_ >= 0; }
+  bool IsLinked() const { return !links_.empty(); }
+
+  ptrdiff_t location() const { return location_; }
 
  private:
   // The list of linked instructions is stored in a stack-like structure. We
@@ -647,22 +668,20 @@ class Label {
     std::stack<ptrdiff_t> * links_extended_;
   };
 
-  inline ptrdiff_t location() const { return location_; }
-
-  inline void Bind(ptrdiff_t location) {
+  void Bind(ptrdiff_t location) {
     // Labels can only be bound once.
     VIXL_ASSERT(!IsBound());
     location_ = location;
   }
 
-  inline void AddLink(ptrdiff_t instruction) {
+  void AddLink(ptrdiff_t instruction) {
     // If a label is bound, the assembler already has the information it needs
     // to write the instruction, so there is no need to add it to links_.
     VIXL_ASSERT(!IsBound());
     links_.push(instruction);
   }
 
-  inline ptrdiff_t GetAndRemoveNextLink() {
+  ptrdiff_t GetAndRemoveNextLink() {
     VIXL_ASSERT(IsLinked());
     ptrdiff_t link = links_.top();
     links_.pop();
@@ -845,14 +864,14 @@ class Assembler {
   // Return the address of an offset in the buffer.
   template <typename T>
-  inline T GetOffsetAddress(ptrdiff_t offset) {
+  T GetOffsetAddress(ptrdiff_t offset) {
     VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
     return buffer_->GetOffsetAddress<T>(offset);
   }
 
   // Return the address of a bound label.
   template <typename T>
-  inline T GetLabelAddress(const Label * label) {
+  T GetLabelAddress(const Label * label) {
     VIXL_ASSERT(label->IsBound());
     VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
     return GetOffsetAddress<T>(label->location());
@@ -860,14 +879,14 @@ class Assembler {
   // Return the address of the cursor.
   template <typename T>
-  inline T GetCursorAddress() {
+  T GetCursorAddress() {
     VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
     return GetOffsetAddress<T>(CursorOffset());
   }
 
   // Return the address of the start of the buffer.
   template <typename T>
-  inline T GetStartAddress() {
+  T GetStartAddress() {
     VIXL_STATIC_ASSERT(sizeof(T) >= sizeof(uintptr_t));
     return GetOffsetAddress<T>(0);
   }
@@ -1074,20 +1093,20 @@ class Assembler {
   // Bfm aliases.
 
   // Bitfield insert.
-  inline void bfi(const Register& rd,
-                  const Register& rn,
-                  unsigned lsb,
-                  unsigned width) {
+  void bfi(const Register& rd,
+           const Register& rn,
+           unsigned lsb,
+           unsigned width) {
     VIXL_ASSERT(width >= 1);
     VIXL_ASSERT(lsb + width <= rn.size());
     bfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
   }
 
   // Bitfield extract and insert low.
-  inline void bfxil(const Register& rd,
-                    const Register& rn,
-                    unsigned lsb,
-                    unsigned width) {
+  void bfxil(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
     VIXL_ASSERT(width >= 1);
     VIXL_ASSERT(lsb + width <= rn.size());
     bfm(rd, rn, lsb, lsb + width - 1);
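As the bodies above show, bfi and bfxil are pure rewrites onto bfm. A worked 64-bit case (the wrapper function and a pointer-style masm are illustrative; these aliases exist in full upstream VIXL, not only in QEMU's disassembly subset):

    void BfmAliasExample(vixl::Assembler* masm) {
      // bfi xd, xn, #lsb, #width encodes as bfm with
      // immr = (64 - lsb) & 63 and imms = width - 1.
      masm->bfi(x0, x1, 8, 4);    // emits: bfm x0, x1, #56, #3
      masm->bfxil(x0, x1, 8, 4);  // emits: bfm x0, x1, #8, #11
    }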
@@ -1095,92 +1114,92 @@ class Assembler {
   // Sbfm aliases.
 
   // Arithmetic shift right.
-  inline void asr(const Register& rd, const Register& rn, unsigned shift) {
+  void asr(const Register& rd, const Register& rn, unsigned shift) {
     VIXL_ASSERT(shift < rd.size());
     sbfm(rd, rn, shift, rd.size() - 1);
   }
 
   // Signed bitfield insert with zero at right.
-  inline void sbfiz(const Register& rd,
-                    const Register& rn,
-                    unsigned lsb,
-                    unsigned width) {
+  void sbfiz(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
     VIXL_ASSERT(width >= 1);
     VIXL_ASSERT(lsb + width <= rn.size());
     sbfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
   }
 
   // Signed bitfield extract.
-  inline void sbfx(const Register& rd,
-                   const Register& rn,
-                   unsigned lsb,
-                   unsigned width) {
+  void sbfx(const Register& rd,
+            const Register& rn,
+            unsigned lsb,
+            unsigned width) {
     VIXL_ASSERT(width >= 1);
     VIXL_ASSERT(lsb + width <= rn.size());
     sbfm(rd, rn, lsb, lsb + width - 1);
   }
 
   // Signed extend byte.
-  inline void sxtb(const Register& rd, const Register& rn) {
+  void sxtb(const Register& rd, const Register& rn) {
     sbfm(rd, rn, 0, 7);
   }
 
   // Signed extend halfword.
-  inline void sxth(const Register& rd, const Register& rn) {
+  void sxth(const Register& rd, const Register& rn) {
     sbfm(rd, rn, 0, 15);
   }
 
   // Signed extend word.
-  inline void sxtw(const Register& rd, const Register& rn) {
+  void sxtw(const Register& rd, const Register& rn) {
     sbfm(rd, rn, 0, 31);
   }
 
   // Ubfm aliases.
 
   // Logical shift left.
-  inline void lsl(const Register& rd, const Register& rn, unsigned shift) {
+  void lsl(const Register& rd, const Register& rn, unsigned shift) {
     unsigned reg_size = rd.size();
     VIXL_ASSERT(shift < reg_size);
     ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
   }
 
   // Logical shift right.
-  inline void lsr(const Register& rd, const Register& rn, unsigned shift) {
+  void lsr(const Register& rd, const Register& rn, unsigned shift) {
     VIXL_ASSERT(shift < rd.size());
     ubfm(rd, rn, shift, rd.size() - 1);
   }
 
   // Unsigned bitfield insert with zero at right.
-  inline void ubfiz(const Register& rd,
-                    const Register& rn,
-                    unsigned lsb,
-                    unsigned width) {
+  void ubfiz(const Register& rd,
+             const Register& rn,
+             unsigned lsb,
+             unsigned width) {
     VIXL_ASSERT(width >= 1);
     VIXL_ASSERT(lsb + width <= rn.size());
     ubfm(rd, rn, (rd.size() - lsb) & (rd.size() - 1), width - 1);
   }
 
   // Unsigned bitfield extract.
-  inline void ubfx(const Register& rd,
-                   const Register& rn,
-                   unsigned lsb,
-                   unsigned width) {
+  void ubfx(const Register& rd,
+            const Register& rn,
+            unsigned lsb,
+            unsigned width) {
     VIXL_ASSERT(width >= 1);
     VIXL_ASSERT(lsb + width <= rn.size());
     ubfm(rd, rn, lsb, lsb + width - 1);
   }
 
   // Unsigned extend byte.
-  inline void uxtb(const Register& rd, const Register& rn) {
+  void uxtb(const Register& rd, const Register& rn) {
     ubfm(rd, rn, 0, 7);
   }
 
   // Unsigned extend halfword.
-  inline void uxth(const Register& rd, const Register& rn) {
+  void uxth(const Register& rd, const Register& rn) {
     ubfm(rd, rn, 0, 15);
   }
 
   // Unsigned extend word.
-  inline void uxtw(const Register& rd, const Register& rn) {
+  void uxtw(const Register& rd, const Register& rn) {
     ubfm(rd, rn, 0, 31);
   }
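The shift and extend aliases follow the same mapping onto sbfm/ubfm. One worked case (wrapper and masm illustrative, as above):

    void ShiftAliasExample(vixl::Assembler* masm) {
      // lsl: immr = (64 - shift) % 64, imms = 63 - shift.
      masm->lsl(x0, x1, 4);  // emits: ubfm x0, x1, #60, #59
      masm->sxtb(x2, x3);    // emits: sbfm x2, x3, #0, #7
    }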
@@ -1230,7 +1249,7 @@ class Assembler {
   void cneg(const Register& rd, const Register& rn, Condition cond);
 
   // Rotate right.
-  inline void ror(const Register& rd, const Register& rs, unsigned shift) {
+  void ror(const Register& rd, const Register& rs, unsigned shift) {
     extr(rd, rs, rs, shift);
   }
@@ -1495,6 +1514,19 @@ class Assembler {
   // Load-acquire register.
   void ldar(const Register& rt, const MemOperand& src);
 
+  // Prefetch memory.
+  void prfm(PrefetchOperation op, const MemOperand& addr,
+            LoadStoreScalingOption option = PreferScaledOffset);
+
+  // Prefetch memory (with unscaled offset).
+  void prfum(PrefetchOperation op, const MemOperand& addr,
+             LoadStoreScalingOption option = PreferUnscaledOffset);
+
+  // Prefetch memory in the literal pool.
+  void prfm(PrefetchOperation op, RawLiteral* literal);
+
+  // Prefetch from pc + imm19 << 2.
+  void prfm(PrefetchOperation op, int imm19);
+
   // Move instructions. The default shift of -1 indicates that the move
   // instruction will calculate an appropriate 16-bit immediate and left shift
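A sketch of driving the new prefetch entry points (wrapper function and operands illustrative; PrefetchOperation values are defined in constants-a64.h below):

    void PrefetchExample(vixl::Assembler* masm) {
      masm->prfm(PLDL1KEEP, MemOperand(x0, 256));   // scaled-offset form
      masm->prfum(PSTL2STRM, MemOperand(x1, -16));  // unscaled-offset form
    }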
@@ -1638,12 +1670,21 @@ class Assembler {
   // FP round to integer (nearest with ties to away).
   void frinta(const FPRegister& fd, const FPRegister& fn);
 
+  // FP round to integer (implicit rounding).
+  void frinti(const FPRegister& fd, const FPRegister& fn);
+
   // FP round to integer (toward minus infinity).
   void frintm(const FPRegister& fd, const FPRegister& fn);
 
   // FP round to integer (nearest with ties to even).
   void frintn(const FPRegister& fd, const FPRegister& fn);
 
+  // FP round to integer (toward plus infinity).
+  void frintp(const FPRegister& fd, const FPRegister& fn);
+
+  // FP round to integer (exact, implicit rounding).
+  void frintx(const FPRegister& fd, const FPRegister& fn);
+
   // FP round to integer (towards zero).
   void frintz(const FPRegister& fd, const FPRegister& fn);
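The explicit rounding modes differ only on non-integral inputs; with d1 holding 2.5 (wrapper and masm illustrative):

    void RoundingExample(vixl::Assembler* masm) {
      masm->frinta(d0, d1);  // ties away from zero   -> 3.0
      masm->frintm(d0, d1);  // toward minus infinity -> 2.0
      masm->frintn(d0, d1);  // ties to even          -> 2.0
      masm->frintp(d0, d1);  // toward plus infinity  -> 3.0
      masm->frintz(d0, d1);  // toward zero           -> 2.0
    }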
@@ -1705,16 +1746,16 @@ class Assembler {
   // Emit generic instructions.
 
   // Emit raw instructions into the instruction stream.
-  inline void dci(Instr raw_inst) { Emit(raw_inst); }
+  void dci(Instr raw_inst) { Emit(raw_inst); }
 
   // Emit 32 bits of data into the instruction stream.
-  inline void dc32(uint32_t data) {
+  void dc32(uint32_t data) {
     VIXL_ASSERT(buffer_monitor_ > 0);
     buffer_->Emit32(data);
   }
 
   // Emit 64 bits of data into the instruction stream.
-  inline void dc64(uint64_t data) {
+  void dc64(uint64_t data) {
     VIXL_ASSERT(buffer_monitor_ > 0);
     buffer_->Emit64(data);
   }
@@ -1849,14 +1890,14 @@ class Assembler {
     }
   }
 
-  static inline Instr ImmS(unsigned imms, unsigned reg_size) {
+  static Instr ImmS(unsigned imms, unsigned reg_size) {
     VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(imms)) ||
                 ((reg_size == kWRegSize) && is_uint5(imms)));
     USE(reg_size);
     return imms << ImmS_offset;
   }
 
-  static inline Instr ImmR(unsigned immr, unsigned reg_size) {
+  static Instr ImmR(unsigned immr, unsigned reg_size) {
     VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
                 ((reg_size == kWRegSize) && is_uint5(immr)));
     USE(reg_size);
@@ -1864,7 +1905,7 @@ class Assembler {
     return immr << ImmR_offset;
   }
 
-  static inline Instr ImmSetBits(unsigned imms, unsigned reg_size) {
+  static Instr ImmSetBits(unsigned imms, unsigned reg_size) {
     VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
     VIXL_ASSERT(is_uint6(imms));
     VIXL_ASSERT((reg_size == kXRegSize) || is_uint6(imms + 3));
@@ -1872,7 +1913,7 @@ class Assembler {
     return imms << ImmSetBits_offset;
   }
 
-  static inline Instr ImmRotate(unsigned immr, unsigned reg_size) {
+  static Instr ImmRotate(unsigned immr, unsigned reg_size) {
     VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
     VIXL_ASSERT(((reg_size == kXRegSize) && is_uint6(immr)) ||
                 ((reg_size == kWRegSize) && is_uint5(immr)));
@@ -1880,12 +1921,12 @@ class Assembler {
     return immr << ImmRotate_offset;
   }
 
-  static inline Instr ImmLLiteral(int imm19) {
+  static Instr ImmLLiteral(int imm19) {
     VIXL_ASSERT(is_int19(imm19));
     return truncate_to_int19(imm19) << ImmLLiteral_offset;
   }
 
-  static inline Instr BitN(unsigned bitn, unsigned reg_size) {
+  static Instr BitN(unsigned bitn, unsigned reg_size) {
     VIXL_ASSERT((reg_size == kWRegSize) || (reg_size == kXRegSize));
     VIXL_ASSERT((reg_size == kXRegSize) || (bitn == 0));
     USE(reg_size);
@@ -1943,6 +1984,11 @@ class Assembler {
     return shift_amount << ImmShiftLS_offset;
   }
 
+  static Instr ImmPrefetchOperation(int imm5) {
+    VIXL_ASSERT(is_uint5(imm5));
+    return imm5 << ImmPrefetchOperation_offset;
+  }
+
   static Instr ImmException(int imm16) {
     VIXL_ASSERT(is_uint16(imm16));
     return imm16 << ImmException_offset;
@@ -2003,12 +2049,32 @@ class Assembler {
     return scale << FPScale_offset;
   }
 
+  // Immediate field checking helpers.
+  static bool IsImmAddSub(int64_t immediate);
+  static bool IsImmConditionalCompare(int64_t immediate);
+  static bool IsImmFP32(float imm);
+  static bool IsImmFP64(double imm);
+  static bool IsImmLogical(uint64_t value,
+                           unsigned width,
+                           unsigned* n = NULL,
+                           unsigned* imm_s = NULL,
+                           unsigned* imm_r = NULL);
+  static bool IsImmLSPair(int64_t offset, LSDataSize size);
+  static bool IsImmLSScaled(int64_t offset, LSDataSize size);
+  static bool IsImmLSUnscaled(int64_t offset);
+  static bool IsImmMovn(uint64_t imm, unsigned reg_size);
+  static bool IsImmMovz(uint64_t imm, unsigned reg_size);
+
   // Size of the code generated since label to the current position.
   size_t SizeOfCodeGeneratedSince(Label* label) const {
     VIXL_ASSERT(label->IsBound());
     return buffer_->OffsetFrom(label->location());
   }
 
+  size_t SizeOfCodeGenerated() const {
+    return buffer_->CursorOffset();
+  }
+
   size_t BufferCapacity() const { return buffer_->capacity(); }
 
   size_t RemainingBufferSpace() const { return buffer_->RemainingBytes(); }
@@ -2025,7 +2091,7 @@ class Assembler {
     }
   }
 
-#ifdef DEBUG
+#ifdef VIXL_DEBUG
   void AcquireBuffer() {
     VIXL_ASSERT(buffer_monitor_ >= 0);
     buffer_monitor_++;
@@ -2037,16 +2103,16 @@ class Assembler {
   }
 #endif
 
-  inline PositionIndependentCodeOption pic() {
+  PositionIndependentCodeOption pic() const {
     return pic_;
   }
 
-  inline bool AllowPageOffsetDependentCode() {
+  bool AllowPageOffsetDependentCode() const {
     return (pic() == PageOffsetDependentCode) ||
            (pic() == PositionDependentCode);
   }
 
-  static inline const Register& AppropriateZeroRegFor(const CPURegister& reg) {
+  static const Register& AppropriateZeroRegFor(const CPURegister& reg) {
     return reg.Is64Bits() ? xzr : wzr;
   }
@@ -2056,14 +2122,15 @@ class Assembler {
                  const MemOperand& addr,
                  LoadStoreOp op,
                  LoadStoreScalingOption option = PreferScaledOffset);
-  static bool IsImmLSUnscaled(int64_t offset);
-  static bool IsImmLSScaled(int64_t offset, LSDataSize size);
 
   void LoadStorePair(const CPURegister& rt,
                      const CPURegister& rt2,
                      const MemOperand& addr,
                      LoadStorePairOp op);
-  static bool IsImmLSPair(int64_t offset, LSDataSize size);
+
+  void Prefetch(PrefetchOperation op,
+                const MemOperand& addr,
+                LoadStoreScalingOption option = PreferScaledOffset);
 
   // TODO(all): The third parameter should be passed by reference but gcc 4.8.2
   // reports a bogus uninitialised warning then.
@@ -2077,18 +2144,12 @@ class Assembler {
                unsigned imm_s,
                unsigned imm_r,
                LogicalOp op);
-  static bool IsImmLogical(uint64_t value,
-                           unsigned width,
-                           unsigned* n = NULL,
-                           unsigned* imm_s = NULL,
-                           unsigned* imm_r = NULL);
 
   void ConditionalCompare(const Register& rn,
                           const Operand& operand,
                           StatusFlags nzcv,
                           Condition cond,
                           ConditionalCompareOp op);
-  static bool IsImmConditionalCompare(int64_t immediate);
 
   void AddSubWithCarry(const Register& rd,
                        const Register& rn,
@@ -2096,8 +2157,6 @@ class Assembler {
                        FlagsUpdate S,
                        AddSubWithCarryOp op);
 
-  static bool IsImmFP32(float imm);
-  static bool IsImmFP64(double imm);
-
   // Functions for emulating operands not directly supported by the instruction
   // set.
@@ -2115,7 +2174,6 @@ class Assembler {
                   const Operand& operand,
                   FlagsUpdate S,
                   AddSubOp op);
-  static bool IsImmAddSub(int64_t immediate);
 
   // Find an appropriate LoadStoreOp or LoadStorePairOp for the specified
   // registers. Only simple loads are supported; sign- and zero-extension (such
@@ -2180,6 +2238,12 @@ class Assembler {
                          const FPRegister& fa,
                          FPDataProcessing3SourceOp op);
 
+  // Encode the specified MemOperand for the specified access size and scaling
+  // preference.
+  Instr LoadStoreMemOperand(const MemOperand& addr,
+                            LSDataSize size,
+                            LoadStoreScalingOption option);
+
   // Link the current (not-yet-emitted) instruction to the specified label, then
   // return an offset to be encoded in the instruction. If the label is not yet
   // bound, an offset of 0 is returned.
@@ -2205,7 +2269,7 @@ class Assembler {
   CodeBuffer* buffer_;
   PositionIndependentCodeOption pic_;
 
-#ifdef DEBUG
+#ifdef VIXL_DEBUG
   int64_t buffer_monitor_;
 #endif
 };
@@ -2239,7 +2303,7 @@ class CodeBufferCheckScope {
                        AssertPolicy assert_policy = kMaximumSize)
       : assm_(assm) {
     if (check_policy == kCheck) assm->EnsureSpaceFor(size);
-#ifdef DEBUG
+#ifdef VIXL_DEBUG
     assm->bind(&start_);
     size_ = size;
    assert_policy_ = assert_policy;
@@ -2251,7 +2315,7 @@ class CodeBufferCheckScope {
   // This is a shortcut for CodeBufferCheckScope(assm, 0, kNoCheck, kNoAssert).
   explicit CodeBufferCheckScope(Assembler* assm) : assm_(assm) {
-#ifdef DEBUG
+#ifdef VIXL_DEBUG
     size_ = 0;
     assert_policy_ = kNoAssert;
     assm->AcquireBuffer();
@@ -2259,7 +2323,7 @@ class CodeBufferCheckScope {
   }
 
   ~CodeBufferCheckScope() {
-#ifdef DEBUG
+#ifdef VIXL_DEBUG
     assm_->ReleaseBuffer();
     switch (assert_policy_) {
       case kNoAssert: break;
@@ -2277,7 +2341,7 @@ class CodeBufferCheckScope {
  protected:
   Assembler* assm_;
-#ifdef DEBUG
+#ifdef VIXL_DEBUG
   Label start_;
   size_t size_;
   AssertPolicy assert_policy_;

disas/libvixl/a64/constants-a64.h

@@ -31,12 +31,6 @@ namespace vixl {
 const unsigned kNumberOfRegisters = 32;
 const unsigned kNumberOfFPRegisters = 32;
 
-// Callee saved registers are x21-x30(lr).
-const int kNumberOfCalleeSavedRegisters = 10;
-const int kFirstCalleeSavedRegisterIndex = 21;
-// Callee saved FP registers are d8-d15.
-const int kNumberOfCalleeSavedFPRegisters = 8;
-const int kFirstCalleeSavedFPRegisterIndex = 8;
 
 #define REGISTER_CODE_LIST(R) \
 R(0) R(1) R(2) R(3) R(4) R(5) R(6) R(7) \
@@ -53,7 +47,6 @@ V_(Ra, 14, 10, Bits)       /* Third source register. */ \
 V_(Rt, 4, 0, Bits)         /* Load/store register. */ \
 V_(Rt2, 14, 10, Bits)      /* Load/store second register. */ \
 V_(Rs, 20, 16, Bits)       /* Exclusive access status. */ \
-V_(PrefetchMode, 4, 0, Bits) \
 \
 /* Common bits */ \
 V_(SixtyFourBits, 31, 31, Bits) \
@@ -109,6 +102,10 @@ V_(ImmLSUnsigned, 21, 10, Bits) \
 V_(ImmLSPair, 21, 15, SignedBits) \
 V_(SizeLS, 31, 30, Bits) \
 V_(ImmShiftLS, 12, 12, Bits) \
+V_(ImmPrefetchOperation, 4, 0, Bits) \
+V_(PrefetchHint, 4, 3, Bits) \
+V_(PrefetchTarget, 2, 1, Bits) \
+V_(PrefetchStream, 0, 0, Bits) \
 \
 /* Other immediates */ \
 V_(ImmUncondBranch, 25, 0, SignedBits) \
@@ -269,6 +266,29 @@ enum BarrierType {
   BarrierAll = 3
 };
 
+enum PrefetchOperation {
+  PLDL1KEEP = 0x00,
+  PLDL1STRM = 0x01,
+  PLDL2KEEP = 0x02,
+  PLDL2STRM = 0x03,
+  PLDL3KEEP = 0x04,
+  PLDL3STRM = 0x05,
+
+  PLIL1KEEP = 0x08,
+  PLIL1STRM = 0x09,
+  PLIL2KEEP = 0x0a,
+  PLIL2STRM = 0x0b,
+  PLIL3KEEP = 0x0c,
+  PLIL3STRM = 0x0d,
+
+  PSTL1KEEP = 0x10,
+  PSTL1STRM = 0x11,
+  PSTL2KEEP = 0x12,
+  PSTL2STRM = 0x13,
+  PSTL3KEEP = 0x14,
+  PSTL3STRM = 0x15
+};
+
 // System/special register names.
 // This information is not encoded as one field but as the concatenation of
 // multiple fields (Op0<0>, Op1, Crn, Crm, Op2).
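These values line up with the ImmPrefetchOperation field split introduced above: bits [4:3] pick the hint (ld/li/st), bits [2:1] the cache level minus one, and bit [0] the keep/stream policy. A sketch of the decomposition (helper names hypothetical; assumes C++11 for constexpr/static_assert):

    constexpr unsigned PrefetchHintOf(unsigned op)   { return (op >> 3) & 3; }
    constexpr unsigned PrefetchLevelOf(unsigned op)  { return ((op >> 1) & 3) + 1; }
    constexpr unsigned PrefetchStreamOf(unsigned op) { return op & 1; }
    static_assert(PrefetchHintOf(PSTL2STRM) == 2, "store hint ('st')");
    static_assert(PrefetchLevelOf(PSTL2STRM) == 2, "targets the L2 cache");
    static_assert(PrefetchStreamOf(PSTL2STRM) == 1, "streaming variant");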
@@ -605,6 +625,12 @@ enum LoadStoreAnyOp {
   LoadStoreAnyFixed = 0x08000000
 };
 
+// Any load pair or store pair.
+enum LoadStorePairAnyOp {
+  LoadStorePairAnyFMask = 0x3a000000,
+  LoadStorePairAnyFixed = 0x28000000
+};
+
 #define LOAD_STORE_PAIR_OP_LIST(V) \
   V(STP, w, 0x00000000), \
   V(LDP, w, 0x00400000), \
@@ -703,17 +729,6 @@ enum LoadLiteralOp {
   V(LD, R, d, 0xC4400000)
 
-// Load/store unscaled offset.
-enum LoadStoreUnscaledOffsetOp {
-  LoadStoreUnscaledOffsetFixed = 0x38000000,
-  LoadStoreUnscaledOffsetFMask = 0x3B200C00,
-  LoadStoreUnscaledOffsetMask = 0xFFE00C00,
-  #define LOAD_STORE_UNSCALED(A, B, C, D) \
-  A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
-  LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
-  #undef LOAD_STORE_UNSCALED
-};
-
 // Load/store (post, pre, offset and unsigned.)
 enum LoadStoreOp {
   LoadStoreOpMask = 0xC4C00000,
@@ -724,6 +739,18 @@ enum LoadStoreOp {
   PRFM = 0xC0800000
 };
 
+// Load/store unscaled offset.
+enum LoadStoreUnscaledOffsetOp {
+  LoadStoreUnscaledOffsetFixed = 0x38000000,
+  LoadStoreUnscaledOffsetFMask = 0x3B200C00,
+  LoadStoreUnscaledOffsetMask = 0xFFE00C00,
+  PRFUM = LoadStoreUnscaledOffsetFixed | PRFM,
+  #define LOAD_STORE_UNSCALED(A, B, C, D) \
+  A##U##B##_##C = LoadStoreUnscaledOffsetFixed | D
+  LOAD_STORE_OP_LIST(LOAD_STORE_UNSCALED)
+  #undef LOAD_STORE_UNSCALED
+};
+
 // Load/store post index.
 enum LoadStorePostIndex {
   LoadStorePostIndexFixed = 0x38000400,

disas/libvixl/a64/decoder-a64.h

@@ -108,7 +108,7 @@ class DecoderVisitor {
   }
 
  private:
-  VisitorConstness constness_;
+  const VisitorConstness constness_;
 };

disas/libvixl/a64/disasm-a64.cc

@@ -34,6 +34,7 @@ Disassembler::Disassembler() {
   buffer_ = reinterpret_cast<char*>(malloc(buffer_size_));
   buffer_pos_ = 0;
   own_buffer_ = true;
+  code_address_offset_ = 0;
 }
@@ -42,6 +43,7 @@ Disassembler::Disassembler(char* text_buffer, int buffer_size) {
   buffer_ = text_buffer;
   buffer_pos_ = 0;
   own_buffer_ = false;
+  code_address_offset_ = 0;
 }
@@ -739,9 +741,25 @@ void Disassembler::VisitMoveWideImmediate(const Instruction* instr) {
   // shift calculation.
   switch (instr->Mask(MoveWideImmediateMask)) {
     case MOVN_w:
-    case MOVN_x: mnemonic = "movn"; break;
+    case MOVN_x:
+      if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0)) {
+        if ((instr->SixtyFourBits() == 0) && (instr->ImmMoveWide() == 0xffff)) {
+          mnemonic = "movn";
+        } else {
+          mnemonic = "mov";
+          form = "'Rd, 'IMoveNeg";
+        }
+      } else {
+        mnemonic = "movn";
+      }
+      break;
     case MOVZ_w:
-    case MOVZ_x: mnemonic = "movz"; break;
+    case MOVZ_x:
+      if ((instr->ImmMoveWide()) || (instr->ShiftMoveWide() == 0))
+        mnemonic = "mov";
+      else
+        mnemonic = "movz";
+      break;
     case MOVK_w:
     case MOVK_x: mnemonic = "movk"; form = "'Rd, 'IMoveLSL"; break;
     default: VIXL_UNREACHABLE();
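With this change, representable wide moves now print as the architectural mov alias. A sketch of exercising it through the usual decoder plumbing (function name and output handling illustrative):

    #include <cstdint>
    #include <cstdio>
    void PrintMovnAlias() {
      vixl::Decoder decoder;
      vixl::Disassembler disasm;
      decoder.AppendVisitor(&disasm);
      uint32_t movn = 0x92800000;  // movn x0, #0x0
      decoder.Decode(reinterpret_cast<const vixl::Instruction*>(&movn));
      // VIXL 1.6 printed "movn x0, #0x0"; 1.7 prints the alias
      // "mov x0, #0xffffffffffffffff" (the negated immediate).
      printf("%s\n", disasm.GetOutput());
    }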
@@ -806,7 +824,7 @@ void Disassembler::VisitLoadStoreUnsignedOffset(const Instruction* instr) {
     case A##_unsigned: mnemonic = B; form = C ", ['Xns'ILU]"; break;
     LOAD_STORE_LIST(LS_UNSIGNEDOFFSET)
     #undef LS_UNSIGNEDOFFSET
-    case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xn'ILU]";
+    case PRFM_unsigned: mnemonic = "prfm"; form = "'PrefOp, ['Xns'ILU]";
   }
   Format(instr, mnemonic, form);
 }
@@ -833,6 +851,7 @@ void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
   const char *form_x = "'Xt, ['Xns'ILS]";
   const char *form_s = "'St, ['Xns'ILS]";
   const char *form_d = "'Dt, ['Xns'ILS]";
+  const char *form_prefetch = "'PrefOp, ['Xns'ILS]";
 
   switch (instr->Mask(LoadStoreUnscaledOffsetMask)) {
     case STURB_w: mnemonic = "sturb"; break;
@@ -852,6 +871,7 @@ void Disassembler::VisitLoadStoreUnscaledOffset(const Instruction* instr) {
     case LDURSH_x: form = form_x;  // Fall through.
     case LDURSH_w: mnemonic = "ldursh"; break;
     case LDURSW_x: mnemonic = "ldursw"; form = form_x; break;
+    case PRFUM: mnemonic = "prfum"; form = form_prefetch; break;
     default: form = "(LoadStoreUnscaledOffset)";
   }
   Format(instr, mnemonic, form);
@@ -872,6 +892,11 @@ void Disassembler::VisitLoadLiteral(const Instruction* instr) {
       form = "'Xt, 'ILLiteral 'LValue";
       break;
     }
+    case PRFM_lit: {
+      mnemonic = "prfm";
+      form = "'PrefOp, 'ILLiteral 'LValue";
+      break;
+    }
     default: mnemonic = "unimplemented";
   }
   Format(instr, mnemonic, form);
@@ -1344,7 +1369,7 @@ void Disassembler::AppendPCRelativeOffsetToOutput(const Instruction* instr,
 void Disassembler::AppendAddressToOutput(const Instruction* instr,
                                          const void* addr) {
   USE(instr);
-  AppendToOutput("(addr %p)", addr);
+  AppendToOutput("(addr 0x%" PRIxPTR ")", reinterpret_cast<uintptr_t>(addr));
 }
@@ -1360,6 +1385,40 @@ void Disassembler::AppendDataAddressToOutput(const Instruction* instr,
 }
 
+void Disassembler::AppendCodeRelativeAddressToOutput(const Instruction* instr,
+                                                     const void* addr) {
+  USE(instr);
+  int64_t rel_addr = CodeRelativeAddress(addr);
+  if (rel_addr >= 0) {
+    AppendToOutput("(addr 0x%" PRIx64 ")", rel_addr);
+  } else {
+    AppendToOutput("(addr -0x%" PRIx64 ")", -rel_addr);
+  }
+}
+
+
+void Disassembler::AppendCodeRelativeCodeAddressToOutput(
+    const Instruction* instr, const void* addr) {
+  AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::AppendCodeRelativeDataAddressToOutput(
+    const Instruction* instr, const void* addr) {
+  AppendCodeRelativeAddressToOutput(instr, addr);
+}
+
+
+void Disassembler::MapCodeAddress(int64_t base_address,
+                                  const Instruction* instr_address) {
+  set_code_address_offset(
+      base_address - reinterpret_cast<intptr_t>(instr_address));
+}
+
+
+int64_t Disassembler::CodeRelativeAddress(const void* addr) {
+  return reinterpret_cast<intptr_t>(addr) + code_address_offset();
+}
+
+
 void Disassembler::Format(const Instruction* instr, const char* mnemonic,
                           const char* format) {
   VIXL_ASSERT(mnemonic != NULL);
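A sketch of how a caller might use the new mapping, disassembling a buffer as if it were loaded at a fixed target address rather than at its host location (wrapper name and parameters illustrative):

    void DisassembleMapped(vixl::Decoder* decoder, vixl::Disassembler* disasm,
                           const vixl::Instruction* start, int64_t load_addr) {
      // Printed addresses become relative to load_addr instead of the
      // host buffer address.
      disasm->MapCodeAddress(load_addr, start);
      decoder->Decode(start);
    }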
@@ -1486,16 +1545,20 @@ int Disassembler::SubstituteImmediateField(const Instruction* instr,
   VIXL_ASSERT(format[0] == 'I');
 
   switch (format[1]) {
-    case 'M': {  // IMoveImm or IMoveLSL.
-      if (format[5] == 'I') {
-        uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
-        AppendToOutput("#0x%" PRIx64, imm);
-      } else {
-        VIXL_ASSERT(format[5] == 'L');
+    case 'M': {  // IMoveImm, IMoveNeg or IMoveLSL.
+      if (format[5] == 'L') {
         AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
         if (instr->ShiftMoveWide() > 0) {
           AppendToOutput(", lsl #%" PRId64, 16 * instr->ShiftMoveWide());
         }
+      } else {
+        VIXL_ASSERT((format[5] == 'I') || (format[5] == 'N'));
+        uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
+        if (format[5] == 'N')
+          imm = ~imm;
+        if (!instr->SixtyFourBits())
+          imm &= UINT64_C(0xffffffff);
+        AppendToOutput("#0x%" PRIx64, imm);
       }
       return 8;
     }
@@ -1634,14 +1697,31 @@ int Disassembler::SubstituteLiteralField(const Instruction* instr,
   VIXL_ASSERT(strncmp(format, "LValue", 6) == 0);
   USE(format);
 
+  const void * address = instr->LiteralAddress<const void *>();
   switch (instr->Mask(LoadLiteralMask)) {
     case LDR_w_lit:
     case LDR_x_lit:
     case LDRSW_x_lit:
     case LDR_s_lit:
     case LDR_d_lit:
-      AppendDataAddressToOutput(instr, instr->LiteralAddress());
+      AppendCodeRelativeDataAddressToOutput(instr, address);
       break;
+    case PRFM_lit: {
+      // Use the prefetch hint to decide how to print the address.
+      switch (instr->PrefetchHint()) {
+        case 0x0:  // PLD: prefetch for load.
+        case 0x2:  // PST: prepare for store.
+          AppendCodeRelativeDataAddressToOutput(instr, address);
+          break;
+        case 0x1:  // PLI: preload instructions.
+          AppendCodeRelativeCodeAddressToOutput(instr, address);
+          break;
+        case 0x3:  // Unallocated hint.
+          AppendCodeRelativeAddressToOutput(instr, address);
+          break;
+      }
+      break;
+    }
     default:
       VIXL_UNREACHABLE();
   }
@@ -1701,17 +1781,22 @@ int Disassembler::SubstitutePCRelAddressField(const Instruction* instr,
               (strcmp(format, "AddrPCRelPage") == 0));  // Used by `adrp`.
 
   int64_t offset = instr->ImmPCRel();
-  const Instruction * base = instr;
 
+  // Compute the target address based on the effective address (after applying
+  // code_address_offset). This is required for correct behaviour of adrp.
+  const Instruction* base = instr + code_address_offset();
   if (format[9] == 'P') {
     offset *= kPageSize;
     base = AlignDown(base, kPageSize);
   }
 
-  const void* target = reinterpret_cast<const void*>(base + offset);
+  // Strip code_address_offset before printing, so we can use the
+  // semantically-correct AppendCodeRelativeAddressToOutput.
+  const void* target =
+      reinterpret_cast<const void*>(base + offset - code_address_offset());
 
   AppendPCRelativeOffsetToOutput(instr, offset);
   AppendToOutput(" ");
-  AppendAddressToOutput(instr, target);
+  AppendCodeRelativeAddressToOutput(instr, target);
 
   return 13;
 }
@@ -1738,7 +1823,7 @@ int Disassembler::SubstituteBranchTargetField(const Instruction* instr,
   AppendPCRelativeOffsetToOutput(instr, offset);
   AppendToOutput(" ");
-  AppendCodeAddressToOutput(instr, target_address);
+  AppendCodeRelativeCodeAddressToOutput(instr, target_address);
 
   return 8;
 }
@@ -1805,13 +1890,26 @@ int Disassembler::SubstitutePrefetchField(const Instruction* instr,
   VIXL_ASSERT(format[0] == 'P');
   USE(format);
 
-  int prefetch_mode = instr->PrefetchMode();
+  static const char* hints[] = {"ld", "li", "st"};
+  static const char* stream_options[] = {"keep", "strm"};
 
-  const char* ls = (prefetch_mode & 0x10) ? "st" : "ld";
-  int level = (prefetch_mode >> 1) + 1;
-  const char* ks = (prefetch_mode & 1) ? "strm" : "keep";
+  unsigned hint = instr->PrefetchHint();
+  unsigned target = instr->PrefetchTarget() + 1;
+  unsigned stream = instr->PrefetchStream();
 
-  AppendToOutput("p%sl%d%s", ls, level, ks);
+  if ((hint >= (sizeof(hints) / sizeof(hints[0]))) || (target > 3)) {
+    // Unallocated prefetch operations.
+    int prefetch_mode = instr->ImmPrefetchOperation();
+    AppendToOutput("#0b%c%c%c%c%c",
+                   (prefetch_mode & (1 << 4)) ? '1' : '0',
+                   (prefetch_mode & (1 << 3)) ? '1' : '0',
+                   (prefetch_mode & (1 << 2)) ? '1' : '0',
+                   (prefetch_mode & (1 << 1)) ? '1' : '0',
+                   (prefetch_mode & (1 << 0)) ? '1' : '0');
+  } else {
+    VIXL_ASSERT(stream < (sizeof(stream_options) / sizeof(stream_options[0])));
+    AppendToOutput("p%sl%d%s", hints[hint], target, stream_options[stream]);
+  }
 
   return 6;
 }

disas/libvixl/a64/disasm-a64.h

@@ -43,7 +43,7 @@ class Disassembler: public DecoderVisitor {
   char* GetOutput();
 
   // Declare all Visitor functions.
-  #define DECLARE(A) void Visit##A(const Instruction* instr);
+  #define DECLARE(A) virtual void Visit##A(const Instruction* instr);
   VISITOR_LIST(DECLARE)
   #undef DECLARE
@@ -65,23 +65,45 @@ class Disassembler: public DecoderVisitor {
   // Prints an address, in the general case. It can be code or data. This is
   // used for example to print the target address of an ADR instruction.
-  virtual void AppendAddressToOutput(const Instruction* instr,
-                                     const void* addr);
+  virtual void AppendCodeRelativeAddressToOutput(const Instruction* instr,
+                                                 const void* addr);
 
   // Prints the address of some code.
   // This is used for example to print the target address of a branch to an
   // immediate offset.
   // A sub-class can for example override this method to lookup the address and
   // print an appropriate name.
-  virtual void AppendCodeAddressToOutput(const Instruction* instr,
-                                         const void* addr);
+  virtual void AppendCodeRelativeCodeAddressToOutput(const Instruction* instr,
+                                                     const void* addr);
 
   // Prints the address of some data.
   // This is used for example to print the source address of a load literal
   // instruction.
+  virtual void AppendCodeRelativeDataAddressToOutput(const Instruction* instr,
+                                                     const void* addr);
+
+  // Same as the above, but for addresses that are not relative to the code
+  // buffer. They are currently not used by VIXL.
+  virtual void AppendAddressToOutput(const Instruction* instr,
+                                     const void* addr);
+  virtual void AppendCodeAddressToOutput(const Instruction* instr,
+                                         const void* addr);
   virtual void AppendDataAddressToOutput(const Instruction* instr,
                                          const void* addr);
 
+ public:
+  // Get/Set the offset that should be added to code addresses when printing
+  // code-relative addresses in the AppendCodeRelative<Type>AddressToOutput()
+  // helpers.
+  // Below is an example of how a branch immediate instruction in memory at
+  // address 0xb010200 would disassemble with different offsets.
+  //   Base address | Disassembly
+  //          0x0   | 0xb010200:  b #+0xcc (addr 0xb0102cc)
+  //      0x10000   | 0xb000200:  b #+0xcc (addr 0xb0002cc)
+  //    0xb010200   | 0x0:        b #+0xcc (addr 0xcc)
+  void MapCodeAddress(int64_t base_address, const Instruction* instr_address);
+  int64_t CodeRelativeAddress(const void* instr);
+
  private:
   void Format(
       const Instruction* instr, const char* mnemonic, const char* format);
@@ -101,32 +123,40 @@ class Disassembler: public DecoderVisitor {
   int SubstitutePrefetchField(const Instruction* instr, const char* format);
   int SubstituteBarrierField(const Instruction* instr, const char* format);
 
-  inline bool RdIsZROrSP(const Instruction* instr) const {
+  bool RdIsZROrSP(const Instruction* instr) const {
     return (instr->Rd() == kZeroRegCode);
   }
 
-  inline bool RnIsZROrSP(const Instruction* instr) const {
+  bool RnIsZROrSP(const Instruction* instr) const {
     return (instr->Rn() == kZeroRegCode);
   }
 
-  inline bool RmIsZROrSP(const Instruction* instr) const {
+  bool RmIsZROrSP(const Instruction* instr) const {
     return (instr->Rm() == kZeroRegCode);
   }
 
-  inline bool RaIsZROrSP(const Instruction* instr) const {
+  bool RaIsZROrSP(const Instruction* instr) const {
     return (instr->Ra() == kZeroRegCode);
   }
 
   bool IsMovzMovnImm(unsigned reg_size, uint64_t value);
 
+  int64_t code_address_offset() const { return code_address_offset_; }
+
  protected:
   void ResetOutput();
   void AppendToOutput(const char* string, ...) PRINTF_CHECK(2, 3);
 
+  void set_code_address_offset(int64_t code_address_offset) {
+    code_address_offset_ = code_address_offset;
+  }
+
   char* buffer_;
   uint32_t buffer_pos_;
   uint32_t buffer_size_;
   bool own_buffer_;
+
+  int64_t code_address_offset_;
 };

disas/libvixl/a64/instructions-a64.cc

@@ -30,6 +30,20 @@
 
 namespace vixl {
 
+// Floating-point infinity values.
+const float kFP32PositiveInfinity = rawbits_to_float(0x7f800000);
+const float kFP32NegativeInfinity = rawbits_to_float(0xff800000);
+const double kFP64PositiveInfinity =
+    rawbits_to_double(UINT64_C(0x7ff0000000000000));
+const double kFP64NegativeInfinity =
+    rawbits_to_double(UINT64_C(0xfff0000000000000));
+
+
+// The default NaN values (for FPCR.DN=1).
+const double kFP64DefaultNaN = rawbits_to_double(UINT64_C(0x7ff8000000000000));
+const float kFP32DefaultNaN = rawbits_to_float(0x7fc00000);
+
+
 static uint64_t RotateRight(uint64_t value,
                             unsigned int rotate,
                             unsigned int width) {
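These constants are plain IEEE-754 bit patterns: 0x7ff0000000000000 is the double with an all-ones exponent and zero mantissa (positive infinity), and 0x7ff8000000000000 additionally sets the top mantissa bit (the default quiet NaN). A quick standard-library check (test function name illustrative):

    #include <cassert>
    #include <cmath>
    void CheckFPConstants() {
      assert(std::isinf(vixl::kFP64PositiveInfinity));
      assert(vixl::kFP64PositiveInfinity > 0.0);
      assert(std::isnan(vixl::kFP64DefaultNaN));
    }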
@@ -54,6 +68,55 @@ static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
 }
 
+bool Instruction::IsLoad() const {
+  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+    return false;
+  }
+
+  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+    return Mask(LoadStorePairLBit) != 0;
+  } else {
+    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+    switch (op) {
+      case LDRB_w:
+      case LDRH_w:
+      case LDR_w:
+      case LDR_x:
+      case LDRSB_w:
+      case LDRSB_x:
+      case LDRSH_w:
+      case LDRSH_x:
+      case LDRSW_x:
+      case LDR_s:
+      case LDR_d: return true;
+      default: return false;
+    }
+  }
+}
+
+
+bool Instruction::IsStore() const {
+  if (Mask(LoadStoreAnyFMask) != LoadStoreAnyFixed) {
+    return false;
+  }
+
+  if (Mask(LoadStorePairAnyFMask) == LoadStorePairAnyFixed) {
+    return Mask(LoadStorePairLBit) == 0;
+  } else {
+    LoadStoreOp op = static_cast<LoadStoreOp>(Mask(LoadStoreOpMask));
+    switch (op) {
+      case STRB_w:
+      case STRH_w:
+      case STR_w:
+      case STR_x:
+      case STR_s:
+      case STR_d: return true;
+      default: return false;
+    }
+  }
+}
+
+
 // Logical immediates can't encode zero, so a return value of zero is used to
 // indicate a failure case. Specifically, where the constraints on imm_s are
 // not met.
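A sketch of classifying a raw instruction word with the new predicates (the encoding below is ldr x0, [x1]; function name illustrative):

    #include <cassert>
    #include <cstdint>
    void ClassifyExample() {
      uint32_t word = 0xf9400020;  // ldr x0, [x1]
      const vixl::Instruction* instr =
          reinterpret_cast<const vixl::Instruction*>(&word);
      assert(instr->IsLoadOrStore());
      assert(instr->IsLoad() && !instr->IsStore());
    }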

disas/libvixl/a64/instructions-a64.h

@@ -96,6 +96,17 @@ const unsigned kDoubleExponentBits = 11;
 const unsigned kFloatMantissaBits = 23;
 const unsigned kFloatExponentBits = 8;
 
+// Floating-point infinity values.
+extern const float kFP32PositiveInfinity;
+extern const float kFP32NegativeInfinity;
+extern const double kFP64PositiveInfinity;
+extern const double kFP64NegativeInfinity;
+
+// The default NaN values (for FPCR.DN=1).
+extern const double kFP64DefaultNaN;
+extern const float kFP32DefaultNaN;
+
 enum LSDataSize {
   LSByte = 0,
   LSHalfword = 1,
@@ -140,33 +151,33 @@ enum Reg31Mode {
 class Instruction {
  public:
-  inline Instr InstructionBits() const {
+  Instr InstructionBits() const {
     return *(reinterpret_cast<const Instr*>(this));
   }
 
-  inline void SetInstructionBits(Instr new_instr) {
+  void SetInstructionBits(Instr new_instr) {
     *(reinterpret_cast<Instr*>(this)) = new_instr;
   }
 
-  inline int Bit(int pos) const {
+  int Bit(int pos) const {
     return (InstructionBits() >> pos) & 1;
   }
 
-  inline uint32_t Bits(int msb, int lsb) const {
+  uint32_t Bits(int msb, int lsb) const {
     return unsigned_bitextract_32(msb, lsb, InstructionBits());
   }
 
-  inline int32_t SignedBits(int msb, int lsb) const {
+  int32_t SignedBits(int msb, int lsb) const {
     int32_t bits = *(reinterpret_cast<const int32_t*>(this));
     return signed_bitextract_32(msb, lsb, bits);
   }
 
-  inline Instr Mask(uint32_t mask) const {
+  Instr Mask(uint32_t mask) const {
     return InstructionBits() & mask;
   }
 
 #define DEFINE_GETTER(Name, HighBit, LowBit, Func) \
-  inline int64_t Name() const { return Func(HighBit, LowBit); }
+  int64_t Name() const { return Func(HighBit, LowBit); }
   INSTRUCTION_FIELDS_LIST(DEFINE_GETTER)
 #undef DEFINE_GETTER
@@ -182,56 +193,64 @@ class Instruction {
  float ImmFP32() const;
  double ImmFP64() const;

  LSDataSize SizeLSPair() const {
    return CalcLSPairDataSize(
        static_cast<LoadStorePairOp>(Mask(LoadStorePairMask)));
  }
  // Helpers.
  bool IsCondBranchImm() const {
    return Mask(ConditionalBranchFMask) == ConditionalBranchFixed;
  }

  bool IsUncondBranchImm() const {
    return Mask(UnconditionalBranchFMask) == UnconditionalBranchFixed;
  }

  bool IsCompareBranch() const {
    return Mask(CompareBranchFMask) == CompareBranchFixed;
  }

  bool IsTestBranch() const {
    return Mask(TestBranchFMask) == TestBranchFixed;
  }

  bool IsPCRelAddressing() const {
    return Mask(PCRelAddressingFMask) == PCRelAddressingFixed;
  }

  bool IsLogicalImmediate() const {
    return Mask(LogicalImmediateFMask) == LogicalImmediateFixed;
  }

  bool IsAddSubImmediate() const {
    return Mask(AddSubImmediateFMask) == AddSubImmediateFixed;
  }

  bool IsAddSubExtended() const {
    return Mask(AddSubExtendedFMask) == AddSubExtendedFixed;
  }

  bool IsLoadOrStore() const {
    return Mask(LoadStoreAnyFMask) == LoadStoreAnyFixed;
  }

  bool IsLoad() const;
  bool IsStore() const;

  bool IsLoadLiteral() const {
    // This includes PRFM_lit.
    return Mask(LoadLiteralFMask) == LoadLiteralFixed;
  }

  bool IsMovn() const {
    return (Mask(MoveWideImmediateMask) == MOVN_x) ||
           (Mask(MoveWideImmediateMask) == MOVN_w);
  }

  // Indicate whether Rd can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rd field.
  Reg31Mode RdMode() const {
    // The following instructions use sp or wsp as Rd:
    //  Add/sub (immediate) when not setting the flags.
    //  Add/sub (extended) when not setting the flags.
@@ -260,7 +279,7 @@ class Instruction {
  // Indicate whether Rn can be the stack pointer or the zero register. This
  // does not check that the instruction actually has an Rn field.
  Reg31Mode RnMode() const {
    // The following instructions use sp or wsp as Rn:
    //  All loads and stores.
    //  Add/sub (immediate).
@@ -272,7 +291,7 @@ class Instruction {
    return Reg31IsZeroRegister;
  }

  ImmBranchType BranchType() const {
    if (IsCondBranchImm()) {
      return CondBranchType;
    } else if (IsUncondBranchImm()) {
@@ -296,55 +315,66 @@ class Instruction {
  // Patch a literal load instruction to load from 'source'.
  void SetImmLLiteral(const Instruction* source);
  // Calculate the address of a literal referred to by a load-literal
  // instruction, and return it as the specified type.
  //
  // The literal itself is safely mutable only if the backing buffer is safely
  // mutable.
  template <typename T>
  T LiteralAddress() const {
    uint64_t base_raw = reinterpret_cast<uintptr_t>(this);
    ptrdiff_t offset = ImmLLiteral() << kLiteralEntrySizeLog2;
    uint64_t address_raw = base_raw + offset;

    // Cast the address using a C-style cast. A reinterpret_cast would be
    // appropriate, but it can't cast one integral type to another.
    T address = (T)(address_raw);

    // Assert that the address can be represented by the specified type.
    VIXL_ASSERT((uint64_t)(address) == address_raw);

    return address;
  }
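  // Worked example, not part of the upstream file. Assuming
  // kLiteralEntrySizeLog2 == 2 (word-granular literal offsets), an
  // LDR (literal) at address 0x1000 with ImmLLiteral() == 4 refers to
  // 0x1000 + (4 << 2) == 0x1010:
  //
  //   const uint32_t* lit = instr->LiteralAddress<const uint32_t*>();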
  uint32_t Literal32() const {
    uint32_t literal;
    memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }

  uint64_t Literal64() const {
    uint64_t literal;
    memcpy(&literal, LiteralAddress<const void*>(), sizeof(literal));
    return literal;
  }

  float LiteralFP32() const {
    return rawbits_to_float(Literal32());
  }

  double LiteralFP64() const {
    return rawbits_to_double(Literal64());
  }

  const Instruction* NextInstruction() const {
    return this + kInstructionSize;
  }

  const Instruction* InstructionAtOffset(int64_t offset) const {
    VIXL_ASSERT(IsWordAligned(this + offset));
    return this + offset;
  }

  template<typename T> static Instruction* Cast(T src) {
    return reinterpret_cast<Instruction*>(src);
  }

  template<typename T> static const Instruction* CastConst(T src) {
    return reinterpret_cast<const Instruction*>(src);
  }
 private:
  int ImmBranch() const;

  void SetPCRelImmTarget(const Instruction* target);
  void SetBranchImmTarget(const Instruction* target);


@@ -58,7 +58,7 @@ const int KBytes = 1024;
const int MBytes = 1024 * KBytes;

#define VIXL_ABORT() printf("in %s, line %i", __FILE__, __LINE__); abort()
#ifdef VIXL_DEBUG
  #define VIXL_ASSERT(condition) assert(condition)
  #define VIXL_CHECK(condition) VIXL_ASSERT(condition)
  #define VIXL_UNIMPLEMENTED() printf("UNIMPLEMENTED\t"); VIXL_ABORT()


@@ -135,4 +135,17 @@ bool IsPowerOf2(int64_t value) {
  return (value != 0) && ((value & (value - 1)) == 0);
}
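// Illustrative check, not part of the upstream file: 8 is a power of two
// because 8 & 7 == 0, while 12 is not, because 12 & 11 == 8. The
// (value != 0) guard excludes zero, which would otherwise pass the mask test.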
unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size) {
  VIXL_ASSERT((reg_size % 8) == 0);
  int count = 0;
  for (unsigned i = 0; i < (reg_size / 16); i++) {
    if ((imm & 0xffff) == 0) {
      count++;
    }
    imm >>= 16;
  }
  return count;
}
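// Worked example, not part of the upstream file: 0x0000ffff00000000 viewed
// as four halfwords from the least significant end is 0x0000, 0x0000,
// 0xffff, 0x0000, of which three are clear:
//
//   VIXL_ASSERT(CountClearHalfWords(UINT64_C(0x0000ffff00000000), 64) == 3);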
} // namespace vixl


@@ -166,6 +166,8 @@ int CountSetBits(uint64_t value, int width);
uint64_t LowestSetBit(uint64_t value);
bool IsPowerOf2(int64_t value);
unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
// Pointer alignment
// TODO: rename/refactor to make it specific to instructions.
template<typename T>
@@ -174,14 +176,14 @@ bool IsWordAligned(T pointer) {
  return ((intptr_t)(pointer) & 3) == 0;
}
// Increment a pointer (up to 64 bits) until it has the specified alignment.
template<class T>
T AlignUp(T pointer, size_t alignment) {
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.
  uint64_t pointer_raw = (uint64_t)pointer;
  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));

  size_t align_step = (alignment - pointer_raw) % alignment;
  VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);
@@ -189,14 +191,14 @@ T AlignUp(T pointer, size_t alignment) {
  return (T)(pointer_raw + align_step);
}
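// Worked example, not part of the upstream file: aligning the integral
// value 13 up to a 4-byte boundary steps forward by (4 - 13) % 4 == 3 (the
// subtraction wraps in unsigned arithmetic), giving 16:
//
//   VIXL_ASSERT(AlignUp(UINT64_C(13), 4) == UINT64_C(16));
//
// AlignDown (below) instead steps back by pointer_raw % alignment.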
// Decrement a pointer (up to 64 bits) until it has the specified alignment.
template<class T>
T AlignDown(T pointer, size_t alignment) {
  // Use C-style casts to get static_cast behaviour for integral types (T), and
  // reinterpret_cast behaviour for other types.
  uint64_t pointer_raw = (uint64_t)pointer;
  VIXL_STATIC_ASSERT(sizeof(pointer) <= sizeof(pointer_raw));

  size_t align_step = pointer_raw % alignment;
  VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);