tcg/i386: Use atom_and_align_for_opc

No change to the ultimate load/store routines yet, so some atomicity
conditions are not yet honored; this plumbs the change to alignment
through the relevant functions.
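
For orientation: atom_and_align_for_opc() reconciles the alignment and
atomicity requested by the MemOp with what the host can provide, and the
result is now carried in HostAddress. A minimal sketch of the shape of
that interface, assuming the tcg-internal definitions of this era (the
field comments and @host_atom/@allow_two_ops descriptions here are
illustrative, not authoritative):

    /* Pair of lg2 sizes produced by atom_and_align_for_opc(). */
    typedef struct TCGAtomAlign {
        MemOp atom;   /* lg2 bytes of atomicity required */
        MemOp align;  /* lg2 bytes of alignment to use */
    } TCGAtomAlign;

    /*
     * @host_atom describes the atomicity the host provides for an
     * aligned access (MO_ATOM_IFALIGN in the call below); @allow_two_ops
     * permits implementing the access as two smaller host operations.
     */
    TCGAtomAlign atom_and_align_for_opc(TCGContext *s, MemOp opc,
                                        MemOp host_atom, bool allow_two_ops);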

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson, 2023-04-17 09:33:08 +02:00
Commit: 1c5322d90c (parent: e63b8a2983)

@@ -1774,6 +1774,7 @@ typedef struct {
     int index;
     int ofs;
     int seg;
+    TCGAtomAlign aa;
 } HostAddress;
 
 bool tcg_target_has_memory_bswap(MemOp memop)
@@ -1895,8 +1896,18 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
 {
     TCGLabelQemuLdst *ldst = NULL;
     MemOp opc = get_memop(oi);
-    unsigned a_bits = get_alignment_bits(opc);
-    unsigned a_mask = (1 << a_bits) - 1;
+    unsigned a_mask;
+
+#ifdef CONFIG_SOFTMMU
+    h->index = TCG_REG_L0;
+    h->ofs = 0;
+    h->seg = 0;
+#else
+    *h = x86_guest_base;
+#endif
+    h->base = addrlo;
+    h->aa = atom_and_align_for_opc(s, opc, MO_ATOM_IFALIGN, false);
+    a_mask = (1 << h->aa.align) - 1;
 
 #ifdef CONFIG_SOFTMMU
     int cmp_ofs = is_ld ? offsetof(CPUTLBEntry, addr_read)
@@ -1946,7 +1957,7 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
      * copy the address and mask.  For lesser alignments, check that we don't
      * cross pages for the complete access.
      */
-    if (a_bits >= s_bits) {
+    if (a_mask >= s_mask) {
         tcg_out_mov(s, ttype, TCG_REG_L1, addrlo);
     } else {
         tcg_out_modrm_offset(s, OPC_LEA + trexw, TCG_REG_L1,
@@ -1977,13 +1988,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     /* TLB Hit. */
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_L0, TCG_REG_L0,
                offsetof(CPUTLBEntry, addend));
-
-    *h = (HostAddress) {
-        .base = addrlo,
-        .index = TCG_REG_L0,
-    };
 #else
-    if (a_bits) {
+    if (a_mask) {
         ldst = new_ldst_label(s);
 
         ldst->is_ld = is_ld;
@@ -1997,9 +2003,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
         ldst->label_ptr[0] = s->code_ptr;
         s->code_ptr += 4;
     }
-
-    *h = x86_guest_base;
-    h->base = addrlo;
 #endif
 
     return ldst;
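
A note on the a_bits -> a_mask conversion above: x -> (1 << x) - 1 is
strictly monotonic, so comparing masks gives the same answer as comparing
bit counts, and a nonzero mask means a nonzero bit count. A tiny
standalone check of that claim (not part of the patch):

    #include <assert.h>

    int main(void)
    {
        for (unsigned a_bits = 0; a_bits < 8; a_bits++) {
            for (unsigned s_bits = 0; s_bits < 8; s_bits++) {
                unsigned a_mask = (1u << a_bits) - 1;
                unsigned s_mask = (1u << s_bits) - 1;
                /* Mask comparison agrees with bit-count comparison. */
                assert((a_mask >= s_mask) == (a_bits >= s_bits));
                /* Nonzero mask iff nonzero alignment bits. */
                assert((a_mask != 0) == (a_bits != 0));
            }
        }
        return 0;
    }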