tcg/loongarch64: Support softmmu unaligned accesses

Test the final byte of an unaligned access.
Use BSTRINS.D to clear the range of bits, rather than AND.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
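
As an illustration of the first point, the page-cross check can be modeled in plain C. This is a sketch only, not QEMU code: TARGET_PAGE_BITS == 12 and page_of() are assumptions made for the example.

#include <stdbool.h>
#include <stdint.h>

#define TARGET_PAGE_BITS 12                 /* assumed: 4 KiB pages */

/* Hypothetical helper: page number of an address. */
static uint64_t page_of(uint64_t addr)
{
    return addr >> TARGET_PAGE_BITS;
}

/*
 * With a_bits < s_bits, the patch biases the address by s_mask - a_mask
 * before the TLB compare.  For an address aligned to 1 << a_bits, the
 * biased address lies in the page of the access's final byte, so a
 * page-crossing access fails the compare and takes the slow path.  The
 * bias is a multiple of 1 << a_bits, so the alignment bits checked by
 * the same compare are left intact.
 */
static bool crosses_page(uint64_t addr, unsigned a_bits, unsigned s_bits)
{
    unsigned a_mask = (1u << a_bits) - 1;
    unsigned s_mask = (1u << s_bits) - 1;

    return page_of(addr + (s_mask - a_mask)) != page_of(addr);
}

For example, an 8-byte load (s_bits = 3) with byte alignment (a_bits = 0) at address 0xffd gives page_of(0xffd + 7) != page_of(0xffd), so the fast path is skipped and the slow path completes the access.

The change to prepare_host_addr():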
@@ -848,7 +848,6 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
     int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
     int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
-    tcg_target_long compare_mask;
 
     ldst = new_ldst_label(s);
     ldst->is_ld = is_ld;
@@ -872,14 +871,20 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, HostAddress *h,
     tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
                offsetof(CPUTLBEntry, addend));
 
-    /* We don't support unaligned accesses. */
+    /*
+     * For aligned accesses, we check the first byte and include the alignment
+     * bits within the address. For unaligned access, we check that we don't
+     * cross pages using the address of the last byte of the access.
+     */
     if (a_bits < s_bits) {
-        a_bits = s_bits;
+        unsigned a_mask = (1u << a_bits) - 1;
+        unsigned s_mask = (1u << s_bits) - 1;
+        tcg_out_addi(s, TCG_TYPE_TL, TCG_REG_TMP1, addr_reg, s_mask - a_mask);
+    } else {
+        tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_TMP1, addr_reg);
     }
-    /* Clear the non-page, non-alignment bits from the address. */
-    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
-    tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
-    tcg_out_opc_and(s, TCG_REG_TMP1, TCG_REG_TMP1, addr_reg);
+    tcg_out_opc_bstrins_d(s, TCG_REG_TMP1, TCG_REG_ZERO,
+                          a_bits, TARGET_PAGE_BITS - 1);
 
     /* Compare masked address with the TLB entry. */
     ldst->label_ptr[0] = s->code_ptr;
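
As for the second point, BSTRINS.D from $zero deposits zeros over bits [a_bits, TARGET_PAGE_BITS - 1], which is exactly what the removed MOVI+AND pair computed, while leaving the page number and the low alignment bits in place for the TLB comparison; it also avoids materializing the 64-bit compare_mask constant. A minimal model of the equivalence in plain C, again assuming TARGET_PAGE_BITS == 12:

#include <assert.h>
#include <stdint.h>

#define TARGET_PAGE_BITS 12   /* assumed, for illustration */

/* Old sequence: MOVI compare_mask, then AND with the address. */
static uint64_t mask_with_and(uint64_t addr, unsigned a_bits)
{
    int64_t compare_mask = ~((1ll << TARGET_PAGE_BITS) - 1)  /* TARGET_PAGE_MASK */
                         | ((1ll << a_bits) - 1);
    return addr & compare_mask;
}

/* New sequence: BSTRINS.D from $zero deposits zeros into the bit field
 * [a_bits, TARGET_PAGE_BITS - 1] in a single instruction. */
static uint64_t mask_with_bstrins(uint64_t addr, unsigned a_bits)
{
    uint64_t field = ((1ull << TARGET_PAGE_BITS) - 1) & ~((1ull << a_bits) - 1);
    return addr & ~field;
}

int main(void)
{
    for (unsigned a_bits = 0; a_bits <= 3; a_bits++) {
        assert(mask_with_and(0x123456789abcull, a_bits)
               == mask_with_bstrins(0x123456789abcull, a_bits));
    }
    return 0;
}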