Merge remote-tracking branch 'remotes/pmaydell/tags/pull-target-arm-20171107' into staging

target-arm queue:
 * arm_gicv3_its: Don't abort on table save failure
 * arm_gicv3_its: Fix the VM termination in vm_change_state_handler()
 * translate.c: Fix usermode big-endian AArch32 LDREXD and STREXD
 * hw/arm: Mark the "fsl,imx31/25/6" devices with user_creatable = false
 * arm: implement cache/shareability attribute bits for PAR registers

# gpg: Signature made Tue 07 Nov 2017 13:33:58 GMT
# gpg:                using RSA key 0x3C2525ED14360CDE
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>"
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>"
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>"
# Primary key fingerprint: E1A5 C593 CD41 9DE2 8E83 15CF 3C25 25ED 1436 0CDE

* remotes/pmaydell/tags/pull-target-arm-20171107:
  hw/intc/arm_gicv3_its: Don't abort on table save failure
  hw/intc/arm_gicv3_its: Fix the VM termination in vm_change_state_handler()
  translate.c: Fix usermode big-endian AArch32 LDREXD and STREXD
  hw/arm: Mark the "fsl,imx31" device with user_creatable = false
  hw/arm: Mark the "fsl,imx25" device with user_creatable = false
  hw/arm: Mark the "fsl,imx6" device with user_creatable = false
  arm: implement cache/shareability attribute bits for PAR registers

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Commit a4f0537db0
6 changed files with 214 additions and 30 deletions
--- a/hw/arm/fsl-imx25.c
+++ b/hw/arm/fsl-imx25.c
@@ -288,8 +288,12 @@ static void fsl_imx25_class_init(ObjectClass *oc, void *data)
     DeviceClass *dc = DEVICE_CLASS(oc);
 
     dc->realize = fsl_imx25_realize;
-
     dc->desc = "i.MX25 SOC";
+    /*
+     * Reason: uses serial_hds in realize and the imx25 board does not
+     * support multiple CPUs
+     */
+    dc->user_creatable = false;
 }
 
 static const TypeInfo fsl_imx25_type_info = {
--- a/hw/arm/fsl-imx31.c
+++ b/hw/arm/fsl-imx31.c
@@ -260,8 +260,12 @@ static void fsl_imx31_class_init(ObjectClass *oc, void *data)
     DeviceClass *dc = DEVICE_CLASS(oc);
 
     dc->realize = fsl_imx31_realize;
-
     dc->desc = "i.MX31 SOC";
+    /*
+     * Reason: uses serial_hds in realize and the kzm board does not
+     * support multiple CPUs
+     */
+    dc->user_creatable = false;
 }
 
 static const TypeInfo fsl_imx31_type_info = {
--- a/hw/arm/fsl-imx6.c
+++ b/hw/arm/fsl-imx6.c
@@ -440,8 +440,9 @@ static void fsl_imx6_class_init(ObjectClass *oc, void *data)
     DeviceClass *dc = DEVICE_CLASS(oc);
 
     dc->realize = fsl_imx6_realize;
-
     dc->desc = "i.MX6 SOC";
+    /* Reason: Uses serial_hds[] in the realize() function */
+    dc->user_creatable = false;
 }
 
 static const TypeInfo fsl_imx6_type_info = {
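All three i.MX patches have the same shape: the SoC device keeps working when built by its board model, but can no longer be instantiated via -device or device_add. A minimal standalone sketch of the idea; the DeviceClass and creation check below are simplified stand-ins, not QEMU's real qdev API:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for QOM's DeviceClass. */
typedef struct DeviceClass {
    const char *desc;
    bool user_creatable;
} DeviceClass;

/* Board code instantiates the SoC directly and never hits this check;
 * only user-requested creation does, roughly like this. */
static bool user_create_device(const DeviceClass *dc)
{
    if (!dc->user_creatable) {
        fprintf(stderr, "'%s' cannot be created by the user\n", dc->desc);
        return false;
    }
    printf("created '%s'\n", dc->desc);
    return true;
}

int main(void)
{
    DeviceClass imx25 = { .desc = "i.MX25 SOC", .user_creatable = false };
    user_create_device(&imx25);    /* now rejected, as with these patches */
    return 0;
}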
--- a/hw/intc/arm_gicv3_its_kvm.c
+++ b/hw/intc/arm_gicv3_its_kvm.c
@@ -64,20 +64,16 @@ static void vm_change_state_handler(void *opaque, int running,
 {
     GICv3ITSState *s = (GICv3ITSState *)opaque;
     Error *err = NULL;
-    int ret;
 
     if (running) {
        return;
     }
 
-    ret = kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
-                            KVM_DEV_ARM_ITS_SAVE_TABLES, NULL, true, &err);
+    kvm_device_access(s->dev_fd, KVM_DEV_ARM_VGIC_GRP_CTRL,
+                      KVM_DEV_ARM_ITS_SAVE_TABLES, NULL, true, &err);
     if (err) {
         error_report_err(err);
     }
-    if (ret < 0 && ret != -EFAULT) {
-        abort();
-    }
 }
 
 static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
@@ -111,13 +107,13 @@ static void kvm_arm_its_realize(DeviceState *dev, Error **errp)
             error_free(s->migration_blocker);
             return;
         }
-    } else {
-        qemu_add_vm_change_state_handler(vm_change_state_handler, s);
     }
 
     kvm_msi_use_devid = true;
     kvm_gsi_direct_mapping = false;
     kvm_msi_via_irqfd_allowed = kvm_irqfds_enabled();
+
+    qemu_add_vm_change_state_handler(vm_change_state_handler, s);
 }
 
 /**
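Taken together, the two ITS fixes mean a failed KVM table save is reported but no longer fatal, and the change-state handler is registered whether or not migration is blocked. A compilable sketch of just the abort-vs-report difference, with a stub standing in for kvm_device_access() and QEMU's Error machinery:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Stub standing in for kvm_device_access(s->dev_fd, ...,
 * KVM_DEV_ARM_ITS_SAVE_TABLES, ...): pretend the kernel refused. */
static int save_its_tables(const char **err)
{
    *err = "KVM_DEV_ARM_ITS_SAVE_TABLES failed";
    return -ENXIO;
}

static void handler_old(void)
{
    const char *err = NULL;
    int ret = save_its_tables(&err);

    if (err) {
        fprintf(stderr, "%s\n", err);
    }
    if (ret < 0 && ret != -EFAULT) {
        abort();                       /* old behaviour: VM dies on the spot */
    }
}

static void handler_new(void)
{
    const char *err = NULL;

    save_its_tables(&err);             /* return value intentionally unused */
    if (err) {
        fprintf(stderr, "%s\n", err);  /* new behaviour: report and go on */
    }
}

int main(void)
{
    handler_new();                     /* prints the error, then returns 0 */
    return 0;
}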
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -19,17 +19,23 @@
 #define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
 
 #ifndef CONFIG_USER_ONLY
+/* Cacheability and shareability attributes for a memory access */
+typedef struct ARMCacheAttrs {
+    unsigned int attrs:8; /* as in the MAIR register encoding */
+    unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
+} ARMCacheAttrs;
+
 static bool get_phys_addr(CPUARMState *env, target_ulong address,
                           MMUAccessType access_type, ARMMMUIdx mmu_idx,
                           hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                           target_ulong *page_size, uint32_t *fsr,
-                          ARMMMUFaultInfo *fi);
+                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
 
 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                                target_ulong *page_size_ptr, uint32_t *fsr,
-                               ARMMMUFaultInfo *fi);
+                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs);
 
 /* Security attributes for an address, as returned by v8m_security_lookup. */
 typedef struct V8M_SAttributes {
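ARMCacheAttrs keeps the attributes in the same 8-bit MAIR encoding the PAR wants, plus the 2-bit SH field. A trivial standalone check of that packing (example values only; the struct shape mirrors the one added above):

#include <assert.h>
#include <stdio.h>

typedef struct ARMCacheAttrs {
    unsigned int attrs:8;        /* MAIR-format attribute byte */
    unsigned int shareability:2; /* SH field of the VMSAv8-64 PTEs */
} ARMCacheAttrs;

int main(void)
{
    ARMCacheAttrs c = { .attrs = 0xff, .shareability = 2 };
    assert(c.attrs == 0xff && c.shareability == 2);
    printf("attrs=0x%02x shareability=%u\n", c.attrs, c.shareability);
    return 0;
}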
@@ -2159,9 +2165,10 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
     uint64_t par64;
     MemTxAttrs attrs = {};
     ARMMMUFaultInfo fi = {};
+    ARMCacheAttrs cacheattrs = {};
 
-    ret = get_phys_addr(env, value, access_type, mmu_idx,
-                        &phys_addr, &attrs, &prot, &page_size, &fsr, &fi);
+    ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
+                        &prot, &page_size, &fsr, &fi, &cacheattrs);
     if (extended_addresses_enabled(env)) {
         /* fsr is a DFSR/IFSR value for the long descriptor
          * translation table format, but with WnR always clear.
@@ -2173,7 +2180,8 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
         if (!attrs.secure) {
             par64 |= (1 << 9); /* NS */
         }
-        /* We don't set the ATTR or SH fields in the PAR. */
+        par64 |= (uint64_t)cacheattrs.attrs << 56; /* ATTR */
+        par64 |= cacheattrs.shareability << 7; /* SH */
     } else {
         par64 |= 1; /* F */
         par64 |= (fsr & 0x3f) << 1; /* FS */
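On success the 64-bit PAR now reports the walk's cacheability in ATTR (bits 63:56) and shareability in SH (bits 8:7), next to the existing NS bit. A worked assembly of such a value; the physical address is a made-up example and the field positions are those in the hunk above:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t par64 = 0x80000000ULL;       /* example output address */
    uint8_t attrs = 0xff;                 /* Normal write-back, MAIR encoding */
    unsigned shareability = 2;            /* Outer Shareable */

    par64 |= 1u << 9;                     /* NS */
    par64 |= (uint64_t)attrs << 56;       /* ATTR, bits 63:56 */
    par64 |= (uint64_t)shareability << 7; /* SH, bits 8:7 */

    /* Prints PAR = 0xff00000080000300 */
    printf("PAR = 0x%016" PRIx64 "\n", par64);
    return 0;
}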
@@ -6925,7 +6933,7 @@ static bool v7m_read_half_insn(ARMCPU *cpu, ARMMMUIdx mmu_idx,
         return false;
     }
     if (get_phys_addr(env, addr, MMU_INST_FETCH, mmu_idx,
-                      &physaddr, &attrs, &prot, &page_size, &fsr, &fi)) {
+                      &physaddr, &attrs, &prot, &page_size, &fsr, &fi, NULL)) {
         /* the MPU lookup failed */
         env->v7m.cfsr[env->v7m.secure] |= R_V7M_CFSR_IACCVIOL_MASK;
         armv7m_nvic_set_pending(env->nvic, ARMV7M_EXCP_MEM, env->v7m.secure);
@@ -8207,7 +8215,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
         int ret;
 
         ret = get_phys_addr_lpae(env, addr, 0, ARMMMUIdx_S2NS, &s2pa,
-                                 &txattrs, &s2prot, &s2size, fsr, fi);
+                                 &txattrs, &s2prot, &s2size, fsr, fi, NULL);
         if (ret) {
             fi->s2addr = addr;
             fi->stage2 = true;
@@ -8608,11 +8616,41 @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
     return true;
 }
 
+/* Translate from the 4-bit stage 2 representation of
+ * memory attributes (without cache-allocation hints) to
+ * the 8-bit representation of the stage 1 MAIR registers
+ * (which includes allocation hints).
+ *
+ * ref: shared/translation/attrs/S2AttrDecode()
+ *      .../S2ConvertAttrsHints()
+ */
+static uint8_t convert_stage2_attrs(CPUARMState *env, uint8_t s2attrs)
+{
+    uint8_t hiattr = extract32(s2attrs, 2, 2);
+    uint8_t loattr = extract32(s2attrs, 0, 2);
+    uint8_t hihint = 0, lohint = 0;
+
+    if (hiattr != 0) { /* normal memory */
+        if ((env->cp15.hcr_el2 & HCR_CD) != 0) { /* cache disabled */
+            hiattr = loattr = 1; /* non-cacheable */
+        } else {
+            if (hiattr != 1) { /* Write-through or write-back */
+                hihint = 3; /* RW allocate */
+            }
+            if (loattr != 1) { /* Write-through or write-back */
+                lohint = 3; /* RW allocate */
+            }
+        }
+    }
+
+    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
+}
+
 static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
                                target_ulong *page_size_ptr, uint32_t *fsr,
-                               ARMMMUFaultInfo *fi)
+                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
 {
     ARMCPU *cpu = arm_env_get_cpu(env);
     CPUState *cs = CPU(cpu);
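A worked check of the convert_stage2_attrs() logic outside QEMU: with HCR_EL2.CD clear, a stage 2 attribute of 0xf (Normal, write-back inner and outer) widens to MAIR byte 0xff (write-back with R/W-allocate hints). The sketch below re-implements extract32() locally and replaces the HCR_EL2.CD test with a plain flag so it compiles on its own:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Local re-implementation of QEMU's extract32(). */
static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

/* Same decision tree as the new convert_stage2_attrs(). */
static uint8_t convert_stage2_attrs(int cache_disabled, uint8_t s2attrs)
{
    uint8_t hiattr = extract32(s2attrs, 2, 2);
    uint8_t loattr = extract32(s2attrs, 0, 2);
    uint8_t hihint = 0, lohint = 0;

    if (hiattr != 0) {                /* normal memory */
        if (cache_disabled) {
            hiattr = loattr = 1;      /* non-cacheable */
        } else {
            if (hiattr != 1) {        /* write-through or write-back */
                hihint = 3;           /* RW allocate */
            }
            if (loattr != 1) {
                lohint = 3;
            }
        }
    }
    return (hiattr << 6) | (hihint << 4) | (loattr << 2) | lohint;
}

int main(void)
{
    assert(convert_stage2_attrs(0, 0xf) == 0xff); /* WB/WB -> WB + RW-alloc */
    assert(convert_stage2_attrs(1, 0xf) == 0x44); /* cache disabled -> NC */
    assert(convert_stage2_attrs(0, 0x0) == 0x00); /* device stays device */
    printf("convert_stage2_attrs checks passed\n");
    return 0;
}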
@@ -8929,6 +8967,21 @@ static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
          */
         txattrs->secure = false;
     }
+
+    if (cacheattrs != NULL) {
+        if (mmu_idx == ARMMMUIdx_S2NS) {
+            cacheattrs->attrs = convert_stage2_attrs(env,
+                                                     extract32(attrs, 0, 4));
+        } else {
+            /* Index into MAIR registers for cache attributes */
+            uint8_t attrindx = extract32(attrs, 0, 3);
+            uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
+            assert(attrindx <= 7);
+            cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
+        }
+        cacheattrs->shareability = extract32(attrs, 6, 2);
+    }
+
     *phys_ptr = descaddr;
     *page_size_ptr = page_size;
     return false;
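For stage 1, the attribute byte is read straight out of MAIR_ELx using the descriptor's 3-bit AttrIndx, eight bits per index. A standalone illustration of that indexing; the MAIR value here is an arbitrary example, not read from a real CPU:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for QEMU's extract64(). */
static uint64_t extract64(uint64_t value, int start, int length)
{
    return (value >> start) & (~0ULL >> (64 - length));
}

int main(void)
{
    /* Example MAIR: Attr0 = 0xff (Normal WB), Attr1 = 0x44 (Normal NC),
     * Attr2 = 0x04 (Device nGnRE); remaining attribute slots left zero. */
    uint64_t mair = 0x0444ffULL;

    for (int attrindx = 0; attrindx <= 2; attrindx++) {
        unsigned attrs = (unsigned)extract64(mair, attrindx * 8, 8);
        printf("AttrIndx %d -> MAIR byte 0x%02x\n", attrindx, attrs);
    }
    return 0;
}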
@@ -9490,6 +9543,93 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
     return false;
 }
 
+/* Combine either inner or outer cacheability attributes for normal
+ * memory, according to table D4-42 and pseudocode procedure
+ * CombineS1S2AttrHints() of ARM DDI 0487B.b (the ARMv8 ARM).
+ *
+ * NB: only stage 1 includes allocation hints (RW bits), leading to
+ * some asymmetry.
+ */
+static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
+{
+    if (s1 == 4 || s2 == 4) {
+        /* non-cacheable has precedence */
+        return 4;
+    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
+        /* stage 1 write-through takes precedence */
+        return s1;
+    } else if (extract32(s2, 2, 2) == 2) {
+        /* stage 2 write-through takes precedence, but the allocation hint
+         * is still taken from stage 1
+         */
+        return (2 << 2) | extract32(s1, 0, 2);
+    } else { /* write-back */
+        return s1;
+    }
+}
+
+/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
+ * and CombineS1S2Desc()
+ *
+ * @s1: Attributes from stage 1 walk
+ * @s2: Attributes from stage 2 walk
+ */
+static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
+{
+    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
+    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
+    ARMCacheAttrs ret;
+
+    /* Combine shareability attributes (table D4-43) */
+    if (s1.shareability == 2 || s2.shareability == 2) {
+        /* if either are outer-shareable, the result is outer-shareable */
+        ret.shareability = 2;
+    } else if (s1.shareability == 3 || s2.shareability == 3) {
+        /* if either are inner-shareable, the result is inner-shareable */
+        ret.shareability = 3;
+    } else {
+        /* both non-shareable */
+        ret.shareability = 0;
+    }
+
+    /* Combine memory type and cacheability attributes */
+    if (s1hi == 0 || s2hi == 0) {
+        /* Device has precedence over normal */
+        if (s1lo == 0 || s2lo == 0) {
+            /* nGnRnE has precedence over anything */
+            ret.attrs = 0;
+        } else if (s1lo == 4 || s2lo == 4) {
+            /* non-Reordering has precedence over Reordering */
+            ret.attrs = 4;  /* nGnRE */
+        } else if (s1lo == 8 || s2lo == 8) {
+            /* non-Gathering has precedence over Gathering */
+            ret.attrs = 8;  /* nGRE */
+        } else {
+            ret.attrs = 0xc; /* GRE */
+        }
+
+        /* Any location for which the resultant memory type is any
+         * type of Device memory is always treated as Outer Shareable.
+         */
+        ret.shareability = 2;
+    } else { /* Normal memory */
+        /* Outer/inner cacheability combine independently */
+        ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
+                  | combine_cacheattr_nibble(s1lo, s2lo);
+
+        if (ret.attrs == 0x44) {
+            /* Any location for which the resultant memory type is Normal
+             * Inner Non-cacheable, Outer Non-cacheable is always treated
+             * as Outer Shareable.
+             */
+            ret.shareability = 2;
+        }
+    }
+
+    return ret;
+}
+
+
 /* get_phys_addr - get the physical address for this virtual address
  *
  * Find the physical address corresponding to the given virtual address,
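The combination rules are easiest to trust with a few concrete checks: non-cacheable beats write-back, Device beats Normal, and a Normal non-cacheable result is forced Outer Shareable. A self-contained harness exercising the same logic as the two functions above, with extract32() and a local struct standing in for QEMU's (attribute encodings as in the hunk):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t extract32(uint32_t value, int start, int length)
{
    return (value >> start) & (~0u >> (32 - length));
}

typedef struct { uint8_t attrs; uint8_t shareability; } CacheAttrs;

/* Same decision tree as combine_cacheattr_nibble() above. */
static uint8_t combine_nibble(uint8_t s1, uint8_t s2)
{
    if (s1 == 4 || s2 == 4) {
        return 4;                              /* non-cacheable wins */
    } else if (extract32(s1, 2, 2) == 0 || extract32(s1, 2, 2) == 2) {
        return s1;                             /* S1 write-through wins */
    } else if (extract32(s2, 2, 2) == 2) {
        return (2 << 2) | extract32(s1, 0, 2); /* S2 WT, S1's hints */
    } else {
        return s1;                             /* both write-back */
    }
}

static CacheAttrs combine(CacheAttrs s1, CacheAttrs s2)
{
    uint8_t s1lo = extract32(s1.attrs, 0, 4), s2lo = extract32(s2.attrs, 0, 4);
    uint8_t s1hi = extract32(s1.attrs, 4, 4), s2hi = extract32(s2.attrs, 4, 4);
    CacheAttrs ret;

    if (s1.shareability == 2 || s2.shareability == 2) {
        ret.shareability = 2;                  /* outer-shareable wins */
    } else if (s1.shareability == 3 || s2.shareability == 3) {
        ret.shareability = 3;                  /* then inner-shareable */
    } else {
        ret.shareability = 0;                  /* both non-shareable */
    }

    if (s1hi == 0 || s2hi == 0) {              /* Device wins over Normal */
        if (s1lo == 0 || s2lo == 0) {
            ret.attrs = 0;                     /* nGnRnE */
        } else if (s1lo == 4 || s2lo == 4) {
            ret.attrs = 4;                     /* nGnRE */
        } else if (s1lo == 8 || s2lo == 8) {
            ret.attrs = 8;                     /* nGRE */
        } else {
            ret.attrs = 0xc;                   /* GRE */
        }
        ret.shareability = 2;                  /* Device is Outer Shareable */
    } else {
        ret.attrs = combine_nibble(s1hi, s2hi) << 4
                  | combine_nibble(s1lo, s2lo);
        if (ret.attrs == 0x44) {
            ret.shareability = 2;              /* Normal-NC forced OSH */
        }
    }
    return ret;
}

int main(void)
{
    CacheAttrs wb = { 0xff, 0 }, nc = { 0x44, 0 }, dev = { 0x04, 0 };

    assert(combine(wb, nc).attrs == 0x44);     /* NC beats WB */
    assert(combine(wb, nc).shareability == 2); /* and is forced OSH */
    assert(combine(wb, dev).attrs == 0x04);    /* Device beats Normal */
    assert(combine(wb, wb).attrs == 0xff);     /* WB + WB stays WB */
    printf("combine_cacheattrs checks passed\n");
    return 0;
}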
@@ -9514,12 +9654,14 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
  * @prot: set to the permissions for the page containing phys_ptr
  * @page_size: set to the size of the page containing phys_ptr
  * @fsr: set to the DFSR/IFSR value on failure
  * @fi: set to fault info if the translation fails
+ * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
  */
 static bool get_phys_addr(CPUARMState *env, target_ulong address,
                           MMUAccessType access_type, ARMMMUIdx mmu_idx,
                           hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
                           target_ulong *page_size, uint32_t *fsr,
-                          ARMMMUFaultInfo *fi)
+                          ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
 {
     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
         /* Call ourselves recursively to do the stage 1 and then stage 2
@@ -9529,10 +9671,11 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
         hwaddr ipa;
         int s2_prot;
         int ret;
+        ARMCacheAttrs cacheattrs2 = {};
 
         ret = get_phys_addr(env, address, access_type,
                             stage_1_mmu_idx(mmu_idx), &ipa, attrs,
-                            prot, page_size, fsr, fi);
+                            prot, page_size, fsr, fi, cacheattrs);
 
         /* If S1 fails or S2 is disabled, return early. */
         if (ret || regime_translation_disabled(env, ARMMMUIdx_S2NS)) {
@@ -9543,10 +9686,17 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
         /* S1 is done. Now do S2 translation. */
         ret = get_phys_addr_lpae(env, ipa, access_type, ARMMMUIdx_S2NS,
                                  phys_ptr, attrs, &s2_prot,
-                                 page_size, fsr, fi);
+                                 page_size, fsr, fi,
+                                 cacheattrs != NULL ? &cacheattrs2 : NULL);
         fi->s2addr = ipa;
         /* Combine the S1 and S2 perms. */
         *prot &= s2_prot;
+
+        /* Combine the S1 and S2 cache attributes, if needed */
+        if (!ret && cacheattrs != NULL) {
+            *cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
+        }
+
         return ret;
     } else {
         /*
@@ -9617,7 +9767,7 @@ static bool get_phys_addr(CPUARMState *env, target_ulong address,
 
     if (regime_using_lpae_format(env, mmu_idx)) {
         return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
-                                  attrs, prot, page_size, fsr, fi);
+                                  attrs, prot, page_size, fsr, fi, cacheattrs);
     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
         return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
                                 attrs, prot, page_size, fsr, fi);
@@ -9645,7 +9795,7 @@ bool arm_tlb_fill(CPUState *cs, vaddr address,
 
     ret = get_phys_addr(env, address, access_type,
                         core_to_arm_mmu_idx(env, mmu_idx), &phys_addr,
-                        &attrs, &prot, &page_size, fsr, fi);
+                        &attrs, &prot, &page_size, fsr, fi, NULL);
     if (!ret) {
         /* Map a single [sub]page. */
         phys_addr &= TARGET_PAGE_MASK;
@@ -9674,7 +9824,7 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
     *attrs = (MemTxAttrs) {};
 
     ret = get_phys_addr(env, addr, 0, mmu_idx, &phys_addr,
-                        attrs, &prot, &page_size, &fsr, &fi);
+                        attrs, &prot, &page_size, &fsr, &fi, NULL);
 
     if (ret) {
         return -1;
--- a/target/arm/translate.c
+++ b/target/arm/translate.c
@@ -7903,9 +7903,27 @@ static void gen_load_exclusive(DisasContext *s, int rt, int rt2,
         TCGv_i32 tmp2 = tcg_temp_new_i32();
         TCGv_i64 t64 = tcg_temp_new_i64();
 
-        gen_aa32_ld_i64(s, t64, addr, get_mem_index(s), opc);
+        /* For AArch32, architecturally the 32-bit word at the lowest
+         * address is always Rt and the one at addr+4 is Rt2, even if
+         * the CPU is big-endian. That means we don't want to do a
+         * gen_aa32_ld_i64(), which invokes gen_aa32_frob64() as if
+         * for an architecturally 64-bit access, but instead do a
+         * 64-bit access using MO_BE if appropriate and then split
+         * the two halves.
+         * This only makes a difference for BE32 user-mode, where
+         * frob64() must not flip the two halves of the 64-bit data
+         * but this code must treat BE32 user-mode like BE32 system.
+         */
+        TCGv taddr = gen_aa32_addr(s, addr, opc);
+
+        tcg_gen_qemu_ld_i64(t64, taddr, get_mem_index(s), opc);
+        tcg_temp_free(taddr);
         tcg_gen_mov_i64(cpu_exclusive_val, t64);
-        tcg_gen_extr_i64_i32(tmp, tmp2, t64);
+        if (s->be_data == MO_BE) {
+            tcg_gen_extr_i64_i32(tmp2, tmp, t64);
+        } else {
+            tcg_gen_extr_i64_i32(tmp, tmp2, t64);
+        }
         tcg_temp_free_i64(t64);
 
         store_reg(s, rt2, tmp2);
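The point of the LDREXD change is word order, not byte order: even on a big-endian CPU, Rt must receive the 32-bit word at the lower address and Rt2 the word at addr+4. A small host-side model of splitting one 64-bit load into the two register halves for each endianness (pure illustration, no TCG involved):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Model: 8 bytes of guest memory holding the doubleword for LDREXD. */
static const uint8_t mem[8] = { 0x00, 0x11, 0x22, 0x33,
                                0x44, 0x55, 0x66, 0x77 };

static uint64_t load64(int big_endian)
{
    uint64_t v = 0;
    for (int i = 0; i < 8; i++) {       /* assemble per access endianness */
        v = big_endian ? (v << 8) | mem[i]
                       : v | ((uint64_t)mem[i] << (8 * i));
    }
    return v;
}

int main(void)
{
    /* Little-endian: low half of the 64-bit value is the low-address word. */
    uint64_t t64 = load64(0);
    uint32_t rt = (uint32_t)t64, rt2 = (uint32_t)(t64 >> 32);
    assert(rt == 0x33221100 && rt2 == 0x77665544);

    /* Big-endian: the low-address word lands in the HIGH half, so the
     * extr arguments must be swapped, as the patch does for MO_BE. */
    t64 = load64(1);
    rt = (uint32_t)(t64 >> 32);         /* swapped split */
    rt2 = (uint32_t)t64;
    assert(rt == 0x00112233 && rt2 == 0x44556677);

    printf("Rt always gets the word at the lower address\n");
    return 0;
}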
@@ -7954,15 +7972,26 @@ static void gen_store_exclusive(DisasContext *s, int rd, int rt, int rt2,
         TCGv_i64 n64 = tcg_temp_new_i64();
 
         t2 = load_reg(s, rt2);
-        tcg_gen_concat_i32_i64(n64, t1, t2);
+        /* For AArch32, architecturally the 32-bit word at the lowest
+         * address is always Rt and the one at addr+4 is Rt2, even if
+         * the CPU is big-endian. Since we're going to treat this as a
+         * single 64-bit BE store, we need to put the two halves in the
+         * opposite order for BE to LE, so that they end up in the right
+         * places.
+         * We don't want gen_aa32_frob64() because that does the wrong
+         * thing for BE32 usermode.
+         */
+        if (s->be_data == MO_BE) {
+            tcg_gen_concat_i32_i64(n64, t2, t1);
+        } else {
+            tcg_gen_concat_i32_i64(n64, t1, t2);
+        }
         tcg_temp_free_i32(t2);
-        gen_aa32_frob64(s, n64);
 
         tcg_gen_atomic_cmpxchg_i64(o64, taddr, cpu_exclusive_val, n64,
                                    get_mem_index(s), opc);
         tcg_temp_free_i64(n64);
 
-        gen_aa32_frob64(s, o64);
         tcg_gen_setcond_i64(TCG_COND_NE, o64, o64, cpu_exclusive_val);
         tcg_gen_extrl_i64_i32(t0, o64);
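The store side is the mirror image: to perform one 64-bit cmpxchg, the two register words must be concatenated so that Rt still ends up at the lower address after the store's byte swap. A matching host-side check, again only modelling the word placement:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t t1 = 0x11111111;  /* Rt: must land at the lower address */
    uint32_t t2 = 0x22222222;  /* Rt2: must land at addr + 4 */
    uint64_t n64;

    /* Little-endian store: the low half is written first, so Rt goes low. */
    n64 = (uint64_t)t2 << 32 | t1;
    assert((uint32_t)n64 == t1);

    /* Big-endian store writes the HIGH half to the lower address, so the
     * operands are concatenated the other way round, as the patch does
     * for MO_BE. */
    n64 = (uint64_t)t1 << 32 | t2;
    assert((uint32_t)(n64 >> 32) == t1);

    printf("concat order swapped for MO_BE keeps Rt at the low address\n");
    return 0;
}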