target/riscv: rvv-1.0: add evl parameter to vext_ldst_us()
Add support for the vector unit-stride mask load/store instructions (vlm.v, vsm.v), which have: evl (effective vector length) = ceil(env->vl / 8). The new instructions operate the same as unmasked byte loads and stores. Add an evl parameter so that vext_ldst_us() can be reused.

Signed-off-by: Frank Chang <frank.chang@sifive.com>
Reviewed-by: Alistair Francis <alistair.francis@wdc.com>
Message-Id: <20211210075704.23951-74-frank.chang@sifive.com>
Signed-off-by: Alistair Francis <alistair.francis@wdc.com>
This commit is contained in:
parent 34a2c2d81a
commit 5c89e9c096

1 changed file with 18 additions and 18 deletions
@@ -279,15 +279,15 @@ GEN_VEXT_ST_STRIDE(vsse64_v, int64_t, ste_d)
 /* unmasked unit-stride load and store operation*/
 static void
 vext_ldst_us(void *vd, target_ulong base, CPURISCVState *env, uint32_t desc,
-             vext_ldst_elem_fn *ldst_elem,
-             uint32_t esz, uintptr_t ra, MMUAccessType access_type)
+             vext_ldst_elem_fn *ldst_elem, uint32_t esz, uint32_t evl,
+             uintptr_t ra, MMUAccessType access_type)
 {
     uint32_t i, k;
     uint32_t nf = vext_nf(desc);
     uint32_t max_elems = vext_max_elems(desc, esz);

     /* load bytes from guest memory */
-    for (i = env->vstart; i < env->vl; i++, env->vstart++) {
+    for (i = env->vstart; i < evl; i++, env->vstart++) {
         k = 0;
         while (k < nf) {
             target_ulong addr = base + ((i * nf + k) << esz);
@@ -316,7 +316,7 @@ void HELPER(NAME)(void *vd, void *v0, target_ulong base, \
                   CPURISCVState *env, uint32_t desc)                    \
 {                                                                       \
     vext_ldst_us(vd, base, env, desc, LOAD_FN,                          \
-                 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_LOAD);          \
+                 ctzl(sizeof(ETYPE)), env->vl, GETPC(), MMU_DATA_LOAD); \
 }

 GEN_VEXT_LD_US(vle8_v, int8_t, lde_b)
@@ -324,20 +324,20 @@ GEN_VEXT_LD_US(vle16_v, int16_t, lde_h)
 GEN_VEXT_LD_US(vle32_v, int32_t, lde_w)
 GEN_VEXT_LD_US(vle64_v, int64_t, lde_d)

-#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN)                           \
-void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
-                         CPURISCVState *env, uint32_t desc)             \
-{                                                                       \
-    uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE));             \
-    vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN,  \
-                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE);     \
-}                                                                       \
-                                                                        \
-void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
-                  CPURISCVState *env, uint32_t desc)                    \
-{                                                                       \
-    vext_ldst_us(vd, base, env, desc, STORE_FN,                         \
-                 ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE);         \
+#define GEN_VEXT_ST_US(NAME, ETYPE, STORE_FN)                           \
+void HELPER(NAME##_mask)(void *vd, void *v0, target_ulong base,         \
+                         CPURISCVState *env, uint32_t desc)             \
+{                                                                       \
+    uint32_t stride = vext_nf(desc) << ctzl(sizeof(ETYPE));             \
+    vext_ldst_stride(vd, v0, base, stride, env, desc, false, STORE_FN,  \
+                     ctzl(sizeof(ETYPE)), GETPC(), MMU_DATA_STORE);     \
+}                                                                       \
+                                                                        \
+void HELPER(NAME)(void *vd, void *v0, target_ulong base,                \
+                  CPURISCVState *env, uint32_t desc)                    \
+{                                                                       \
+    vext_ldst_us(vd, base, env, desc, STORE_FN,                         \
+                 ctzl(sizeof(ETYPE)), env->vl, GETPC(), MMU_DATA_STORE); \
 }

 GEN_VEXT_ST_US(vse8_v, int8_t, ste_b)
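For context, the commit message gives evl = ceil(env->vl / 8), i.e. one mask byte per eight elements, so for example vl = 10 yields evl = 2. A minimal sketch (not part of this patch) of how unit-stride mask load/store helpers could call the new signature is below; the vlm_v/vsm_v helper names are assumptions based on the commit message, while lde_b/ste_b and esz = 0 (byte elements) follow the existing byte load/store helpers in this file.

/*
 * Sketch only: unit-stride mask load/store helpers built on the new
 * vext_ldst_us() signature.  Helper names are assumptions based on the
 * commit message; they are not added by this patch.
 */
void HELPER(vlm_v)(void *vd, void *v0, target_ulong base,
                   CPURISCVState *env, uint32_t desc)
{
    /* evl = ceil(vl / 8): one mask byte covers eight elements */
    uint32_t evl = (env->vl + 7) >> 3;
    /* EEW = 8, so esz = ctzl(sizeof(int8_t)) = 0 */
    vext_ldst_us(vd, base, env, desc, lde_b,
                 0, evl, GETPC(), MMU_DATA_LOAD);
}

void HELPER(vsm_v)(void *vd, void *v0, target_ulong base,
                   CPURISCVState *env, uint32_t desc)
{
    uint32_t evl = (env->vl + 7) >> 3;
    vext_ldst_us(vd, base, env, desc, ste_b,
                 0, evl, GETPC(), MMU_DATA_STORE);
}

The regular vle*/vse* helpers in the diff above simply pass env->vl as evl, so their behaviour is unchanged; only the mask variants substitute the rounded-up byte count.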