From a78b25fa71f1d2d9bcfdf2026743784e12efeeac Mon Sep 17 00:00:00 2001
From: Peter Maydell <peter.maydell@linaro.org>
Date: Mon, 28 Jun 2021 14:58:27 +0100
Subject: [PATCH] target/arm: Implement MVE VSRI, VSLI

Implement the MVE VSRI and VSLI insns, which perform a
shift-and-insert operation.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-11-peter.maydell@linaro.org
---
 target/arm/helper-mve.h    |  8 ++++++++
 target/arm/mve.decode      |  9 ++++++++
 target/arm/mve_helper.c    | 42 ++++++++++++++++++++++++++++++++++++++
 target/arm/translate-mve.c |  3 +++
 4 files changed, 62 insertions(+)

diff --git a/target/arm/helper-mve.h b/target/arm/helper-mve.h
index 8af0e7fd8c..e452d2ef7a 100644
--- a/target/arm/helper-mve.h
+++ b/target/arm/helper-mve.h
@@ -396,3 +396,11 @@ DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vsrib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsrih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
diff --git a/target/arm/mve.decode b/target/arm/mve.decode
index 6e6032b25a..c3b5366617 100644
--- a/target/arm/mve.decode
+++ b/target/arm/mve.decode
@@ -371,3 +371,12 @@ VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
 
 VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
 VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
+
+# Shift-and-insert
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_b
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_h
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
+
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w
diff --git a/target/arm/mve_helper.c b/target/arm/mve_helper.c
index 8798e77cba..24336d1d28 100644
--- a/target/arm/mve_helper.c
+++ b/target/arm/mve_helper.c
@@ -1251,6 +1251,48 @@ DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
 DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
 DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
 
+/* Shift-and-insert; we always work with 64 bits at a time */
+#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN)                    \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,             \
+                                void *vm, uint32_t shift)               \
+    {                                                                   \
+        uint64_t *d = vd, *m = vm;                                      \
+        uint16_t mask;                                                  \
+        uint64_t shiftmask;                                             \
+        unsigned e;                                                     \
+        if (shift == 0 || shift == ESIZE * 8) {                         \
+            /*                                                          \
+             * Only VSLI can shift by 0; only VSRI can shift by <dt>.   \
+             * The generic logic would give the right answer for 0 but  \
+             * fails for <dt>.                                          \
+             */                                                         \
+            goto done;                                                  \
+        }                                                               \
+        assert(shift < ESIZE * 8);                                      \
+        mask = mve_element_mask(env);                                   \
+        /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */     \
+        shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift));     \
+        for (e = 0; e < 16 / 8; e++, mask >>= 8) {                      \
+            uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) |       \
+                (d[H8(e)] & ~shiftmask);                                \
+            mergemask(&d[H8(e)], r, mask);                              \
+        }                                                               \
+done:                                                                   \
+        mve_advance_vpt(env);                                           \
+    }
+
+#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
+#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
+#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
+#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))
+
+DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
+DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
+DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
+
 /*
  * Long shifts taking half-sized inputs from top or bottom of the input
  * vector and producing a double-width result. ESIZE, TYPE are for
diff --git a/target/arm/translate-mve.c b/target/arm/translate-mve.c
index 044462c375..b031f84966 100644
--- a/target/arm/translate-mve.c
+++ b/target/arm/translate-mve.c
@@ -894,6 +894,9 @@ DO_2SHIFT(VSHRI_U, vshli_u, true)
 DO_2SHIFT(VRSHRI_S, vrshli_s, true)
 DO_2SHIFT(VRSHRI_U, vrshli_u, true)
 
+DO_2SHIFT(VSRI, vsri, false)
+DO_2SHIFT(VSLI, vsli, false)
+
 #define DO_VSHLL(INSN, FN)                                      \
     static bool trans_##INSN(DisasContext *s, arg_2shift *a)    \
     {                                                           \