target/arm: Implement MVE VSRI, VSLI

Implement the MVE VSRI and VSLI insns, which perform a
shift-and-insert operation.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210628135835.6690-11-peter.maydell@linaro.org
Peter Maydell, 2021-06-28 14:58:27 +01:00
commit a78b25fa71, parent c226270703
4 changed files with 62 additions and 0 deletions
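Before reading the diff, here is a minimal standalone C model of what "shift-and-insert" means for one 16-bit element (an illustration only, not QEMU code; the function names and test values are invented for this sketch). VSRI shifts each source element right and inserts the result into the destination, preserving the top 'shift' bits of the old destination; VSLI shifts left and preserves the bottom 'shift' bits.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Scalar model of one 16-bit element of VSRI/VSLI. */
static uint16_t vsri16(uint16_t d, uint16_t m, unsigned shift)
{
    uint16_t mask = 0xffffu >> shift;        /* bits written by the insert */
    return (uint16_t)((m >> shift) & mask) | (d & ~mask);
}

static uint16_t vsli16(uint16_t d, uint16_t m, unsigned shift)
{
    uint16_t mask = (uint16_t)(0xffffu << shift);  /* bits written */
    return (uint16_t)((m << shift) & mask) | (d & ~mask);
}

int main(void)
{
    /* VSRI.16 d, m, #4: the top 4 bits of the old d survive */
    assert(vsri16(0xabcd, 0x1234, 4) == 0xa123);
    /* VSLI.16 d, m, #4: the bottom 4 bits of the old d survive */
    assert(vsli16(0xabcd, 0x1234, 4) == 0x234d);
    printf("ok\n");
    return 0;
}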

target/arm/helper-mve.h

@@ -396,3 +396,11 @@ DEF_HELPER_FLAGS_4(mve_vshlltsb, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vshlltsh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vshlltub, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(mve_vshlltuh, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vsrib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsrih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsriw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_4(mve_vslib, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vslih, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)
+DEF_HELPER_FLAGS_4(mve_vsliw, TCG_CALL_NO_WG, void, env, ptr, ptr, i32)

target/arm/mve.decode

@@ -371,3 +371,12 @@ VSHLL_TS 111 0 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
 VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_b
 VSHLL_TU 111 1 1110 1 . 1 .. ... ... 1 1111 0 1 . 0 ... 0 @2_shll_h
+
+# Shift-and-insert
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_b
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_h
+VSRI 111 1 1111 1 . ... ... ... 0 0100 0 1 . 1 ... 0 @2_shr_w
+
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_b
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_h
+VSLI 111 1 1111 1 . ... ... ... 0 0101 0 1 . 1 ... 0 @2_shl_w

target/arm/mve_helper.c

@@ -1251,6 +1251,48 @@ DO_2SHIFT_SAT_S(vqshlui_s, DO_SUQSHL_OP)
 DO_2SHIFT_U(vrshli_u, DO_VRSHLU)
 DO_2SHIFT_S(vrshli_s, DO_VRSHLS)
 
+/* Shift-and-insert; we always work with 64 bits at a time */
+#define DO_2SHIFT_INSERT(OP, ESIZE, SHIFTFN, MASKFN)                    \
+    void HELPER(glue(mve_, OP))(CPUARMState *env, void *vd,             \
+                                void *vm, uint32_t shift)               \
+    {                                                                   \
+        uint64_t *d = vd, *m = vm;                                      \
+        uint16_t mask;                                                  \
+        uint64_t shiftmask;                                             \
+        unsigned e;                                                     \
+        if (shift == 0 || shift == ESIZE * 8) {                         \
+            /*                                                          \
+             * Only VSLI can shift by 0; only VSRI can shift by <dt>.   \
+             * The generic logic would give the right answer for 0 but  \
+             * fails for <dt>.                                          \
+             */                                                         \
+            goto done;                                                  \
+        }                                                               \
+        assert(shift < ESIZE * 8);                                      \
+        mask = mve_element_mask(env);                                   \
+        /* ESIZE / 2 gives the MO_* value if ESIZE is in [1,2,4] */     \
+        shiftmask = dup_const(ESIZE / 2, MASKFN(ESIZE * 8, shift));     \
+        for (e = 0; e < 16 / 8; e++, mask >>= 8) {                      \
+            uint64_t r = (SHIFTFN(m[H8(e)], shift) & shiftmask) |       \
+                (d[H8(e)] & ~shiftmask);                                \
+            mergemask(&d[H8(e)], r, mask);                              \
+        }                                                               \
+done:                                                                   \
+        mve_advance_vpt(env);                                           \
+    }
+
+#define DO_SHL(N, SHIFT) ((N) << (SHIFT))
+#define DO_SHR(N, SHIFT) ((N) >> (SHIFT))
+#define SHL_MASK(EBITS, SHIFT) MAKE_64BIT_MASK((SHIFT), (EBITS) - (SHIFT))
+#define SHR_MASK(EBITS, SHIFT) MAKE_64BIT_MASK(0, (EBITS) - (SHIFT))
+
+DO_2SHIFT_INSERT(vsrib, 1, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vsrih, 2, DO_SHR, SHR_MASK)
+DO_2SHIFT_INSERT(vsriw, 4, DO_SHR, SHR_MASK)
+
+DO_2SHIFT_INSERT(vslib, 1, DO_SHL, SHL_MASK)
+DO_2SHIFT_INSERT(vslih, 2, DO_SHL, SHL_MASK)
+DO_2SHIFT_INSERT(vsliw, 4, DO_SHL, SHL_MASK)
+
 /*
  * Long shifts taking half-sized inputs from top or bottom of the input
  * vector and producing a double-width result. ESIZE, TYPE are for
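The mask arithmetic in this hunk can be sanity-checked standalone. In the sketch below, make_64bit_mask() and dup_const_sketch() are simplified stand-ins for QEMU's MAKE_64BIT_MASK and dup_const (an assumption about their behaviour, covering only the cases used here); it shows why "ESIZE / 2 gives the MO_* value" and what SHR_MASK/SHL_MASK produce.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Simplified stand-in for QEMU's MAKE_64BIT_MASK(shift, length). */
static uint64_t make_64bit_mask(unsigned shift, unsigned length)
{
    assert(length > 0 && length <= 64 - shift);
    return (~0ULL >> (64 - length)) << shift;
}

/*
 * Simplified stand-in for QEMU's dup_const(): replicate an
 * element-sized constant across 64 bits; vece is log2(esize),
 * so ESIZE / 2 maps 1->MO_8, 2->MO_16, 4->MO_32.
 */
static uint64_t dup_const_sketch(unsigned vece, uint64_t c)
{
    switch (vece) {
    case 0: return 0x0101010101010101ULL * (uint8_t)c;
    case 1: return 0x0001000100010001ULL * (uint16_t)c;
    case 2: return 0x0000000100000001ULL * (uint32_t)c;
    default: return c;
    }
}

int main(void)
{
    /* VSRI.16 #5: SHR_MASK(16, 5) keeps the low 11 bits of each element */
    uint64_t elem = make_64bit_mask(0, 16 - 5);     /* 0x07ff */
    uint64_t lane = dup_const_sketch(2 / 2, elem);  /* ESIZE / 2 == MO_16 */
    assert(elem == 0x07ff);
    assert(lane == 0x07ff07ff07ff07ffULL);

    /* VSLI.32 #8: SHL_MASK(32, 8) keeps bits [31:8] of each element */
    assert(make_64bit_mask(8, 32 - 8) == 0xffffff00ULL);
    printf("ok\n");
    return 0;
}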

target/arm/translate-mve.c

@@ -894,6 +894,9 @@ DO_2SHIFT(VSHRI_U, vshli_u, true)
 DO_2SHIFT(VRSHRI_S, vrshli_s, true)
 DO_2SHIFT(VRSHRI_U, vrshli_u, true)
 
+DO_2SHIFT(VSRI, vsri, false)
+DO_2SHIFT(VSLI, vsli, false)
+
 #define DO_VSHLL(INSN, FN)                                      \
     static bool trans_##INSN(DisasContext *s, arg_2shift *a)   \
     {                                                          \
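One detail of the mve_helper.c hunk worth spelling out: each 64-bit beat of the loop consumes 8 bits of the 16-bit VPT mask (hence "mask >>= 8"), because MVE predication is per byte, and mergemask() then writes only the predicated bytes of the result. Below is a minimal sketch of that per-byte merge; merge64() is a hypothetical stand-in for what QEMU's mergemask() does on a uint64_t, not the real implementation.

#include <stdint.h>
#include <stdio.h>
#include <assert.h>

/* Merge r into d, one byte per set bit of bytemask (bit i = byte i). */
static uint64_t merge64(uint64_t d, uint64_t r, uint8_t bytemask)
{
    uint64_t out = d;
    for (int i = 0; i < 8; i++) {
        if (bytemask & (1u << i)) {
            uint64_t bm = 0xffULL << (i * 8);
            out = (out & ~bm) | (r & bm);
        }
    }
    return out;
}

int main(void)
{
    /* Only the low 4 bytes predicated active: bytes 0-3 take the result */
    uint64_t d = 0x1111111111111111ULL;
    uint64_t r = 0x2222222222222222ULL;
    assert(merge64(d, r, 0x0f) == 0x1111111122222222ULL);
    printf("ok\n");
    return 0;
}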