qemu/include/exec/memop.h
Richard Henderson 37031fefc7 include/exec/memop: Add MO_ATOM_*
This field describes the precise atomicity requirements of the guest,
which can then be used to constrain how the access may be emulated
on the host.

For instance, the AArch64 LDP (32-bit) instruction changes semantics
with ARMv8.4 LSE2, from

  MO_64 | MO_ATOM_IFALIGN_PAIR
  (64 bits, single-copy atomic only on 4-byte units,
   nonatomic if not aligned by 4),

to

  MO_64 | MO_ATOM_WITHIN16
  (64 bits, single-copy atomic within a 16-byte block)

The former may be implemented with two 4-byte loads, or a single 8-byte
load if that happens to be efficient on the host.  The latter may not
be implemented with two 4-byte loads and may also require a helper when
misaligned.
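
As a sketch (assuming the usual tcg_gen_qemu_ld_i64() interface; the
"lse2" flag is illustrative, not from this patch), a target front end
might select between the two as:

  MemOp mop = MO_64 | (lse2 ? MO_ATOM_WITHIN16 : MO_ATOM_IFALIGN_PAIR);
  tcg_gen_qemu_ld_i64(dest, addr, mem_idx, mop);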

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
2023-05-16 15:21:38 -07:00

/*
 * Constants for memory operations
 *
 * Authors:
 *  Richard Henderson <rth@twiddle.net>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef MEMOP_H
#define MEMOP_H

#include "qemu/host-utils.h"

typedef enum MemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_128   = 4,
    MO_256   = 5,
    MO_512   = 6,
    MO_1024  = 7,
    MO_SIZE  = 0x07,  /* Mask for the above. */

    MO_SIGN  = 0x08,  /* Sign-extended, otherwise zero-extended. */

    MO_BSWAP = 0x10,  /* Host reverse endian. */
#if HOST_BIG_ENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef NEED_CPU_H
#if TARGET_BIG_ENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif
#endif
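
    /*
     * MO_LE and MO_BE thus name an access's byte order relative to the
     * host: the MO_BSWAP bit is set exactly when the access's endianness
     * differs from the host's.
     */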

    /*
     * MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     *
     * Some architectures (e.g. ARMv8) need an address that is aligned
     * to a size larger than the size of the memory access; others
     * (e.g. SPARCv9) need an address that is aligned, but less strictly
     * than the natural alignment.
     *
     * MO_ALIGN assumes the alignment size is the size of the memory access.
     *
     * There are three options:
     * - unaligned access permitted (MO_UNALN);
     * - an alignment to the size of an access (MO_ALIGN);
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x where 'x' is a size in bytes).
     */
    MO_ASHIFT = 5,
    MO_AMASK = 0x7 << MO_ASHIFT,

    MO_UNALN    = 0,
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,
    MO_ALIGN    = MO_AMASK,
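
    /*
     * For example, MO_32 | MO_ALIGN_8 is a 4-byte access that must be
     * 8-byte aligned, while MO_32 | MO_ALIGN_2 is a 4-byte access that
     * requires only 2-byte alignment.
     */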

    /*
     * MO_ATOM_* describes the atomicity requirements of the operation:
     * MO_ATOM_IFALIGN: the operation must be single-copy atomic if it
     *    is aligned; if unaligned there is no atomicity.
     * MO_ATOM_IFALIGN_PAIR: the entire operation may be considered to
     *    be a pair of half-sized operations which are packed together
     *    for convenience, with single-copy atomicity on each half if
     *    the half is aligned.
     *    This is the atomicity e.g. of Arm pre-FEAT_LSE2 LDP.
     * MO_ATOM_WITHIN16: the operation is single-copy atomic, even if it
     *    is unaligned, so long as it does not cross a 16-byte boundary;
     *    if it crosses a 16-byte boundary there is no atomicity.
     *    This is the atomicity e.g. of Arm FEAT_LSE2 LDR.
     * MO_ATOM_WITHIN16_PAIR: the entire operation is single-copy atomic,
     *    if it happens to be within a 16-byte boundary, otherwise it
     *    devolves to a pair of half-sized MO_ATOM_WITHIN16 operations.
     *    Depending on alignment, one or both will be single-copy atomic.
     *    This is the atomicity e.g. of Arm FEAT_LSE2 LDP.
     * MO_ATOM_SUBALIGN: the operation is single-copy atomic in parts
     *    determined by the alignment.  E.g. if the address is 0 mod 4,
     *    then each 4-byte subobject is single-copy atomic.
     *    This is the atomicity e.g. of IBM Power.
     * MO_ATOM_NONE: the operation has no atomicity requirements.
     *
     * Note the default (i.e. 0) value is single-copy atomic to the
     * size of the operation, if aligned.  This retains the behaviour
     * from before this field was introduced.
     */
    MO_ATOM_SHIFT         = 8,
    MO_ATOM_IFALIGN       = 0 << MO_ATOM_SHIFT,
    MO_ATOM_IFALIGN_PAIR  = 1 << MO_ATOM_SHIFT,
    MO_ATOM_WITHIN16      = 2 << MO_ATOM_SHIFT,
    MO_ATOM_WITHIN16_PAIR = 3 << MO_ATOM_SHIFT,
    MO_ATOM_SUBALIGN      = 4 << MO_ATOM_SHIFT,
    MO_ATOM_NONE          = 5 << MO_ATOM_SHIFT,
    MO_ATOM_MASK          = 7 << MO_ATOM_SHIFT,
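
    /*
     * For example, MO_64 | MO_ATOM_IFALIGN_PAIR is an 8-byte access
     * treated as two 4-byte halves, each single-copy atomic only when
     * that half is 4-byte aligned.
     */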

    /* Combinations of the above, for ease of use. */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_UQ    = MO_64,
    MO_UO    = MO_128,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_SQ    = MO_SIGN | MO_64,
    MO_SO    = MO_SIGN | MO_128,

    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LEUQ  = MO_LE | MO_UQ,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LESQ  = MO_LE | MO_SQ,

    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BEUQ  = MO_BE | MO_UQ,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BESQ  = MO_BE | MO_SQ,

#ifdef NEED_CPU_H
    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TEUQ  = MO_TE | MO_UQ,
    MO_TEUO  = MO_TE | MO_UO,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TESQ  = MO_TE | MO_SQ,
#endif

    MO_SSIZE = MO_SIZE | MO_SIGN,
} MemOp;

/* MemOp to size in bytes. */
static inline unsigned memop_size(MemOp op)
{
    return 1 << (op & MO_SIZE);
}

/* Size in bytes to MemOp. */
static inline MemOp size_memop(unsigned size)
{
#ifdef CONFIG_DEBUG_TCG
    /* Power of 2 up to 8. */
    assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
#endif
    return ctz32(size);
}

/* Big endianness from MemOp. */
static inline bool memop_big_endian(MemOp op)
{
    return (op & MO_BSWAP) == MO_BE;
}

#endif
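
A minimal usage sketch of the helpers above (illustrative only; it
assumes QEMU's build environment, since the header depends on
"qemu/host-utils.h"):

  #include "qemu/osdep.h"
  #include "exec/memop.h"

  static void memop_examples(void)
  {
      /* MO_LEUL is a 4-byte, little-endian, zero-extended access. */
      assert(memop_size(MO_LEUL) == 4);

      /* size_memop() inverts memop_size() for power-of-2 sizes up to 8. */
      assert(size_memop(8) == MO_64);

      /* MO_BE and MO_LE are defined relative to the host, so these
         hold regardless of host endianness. */
      assert(memop_big_endian(MO_BEUL));
      assert(!memop_big_endian(MO_LEUL));
  }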