diff --git a/target/i386/tcg/excp_helper.c b/target/i386/tcg/excp_helper.c
index c1ffa1c0ef..7c3c8dc7fe 100644
--- a/target/i386/tcg/excp_helper.c
+++ b/target/i386/tcg/excp_helper.c
@@ -140,3 +140,16 @@ G_NORETURN void raise_exception_ra(CPUX86State *env, int exception_index,
 {
     raise_interrupt2(env, exception_index, 0, 0, 0, retaddr);
 }
+
+G_NORETURN void handle_unaligned_access(CPUX86State *env, vaddr vaddr,
+                                        MMUAccessType access_type,
+                                        uintptr_t retaddr)
+{
+    /*
+     * Unaligned accesses are currently only triggered by SSE/AVX
+     * instructions that impose alignment requirements on memory
+     * operands. These instructions raise #GP(0) upon accessing an
+     * unaligned address.
+     */
+    raise_exception_ra(env, EXCP0D_GPF, retaddr);
+}
diff --git a/target/i386/tcg/helper-tcg.h b/target/i386/tcg/helper-tcg.h
index 34167e2e29..cd1723389a 100644
--- a/target/i386/tcg/helper-tcg.h
+++ b/target/i386/tcg/helper-tcg.h
@@ -42,17 +42,6 @@ void x86_cpu_do_interrupt(CPUState *cpu);
 bool x86_cpu_exec_interrupt(CPUState *cpu, int int_req);
 #endif
 
-/* helper.c */
-#ifdef CONFIG_USER_ONLY
-void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
-                            MMUAccessType access_type,
-                            bool maperr, uintptr_t ra);
-#else
-bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
-                      MMUAccessType access_type, int mmu_idx,
-                      bool probe, uintptr_t retaddr);
-#endif
-
 void breakpoint_handler(CPUState *cs);
 
 /* n must be a constant to be efficient */
@@ -78,6 +67,23 @@ G_NORETURN void raise_exception_err_ra(CPUX86State *env, int exception_index,
                                        int error_code, uintptr_t retaddr);
 G_NORETURN void raise_interrupt(CPUX86State *nenv, int intno, int is_int,
                                 int error_code, int next_eip_addend);
+G_NORETURN void handle_unaligned_access(CPUX86State *env, vaddr vaddr,
+                                        MMUAccessType access_type,
+                                        uintptr_t retaddr);
+#ifdef CONFIG_USER_ONLY
+void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
+                            MMUAccessType access_type,
+                            bool maperr, uintptr_t ra);
+void x86_cpu_record_sigbus(CPUState *cs, vaddr addr,
+                           MMUAccessType access_type, uintptr_t ra);
+#else
+bool x86_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
+                      MMUAccessType access_type, int mmu_idx,
+                      bool probe, uintptr_t retaddr);
+G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+                                            MMUAccessType access_type,
+                                            int mmu_idx, uintptr_t retaddr);
+#endif
 
 /* cc_helper.c */
 extern const uint8_t parity_table[256];
diff --git a/target/i386/tcg/sysemu/excp_helper.c b/target/i386/tcg/sysemu/excp_helper.c
index 48feba7e75..796dc2a1f3 100644
--- a/target/i386/tcg/sysemu/excp_helper.c
+++ b/target/i386/tcg/sysemu/excp_helper.c
@@ -439,3 +439,11 @@ bool x86_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
     }
     return true;
 }
+
+G_NORETURN void x86_cpu_do_unaligned_access(CPUState *cs, vaddr vaddr,
+                                            MMUAccessType access_type,
+                                            int mmu_idx, uintptr_t retaddr)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    handle_unaligned_access(&cpu->env, vaddr, access_type, retaddr);
+}
diff --git a/target/i386/tcg/tcg-cpu.c b/target/i386/tcg/tcg-cpu.c
index 6fdfdf9598..d3c2b8fb49 100644
--- a/target/i386/tcg/tcg-cpu.c
+++ b/target/i386/tcg/tcg-cpu.c
@@ -75,10 +75,12 @@ static const struct TCGCPUOps x86_tcg_ops = {
 #ifdef CONFIG_USER_ONLY
     .fake_user_interrupt = x86_cpu_do_interrupt,
     .record_sigsegv = x86_cpu_record_sigsegv,
+    .record_sigbus = x86_cpu_record_sigbus,
 #else
     .tlb_fill = x86_cpu_tlb_fill,
     .do_interrupt = x86_cpu_do_interrupt,
     .cpu_exec_interrupt = x86_cpu_exec_interrupt,
+    .do_unaligned_access = x86_cpu_do_unaligned_access,
     .debug_excp_handler = breakpoint_handler,
     .debug_check_breakpoint = x86_debug_check_breakpoint,
 #endif /* !CONFIG_USER_ONLY */
diff --git a/target/i386/tcg/translate.c b/target/i386/tcg/translate.c
index d6420df31d..8ec91d17af 100644
--- a/target/i386/tcg/translate.c
+++ b/target/i386/tcg/translate.c
@@ -2738,21 +2738,23 @@ static inline void gen_stq_env_A0(DisasContext *s, int offset)
     tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, s->mem_index, MO_LEUQ);
 }
 
-static inline void gen_ldo_env_A0(DisasContext *s, int offset)
+static inline void gen_ldo_env_A0(DisasContext *s, int offset, bool align)
 {
     int mem_index = s->mem_index;
-    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index, MO_LEUQ);
+    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0, mem_index,
+                        MO_LEUQ | (align ? MO_ALIGN_16 : 0));
     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
     tcg_gen_qemu_ld_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
     tcg_gen_st_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
 }
 
-static inline void gen_sto_env_A0(DisasContext *s, int offset)
+static inline void gen_sto_env_A0(DisasContext *s, int offset, bool align)
 {
     int mem_index = s->mem_index;
     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(0)));
-    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index, MO_LEUQ);
+    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0, mem_index,
+                        MO_LEUQ | (align ? MO_ALIGN_16 : 0));
     tcg_gen_addi_tl(s->tmp0, s->A0, 8);
     tcg_gen_ld_i64(s->tmp1_i64, cpu_env, offset + offsetof(ZMMReg, ZMM_Q(1)));
     tcg_gen_qemu_st_i64(s->tmp1_i64, s->tmp0, mem_index, MO_LEUQ);
@@ -3131,7 +3133,7 @@ static const struct SSEOpHelper_table6 sse_op_table6[256] = {
     [0x25] = UNARY_OP(pmovsxdq, SSE41, SSE_OPF_MMX),
     [0x28] = BINARY_OP(pmuldq, SSE41, SSE_OPF_MMX),
     [0x29] = BINARY_OP(pcmpeqq, SSE41, SSE_OPF_MMX),
-    [0x2a] = SPECIAL_OP(SSE41), /* movntqda */
+    [0x2a] = SPECIAL_OP(SSE41), /* movntdqa */
     [0x2b] = BINARY_OP(packusdw, SSE41, SSE_OPF_MMX),
     [0x30] = UNARY_OP(pmovzxbw, SSE41, SSE_OPF_MMX),
     [0x31] = UNARY_OP(pmovzxbd, SSE41, SSE_OPF_MMX),
@@ -3294,17 +3296,17 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
         break;
     case 0x1e7: /* movntdq */
     case 0x02b: /* movntps */
-    case 0x12b: /* movntps */
+    case 0x12b: /* movntpd */
         if (mod == 3)
             goto illegal_op;
         gen_lea_modrm(env, s, modrm);
-        gen_sto_env_A0(s, ZMM_OFFSET(reg));
+        gen_sto_env_A0(s, ZMM_OFFSET(reg), true);
         break;
     case 0x3f0: /* lddqu */
         if (mod == 3)
             goto illegal_op;
         gen_lea_modrm(env, s, modrm);
-        gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+        gen_ldo_env_A0(s, ZMM_OFFSET(reg), false);
         break;
     case 0x22b: /* movntss */
     case 0x32b: /* movntsd */
@@ -3373,7 +3375,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
         case 0x26f: /* movdqu xmm, ea */
             if (mod != 3) {
                 gen_lea_modrm(env, s, modrm);
-                gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+                gen_ldo_env_A0(s, ZMM_OFFSET(reg),
+                               /* movaps, movapd, movdqa */
+                               b == 0x028 || b == 0x128 || b == 0x16f);
             } else {
                 rm = (modrm & 7) | REX_B(s);
                 gen_op_movo(s, ZMM_OFFSET(reg), ZMM_OFFSET(rm));
@@ -3432,7 +3436,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
         case 0x212: /* movsldup */
             if (mod != 3) {
                 gen_lea_modrm(env, s, modrm);
-                gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+                gen_ldo_env_A0(s, ZMM_OFFSET(reg), true);
             } else {
                 rm = (modrm & 7) | REX_B(s);
                 gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(0)),
@@ -3474,7 +3478,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
         case 0x216: /* movshdup */
             if (mod != 3) {
                 gen_lea_modrm(env, s, modrm);
-                gen_ldo_env_A0(s, ZMM_OFFSET(reg));
+                gen_ldo_env_A0(s, ZMM_OFFSET(reg), true);
             } else {
                 rm = (modrm & 7) | REX_B(s);
                 gen_op_movl(s, offsetof(CPUX86State, xmm_regs[reg].ZMM_L(1)),
@@ -3568,7 +3572,9 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
         case 0x27f: /* movdqu ea, xmm */
             if (mod != 3) {
                 gen_lea_modrm(env, s, modrm);
-                gen_sto_env_A0(s, ZMM_OFFSET(reg));
+                gen_sto_env_A0(s, ZMM_OFFSET(reg),
+                               /* movaps, movapd, movdqa */
+                               b == 0x029 || b == 0x129 || b == 0x17f);
             } else {
                 rm = (modrm & 7) | REX_B(s);
                 gen_op_movo(s, ZMM_OFFSET(rm), ZMM_OFFSET(reg));
@@ -3724,7 +3730,8 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
             if (mod != 3) {
                 gen_lea_modrm(env, s, modrm);
                 op2_offset = offsetof(CPUX86State,xmm_t0);
-                gen_ldo_env_A0(s, op2_offset);
+                /* FIXME: should be 64-bit access if b1 == 0. */
+                gen_ldo_env_A0(s, op2_offset, !!b1);
             } else {
                 rm = (modrm & 7) | REX_B(s);
                 op2_offset = ZMM_OFFSET(rm);
@@ -3913,11 +3920,11 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                     tcg_gen_st16_tl(s->tmp0, cpu_env,
                                     op2_offset + offsetof(ZMMReg, ZMM_W(0)));
                     break;
-                case 0x2a: /* movntqda */
-                    gen_ldo_env_A0(s, op1_offset);
+                case 0x2a: /* movntdqa */
+                    gen_ldo_env_A0(s, op1_offset, true);
                     return;
                 default:
-                    gen_ldo_env_A0(s, op2_offset);
+                    gen_ldo_env_A0(s, op2_offset, true);
                 }
             }
             if (!op6->fn[b1].op1) {
@@ -4499,7 +4506,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
             } else {
                 op2_offset = offsetof(CPUX86State, xmm_t0);
                 gen_lea_modrm(env, s, modrm);
-                gen_ldo_env_A0(s, op2_offset);
+                gen_ldo_env_A0(s, op2_offset, true);
             }
 
             val = x86_ldub_code(env, s);
@@ -4606,7 +4613,7 @@ static void gen_sse(CPUX86State *env, DisasContext *s, int b,
                     break;
                 default:
                     /* 128 bit access */
-                    gen_ldo_env_A0(s, op2_offset);
+                    gen_ldo_env_A0(s, op2_offset, true);
                     break;
                 }
             } else {
diff --git a/target/i386/tcg/user/excp_helper.c b/target/i386/tcg/user/excp_helper.c
index cd507e2a1b..b3bdb7831a 100644
--- a/target/i386/tcg/user/excp_helper.c
+++ b/target/i386/tcg/user/excp_helper.c
@@ -48,3 +48,10 @@ void x86_cpu_record_sigsegv(CPUState *cs, vaddr addr,
 
     cpu_loop_exit_restore(cs, ra);
 }
+
+void x86_cpu_record_sigbus(CPUState *cs, vaddr addr,
+                           MMUAccessType access_type, uintptr_t ra)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    handle_unaligned_access(&cpu->env, addr, access_type, ra);
+}
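
For reference, here is a quick way to observe the new behavior from a guest. The program below is not part of the patch; it is a minimal sketch assuming a Linux guest run under qemu-x86_64, where the #GP(0) raised by handle_unaligned_access() reaches the process as a signal (SIGSEGV on Linux; SIGBUS is also caught to be safe). MOVUPS has no alignment requirement and must not fault; MOVAPS requires a 16-byte-aligned memory operand and should fault once this patch is applied.

/* sse_align_test.c - hypothetical guest-side check, not part of the patch. */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf jb;

static void on_fault(int sig)
{
    siglongjmp(jb, sig);    /* jump back out of the faulting instruction */
}

int main(void)
{
    /* Aligned backing store; buf + 4 is misaligned for 16-byte operands. */
    static char buf[32] __attribute__((aligned(16)));
    char *p = buf + 4;
    int sig;

    signal(SIGSEGV, on_fault);
    signal(SIGBUS, on_fault);   /* in case the fault is reported as SIGBUS */

    if ((sig = sigsetjmp(jb, 1)) == 0) {
        /* MOVUPS tolerates any alignment: must not fault. */
        __asm__ volatile("movups (%0), %%xmm0" : : "r"(p) : "xmm0");
        puts("movups unaligned: OK");
    } else {
        printf("movups unaligned: unexpected signal %d\n", sig);
    }

    if ((sig = sigsetjmp(jb, 1)) == 0) {
        /* MOVAPS requires 16-byte alignment: should raise #GP(0). */
        __asm__ volatile("movaps (%0), %%xmm0" : : "r"(p) : "xmm0");
        puts("movaps unaligned: no fault (alignment check missing)");
    } else {
        printf("movaps unaligned: faulted with signal %d\n", sig);
    }
    return 0;
}

Built with something like gcc -o sse_align_test sse_align_test.c and run under qemu-x86_64, the MOVAPS case should report a fault with this patch applied, where previously TCG performed the unaligned access silently. Note the design choice in the diff: gen_ldo_env_A0()/gen_sto_env_A0() gain an align parameter rather than forcing MO_ALIGN_16 unconditionally, because instructions such as lddqu, movdqu, and movups are explicitly exempt from the alignment check.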