diff --git a/tcg/region.c b/tcg/region.c
index bef4c4756f..f8410ba5db 100644
--- a/tcg/region.c
+++ b/tcg/region.c
@@ -505,6 +505,14 @@ static int alloc_code_gen_buffer(size_t tb_size, int splitwx, Error **errp)
     return PROT_READ | PROT_WRITE;
 }
 #elif defined(_WIN32)
+/*
+ * Local source-level compatibility with Unix.
+ * Used by tcg_region_init below.
+ */
+#define PROT_READ 1
+#define PROT_WRITE 2
+#define PROT_EXEC 4
+
 static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
 {
     void *buf;
@@ -525,7 +533,7 @@ static int alloc_code_gen_buffer(size_t size, int splitwx, Error **errp)
     region.start_aligned = buf;
     region.total_size = size;
 
-    return PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+    return PROT_READ | PROT_WRITE | PROT_EXEC;
 }
 #else
 static int alloc_code_gen_buffer_anon(size_t size, int prot,
@@ -794,10 +802,10 @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
      * buffer -- let that one use hugepages throughout.
      * Work with the page protections set up with the initial mapping.
      */
-    need_prot = PAGE_READ | PAGE_WRITE;
+    need_prot = PROT_READ | PROT_WRITE;
 #ifndef CONFIG_TCG_INTERPRETER
     if (tcg_splitwx_diff == 0) {
-        need_prot |= PAGE_EXEC;
+        need_prot |= PROT_EXEC;
     }
 #endif
     for (size_t i = 0, n = region.n; i < n; i++) {
@@ -807,9 +815,9 @@ void tcg_region_init(size_t tb_size, int splitwx, unsigned max_cpus)
         if (have_prot != need_prot) {
             int rc;
 
-            if (need_prot == (PAGE_READ | PAGE_WRITE | PAGE_EXEC)) {
+            if (need_prot == (PROT_READ | PROT_WRITE | PROT_EXEC)) {
                 rc = qemu_mprotect_rwx(start, end - start);
-            } else if (need_prot == (PAGE_READ | PAGE_WRITE)) {
+            } else if (need_prot == (PROT_READ | PROT_WRITE)) {
                 rc = qemu_mprotect_rw(start, end - start);
             } else {
                 g_assert_not_reached();