Implement memory access boundary check with hardware trap for 64-bit platforms (#293)

Also implement native stack overflow check with hardware trap for 64-bit platforms
Refine the classic interpreter and fast interpreter to improve performance
Update documentation
This commit is contained in:
wenyongh
2020-06-28 15:41:25 +08:00
committed by GitHub
parent 548926ab1a
commit ee315e4049
33 changed files with 1143 additions and 438 deletions

View File

@ -149,6 +149,12 @@ enum {
#define WASM_ENABLE_MINI_LOADER 0
#endif
/* Whether to disable the boundary check with hardware trap;
 * it is enabled by default if the platform supports it */
#ifndef WASM_DISABLE_HW_BOUND_CHECK
#define WASM_DISABLE_HW_BOUND_CHECK 0
#endif
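A brief usage sketch for the new switch; the exact build integration is assumed here, not shown in this commit:
/* Sketch: opt out of the hardware-trap boundary check at build time by
 * defining the macro before this header is processed, e.g. on the
 * compiler command line:
 *
 *   cc -DWASM_DISABLE_HW_BOUND_CHECK=1 ...
 *
 * Leaving it undefined keeps the default (0), i.e. hardware traps on. */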
/* Heap and stack profiling */
#define BH_ENABLE_MEMORY_PROFILING 0
@ -199,8 +205,8 @@ enum {
/* Default/min/max stack size of each app thread */
#if !defined(BH_PLATFORM_ZEPHYR) && !defined(BH_PLATFORM_ALIOS_THINGS)
#define APP_THREAD_STACK_SIZE_DEFAULT (20 * 1024)
#define APP_THREAD_STACK_SIZE_MIN (16 * 1024)
#define APP_THREAD_STACK_SIZE_DEFAULT (32 * 1024)
#define APP_THREAD_STACK_SIZE_MIN (24 * 1024)
#define APP_THREAD_STACK_SIZE_MAX (256 * 1024)
#else
#define APP_THREAD_STACK_SIZE_DEFAULT (6 * 1024)

View File

@ -158,10 +158,38 @@ memory_instantiate(AOTModuleInstance *module_inst, AOTModule *module,
uint64 total_size = heap_size + memory_data_size;
uint8 *p;
#ifndef OS_ENABLE_HW_BOUND_CHECK
/* Allocate memory */
if (!(p = runtime_malloc(total_size, error_buf, error_buf_size))) {
return false;
}
#else
uint8 *mapped_mem;
uint64 map_size = 8 * (uint64)BH_GB;
/* In total 8G is mapped; the effective address range of load/store opcodes is -2G to 6G:
 *   ea = i + memarg.offset
 *   i is an i32, so its range is -2G to 2G
 *   memarg.offset is a u32, so its range is 0 to 4G
 *   hence the range of ea is -2G to 6G
 */
if (total_size >= UINT32_MAX
|| !(mapped_mem = os_mmap(NULL, map_size,
MMAP_PROT_NONE, MMAP_MAP_NONE))) {
set_error_buf(error_buf, error_buf_size,
"AOT module instantiate failed: mmap memory failed.");
return false;
}
p = mapped_mem + 2 * (uint64)BH_GB - heap_size;
if (os_mprotect(p, total_size, MMAP_PROT_READ | MMAP_PROT_WRITE) != 0) {
set_error_buf(error_buf, error_buf_size,
"AOT module instantiate failed: mprotec memory failed.");
os_munmap(mapped_mem, map_size);
return false;
}
memset(p, 0, (uint32)total_size);
#endif
/* Initialize heap info */
module_inst->heap_data.ptr = p;
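A compact sketch of the reservation layout this hunk creates, assuming the 8 GB PROT_NONE mapping and the 2 GB / 6 GB split described in the comment above; the struct and helper names are illustrative, not part of the commit:
#include <stdint.h>

#define GB ((uint64_t)1 << 30)

/* Sketch: where the committed heap/linear memory sits inside the 8 GB
 * reservation. Any effective address ea = i(i32) + offset(u32), i.e.
 * anything in [-2 GB, +6 GB) relative to memory_data, stays inside the
 * reservation and faults on PROT_NONE pages instead of touching
 * unrelated memory. */
typedef struct {
    uint8_t *mapped_start; /* start of the 8 GB PROT_NONE reservation */
    uint8_t *heap_data;    /* committed read/write region (heap + memory) */
    uint8_t *memory_data;  /* linear memory base, 2 GB into the mapping */
    uint8_t *mapped_end;   /* end of the reservation */
} guard_layout;

static guard_layout layout_of(uint8_t *mapped_mem, uint64_t heap_size)
{
    guard_layout l;
    l.mapped_start = mapped_mem;
    l.heap_data    = mapped_mem + 2 * GB - heap_size;
    l.memory_data  = mapped_mem + 2 * GB;
    l.mapped_end   = mapped_mem + 8 * GB;
    return l;
}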
@ -184,15 +212,15 @@ memory_instantiate(AOTModuleInstance *module_inst, AOTModule *module,
p += (uint32)memory_data_size;
module_inst->memory_data_end.ptr = p;
module_inst->memory_data_size = (uint32)memory_data_size;
module_inst->total_mem_size = (uint32)(heap_size + memory_data_size);
module_inst->mem_cur_page_count = module->mem_init_page_count;
module_inst->mem_max_page_count = module->mem_max_page_count;
if (module_inst->total_mem_size > 0) {
module_inst->mem_bound_check_1byte = module_inst->total_mem_size - 1;
module_inst->mem_bound_check_2bytes = module_inst->total_mem_size - 2;
module_inst->mem_bound_check_4bytes = module_inst->total_mem_size - 4;
module_inst->mem_bound_check_8bytes = module_inst->total_mem_size - 8;
module_inst->mem_bound_check_heap_base = module_inst->heap_base_offset;
if (module_inst->memory_data_size > 0) {
module_inst->mem_bound_check_1byte = module_inst->memory_data_size - 1;
module_inst->mem_bound_check_2bytes = module_inst->memory_data_size - 2;
module_inst->mem_bound_check_4bytes = module_inst->memory_data_size - 4;
module_inst->mem_bound_check_8bytes = module_inst->memory_data_size - 8;
}
for (i = 0; i < module->mem_init_data_count; i++) {
@ -263,7 +291,11 @@ fail2:
module_inst->heap_handle.ptr = NULL;
}
fail1:
#ifndef OS_ENABLE_HW_BOUND_CHECK
wasm_runtime_free(module_inst->heap_data.ptr);
#else
os_munmap(mapped_mem, map_size);
#endif
module_inst->heap_data.ptr = NULL;
return false;
}
@ -374,6 +406,9 @@ aot_instantiate(AOTModule *module, bool is_sub_inst,
heap_size = align_uint(heap_size, 8);
if (heap_size > APP_HEAP_SIZE_MAX)
heap_size = APP_HEAP_SIZE_MAX;
#ifdef OS_ENABLE_HW_BOUND_CHECK
heap_size = align_uint(heap_size, os_getpagesize());
#endif
/* Allocate module instance, global data, table data and heap data */
if (!(module_inst = runtime_malloc(total_size,
@ -468,8 +503,14 @@ aot_deinstantiate(AOTModuleInstance *module_inst, bool is_sub_inst)
if (module_inst->heap_handle.ptr)
mem_allocator_destroy(module_inst->heap_handle.ptr);
if (module_inst->heap_data.ptr)
if (module_inst->heap_data.ptr) {
#ifndef OS_ENABLE_HW_BOUND_CHECK
wasm_runtime_free(module_inst->heap_data.ptr);
#else
os_munmap((uint8*)module_inst->memory_data.ptr - 2 * (uint64)BH_GB,
8 * (uint64)BH_GB);
#endif
}
if (module_inst->func_ptrs.ptr)
wasm_runtime_free(module_inst->func_ptrs.ptr);
@ -508,6 +549,157 @@ aot_lookup_function(const AOTModuleInstance *module_inst,
(addr)[1] = u.parts[1]; \
} while (0)
#ifdef OS_ENABLE_HW_BOUND_CHECK
static os_thread_local_attribute WASMExecEnv *aot_exec_env = NULL;
static inline uint8 *
get_stack_min_addr(WASMExecEnv *exec_env, uint32 page_size)
{
uintptr_t stack_bound = (uintptr_t)exec_env->native_stack_boundary;
return (uint8*)(stack_bound & ~(uintptr_t)(page_size - 1));
}
static void
aot_signal_handler(void *sig_addr)
{
AOTModuleInstance *module_inst;
WASMJmpBuf *jmpbuf_node;
uint8 *mapped_mem_start_addr, *mapped_mem_end_addr;
uint8 *stack_min_addr;
uint32 page_size;
/* Check whether the current thread is running an AOT function */
if (aot_exec_env
&& aot_exec_env->handle == os_self_thread()
&& (jmpbuf_node = aot_exec_env->jmpbuf_stack_top)) {
/* Get mapped mem info of current instance */
module_inst = (AOTModuleInstance *)aot_exec_env->module_inst;
mapped_mem_start_addr = (uint8*)module_inst->memory_data.ptr
- 2 * (uint64)BH_GB;
mapped_mem_end_addr = (uint8*)module_inst->memory_data.ptr
+ 6 * (uint64)BH_GB;
/* Get stack info of current thread */
page_size = os_getpagesize();
stack_min_addr = get_stack_min_addr(aot_exec_env, page_size);
if (mapped_mem_start_addr <= (uint8*)sig_addr
&& (uint8*)sig_addr < mapped_mem_end_addr) {
/* The address that caused the segmentation fault is inside
   the AOT instance's guard regions */
aot_set_exception_with_id(module_inst, EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS);
os_longjmp(jmpbuf_node->jmpbuf, 1);
}
else if (stack_min_addr - page_size <= (uint8*)sig_addr
&& (uint8*)sig_addr < stack_min_addr + page_size * 3) {
/* The address that caused the segmentation fault is inside
   the native thread's guard pages */
aot_set_exception_with_id(module_inst, EXCE_NATIVE_STACK_OVERFLOW);
os_longjmp(jmpbuf_node->jmpbuf, 1);
}
}
}
bool
aot_signal_init()
{
return os_signal_init(aot_signal_handler) == 0 ? true : false;
}
void
aot_signal_destroy()
{
os_signal_destroy();
}
#if defined(__GNUC__)
__attribute__((no_sanitize_address)) static uint32
#else
static uint32
#endif
touch_pages(uint8 *stack_min_addr, uint32 page_size)
{
uint8 sum = 0;
while (1) {
uint8 *touch_addr = os_alloca(page_size / 2);
sum += *touch_addr;
if (touch_addr < stack_min_addr + page_size) {
break;
}
}
return sum;
}
static bool
invoke_native_with_hw_bound_check(WASMExecEnv *exec_env, void *func_ptr,
const WASMType *func_type, const char *signature,
void *attachment,
uint32 *argv, uint32 argc, uint32 *argv_ret)
{
AOTModuleInstance *module_inst = (AOTModuleInstance*)exec_env->module_inst;
WASMExecEnv **p_aot_exec_env = &aot_exec_env;
WASMJmpBuf *jmpbuf_node, *jmpbuf_node_pop;
uint32 page_size = os_getpagesize();
uint8 *stack_min_addr = get_stack_min_addr(exec_env, page_size);
bool ret;
if (aot_exec_env
&& (aot_exec_env != exec_env)) {
aot_set_exception(module_inst, "Invalid exec env.");
return false;
}
if (!exec_env->jmpbuf_stack_top) {
/* Touch each stack page to ensure that it has been mapped: the OS may
lazily grow the stack mapping as a guard page is hit. */
touch_pages(stack_min_addr, page_size);
/* First time calling an AOT function: protect the stack guard pages */
if (os_mprotect(stack_min_addr, page_size * 3, MMAP_PROT_NONE) != 0) {
aot_set_exception(module_inst, "Set protected page failed.");
return false;
}
}
if (!(jmpbuf_node = wasm_runtime_malloc(sizeof(WASMJmpBuf)))) {
aot_set_exception_with_id(module_inst, EXCE_OUT_OF_MEMORY);
return false;
}
wasm_exec_env_push_jmpbuf(exec_env, jmpbuf_node);
aot_exec_env = exec_env;
if (os_setjmp(jmpbuf_node->jmpbuf) == 0) {
ret = wasm_runtime_invoke_native(exec_env, func_ptr, func_type,
signature, attachment,
argv, argc, argv_ret);
}
else {
/* Exception has been set in signal handler before calling longjmp */
ret = false;
}
jmpbuf_node_pop = wasm_exec_env_pop_jmpbuf(exec_env);
bh_assert(jmpbuf_node == jmpbuf_node_pop);
wasm_runtime_free(jmpbuf_node);
if (!exec_env->jmpbuf_stack_top) {
/* Unprotect the guard pages when the nested call depth reaches zero */
os_mprotect(stack_min_addr, page_size * 3,
MMAP_PROT_READ | MMAP_PROT_WRITE);
*p_aot_exec_env = NULL;
}
os_sigreturn();
os_signal_unmask();
(void)jmpbuf_node_pop;
return ret;
}
#define invoke_native_internal invoke_native_with_hw_bound_check
#else /* else of OS_ENABLE_HW_BOUND_CHECK */
#define invoke_native_internal wasm_runtime_invoke_native
#endif /* end of OS_ENABLE_HW_BOUND_CHECK */
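A condensed sketch of the trap-and-recover control flow that aot_signal_handler() and invoke_native_with_hw_bound_check() implement together; the names current_jmpbuf, on_guard_fault and run_wasm_code are placeholders, not WAMR symbols:
#include <setjmp.h>
#include <stdbool.h>
#include <stddef.h>

/* Thread-local pointer to the active recovery point, mirroring the
 * per-exec_env jmpbuf stack used above. */
static __thread jmp_buf *current_jmpbuf = NULL;

/* Called from the SIGSEGV handler when the faulting address falls in a
 * guard region: record the exception, then unwind to the caller. */
static void on_guard_fault(void *fault_addr)
{
    (void)fault_addr;
    if (current_jmpbuf)
        longjmp(*current_jmpbuf, 1);
}

static bool call_with_hw_bound_check(void (*run_wasm_code)(void))
{
    jmp_buf buf;
    current_jmpbuf = &buf;
    if (setjmp(buf) == 0) {
        run_wasm_code();        /* out-of-bounds access raises SIGSEGV */
        current_jmpbuf = NULL;
        return true;
    }
    current_jmpbuf = NULL;      /* reached via longjmp from the handler */
    return false;
}
The real code keeps a stack of jump buffers per exec_env so that nested AOT/native calls each get their own recovery point.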
bool
aot_call_function(WASMExecEnv *exec_env,
AOTFunctionInstance *function,
@ -515,8 +707,8 @@ aot_call_function(WASMExecEnv *exec_env,
{
AOTModuleInstance *module_inst = (AOTModuleInstance*)exec_env->module_inst;
AOTFuncType *func_type = function->func_type;
bool ret = wasm_runtime_invoke_native(exec_env, function->func_ptr,
func_type, NULL, NULL, argv, argc, argv);
bool ret = invoke_native_internal(exec_env, function->func_ptr,
func_type, NULL, NULL, argv, argc, argv);
return ret && !aot_get_exception(module_inst) ? true : false;
}
@ -762,6 +954,7 @@ aot_get_native_addr_range(AOTModuleInstance *module_inst,
return false;
}
#ifndef OS_ENABLE_HW_BOUND_CHECK
bool
aot_enlarge_memory(AOTModuleInstance *module_inst, uint32 inc_page_count)
{
@ -830,17 +1023,58 @@ aot_enlarge_memory(AOTModuleInstance *module_inst, uint32 inc_page_count)
module_inst->mem_cur_page_count = total_page_count;
module_inst->memory_data_size = (uint32)memory_data_size;
module_inst->total_mem_size = (uint32)(heap_size + memory_data_size);
module_inst->memory_data.ptr = (uint8*)heap_data + heap_size;
module_inst->memory_data_end.ptr = (uint8*)module_inst->memory_data.ptr
+ (uint32)memory_data_size;
module_inst->mem_bound_check_1byte = module_inst->total_mem_size - 1;
module_inst->mem_bound_check_2bytes = module_inst->total_mem_size - 2;
module_inst->mem_bound_check_4bytes = module_inst->total_mem_size - 4;
module_inst->mem_bound_check_8bytes = module_inst->total_mem_size - 8;
module_inst->mem_bound_check_1byte = module_inst->memory_data_size - 1;
module_inst->mem_bound_check_2bytes = module_inst->memory_data_size - 2;
module_inst->mem_bound_check_4bytes = module_inst->memory_data_size - 4;
module_inst->mem_bound_check_8bytes = module_inst->memory_data_size - 8;
return true;
}
#else
bool
aot_enlarge_memory(AOTModuleInstance *module_inst, uint32 inc_page_count)
{
uint32 num_bytes_per_page =
((AOTModule*)module_inst->aot_module.ptr)->num_bytes_per_page;
uint32 cur_page_count = module_inst->mem_cur_page_count;
uint32 max_page_count = module_inst->mem_max_page_count;
uint32 total_page_count = cur_page_count + inc_page_count;
uint64 memory_data_size = (uint64)num_bytes_per_page * total_page_count;
if (inc_page_count <= 0)
/* No need to enlarge memory */
return true;
if (total_page_count < cur_page_count /* integer overflow */
|| total_page_count > max_page_count) {
aot_set_exception(module_inst, "fail to enlarge memory.");
return false;
}
if (os_mprotect(module_inst->memory_data.ptr, memory_data_size,
MMAP_PROT_READ | MMAP_PROT_WRITE) != 0) {
aot_set_exception(module_inst, "fail to enlarge memory.");
return false;
}
memset(module_inst->memory_data_end.ptr, 0,
num_bytes_per_page * inc_page_count);
module_inst->mem_cur_page_count = total_page_count;
module_inst->memory_data_size = (uint32)memory_data_size;
module_inst->memory_data_end.ptr = (uint8*)module_inst->memory_data.ptr
+ (uint32)memory_data_size;
module_inst->mem_bound_check_1byte = module_inst->memory_data_size - 1;
module_inst->mem_bound_check_2bytes = module_inst->memory_data_size - 2;
module_inst->mem_bound_check_4bytes = module_inst->memory_data_size - 4;
module_inst->mem_bound_check_8bytes = module_inst->memory_data_size - 8;
return true;
}
#endif
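With the full 8 GB already reserved, memory.grow in this variant only commits more pages. A minimal sketch of that idea, assuming the platform API (os_mprotect, MMAP_PROT_*) declared in platform_api_vmcore.h; grow_committed_pages is illustrative:
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

/* Sketch: enlarge linear memory by flipping page protections inside the
 * existing reservation, as aot_enlarge_memory() does above. */
static bool grow_committed_pages(uint8_t *memory_data,
                                 uint64_t old_size, uint64_t new_size)
{
    if (os_mprotect(memory_data, new_size,
                    MMAP_PROT_READ | MMAP_PROT_WRITE) != 0)
        return false;           /* growth failed, memory left unchanged */
    memset(memory_data + old_size, 0, new_size - old_size);
    return true;
}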
bool
aot_is_wasm_type_equal(AOTModuleInstance *module_inst,
@ -979,9 +1213,9 @@ aot_call_indirect(WASMExecEnv *exec_env,
}
}
return wasm_runtime_invoke_native(exec_env, func_ptr,
func_type, signature, attachment,
argv, argc, argv);
return invoke_native_internal(exec_env, func_ptr,
func_type, signature, attachment,
argv, argc, argv);
}
#if WASM_ENABLE_BULK_MEMORY != 0

View File

@ -198,14 +198,12 @@ typedef struct AOTModuleInstance {
/* WASI context */
AOTPointer wasi_ctx;
/* total memory size: heap and linear memory */
uint32 total_mem_size;
/* boundary check constants for aot code */
uint32 mem_bound_check_1byte;
uint32 mem_bound_check_2bytes;
uint32 mem_bound_check_4bytes;
uint32 mem_bound_check_8bytes;
int64 mem_bound_check_heap_base;
int64 mem_bound_check_1byte;
int64 mem_bound_check_2bytes;
int64 mem_bound_check_4bytes;
int64 mem_bound_check_8bytes;
/* others */
int32 temp_ret;
@ -469,6 +467,14 @@ bool
aot_data_drop(AOTModuleInstance *module_inst, uint32 seg_index);
#endif
#ifdef OS_ENABLE_HW_BOUND_CHECK
bool
aot_signal_init();
void
aot_signal_destroy();
#endif
#ifdef __cplusplus
} /* end of extern "C" */
#endif

View File

@ -61,6 +61,15 @@ fail1:
void
wasm_exec_env_destroy_internal(WASMExecEnv *exec_env)
{
#ifdef OS_ENABLE_HW_BOUND_CHECK
WASMJmpBuf *jmpbuf = exec_env->jmpbuf_stack_top;
WASMJmpBuf *jmpbuf_prev;
while (jmpbuf) {
jmpbuf_prev = jmpbuf->prev;
wasm_runtime_free(jmpbuf);
jmpbuf = jmpbuf_prev;
}
#endif
#if WASM_ENABLE_THREAD_MGR != 0
os_mutex_destroy(&exec_env->wait_lock);
os_cond_destroy(&exec_env->wait_cond);
@ -121,7 +130,6 @@ wasm_exec_env_set_thread_info(WASMExecEnv *exec_env)
exec_env->handle = os_self_thread();
exec_env->native_stack_boundary = os_thread_get_stack_boundary()
+ RESERVED_BYTES_TO_NATIVE_STACK_BOUNDARY;
}
#if WASM_ENABLE_THREAD_MGR != 0
@ -136,4 +144,27 @@ wasm_exec_env_set_thread_arg(WASMExecEnv *exec_env, void *thread_arg)
{
exec_env->thread_arg = thread_arg;
}
#endif
#endif
#ifdef OS_ENABLE_HW_BOUND_CHECK
void
wasm_exec_env_push_jmpbuf(WASMExecEnv *exec_env, WASMJmpBuf *jmpbuf)
{
jmpbuf->prev = exec_env->jmpbuf_stack_top;
exec_env->jmpbuf_stack_top = jmpbuf;
}
WASMJmpBuf *
wasm_exec_env_pop_jmpbuf(WASMExecEnv *exec_env)
{
WASMJmpBuf *stack_top = exec_env->jmpbuf_stack_top;
if (stack_top) {
exec_env->jmpbuf_stack_top = stack_top->prev;
return stack_top;
}
return NULL;
}
#endif

View File

@ -22,6 +22,13 @@ struct WASMInterpFrame;
typedef struct WASMCluster WASMCluster;
#endif
#ifdef OS_ENABLE_HW_BOUND_CHECK
typedef struct WASMJmpBuf {
struct WASMJmpBuf *prev;
korp_jmpbuf jmpbuf;
} WASMJmpBuf;
#endif
/* Execution environment */
typedef struct WASMExecEnv {
/* Next thread's exec env of a WASM module instance. */
@ -82,6 +89,10 @@ typedef struct WASMExecEnv {
BlockAddr block_addr_cache[BLOCK_ADDR_CACHE_SIZE][BLOCK_ADDR_CONFLICT_SIZE];
#endif
#ifdef OS_ENABLE_HW_BOUND_CHECK
WASMJmpBuf *jmpbuf_stack_top;
#endif
/* The WASM stack size */
uint32 wasm_stack_size;
@ -207,6 +218,14 @@ void
wasm_exec_env_set_thread_arg(WASMExecEnv *exec_env, void *thread_arg);
#endif
#ifdef OS_ENABLE_HW_BOUND_CHECK
void
wasm_exec_env_push_jmpbuf(WASMExecEnv *exec_env, WASMJmpBuf *jmpbuf);
WASMJmpBuf *
wasm_exec_env_pop_jmpbuf(WASMExecEnv *exec_env);
#endif
#ifdef __cplusplus
}
#endif

View File

@ -120,9 +120,24 @@ wasm_runtime_env_init()
goto fail5;
}
#endif
#if WASM_ENABLE_AOT != 0
#ifdef OS_ENABLE_HW_BOUND_CHECK
if (!aot_signal_init()) {
goto fail6;
}
#endif
#endif
return true;
#if WASM_ENABLE_AOT != 0
#ifdef OS_ENABLE_HW_BOUND_CHECK
fail6:
#endif
#endif
#if WASM_ENABLE_THREAD_MGR != 0
thread_manager_destroy();
fail5:
#endif
#if WASM_ENABLE_SHARED_MEMORY
@ -170,6 +185,12 @@ wasm_runtime_init()
void
wasm_runtime_destroy()
{
#if WASM_ENABLE_AOT != 0
#ifdef OS_ENABLE_HW_BOUND_CHECK
aot_signal_destroy();
#endif
#endif
/* runtime env destroy */
#if WASM_ENABLE_MULTI_MODULE
wasm_runtime_destroy_loading_module_list();

View File

@ -398,7 +398,8 @@ aot_compile_op_call(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
aot_func = func_ctxes[func_idx - import_func_count]->aot_func;
callee_cell_num = aot_func->param_cell_num + aot_func->local_cell_num + 1;
if (!check_stack_boundary(comp_ctx, func_ctx, callee_cell_num))
if (comp_ctx->enable_bound_check
&& !check_stack_boundary(comp_ctx, func_ctx, callee_cell_num))
goto fail;
/* Call the function */

View File

@ -75,18 +75,13 @@ check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 offset, uint32 bytes)
{
LLVMValueRef offset_const = I32_CONST(offset);
LLVMValueRef bytes_const = I32_CONST(bytes);
LLVMValueRef bytes64_const = I64_CONST(bytes);
LLVMValueRef heap_base_offset = func_ctx->heap_base_offset;
LLVMValueRef addr, maddr, offset1, offset2, cmp;
LLVMValueRef mem_base_addr, mem_check_bound, total_mem_size;
LLVMValueRef addr, maddr, offset1, cmp, cmp1, cmp2;
LLVMValueRef mem_base_addr, mem_check_bound;
LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
LLVMBasicBlockRef check_succ, check_mem_space;
LLVMBasicBlockRef check_succ;
AOTValue *aot_value;
CHECK_LLVM_CONST(offset_const);
CHECK_LLVM_CONST(bytes_const);
CHECK_LLVM_CONST(bytes64_const);
/* Get memory base address and memory data size */
if (func_ctx->mem_space_unchanged) {
@ -104,21 +99,20 @@ check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
aot_value = func_ctx->block_stack.block_list_end->value_stack.value_list_end;
POP_I32(addr);
/* offset1 = offset + addr; */
BUILD_OP(Add, offset_const, addr, offset1, "offset1");
/* return address directly if constant offset and inside memory space */
if (LLVMIsConstant(offset1)) {
uint32 mem_offset = (uint32)LLVMConstIntGetZExtValue(offset1);
if (LLVMIsConstant(addr)) {
int64 mem_offset = (int64)LLVMConstIntGetSExtValue(addr) + (int64)offset;
uint32 num_bytes_per_page = comp_ctx->comp_data->num_bytes_per_page;
uint32 init_page_count = comp_ctx->comp_data->mem_init_page_count;
uint32 mem_data_size = num_bytes_per_page * init_page_count;
int64 mem_data_size = num_bytes_per_page * init_page_count;
if (mem_data_size > 0
&& mem_offset >= 0
&& mem_offset <= mem_data_size - bytes) {
/* inside memory space */
/* maddr = mem_base_addr + moffset */
if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder,
mem_base_addr,
offset1 = I32_CONST((uint32)mem_offset);
CHECK_LLVM_CONST(offset_const);
if (!(maddr = LLVMBuildInBoundsGEP(comp_ctx->builder, mem_base_addr,
&offset1, 1, "maddr"))) {
aot_set_last_error("llvm build add failed.");
goto fail;
@ -127,51 +121,35 @@ check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
}
}
if (comp_ctx->comp_data->mem_init_page_count == 0) {
/* Get total memory size */
if (func_ctx->mem_space_unchanged) {
total_mem_size = func_ctx->total_mem_size;
}
else {
if (!(total_mem_size = LLVMBuildLoad(comp_ctx->builder,
func_ctx->total_mem_size,
"total_mem_size"))) {
aot_set_last_error("llvm build load failed.");
if (!(offset_const = LLVMBuildZExt(comp_ctx->builder, offset_const,
I64_TYPE, "offset_i64"))
|| !(addr = LLVMBuildSExt(comp_ctx->builder, addr,
I64_TYPE, "addr_i64"))) {
aot_set_last_error("llvm build extend i32 to i64 failed.");
goto fail;
}
}
ADD_BASIC_BLOCK(check_mem_space, "check_mem_space");
LLVMMoveBasicBlockAfter(check_mem_space, block_curr);
/* if total_mem_size is zero, boundary check fail */
BUILD_ICMP(LLVMIntEQ, total_mem_size, I32_ZERO, cmp,
"cmp_total_mem_size");
if (!aot_emit_exception(comp_ctx, func_ctx,
EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
true, cmp, check_mem_space)) {
goto fail;
}
SET_BUILD_POS(check_mem_space);
}
if (!(aot_value->is_local
&& aot_checked_addr_list_find(func_ctx, aot_value->local_idx,
offset, bytes))) {
/* offset2 = offset1 - heap_base_offset; */
BUILD_OP(Sub, offset1, heap_base_offset, offset2, "offset2");
/* offset1 = offset + addr; */
BUILD_OP(Add, offset_const, addr, offset1, "offset1");
if (comp_ctx->enable_bound_check
&& !(aot_value->is_local
&& aot_checked_addr_list_find(func_ctx, aot_value->local_idx,
offset, bytes))) {
if (!(mem_check_bound =
get_memory_check_bound(comp_ctx, func_ctx, bytes))) {
goto fail;
}
BUILD_ICMP(LLVMIntSGT, func_ctx->mem_bound_check_heap_base, offset1,
cmp1, "cmp1");
BUILD_ICMP(LLVMIntSGT, offset1, mem_check_bound, cmp2, "cmp2");
BUILD_OP(Or, cmp1, cmp2, cmp, "cmp");
/* Add basic blocks */
ADD_BASIC_BLOCK(check_succ, "check_succ");
LLVMMoveBasicBlockAfter(check_succ, block_curr);
/* offset2 > bound ? */
BUILD_ICMP(LLVMIntUGT, offset2, mem_check_bound, cmp, "cmp");
if (!aot_emit_exception(comp_ctx, func_ctx,
EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
true, cmp, check_succ)) {
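A C-level sketch of the boundary check this hunk emits, under the assumption that offset and addr have been extended to i64 and that mem_bound_check_N holds memory_data_size - N (see the instantiation code earlier in this commit); this is a paraphrase, not actual generated code:
#include <stdint.h>
#include <stdbool.h>

static bool access_in_bounds(int64_t heap_base,  /* mem_bound_check_heap_base */
                             int64_t bound,      /* memory_data_size - bytes */
                             uint32_t offset, int32_t addr)
{
    int64_t offset1 = (int64_t)offset + (int64_t)addr;
    /* cmp1: heap_base > offset1, cmp2: offset1 > bound; trap on either */
    return !(heap_base > offset1 || offset1 > bound);
}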

View File

@ -182,29 +182,6 @@ create_memory_info(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
}
}
/* Load total memory size */
offset = I32_CONST(offsetof(AOTModuleInstance, total_mem_size));
if (!(func_ctx->total_mem_size =
LLVMBuildInBoundsGEP(comp_ctx->builder, func_ctx->aot_inst,
&offset, 1, "bound_check_1byte_offset"))) {
aot_set_last_error("llvm build in bounds gep failed");
return false;
}
if (!(func_ctx->total_mem_size =
LLVMBuildBitCast(comp_ctx->builder, func_ctx->total_mem_size,
INT32_PTR_TYPE, "bound_check_1byte_ptr"))) {
aot_set_last_error("llvm build bit cast failed");
return false;
}
if (mem_space_unchanged) {
if (!(func_ctx->total_mem_size =
LLVMBuildLoad(comp_ctx->builder, func_ctx->total_mem_size,
"bound_check_1byte"))) {
aot_set_last_error("llvm build load failed");
return false;
}
}
/* Load memory bound check constants */
offset = I32_CONST(offsetof(AOTModuleInstance, mem_bound_check_1byte));
if (!(func_ctx->mem_bound_check_1byte =
@ -215,7 +192,7 @@ create_memory_info(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
}
if (!(func_ctx->mem_bound_check_1byte =
LLVMBuildBitCast(comp_ctx->builder, func_ctx->mem_bound_check_1byte,
INT32_PTR_TYPE, "bound_check_1byte_ptr"))) {
INT64_PTR_TYPE, "bound_check_1byte_ptr"))) {
aot_set_last_error("llvm build bit cast failed");
return false;
}
@ -237,7 +214,7 @@ create_memory_info(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
}
if (!(func_ctx->mem_bound_check_2bytes =
LLVMBuildBitCast(comp_ctx->builder, func_ctx->mem_bound_check_2bytes,
INT32_PTR_TYPE, "bound_check_2bytes_ptr"))) {
INT64_PTR_TYPE, "bound_check_2bytes_ptr"))) {
aot_set_last_error("llvm build bit cast failed");
return false;
}
@ -259,7 +236,7 @@ create_memory_info(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
}
if (!(func_ctx->mem_bound_check_4bytes =
LLVMBuildBitCast(comp_ctx->builder, func_ctx->mem_bound_check_4bytes,
INT32_PTR_TYPE, "bound_check_4bytes_ptr"))) {
INT64_PTR_TYPE, "bound_check_4bytes_ptr"))) {
aot_set_last_error("llvm build bit cast failed");
return false;
}
@ -281,7 +258,7 @@ create_memory_info(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
}
if (!(func_ctx->mem_bound_check_8bytes =
LLVMBuildBitCast(comp_ctx->builder, func_ctx->mem_bound_check_8bytes,
INT32_PTR_TYPE, "bound_check_8bytes_ptr"))) {
INT64_PTR_TYPE, "bound_check_8bytes_ptr"))) {
aot_set_last_error("llvm build bit cast failed");
return false;
}
@ -294,23 +271,23 @@ create_memory_info(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
}
}
/* Load heap base offset */
offset = I32_CONST(offsetof(AOTModuleInstance, heap_base_offset));
if (!(func_ctx->heap_base_offset =
/* Load bound_check_heap_base */
offset = I32_CONST(offsetof(AOTModuleInstance, mem_bound_check_heap_base));
if (!(func_ctx->mem_bound_check_heap_base =
LLVMBuildInBoundsGEP(comp_ctx->builder, func_ctx->aot_inst,
&offset, 1, "heap_base_offset_offset"))) {
&offset, 1, "bound_check_heap_base_offset"))) {
aot_set_last_error("llvm build in bounds gep failed");
return false;
}
if (!(func_ctx->heap_base_offset =
LLVMBuildBitCast(comp_ctx->builder, func_ctx->heap_base_offset,
INT32_PTR_TYPE, "heap_base_offset_tmp"))) {
if (!(func_ctx->mem_bound_check_heap_base =
LLVMBuildBitCast(comp_ctx->builder, func_ctx->mem_bound_check_heap_base,
INT64_PTR_TYPE, "bound_check_heap_base_tmp"))) {
aot_set_last_error("llvm build bit cast failed");
return false;
}
if (!(func_ctx->heap_base_offset =
LLVMBuildLoad(comp_ctx->builder, func_ctx->heap_base_offset,
"heap_base_offset"))) {
if (!(func_ctx->mem_bound_check_heap_base =
LLVMBuildLoad(comp_ctx->builder, func_ctx->mem_bound_check_heap_base,
"bound_check_heap_base"))) {
aot_set_last_error("llvm build load failed");
return false;
}
@ -936,6 +913,11 @@ aot_create_comp_context(AOTCompData *comp_data,
comp_ctx->is_jit_mode = true;
comp_ctx->target_machine =
LLVMGetExecutionEngineTargetMachine(comp_ctx->exec_engine);
#ifndef OS_ENABLE_HW_BOUND_CHECK
comp_ctx->enable_bound_check = true;
#else
comp_ctx->enable_bound_check = false;
#endif
}
else {
/* Create LLVM target machine */
@ -1049,6 +1031,21 @@ aot_create_comp_context(AOTCompData *comp_data,
get_target_arch_from_triple(triple_norm, comp_ctx->target_arch,
sizeof(comp_ctx->target_arch));
if (option->bounds_checks == 1 || option->bounds_checks == 0) {
/* Set by user */
comp_ctx->enable_bound_check =
(option->bounds_checks == 1) ? true : false;
}
else {
/* Unset by user, use default value */
if (strstr(comp_ctx->target_arch, "64") && !option->is_sgx_platform) {
comp_ctx->enable_bound_check = false;
}
else {
comp_ctx->enable_bound_check = true;
}
}
os_printf("Create AoT compiler with:\n");
os_printf(" target: %s\n", comp_ctx->target_arch);
os_printf(" target cpu: %s\n", cpu);
@ -1114,14 +1111,11 @@ aot_create_comp_context(AOTCompData *comp_data,
goto fail;
}
LLVMAddBasicAliasAnalysisPass(comp_ctx->pass_mgr);
LLVMAddPromoteMemoryToRegisterPass(comp_ctx->pass_mgr);
LLVMAddInstructionCombiningPass(comp_ctx->pass_mgr);
LLVMAddCFGSimplificationPass(comp_ctx->pass_mgr);
LLVMAddJumpThreadingPass(comp_ctx->pass_mgr);
LLVMAddConstantPropagationPass(comp_ctx->pass_mgr);
LLVMAddReassociatePass(comp_ctx->pass_mgr);
LLVMAddGVNPass(comp_ctx->pass_mgr);
LLVMAddCFGSimplificationPass(comp_ctx->pass_mgr);
/* Create metadata for llvm float experimental constrained intrinsics */
if (!(comp_ctx->fp_rounding_mode =

View File

@ -105,9 +105,8 @@ typedef struct AOTFuncContext {
LLVMValueRef native_stack_bound;
LLVMValueRef last_alloca;
LLVMValueRef heap_base_offset;
LLVMValueRef mem_base_addr;
LLVMValueRef total_mem_size;
LLVMValueRef mem_bound_check_heap_base;
LLVMValueRef mem_bound_check_1byte;
LLVMValueRef mem_bound_check_2bytes;
LLVMValueRef mem_bound_check_4bytes;
@ -188,6 +187,9 @@ typedef struct AOTCompContext {
/* Bulk memory feature */
bool enable_bulk_memory;
/* Boundary check */
bool enable_bound_check;
/* Whether optimize the JITed code */
bool optimize;
@ -227,9 +229,11 @@ typedef struct AOTCompOption{
char *target_cpu;
char *cpu_features;
bool enable_bulk_memory;
bool is_sgx_platform;
uint32 opt_level;
uint32 size_level;
uint32 output_format;
uint32 bounds_checks;
} AOTCompOption, *aot_comp_option_t;
AOTCompContext *

View File

@ -40,9 +40,11 @@ typedef struct AOTCompOption{
char *target_cpu;
char *cpu_features;
bool enable_bulk_memory;
bool is_sgx_platform;
uint32_t opt_level;
uint32_t size_level;
uint32_t output_format;
uint32_t bounds_checks;
} AOTCompOption, *aot_comp_option_t;
aot_comp_context_t

View File

@ -224,11 +224,9 @@ LOAD_I16(void *addr)
#endif /* WASM_CPU_SUPPORTS_UNALIGNED_64BIT_ACCESS != 0 */
#define CHECK_MEMORY_OVERFLOW(bytes) do { \
int32 offset1 = (int32)(offset + addr); \
uint64 offset2 = (uint64)(uint32)(offset1 - heap_base_offset); \
/* if (flags != 2) \
LOG_VERBOSE("unaligned load/store, flag: %d.\n", flags); */ \
if (offset2 + LOAD_SIZE[opcode - WASM_OP_I32_LOAD] <= total_mem_size) \
int64 offset1 = (int64)(uint32)offset + (int64)(int32)addr; \
if (heap_base_offset <= offset1 \
&& offset1 <= (int64)linear_mem_size - bytes) \
/* If offset1 is in valid range, maddr must also be in valid range, \
no need to check it again. */ \
maddr = memory->memory_data + offset1; \
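The rewritten macro widens the address arithmetic to 64 bits before comparing. A worked sketch with illustrative sizes (64 KB linear memory, 16 KB app heap below the data base, so heap_base_offset is -16384):
#include <stdint.h>
#include <stdbool.h>

/* Sketch of the new check for a 4-byte access; values are illustrative. */
static bool check_4byte_access(uint32_t offset, int32_t addr)
{
    const int64_t heap_base_offset = -16384;
    const int64_t linear_mem_size  = 65536;
    /* Widening first means 0xFFFFFFF0 + 32 no longer wraps to 16 as the
     * old 32-bit sum did; the oversized access is now rejected. */
    int64_t offset1 = (int64_t)offset + (int64_t)addr;
    return heap_base_offset <= offset1
           && offset1 <= linear_mem_size - 4;
}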
@ -973,12 +971,8 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
WASMMemoryInstance *memory = module->default_memory;
int32 heap_base_offset = memory ? memory->heap_base_offset : 0;
uint32 num_bytes_per_page = memory ? memory->num_bytes_per_page : 0;
uint32 total_mem_size = memory ? num_bytes_per_page * memory->cur_page_count
- heap_base_offset : 0;
uint8 *global_data = module->global_data;
#if WASM_ENABLE_BULK_MEMORY != 0
uint32 linear_mem_size = memory ? num_bytes_per_page * memory->cur_page_count : 0;
#endif
WASMTableInstance *table = module->default_table;
WASMGlobalInstance *globals = module->globals;
uint8 opcode_IMPDEP = WASM_OP_IMPDEP;
@ -1015,12 +1009,6 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
#undef HANDLE_OPCODE
#endif
/* Size of memory load.
This starts with the first memory load operator at opcode 0x28 */
uint32 LOAD_SIZE[] = {
4, 8, 4, 8, 1, 1, 2, 2, 1, 1, 2, 2, 4, 4, /* loads */
4, 8, 4, 8, 1, 2, 1, 2, 4 }; /* stores */
#if WASM_ENABLE_LABELS_AS_VALUES == 0
while (frame_ip < frame_ip_end) {
opcode = *frame_ip++;
@ -1445,6 +1433,8 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
if ((global_idx == (uint32)aux_stack_top_global_idx)
&& (*(uint32*)(frame_sp - 1) < exec_env->aux_stack_boundary))
goto out_of_bounds;
*(int32*)global_addr = POP_I32();
break;
case VALUE_TYPE_F32:
*(int32*)global_addr = POP_I32();
break;
@ -1462,180 +1452,261 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
/* memory load instructions */
HANDLE_OP (WASM_OP_I32_LOAD):
HANDLE_OP (WASM_OP_I64_LOAD):
HANDLE_OP (WASM_OP_F32_LOAD):
HANDLE_OP (WASM_OP_F64_LOAD):
HANDLE_OP (WASM_OP_I32_LOAD8_S):
HANDLE_OP (WASM_OP_I32_LOAD8_U):
HANDLE_OP (WASM_OP_I32_LOAD16_S):
HANDLE_OP (WASM_OP_I32_LOAD16_U):
HANDLE_OP (WASM_OP_I64_LOAD8_S):
HANDLE_OP (WASM_OP_I64_LOAD8_U):
HANDLE_OP (WASM_OP_I64_LOAD16_S):
HANDLE_OP (WASM_OP_I64_LOAD16_U):
HANDLE_OP (WASM_OP_I64_LOAD32_S):
HANDLE_OP (WASM_OP_I64_LOAD32_U):
{
uint32 offset, flags, addr;
GET_OPCODE();
uint32 offset, flags;
int32 addr;
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
addr = (uint32)POP_I32();
CHECK_MEMORY_OVERFLOW();
#if WASM_ENABLE_LABELS_AS_VALUES != 0
static const void *handle_load_table[] = {
&&HANDLE_LOAD_WASM_OP_I32_LOAD,
&&HANDLE_LOAD_WASM_OP_I64_LOAD,
&&HANDLE_LOAD_WASM_OP_F32_LOAD,
&&HANDLE_LOAD_WASM_OP_F64_LOAD,
&&HANDLE_LOAD_WASM_OP_I32_LOAD8_S,
&&HANDLE_LOAD_WASM_OP_I32_LOAD8_U,
&&HANDLE_LOAD_WASM_OP_I32_LOAD16_S,
&&HANDLE_LOAD_WASM_OP_I32_LOAD16_U,
&&HANDLE_LOAD_WASM_OP_I64_LOAD8_S,
&&HANDLE_LOAD_WASM_OP_I64_LOAD8_U,
&&HANDLE_LOAD_WASM_OP_I64_LOAD16_S,
&&HANDLE_LOAD_WASM_OP_I64_LOAD16_U,
&&HANDLE_LOAD_WASM_OP_I64_LOAD32_S,
&&HANDLE_LOAD_WASM_OP_I64_LOAD32_U
};
#define HANDLE_OP_LOAD(opcode) HANDLE_LOAD_##opcode
goto *handle_load_table[opcode - WASM_OP_I32_LOAD];
#else
#define HANDLE_OP_LOAD(opcode) case opcode
switch (opcode)
#endif
{
HANDLE_OP_LOAD(WASM_OP_I32_LOAD):
PUSH_I32(LOAD_I32(maddr));
HANDLE_OP_END();
HANDLE_OP_LOAD(WASM_OP_I64_LOAD):
PUSH_I64(LOAD_I64(maddr));
HANDLE_OP_END();
HANDLE_OP_LOAD(WASM_OP_F32_LOAD):
PUSH_I32(LOAD_I32(maddr));
HANDLE_OP_END();
HANDLE_OP_LOAD(WASM_OP_F64_LOAD):
PUSH_F64(LOAD_F64(maddr));
HANDLE_OP_END();
HANDLE_OP_LOAD(WASM_OP_I32_LOAD8_S):
PUSH_I32(sign_ext_8_32(*(int8*)maddr));
HANDLE_OP_END();
HANDLE_OP_LOAD(WASM_OP_I32_LOAD8_U):
PUSH_I32((uint32)(*(uint8*)maddr));
HANDLE_OP_END();
HANDLE_OP_LOAD(WASM_OP_I32_LOAD16_S):
PUSH_I32(sign_ext_16_32(LOAD_I16(maddr)));
HANDLE_OP_END();
HANDLE_OP_LOAD(WASM_OP_I32_LOAD16_U):
PUSH_I32((uint32)(LOAD_U16(maddr)));
HANDLE_OP_END();
HANDLE_OP_LOAD(WASM_OP_I64_LOAD8_S):
PUSH_I64(sign_ext_8_64(*(int8*)maddr));
HANDLE_OP_END();
HANDLE_OP_LOAD(WASM_OP_I64_LOAD8_U):
PUSH_I64((uint64)(*(uint8*)maddr));
HANDLE_OP_END();
HANDLE_OP_LOAD(WASM_OP_I64_LOAD16_S):
PUSH_I64(sign_ext_16_64(LOAD_I16(maddr)));
HANDLE_OP_END();
HANDLE_OP_LOAD(WASM_OP_I64_LOAD16_U):
PUSH_I64((uint64)(LOAD_U16(maddr)));
HANDLE_OP_END();
HANDLE_OP_LOAD(WASM_OP_I64_LOAD32_S):
PUSH_I64(sign_ext_32_64(LOAD_I32(maddr)));
HANDLE_OP_END();
HANDLE_OP_LOAD(WASM_OP_I64_LOAD32_U):
PUSH_I64((uint64)(LOAD_U32(maddr)));
HANDLE_OP_END();
}
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(4);
PUSH_I32(LOAD_I32(maddr));
(void)flags;
HANDLE_OP_END ();
HANDLE_OP_END();
}
HANDLE_OP (WASM_OP_I64_LOAD):
HANDLE_OP (WASM_OP_F64_LOAD):
{
uint32 offset, flags;
int32 addr;
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(8);
PUSH_I64(LOAD_I64(maddr));
(void)flags;
HANDLE_OP_END();
}
HANDLE_OP (WASM_OP_I32_LOAD8_S):
{
uint32 offset, flags;
int32 addr;
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(1);
PUSH_I32(sign_ext_8_32(*(int8*)maddr));
(void)flags;
HANDLE_OP_END();
}
HANDLE_OP (WASM_OP_I32_LOAD8_U):
{
uint32 offset, flags;
int32 addr;
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(1);
PUSH_I32((uint32)(*(uint8*)maddr));
(void)flags;
HANDLE_OP_END();
}
HANDLE_OP (WASM_OP_I32_LOAD16_S):
{
uint32 offset, flags;
int32 addr;
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(2);
PUSH_I32(sign_ext_16_32(LOAD_I16(maddr)));
(void)flags;
HANDLE_OP_END();
}
HANDLE_OP (WASM_OP_I32_LOAD16_U):
{
uint32 offset, flags;
int32 addr;
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(2);
PUSH_I32((uint32)(LOAD_U16(maddr)));
(void)flags;
HANDLE_OP_END();
}
HANDLE_OP (WASM_OP_I64_LOAD8_S):
{
uint32 offset, flags;
int32 addr;
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(1);
PUSH_I64(sign_ext_8_64(*(int8*)maddr));
(void)flags;
HANDLE_OP_END();
}
HANDLE_OP (WASM_OP_I64_LOAD8_U):
{
uint32 offset, flags;
int32 addr;
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(1);
PUSH_I64((uint64)(*(uint8*)maddr));
(void)flags;
HANDLE_OP_END();
}
HANDLE_OP (WASM_OP_I64_LOAD16_S):
{
uint32 offset, flags;
int32 addr;
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(2);
PUSH_I64(sign_ext_16_64(LOAD_I16(maddr)));
(void)flags;
HANDLE_OP_END();
}
HANDLE_OP (WASM_OP_I64_LOAD16_U):
{
uint32 offset, flags;
int32 addr;
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(2);
PUSH_I64((uint64)(LOAD_U16(maddr)));
(void)flags;
HANDLE_OP_END();
}
HANDLE_OP (WASM_OP_I64_LOAD32_S):
{
uint32 offset, flags;
int32 addr;
opcode = *(frame_ip - 1);
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(4);
PUSH_I64(sign_ext_32_64(LOAD_I32(maddr)));
(void)flags;
HANDLE_OP_END();
}
HANDLE_OP (WASM_OP_I64_LOAD32_U):
{
uint32 offset, flags;
int32 addr;
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(4);
PUSH_I64((uint64)(LOAD_U32(maddr)));
(void)flags;
HANDLE_OP_END();
}
/* memory store instructions */
HANDLE_OP (WASM_OP_I32_STORE):
HANDLE_OP (WASM_OP_F32_STORE):
{
uint32 offset, flags, addr;
GET_OPCODE();
uint32 offset, flags;
int32 addr;
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
frame_sp--;
addr = (uint32)POP_I32();
CHECK_MEMORY_OVERFLOW();
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(4);
STORE_U32(maddr, frame_sp[1]);
(void)flags;
HANDLE_OP_END ();
}
HANDLE_OP (WASM_OP_I64_STORE):
HANDLE_OP (WASM_OP_F64_STORE):
{
uint32 offset, flags, addr;
GET_OPCODE();
uint32 offset, flags;
int32 addr;
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
frame_sp -= 2;
addr = (uint32)POP_I32();
CHECK_MEMORY_OVERFLOW();
addr = POP_I32();
CHECK_MEMORY_OVERFLOW(8);
STORE_U32(maddr, frame_sp[1]);
STORE_U32(maddr + 4, frame_sp[2]);
(void)flags;
HANDLE_OP_END ();
}
HANDLE_OP (WASM_OP_I32_STORE):
HANDLE_OP (WASM_OP_I32_STORE8):
HANDLE_OP (WASM_OP_I32_STORE16):
{
uint32 offset, flags, addr;
uint32 offset, flags;
int32 addr;
uint32 sval;
GET_OPCODE();
opcode = *(frame_ip - 1);
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
sval = (uint32)POP_I32();
addr = (uint32)POP_I32();
CHECK_MEMORY_OVERFLOW();
switch (opcode) {
case WASM_OP_I32_STORE:
STORE_U32(maddr, sval);
break;
case WASM_OP_I32_STORE8:
addr = POP_I32();
if (opcode == WASM_OP_I32_STORE8) {
CHECK_MEMORY_OVERFLOW(1);
*(uint8*)maddr = (uint8)sval;
break;
case WASM_OP_I32_STORE16:
STORE_U16(maddr, (uint16)sval);
break;
}
else {
CHECK_MEMORY_OVERFLOW(2);
STORE_U16(maddr, (uint16)sval);
}
(void)flags;
HANDLE_OP_END ();
}
HANDLE_OP (WASM_OP_I64_STORE):
HANDLE_OP (WASM_OP_I64_STORE8):
HANDLE_OP (WASM_OP_I64_STORE16):
HANDLE_OP (WASM_OP_I64_STORE32):
{
uint32 offset, flags, addr;
uint32 offset, flags;
int32 addr;
uint64 sval;
GET_OPCODE();
opcode = *(frame_ip - 1);
read_leb_uint32(frame_ip, frame_ip_end, flags);
read_leb_uint32(frame_ip, frame_ip_end, offset);
sval = (uint64)POP_I64();
addr = (uint32)POP_I32();
CHECK_MEMORY_OVERFLOW();
switch (opcode) {
case WASM_OP_I64_STORE:
STORE_I64(maddr, sval);
break;
case WASM_OP_I64_STORE8:
addr = POP_I32();
if (opcode == WASM_OP_I64_STORE8) {
CHECK_MEMORY_OVERFLOW(1);
*(uint8*)maddr = (uint8)sval;
break;
case WASM_OP_I64_STORE16:
}
else if(opcode == WASM_OP_I64_STORE16) {
CHECK_MEMORY_OVERFLOW(2);
STORE_U16(maddr, (uint16)sval);
break;
case WASM_OP_I64_STORE32:
}
else {
CHECK_MEMORY_OVERFLOW(4);
STORE_U32(maddr, (uint32)sval);
break;
}
(void)flags;
HANDLE_OP_END ();
@ -1671,11 +1742,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
PUSH_I32(prev_page_count);
/* update the memory instance ptr */
memory = module->default_memory;
total_mem_size = num_bytes_per_page * memory->cur_page_count
- heap_base_offset;
#if WASM_ENABLE_BULK_MEMORY != 0
linear_mem_size = num_bytes_per_page * memory->cur_page_count;
#endif
}
(void)reserved;

View File

@ -226,11 +226,9 @@ LOAD_I16(void *addr)
#endif /* WASM_CPU_SUPPORTS_UNALIGNED_64BIT_ACCESS != 0 */
#define CHECK_MEMORY_OVERFLOW(bytes) do { \
int32 offset1 = (int32)(offset + addr); \
uint64 offset2 = (uint64)(uint32)(offset1 - heap_base_offset); \
/* if (flags != 2) \
LOG_VERBOSE("unaligned load/store, flag: %d.\n", flags); */ \
if (offset2 + bytes <= total_mem_size) \
int64 offset1 = (int64)(uint32)offset + (int64)(int32)addr; \
if (heap_base_offset <= offset1 \
&& offset1 <= (int64)linear_mem_size - bytes) \
/* If offset1 is in valid range, maddr must also be in valid range,\
no need to check it again. */ \
maddr = memory->memory_data + offset1; \
@ -412,6 +410,8 @@ read_leb(const uint8 *buf, uint32 *p_offset, uint32 maxbits, bool sign)
p += _off; \
} while (0)
#define read_uint32(p) (p += sizeof(uint32), *(uint32 *)(p - sizeof(uint32)))
#define GET_LOCAL_INDEX_TYPE_AND_OFFSET() do { \
uint32 param_count = cur_func->param_count; \
read_leb_uint32(frame_ip, frame_ip_end, local_idx); \
@ -965,12 +965,8 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
WASMMemoryInstance *memory = module->default_memory;
int32 heap_base_offset = memory ? memory->heap_base_offset : 0;
uint32 num_bytes_per_page = memory ? memory->num_bytes_per_page : 0;
uint32 total_mem_size = memory ? num_bytes_per_page * memory->cur_page_count
- heap_base_offset : 0;
uint8 *global_data = module->global_data;
#if WASM_ENABLE_BULK_MEMORY != 0
uint32 linear_mem_size = memory ? num_bytes_per_page * memory->cur_page_count : 0;
#endif
WASMTableInstance *table = module->default_table;
WASMGlobalInstance *globals = module->globals;
uint8 opcode_IMPDEP = WASM_OP_IMPDEP;
@ -1067,9 +1063,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
#if WASM_ENABLE_THREAD_MGR != 0
CHECK_SUSPEND_FLAGS();
#endif
count = GET_OPERAND(uint32, 0);
didx = GET_OPERAND(uint32, 2);
frame_ip += 4;
count = read_uint32(frame_ip);
didx = GET_OPERAND(uint32, 0);
frame_ip += 2;
if (!(didx >= 0 && (uint32)didx < count))
didx = count;
@ -1096,9 +1092,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
CHECK_SUSPEND_FLAGS();
#endif
tidx = GET_OPERAND(int32, 0);
val = GET_OPERAND(int32, 2);
frame_ip += 4;
tidx = read_uint32(frame_ip);
val = GET_OPERAND(int32, 0);
frame_ip += 2;
if (tidx >= module->module->type_count) {
wasm_set_exception(module, "type index is overflow");
@ -1228,7 +1224,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_GET_GLOBAL):
{
global_idx = frame_lp[GET_OFFSET()];
global_idx = read_uint32(frame_ip);
addr_ret = GET_OFFSET();
bh_assert(global_idx < module->global_count);
@ -1261,7 +1257,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_SET_GLOBAL):
{
global_idx = frame_lp[GET_OFFSET()];
global_idx = read_uint32(frame_ip);
addr1 = GET_OFFSET();
bh_assert(global_idx < module->global_count);
@ -1281,6 +1277,8 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
if ((global_idx == (uint32)aux_stack_top_global_idx)
&& (frame_lp[addr1] < exec_env->aux_stack_boundary))
goto out_of_bounds;
*(int32*)global_addr = frame_lp[addr1];
break;
case VALUE_TYPE_F32:
*(int32*)global_addr = frame_lp[addr1];
break;
@ -1300,9 +1298,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_I32_LOAD):
{
uint32 offset, addr;
offset = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
offset = read_uint32(frame_ip);
addr = GET_OPERAND(uint32, 0);
frame_ip += 2;
addr_ret = GET_OFFSET();
CHECK_MEMORY_OVERFLOW(4);
frame_lp[addr_ret] = LOAD_I32(maddr);
@ -1312,9 +1310,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_I64_LOAD):
{
uint32 offset, addr;
offset = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
offset = read_uint32(frame_ip);
addr = GET_OPERAND(uint32, 0);
frame_ip += 2;
addr_ret = GET_OFFSET();
CHECK_MEMORY_OVERFLOW(8);
PUT_I64_TO_ADDR(frame_lp + addr_ret, LOAD_I64(maddr));
@ -1324,9 +1322,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_I32_LOAD8_S):
{
uint32 offset, addr;
offset = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
offset = read_uint32(frame_ip);
addr = GET_OPERAND(uint32, 0);
frame_ip += 2;
addr_ret = GET_OFFSET();
CHECK_MEMORY_OVERFLOW(1);
frame_lp[addr_ret] = sign_ext_8_32(*(int8*)maddr);
@ -1336,9 +1334,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_I32_LOAD8_U):
{
uint32 offset, addr;
offset = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
offset = read_uint32(frame_ip);
addr = GET_OPERAND(uint32, 0);
frame_ip += 2;
addr_ret = GET_OFFSET();
CHECK_MEMORY_OVERFLOW(1);
frame_lp[addr_ret] = (uint32)(*(uint8*)maddr);
@ -1348,9 +1346,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_I32_LOAD16_S):
{
uint32 offset, addr;
offset = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
offset = read_uint32(frame_ip);
addr = GET_OPERAND(uint32, 0);
frame_ip += 2;
addr_ret = GET_OFFSET();
CHECK_MEMORY_OVERFLOW(2);
frame_lp[addr_ret] = sign_ext_16_32(LOAD_I16(maddr));
@ -1360,9 +1358,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_I32_LOAD16_U):
{
uint32 offset, addr;
offset = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
offset = read_uint32(frame_ip);
addr = GET_OPERAND(uint32, 0);
frame_ip += 2;
addr_ret = GET_OFFSET();
CHECK_MEMORY_OVERFLOW(2);
frame_lp[addr_ret] = (uint32)(LOAD_U16(maddr));
@ -1372,9 +1370,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_I64_LOAD8_S):
{
uint32 offset, addr;
offset = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
offset = read_uint32(frame_ip);
addr = GET_OPERAND(uint32, 0);
frame_ip += 2;
addr_ret = GET_OFFSET();
CHECK_MEMORY_OVERFLOW(1);
*(int64 *)(frame_lp + addr_ret) = sign_ext_8_64(*(int8*)maddr);
@ -1384,9 +1382,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_I64_LOAD8_U):
{
uint32 offset, addr;
offset = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
offset = read_uint32(frame_ip);
addr = GET_OPERAND(uint32, 0);
frame_ip += 2;
addr_ret = GET_OFFSET();
CHECK_MEMORY_OVERFLOW(1);
*(int64 *)(frame_lp + addr_ret) = (uint64)(*(uint8*)maddr);
@ -1396,9 +1394,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_I64_LOAD16_S):
{
uint32 offset, addr;
offset = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
offset = read_uint32(frame_ip);
addr = GET_OPERAND(uint32, 0);
frame_ip += 2;
addr_ret = GET_OFFSET();
CHECK_MEMORY_OVERFLOW(2);
*(int64 *)(frame_lp + addr_ret) = sign_ext_16_64(LOAD_I16(maddr));
@ -1408,9 +1406,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_I64_LOAD16_U):
{
uint32 offset, addr;
offset = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
offset = read_uint32(frame_ip);
addr = GET_OPERAND(uint32, 0);
frame_ip += 2;
addr_ret = GET_OFFSET();
CHECK_MEMORY_OVERFLOW(2);
*(int64 *)(frame_lp + addr_ret) = (uint64)(LOAD_U16(maddr));
@ -1420,9 +1418,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_I64_LOAD32_S):
{
uint32 offset, addr;
offset = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
offset = read_uint32(frame_ip);
addr = GET_OPERAND(uint32, 0);
frame_ip += 2;
addr_ret = GET_OFFSET();
CHECK_MEMORY_OVERFLOW(4);
*(int64 *)(frame_lp + addr_ret) = sign_ext_32_64(LOAD_I32(maddr));
@ -1432,9 +1430,9 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (WASM_OP_I64_LOAD32_U):
{
uint32 offset, addr;
offset = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
offset = read_uint32(frame_ip);
addr = GET_OPERAND(uint32, 0);
frame_ip += 2;
addr_ret = GET_OFFSET();
CHECK_MEMORY_OVERFLOW(4);
*(int64 *)(frame_lp + addr_ret) = (uint64)(LOAD_U32(maddr));
@ -1445,10 +1443,10 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
{
uint32 offset, addr;
uint32 sval;
offset = GET_OPERAND(uint32, 0);
sval = GET_OPERAND(uint32, 2);
addr = GET_OPERAND(uint32, 4);
frame_ip += 6;
offset = read_uint32(frame_ip);
sval = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
CHECK_MEMORY_OVERFLOW(4);
STORE_U32(maddr, sval);
HANDLE_OP_END ();
@ -1458,10 +1456,10 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
{
uint32 offset, addr;
uint32 sval;
offset = GET_OPERAND(uint32, 0);
sval = GET_OPERAND(uint32, 2);
addr = GET_OPERAND(uint32, 4);
frame_ip += 6;
offset = read_uint32(frame_ip);
sval = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
CHECK_MEMORY_OVERFLOW(1);
*(uint8*)maddr = (uint8)sval;
HANDLE_OP_END ();
@ -1471,10 +1469,10 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
{
uint32 offset, addr;
uint32 sval;
offset = GET_OPERAND(uint32, 0);
sval = GET_OPERAND(uint32, 2);
addr = GET_OPERAND(uint32, 4);
frame_ip += 6;
offset = read_uint32(frame_ip);
sval = GET_OPERAND(uint32, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
CHECK_MEMORY_OVERFLOW(2);
STORE_U16(maddr, (uint16)sval);
HANDLE_OP_END ();
@ -1484,10 +1482,10 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
{
uint32 offset, addr;
uint64 sval;
offset = GET_OPERAND(uint32, 0);
sval = GET_OPERAND(uint64, 2);
addr = GET_OPERAND(uint32, 4);
frame_ip += 6;
offset = read_uint32(frame_ip);
sval = GET_OPERAND(uint64, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
CHECK_MEMORY_OVERFLOW(8);
STORE_I64(maddr, sval);
HANDLE_OP_END ();
@ -1497,10 +1495,10 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
{
uint32 offset, addr;
uint64 sval;
offset = GET_OPERAND(uint32, 0);
sval = GET_OPERAND(uint64, 2);
addr = GET_OPERAND(uint32, 4);
frame_ip += 6;
offset = read_uint32(frame_ip);
sval = GET_OPERAND(uint64, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
CHECK_MEMORY_OVERFLOW(1);
*(uint8*)maddr = (uint8)sval;
HANDLE_OP_END ();
@ -1510,10 +1508,10 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
{
uint32 offset, addr;
uint64 sval;
offset = GET_OPERAND(uint32, 0);
sval = GET_OPERAND(uint64, 2);
addr = GET_OPERAND(uint32, 4);
frame_ip += 6;
offset = read_uint32(frame_ip);
sval = GET_OPERAND(uint64, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
CHECK_MEMORY_OVERFLOW(2);
STORE_U16(maddr, (uint16)sval);
HANDLE_OP_END ();
@ -1523,10 +1521,10 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
{
uint32 offset, addr;
uint64 sval;
offset = GET_OPERAND(uint32, 0);
sval = GET_OPERAND(uint64, 2);
addr = GET_OPERAND(uint32, 4);
frame_ip += 6;
offset = read_uint32(frame_ip);
sval = GET_OPERAND(uint64, 0);
addr = GET_OPERAND(uint32, 2);
frame_ip += 4;
CHECK_MEMORY_OVERFLOW(4);
STORE_U32(maddr, (uint32)sval);
HANDLE_OP_END ();
@ -1563,11 +1561,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
frame_lp[addr_ret] = prev_page_count;
/* update the memory instance ptr */
memory = module->default_memory;
total_mem_size = num_bytes_per_page * memory->cur_page_count
- heap_base_offset;
#if WASM_ENABLE_BULK_MEMORY != 0
linear_mem_size = num_bytes_per_page * memory->cur_page_count;
#endif
}
(void)reserved;
@ -2335,23 +2329,13 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
HANDLE_OP (EXT_OP_COPY_STACK_TOP):
addr1 = GET_OFFSET();
addr2 = GET_OFFSET();
#if defined(BUILD_TARGET_X86_32)
bh_memcpy_s(frame_lp + addr2, sizeof(int32),
frame_lp + addr1, sizeof(int32));
#else
frame_lp[addr2] = frame_lp[addr1];
#endif
HANDLE_OP_END ();
HANDLE_OP (EXT_OP_COPY_STACK_TOP_I64):
addr1 = GET_OFFSET();
addr2 = GET_OFFSET();
#if defined(BUILD_TARGET_X86_32)
bh_memcpy_s(frame_lp + addr2, sizeof(int64),
frame_lp + addr1, sizeof(int64));
#else
*(float64*)(frame_lp + addr2) = *(float64*)(frame_lp + addr1);
#endif
*(uint64*)(frame_lp + addr2) = *(uint64*)(frame_lp + addr1);
HANDLE_OP_END ();
HANDLE_OP (WASM_OP_SET_LOCAL):
@ -2441,8 +2425,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
uint64 bytes, offset, seg_len;
uint8* data;
segment = GET_OPERAND(uint32, 0);
frame_ip += 2;
segment = read_uint32(frame_ip);
bytes = (uint64)POP_I32();
offset = (uint64)POP_I32();
@ -2463,8 +2446,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
{
uint32 segment;
segment = GET_OPERAND(uint32, 0);
frame_ip += 2;
segment = read_uint32(frame_ip);
module->module->data_segments[segment]->data_length = 0;
@ -2521,7 +2503,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
#if WASM_ENABLE_THREAD_MGR != 0
CHECK_SUSPEND_FLAGS();
#endif
fidx = frame_lp[GET_OFFSET()];
fidx = read_uint32(frame_ip);
#if WASM_ENABLE_MULTI_MODULE != 0
if (fidx >= module->function_count) {
wasm_set_exception(module, "unknown function");

View File

@ -3889,13 +3889,13 @@ wasm_loader_check_br(WASMLoaderContext *ctx, uint32 depth,
LOG_OP("%d\t", byte); \
} while (0)
#define emit_leb() do { \
wasm_loader_emit_leb(loader_ctx, p_org, p); \
#define emit_uint32(ctx, value) do { \
wasm_loader_emit_uint32(ctx, value); \
LOG_OP("%d\t", value); \
} while (0)
#define emit_const(value) do { \
GET_CONST_OFFSET(VALUE_TYPE_I32, value); \
emit_operand(loader_ctx, operand_offset); \
#define emit_leb() do { \
wasm_loader_emit_leb(loader_ctx, p_org, p); \
} while (0)
static bool
@ -3930,6 +3930,17 @@ wasm_loader_ctx_reinit(WASMLoaderContext *ctx)
return true;
}
static void
wasm_loader_emit_uint32(WASMLoaderContext *ctx, uint32 value)
{
if (ctx->p_code_compiled) {
*(uint32*)(ctx->p_code_compiled) = value;
ctx->p_code_compiled += sizeof(uint32);
}
else
ctx->code_compiled_size += sizeof(uint32);
}
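The loader now writes 4-byte immediates straight into the pre-compiled code stream instead of routing them through the constant table (the old emit_const path), and the interpreter consumes them with read_uint32(). A minimal sketch of that producer/consumer pairing, with buffer handling simplified:
#include <stdint.h>
#include <string.h>

/* Sketch: emit a 32-bit immediate at load time, read it back at run time.
 * The real code keeps the loader's two-pass size/emit scheme and the
 * read_uint32() macro in wasm_interp_fast.c. */
static uint8_t *emit_u32(uint8_t *code, uint32_t value)
{
    memcpy(code, &value, sizeof(value));    /* alignment-safe store */
    return code + sizeof(value);
}

static uint32_t read_u32(const uint8_t **ip)
{
    uint32_t value;
    memcpy(&value, *ip, sizeof(value));
    *ip += sizeof(value);
    return value;
}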
static void
wasm_loader_emit_int16(WASMLoaderContext *ctx, int16 value)
{
@ -5036,7 +5047,7 @@ re_scan:
read_leb_uint32(p, p_end, count);
#if WASM_ENABLE_FAST_INTERP != 0
emit_const(count);
emit_uint32(loader_ctx, count);
#endif
POP_I32();
@ -5097,7 +5108,7 @@ re_scan:
read_leb_uint32(p, p_end, func_idx);
#if WASM_ENABLE_FAST_INTERP != 0
// we need to emit func_idx before arguments
emit_const(func_idx);
emit_uint32(loader_ctx, func_idx);
#endif
if (func_idx >= module->import_function_count + module->function_count) {
@ -5150,7 +5161,7 @@ re_scan:
read_leb_uint32(p, p_end, type_idx);
#if WASM_ENABLE_FAST_INTERP != 0
// we need to emit func_idx before arguments
emit_const(type_idx);
emit_uint32(loader_ctx, type_idx);
#endif
/* reserved byte 0x00 */
@ -5476,7 +5487,7 @@ re_scan:
PUSH_TYPE(global_type);
#if WASM_ENABLE_FAST_INTERP != 0
emit_const(global_idx);
emit_uint32(loader_ctx, global_idx);
PUSH_OFFSET_TYPE(global_type);
#endif
break;
@ -5513,7 +5524,7 @@ re_scan:
POP_TYPE(global_type);
#if WASM_ENABLE_FAST_INTERP != 0
emit_const(global_idx);
emit_uint32(loader_ctx, global_idx);
POP_OFFSET_TYPE(global_type);
#endif
break;
@ -5572,7 +5583,7 @@ re_scan:
goto fail;
}
#if WASM_ENABLE_FAST_INTERP != 0
emit_const(mem_offset);
emit_uint32(loader_ctx, mem_offset);
#endif
switch (opcode)
{
@ -5951,7 +5962,7 @@ re_scan:
case WASM_OP_MEMORY_INIT:
read_leb_uint32(p, p_end, segment_index);
#if WASM_ENABLE_FAST_INTERP != 0
emit_const(segment_index);
emit_uint32(loader_ctx, segment_index);
#endif
if (module->import_memory_count == 0 && module->memory_count == 0)
goto fail_unknown_memory;
@ -5977,7 +5988,7 @@ re_scan:
case WASM_OP_DATA_DROP:
read_leb_uint32(p, p_end, segment_index);
#if WASM_ENABLE_FAST_INTERP != 0
emit_const(segment_index);
emit_uint32(loader_ctx, segment_index);
#endif
if (segment_index >= module->data_seg_count) {
set_error_buf(error_buf, error_buf_size,

View File

@ -2881,13 +2881,13 @@ wasm_loader_check_br(WASMLoaderContext *ctx, uint32 depth,
LOG_OP("%d\t", byte); \
} while (0)
#define emit_leb() do { \
wasm_loader_emit_leb(loader_ctx, p_org, p); \
#define emit_uint32(ctx, value) do { \
wasm_loader_emit_uint32(ctx, value); \
LOG_OP("%d\t", value); \
} while (0)
#define emit_const(value) do { \
GET_CONST_OFFSET(VALUE_TYPE_I32, value); \
emit_operand(loader_ctx, operand_offset); \
#define emit_leb() do { \
wasm_loader_emit_leb(loader_ctx, p_org, p); \
} while (0)
static bool
@ -2922,6 +2922,17 @@ wasm_loader_ctx_reinit(WASMLoaderContext *ctx)
return true;
}
static void
wasm_loader_emit_uint32(WASMLoaderContext *ctx, uint32 value)
{
if (ctx->p_code_compiled) {
*(uint32*)(ctx->p_code_compiled) = value;
ctx->p_code_compiled += sizeof(uint32);
}
else
ctx->code_compiled_size += sizeof(uint32);
}
static void
wasm_loader_emit_int16(WASMLoaderContext *ctx, int16 value)
{
@ -3968,7 +3979,7 @@ re_scan:
read_leb_uint32(p, p_end, count);
#if WASM_ENABLE_FAST_INTERP != 0
emit_const(count);
emit_uint32(loader_ctx, count);
#endif
POP_I32();
@ -4025,7 +4036,7 @@ re_scan:
read_leb_uint32(p, p_end, func_idx);
#if WASM_ENABLE_FAST_INTERP != 0
// we need to emit func_idx before arguments
emit_const(func_idx);
emit_uint32(loader_ctx, func_idx);
#endif
bh_assert(func_idx < module->import_function_count
@ -4069,7 +4080,7 @@ re_scan:
read_leb_uint32(p, p_end, type_idx);
#if WASM_ENABLE_FAST_INTERP != 0
// we need to emit func_idx before arguments
emit_const(type_idx);
emit_uint32(loader_ctx, type_idx);
#endif
/* reserved byte 0x00 */
@ -4369,7 +4380,7 @@ re_scan:
PUSH_TYPE(global_type);
#if WASM_ENABLE_FAST_INTERP != 0
emit_const(global_idx);
emit_uint32(loader_ctx, global_idx);
PUSH_OFFSET_TYPE(global_type);
#endif
break;
@ -4396,7 +4407,7 @@ re_scan:
POP_TYPE(global_type);
#if WASM_ENABLE_FAST_INTERP != 0
emit_const(global_idx);
emit_uint32(loader_ctx, global_idx);
POP_OFFSET_TYPE(global_type);
#endif
(void)is_mutable;
@ -4452,7 +4463,7 @@ re_scan:
read_leb_uint32(p, p_end, align); /* align */
read_leb_uint32(p, p_end, mem_offset); /* offset */
#if WASM_ENABLE_FAST_INTERP != 0
emit_const(mem_offset);
emit_uint32(loader_ctx, mem_offset);
#endif
switch (opcode)
{
@ -4823,7 +4834,7 @@ re_scan:
case WASM_OP_MEMORY_INIT:
read_leb_uint32(p, p_end, segment_index);
#if WASM_ENABLE_FAST_INTERP != 0
emit_const(segment_index);
emit_uint32(loader_ctx, segment_index);
#endif
bh_assert(module->import_memory_count
+ module->memory_count > 0);
@ -4841,7 +4852,7 @@ re_scan:
case WASM_OP_DATA_DROP:
read_leb_uint32(p, p_end, segment_index);
#if WASM_ENABLE_FAST_INTERP != 0
emit_const(segment_index);
emit_uint32(loader_ctx, segment_index);
#endif
bh_assert(segment_index < module->data_seg_count);
bh_assert(module->data_seg_count1 > 0);

View File

@ -483,6 +483,7 @@ pthread_start_routine(void *arg)
return NULL;
}
wasm_exec_env_set_thread_info(exec_env);
argv[0] = addr_native_to_app(routine_args->arg);
if(!wasm_runtime_call_indirect(exec_env,

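The argument handed to the wasm start routine must be an app-side offset into linear memory rather than a raw native pointer, which is what addr_native_to_app provides; conceptually the translation is just the distance from the linear-memory base. A hedged sketch (the memory_data parameter name is illustrative, not the runtime's actual field):

#include <stdint.h>

/* Sketch: translate a native pointer inside linear memory into the
 * 32-bit app address (offset) that wasm code can dereference. */
static uint32_t native_to_app(uint8_t *memory_data, void *native_ptr)
{
    return (uint32_t)((uint8_t *)native_ptr - memory_data);
}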
View File

@ -42,19 +42,21 @@ os_free(void *ptr)
}
void *
os_mmap(void *hint, unsigned int size, int prot, int flags)
os_mmap(void *hint, size_t size, int prot, int flags)
{
return BH_MALLOC(size);
if ((uint64)size >= UINT32_MAX)
return NULL;
return BH_MALLOC((uint32)size);
}
void
os_munmap(void *addr, uint32 size)
os_munmap(void *addr, size_t size)
{
return BH_FREE(addr);
}
int
os_mprotect(void *addr, uint32 size, int prot)
os_mprotect(void *addr, size_t size, int prot)
{
return 0;
}

View File

@ -48,6 +48,38 @@ typedef pthread_mutex_t korp_mutex;
typedef pthread_cond_t korp_cond;
typedef pthread_t korp_thread;
#if WASM_DISABLE_HW_BOUND_CHECK == 0
#if defined(BUILD_TARGET_X86_64) \
|| defined(BUILD_TARGET_AMD_64) \
|| defined(BUILD_TARGET_AARCH64)
#include <signal.h>
#include <setjmp.h>
#define OS_ENABLE_HW_BOUND_CHECK
#define os_thread_local_attribute __thread
typedef jmp_buf korp_jmpbuf;
#define os_setjmp setjmp
#define os_longjmp longjmp
#define os_alloca alloca
#define os_getpagesize getpagesize
typedef void (*os_signal_handler)(void *sig_addr);
int os_signal_init(os_signal_handler handler);
void os_signal_destroy();
void os_signal_unmask();
void os_sigreturn();
#endif /* end of BUILD_TARGET_X86_64/AMD_64/AARCH64 */
#endif /* end of WASM_DISABLE_HW_BOUND_CHECK */
#ifdef __cplusplus
}
#endif

View File

@ -6,7 +6,7 @@
#include "platform_api_vmcore.h"
void *
os_mmap(void *hint, uint32 size, int prot, int flags)
os_mmap(void *hint, size_t size, int prot, int flags)
{
int map_prot = PROT_NONE;
int map_flags = MAP_ANONYMOUS | MAP_PRIVATE;
@ -17,7 +17,12 @@ os_mmap(void *hint, uint32 size, int prot, int flags)
page_size = getpagesize();
request_size = (size + page_size - 1) & ~(page_size - 1);
if (request_size >= UINT32_MAX)
if ((size_t)request_size < size)
/* integer overflow */
return NULL;
if (request_size > 16 * (uint64)UINT32_MAX)
/* At most 16 G is allowed */
return NULL;
if (prot & MMAP_PROT_READ)
@ -53,7 +58,7 @@ os_mmap(void *hint, uint32 size, int prot, int flags)
}
void
os_munmap(void *addr, uint32 size)
os_munmap(void *addr, size_t size)
{
uint64 page_size = getpagesize();
uint64 request_size = (size + page_size - 1) & ~(page_size - 1);
@ -67,9 +72,11 @@ os_munmap(void *addr, uint32 size)
}
int
os_mprotect(void *addr, uint32 size, int prot)
os_mprotect(void *addr, size_t size, int prot)
{
int map_prot = PROT_NONE;
uint64 page_size = getpagesize();
uint64 request_size = (size + page_size - 1) & ~(page_size - 1);
if (!addr)
return 0;
@ -83,7 +90,7 @@ os_mprotect(void *addr, uint32 size, int prot)
if (prot & MMAP_PROT_EXEC)
map_prot |= PROT_EXEC;
return mprotect(addr, size, map_prot);
return mprotect(addr, request_size, map_prot);
}
void

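os_mmap, os_munmap and os_mprotect now round the request up to whole pages with (size + page_size - 1) & ~(page_size - 1), which is valid because page_size is a power of two, and the added (size_t)request_size < size test rejects the wrap-around case when size is near the type's maximum. A small standalone example of the rounding (assumes 4 KB pages purely for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t page_size = 4096;                 /* assume 4 KB pages */
    uint64_t sizes[] = { 1, 4096, 4097, 32 * 1024 + 1 };

    for (int i = 0; i < 4; i++) {
        uint64_t size = sizes[i];
        /* round up to the next page boundary (page_size is a power of two) */
        uint64_t request_size = (size + page_size - 1) & ~(page_size - 1);
        printf("size %8llu -> request_size %8llu\n",
               (unsigned long long)size, (unsigned long long)request_size);
    }
    return 0;   /* prints 1->4096, 4096->4096, 4097->8192, 32769->36864 */
}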
View File

@ -21,7 +21,7 @@ static void *os_thread_wrapper(void *arg)
thread_wrapper_arg * targ = arg;
thread_start_routine_t start_func = targ->start;
void *thread_arg = targ->arg;
printf("THREAD CREATE %p\n", &targ);
os_printf("THREAD CREATED %p\n", &targ);
targ->stack = (void *)((uintptr_t)(&arg) & (uintptr_t)~0xfff);
BH_FREE(targ);
start_func(thread_arg);
@ -41,8 +41,8 @@ int os_thread_create_with_prio(korp_tid *tid, thread_start_routine_t start,
pthread_attr_init(&tattr);
pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_JOINABLE);
if (pthread_attr_setstacksize(&tattr, stack_size) != 0) {
printf("Invalid thread stack size %u. Min stack size on Linux = %u",
stack_size, PTHREAD_STACK_MIN);
os_printf("Invalid thread stack size %u. Min stack size on Linux = %u",
stack_size, PTHREAD_STACK_MIN);
pthread_attr_destroy(&tattr);
return BHT_ERROR;
}
@ -123,7 +123,7 @@ int os_mutex_lock(korp_mutex *mutex)
assert(mutex);
ret = pthread_mutex_lock(mutex);
if (0 != ret) {
printf("vm mutex lock failed (ret=%d)!\n", ret);
os_printf("vm mutex lock failed (ret=%d)!\n", ret);
exit(-1);
}
return ret;
@ -140,7 +140,7 @@ int os_mutex_unlock(korp_mutex *mutex)
assert(mutex);
ret = pthread_mutex_unlock(mutex);
if (0 != ret) {
printf("vm mutex unlock failed (ret=%d)!\n", ret);
os_printf("vm mutex unlock failed (ret=%d)!\n", ret);
exit(-1);
}
return ret;
@ -241,15 +241,16 @@ uint8 *os_thread_get_stack_boundary()
pthread_attr_t attr;
uint8 *addr = NULL;
size_t stack_size, guard_size;
int page_size = getpagesize();
#ifdef __linux__
if (pthread_getattr_np(self, &attr) == 0) {
pthread_attr_getstack(&attr, (void**)&addr, &stack_size);
pthread_attr_getguardsize(&attr, &guard_size);
pthread_attr_destroy(&attr);
if (guard_size < 4 * 1024)
/* Reserved 4 KB guard size at least for safety */
guard_size = 4 * 1024;
if (guard_size < (size_t)page_size)
/* Reserve at least one guard page for safety */
guard_size = (size_t)page_size;
addr += guard_size;
}
(void)stack_size;
@ -257,10 +258,150 @@ uint8 *os_thread_get_stack_boundary()
if ((addr = (uint8*)pthread_get_stackaddr_np(self))) {
stack_size = pthread_get_stacksize_np(self);
addr -= stack_size;
/* Reserved 4 KB guard size at least for safety */
addr += 4 * 1024;
/* Reserve at least one guard page for safety */
addr += page_size;
}
#endif
return addr;
}
}
#ifdef OS_ENABLE_HW_BOUND_CHECK
#define SIG_ALT_STACK_SIZE (32 * 1024)
/* The signal alternate stack base addr */
static uint8 *sigalt_stack_base_addr;
/* The signal handler passed to os_signal_init() */
static os_signal_handler signal_handler;
static void
mask_signals(int how)
{
sigset_t set;
sigemptyset(&set);
sigaddset(&set, SIGSEGV);
sigaddset(&set, SIGBUS);
pthread_sigmask(how, &set, NULL);
}
__attribute__((noreturn)) static void
signal_callback(int sig_num, siginfo_t *sig_info, void *sig_ucontext)
{
int i;
void *sig_addr = sig_info->si_addr;
mask_signals(SIG_BLOCK);
if (signal_handler
&& (sig_num == SIGSEGV || sig_num == SIGBUS)) {
signal_handler(sig_addr);
}
/* signal unhandled */
switch (sig_num) {
case SIGSEGV:
os_printf("unhandled SIGSEGV, si_addr: %p\n", sig_addr);
break;
case SIGBUS:
os_printf("unhandled SIGBUS, si_addr: %p\n", sig_addr);
break;
default:
os_printf("unhandle signal %d, si_addr: %p\n",
sig_num, sig_addr);
break;
}
/* divide by 0 to make it abort */
i = os_printf(" ");
os_printf("%d\n", i / (i - 1));
/* access NULL ptr to make it abort */
os_printf("%d\n", *(uint32*)(uintptr_t)(i - 1));
exit(1);
}
int
os_signal_init(os_signal_handler handler)
{
int ret = -1;
struct sigaction sig_act;
stack_t sigalt_stack_info;
uint32 map_size = SIG_ALT_STACK_SIZE;
uint8 *map_addr;
/* Initialize memory for signal alternate stack */
if (!(map_addr = os_mmap(NULL, map_size,
MMAP_PROT_READ | MMAP_PROT_WRITE,
MMAP_MAP_NONE))) {
os_printf("Failed to mmap memory for alternate stack\n");
return -1;
}
/* Initialize signal alternate stack */
memset(map_addr, 0, map_size);
sigalt_stack_info.ss_sp = map_addr;
sigalt_stack_info.ss_size = map_size;
sigalt_stack_info.ss_flags = 0;
if ((ret = sigaltstack(&sigalt_stack_info, NULL)) != 0) {
goto fail1;
}
/* Install signal handler */
sig_act.sa_sigaction = signal_callback;
sig_act.sa_flags = SA_SIGINFO | SA_ONSTACK | SA_NODEFER;
sigemptyset(&sig_act.sa_mask);
if ((ret = sigaction(SIGSEGV, &sig_act, NULL)) != 0
|| (ret = sigaction(SIGBUS, &sig_act, NULL)) != 0) {
goto fail2;
}
sigalt_stack_base_addr = map_addr;
signal_handler = handler;
return 0;
fail2:
memset(&sigalt_stack_info, 0, sizeof(stack_t));
sigalt_stack_info.ss_flags = SS_DISABLE;
sigalt_stack_info.ss_size = map_size;
sigaltstack(&sigalt_stack_info, NULL);
fail1:
os_munmap(map_addr, map_size);
return ret;
}
void
os_signal_destroy()
{
stack_t sigalt_stack_info;
/* Disable signal alternate stack */
memset(&sigalt_stack_info, 0, sizeof(stack_t));
sigalt_stack_info.ss_flags = SS_DISABLE;
sigalt_stack_info.ss_size = SIG_ALT_STACK_SIZE;
sigaltstack(&sigalt_stack_info, NULL);
os_munmap(sigalt_stack_base_addr, SIG_ALT_STACK_SIZE);
}
void
os_signal_unmask()
{
mask_signals(SIG_UNBLOCK);
}
void
os_sigreturn()
{
#if defined(__APPLE__)
#define UC_RESET_ALT_STACK 0x80000000
extern int __sigreturn(void *, int);
/* It's necessary to call __sigreturn to restore the sigaltstack state
after exiting the signal handler. */
__sigreturn(NULL, UC_RESET_ALT_STACK);
#endif
}
#endif /* end of OS_ENABLE_HW_BOUND_CHECK */
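Taken together, these primitives let the runtime turn a hardware trap on the reserved 8 GB region into an ordinary wasm exception: install a handler with os_signal_init, record a per-thread os_setjmp point before entering wasm code, and os_longjmp back from the handler when the faulting address lies inside the guarded mapping. A minimal sketch under those assumptions (everything outside the os_* API shown above, such as region_base and call_wasm_guarded, is illustrative):

/* Sketch only: converting a hardware trap into a runtime exception.
 * Assumes the platform header above (korp_jmpbuf, os_setjmp, os_longjmp,
 * os_signal_unmask, os_sigreturn) is in scope. */
static os_thread_local_attribute korp_jmpbuf cur_jmpbuf;
static uint8 *region_base;          /* start of the 8 GB reserved mapping */
static uint64 region_size = 8ULL * 1024 * 1024 * 1024;

static void runtime_signal_handler(void *sig_addr)
{
    uint8 *addr = (uint8 *)sig_addr;
    if (addr >= region_base && addr < region_base + region_size) {
        /* out-of-bounds wasm memory access: unwind to the call site */
        os_longjmp(cur_jmpbuf, 1);
    }
    /* otherwise return: signal_callback() reports the signal as unhandled */
}

static int call_wasm_guarded(void (*wasm_func)(void))
{
    if (os_setjmp(cur_jmpbuf) == 0) {
        wasm_func();
        return 0;                   /* finished normally */
    }
    /* longjmp'ed out of the handler: SIGSEGV/SIGBUS were blocked there */
    os_signal_unmask();
    os_sigreturn();                 /* restore sigaltstack state on macOS */
    return -1;                      /* report "out of bounds memory access" */
}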

View File

@ -51,6 +51,38 @@ typedef pthread_t korp_thread;
#define os_printf printf
#define os_vprintf vprintf
#if WASM_DISABLE_HW_BOUND_CHECK == 0
#if defined(BUILD_TARGET_X86_64) \
|| defined(BUILD_TARGET_AMD_64) \
|| defined(BUILD_TARGET_AARCH64)
#include <signal.h>
#include <setjmp.h>
#define OS_ENABLE_HW_BOUND_CHECK
#define os_thread_local_attribute __thread
typedef jmp_buf korp_jmpbuf;
#define os_setjmp setjmp
#define os_longjmp longjmp
#define os_alloca alloca
#define os_getpagesize getpagesize
typedef void (*os_signal_handler)(void *sig_addr);
int os_signal_init(os_signal_handler handler);
void os_signal_destroy();
void os_signal_unmask();
void os_sigreturn();
#endif /* end of BUILD_TARGET_X86_64/AMD_64/AARCH64 */
#endif /* end of WASM_DISABLE_HW_BOUND_CHECK */
#ifdef __cplusplus
}
#endif

View File

@ -110,9 +110,9 @@ enum {
MMAP_MAP_FIXED = 2
};
void *os_mmap(void *hint, unsigned int size, int prot, int flags);
void os_munmap(void *addr, uint32 size);
int os_mprotect(void *addr, uint32 size, int prot);
void *os_mmap(void *hint, size_t size, int prot, int flags);
void os_munmap(void *addr, size_t size);
int os_mprotect(void *addr, size_t size, int prot);
/**
* Flush cpu data cache, in some CPUs, after applying relocation to the

View File

@ -82,7 +82,7 @@ int os_vprintf(const char * format, va_list arg)
return 0;
}
void* os_mmap(void *hint, uint32 size, int prot, int flags)
void* os_mmap(void *hint, size_t size, int prot, int flags)
{
#if WASM_ENABLE_AOT != 0
int mprot = 0;
@ -124,7 +124,7 @@ void* os_mmap(void *hint, uint32 size, int prot, int flags)
#endif
}
void os_munmap(void *addr, uint32 size)
void os_munmap(void *addr, size_t size)
{
#if WASM_ENABLE_AOT != 0
uint64 aligned_size, page_size;
@ -135,11 +135,15 @@ void os_munmap(void *addr, uint32 size)
#endif
}
int os_mprotect(void *addr, uint32 size, int prot)
int os_mprotect(void *addr, size_t size, int prot)
{
#if WASM_ENABLE_AOT != 0
int mprot = 0;
sgx_status_t st = 0;
uint64 aligned_size, page_size;
page_size = getpagesize();
aligned_size = (size + page_size - 1) & ~(page_size - 1);
if (prot & MMAP_PROT_READ)
mprot |= SGX_PROT_READ;
@ -147,7 +151,7 @@ int os_mprotect(void *addr, uint32 size, int prot)
mprot |= SGX_PROT_WRITE;
if (prot & MMAP_PROT_EXEC)
mprot |= SGX_PROT_EXEC;
st = sgx_tprotect_rsrv_mem(addr, size, mprot);
st = sgx_tprotect_rsrv_mem(addr, aligned_size, mprot);
if (st != SGX_SUCCESS)
os_printf("os_mprotect(addr=0x%lx, size=%u, prot=0x%x) failed.",
addr, size, prot);

View File

@ -51,6 +51,38 @@ typedef pthread_t korp_thread;
#define os_printf printf
#define os_vprintf vprintf
#if WASM_DISABLE_HW_BOUND_CHECK == 0
#if defined(BUILD_TARGET_X86_64) \
|| defined(BUILD_TARGET_AMD_64) \
|| defined(BUILD_TARGET_AARCH64)
#include <signal.h>
#include <setjmp.h>
#define OS_ENABLE_HW_BOUND_CHECK
#define os_thread_local_attribute __thread
typedef jmp_buf korp_jmpbuf;
#define os_setjmp setjmp
#define os_longjmp longjmp
#define os_alloca alloca
#define os_getpagesize getpagesize
typedef void (*os_signal_handler)(void *sig_addr);
int os_signal_init(os_signal_handler handler);
void os_signal_destroy();
void os_signal_unmask();
void os_sigreturn();
#endif /* end of BUILD_TARGET_X86_64/AMD_64/AARCH64 */
#endif /* end of WASM_DISABLE_HW_BOUND_CHECK */
#ifdef __cplusplus
}
#endif

View File

@ -50,6 +50,38 @@ typedef pthread_t korp_thread;
#define os_printf printf
#define os_vprintf vprintf
#if WASM_DISABLE_HW_BOUND_CHECK == 0
#if defined(BUILD_TARGET_X86_64) \
|| defined(BUILD_TARGET_AMD_64) \
|| defined(BUILD_TARGET_AARCH64)
#include <signal.h>
#include <setjmp.h>
#define OS_ENABLE_HW_BOUND_CHECK
#define os_thread_local_attribute __thread
typedef jmp_buf korp_jmpbuf;
#define os_setjmp setjmp
#define os_longjmp longjmp
#define os_alloca alloca
#define os_getpagesize getpagesize
typedef void (*os_signal_handler)(void *sig_addr);
int os_signal_init(os_signal_handler handler);
void os_signal_destroy();
void os_signal_unmask();
void os_sigreturn();
#endif /* end of BUILD_TARGET_X86_64/AMD_64/AARCH64 */
#endif /* end of WASM_DISABLE_HW_BOUND_CHECK */
#ifdef __cplusplus
}
#endif

View File

@ -110,16 +110,18 @@ os_vprintf(const char *fmt, va_list ap)
}
void *
os_mmap(void *hint, unsigned int size, int prot, int flags)
os_mmap(void *hint, size_t size, int prot, int flags)
{
if ((uint64)size >= UINT32_MAX)
return NULL;
if (exec_mem_alloc_func)
return exec_mem_alloc_func(size);
return exec_mem_alloc_func((uint32)size);
else
return BH_MALLOC(size);
}
void
os_munmap(void *addr, uint32 size)
os_munmap(void *addr, size_t size)
{
if (exec_mem_free_func)
exec_mem_free_func(addr);
@ -128,7 +130,7 @@ os_munmap(void *addr, uint32 size)
}
int
os_mprotect(void *addr, uint32 size, int prot)
os_mprotect(void *addr, size_t size, int prot)
{
return 0;
}