diff --git a/.github/workflows/compilation_on_android_ubuntu.yml b/.github/workflows/compilation_on_android_ubuntu.yml index e0c9326b..c1c31f5f 100644 --- a/.github/workflows/compilation_on_android_ubuntu.yml +++ b/.github/workflows/compilation_on_android_ubuntu.yml @@ -566,9 +566,7 @@ jobs: test_option: $GC_TEST_OPTIONS - running_mode: "multi-tier-jit" test_option: $GC_TEST_OPTIONS - # aot, fast-interp, fast-jit, llvm-jit, multi-tier-jit don't support Memory64 - - running_mode: "aot" - test_option: $MEMORY64_TEST_OPTIONS + # fast-interp, fast-jit, llvm-jit, multi-tier-jit don't support Memory64 - running_mode: "fast-interp" test_option: $MEMORY64_TEST_OPTIONS - running_mode: "fast-jit" @@ -616,6 +614,7 @@ jobs: if: > ((matrix.test_option == '$DEFAULT_TEST_OPTIONS' || matrix.test_option == '$THREADS_TEST_OPTIONS' || matrix.test_option == '$WASI_TEST_OPTIONS' || matrix.test_option == '$GC_TEST_OPTIONS') + && matrix.test_option != '$MEMORY64_TEST_OPTIONS' && matrix.running_mode != 'fast-jit' && matrix.running_mode != 'jit' && matrix.running_mode != 'multi-tier-jit') run: echo "TEST_ON_X86_32=true" >> $GITHUB_ENV diff --git a/core/iwasm/aot/aot_loader.c b/core/iwasm/aot/aot_loader.c index b67f9c68..48ed4e84 100644 --- a/core/iwasm/aot/aot_loader.c +++ b/core/iwasm/aot/aot_loader.c @@ -9,6 +9,7 @@ #include "aot_reloc.h" #include "../common/wasm_runtime_common.h" #include "../common/wasm_native.h" +#include "../common/wasm_loader_common.h" #include "../compilation/aot.h" #if WASM_ENABLE_DEBUG_AOT != 0 @@ -1043,6 +1044,12 @@ load_memory_info(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module, for (i = 0; i < module->memory_count; i++) { read_uint32(buf, buf_end, module->memories[i].memory_flags); + + if (!wasm_memory_check_flags(module->memories[i].memory_flags, + error_buf, error_buf_size, true)) { + return false; + } + read_uint32(buf, buf_end, module->memories[i].num_bytes_per_page); read_uint32(buf, buf_end, module->memories[i].mem_init_page_count); read_uint32(buf, buf_end, module->memories[i].mem_max_page_count); @@ -3634,6 +3641,21 @@ fail: return ret; } +#if WASM_ENABLE_MEMORY64 != 0 +static bool +has_module_memory64(AOTModule *module) +{ + /* TODO: multi-memories for now assuming the memory idx type is consistent + * across multi-memories */ + if (module->import_memory_count > 0) + return !!(module->import_memories[0].memory_flags & MEMORY64_FLAG); + else if (module->memory_count > 0) + return !!(module->memories[0].memory_flags & MEMORY64_FLAG); + + return false; +} +#endif + static bool load_from_sections(AOTModule *module, AOTSection *sections, bool is_load_from_file_buf, char *error_buf, @@ -3645,6 +3667,7 @@ load_from_sections(AOTModule *module, AOTSection *sections, uint32 i, func_index, func_type_index; AOTFuncType *func_type; AOTExport *exports; + uint8 malloc_free_io_type = VALUE_TYPE_I32; while (section) { buf = section->section_body; @@ -3719,7 +3742,10 @@ load_from_sections(AOTModule *module, AOTSection *sections, module->malloc_func_index = (uint32)-1; module->free_func_index = (uint32)-1; module->retain_func_index = (uint32)-1; - +#if WASM_ENABLE_MEMORY64 != 0 + if (has_module_memory64(module)) + malloc_free_io_type = VALUE_TYPE_I64; +#endif exports = module->exports; for (i = 0; i < module->export_count; i++) { if (exports[i].kind == EXPORT_KIND_FUNC @@ -3729,8 +3755,8 @@ load_from_sections(AOTModule *module, AOTSection *sections, func_type_index = module->func_type_indexes[func_index]; func_type = (AOTFuncType *)module->types[func_type_index]; if 
(func_type->param_count == 1 && func_type->result_count == 1 - && func_type->types[0] == VALUE_TYPE_I32 - && func_type->types[1] == VALUE_TYPE_I32) { + && func_type->types[0] == malloc_free_io_type + && func_type->types[1] == malloc_free_io_type) { bh_assert(module->malloc_func_index == (uint32)-1); module->malloc_func_index = func_index; LOG_VERBOSE("Found malloc function, name: %s, index: %u", @@ -3742,9 +3768,9 @@ load_from_sections(AOTModule *module, AOTSection *sections, func_type_index = module->func_type_indexes[func_index]; func_type = (AOTFuncType *)module->types[func_type_index]; if (func_type->param_count == 2 && func_type->result_count == 1 - && func_type->types[0] == VALUE_TYPE_I32 + && func_type->types[0] == malloc_free_io_type && func_type->types[1] == VALUE_TYPE_I32 - && func_type->types[2] == VALUE_TYPE_I32) { + && func_type->types[2] == malloc_free_io_type) { uint32 j; WASMExport *export_tmp; @@ -3768,8 +3794,8 @@ load_from_sections(AOTModule *module, AOTSection *sections, (AOTFuncType *)module->types[func_type_index]; if (func_type->param_count == 1 && func_type->result_count == 1 - && func_type->types[0] == VALUE_TYPE_I32 - && func_type->types[1] == VALUE_TYPE_I32) { + && func_type->types[0] == malloc_free_io_type + && func_type->types[1] == malloc_free_io_type) { bh_assert(module->retain_func_index == (uint32)-1); module->retain_func_index = export_tmp->index; @@ -3795,7 +3821,7 @@ load_from_sections(AOTModule *module, AOTSection *sections, func_type_index = module->func_type_indexes[func_index]; func_type = (AOTFuncType *)module->types[func_type_index]; if (func_type->param_count == 1 && func_type->result_count == 0 - && func_type->types[0] == VALUE_TYPE_I32) { + && func_type->types[0] == malloc_free_io_type) { bh_assert(module->free_func_index == (uint32)-1); module->free_func_index = func_index; LOG_VERBOSE("Found free function, name: %s, index: %u", diff --git a/core/iwasm/aot/aot_runtime.c b/core/iwasm/aot/aot_runtime.c index 1668ed01..610213cb 100644 --- a/core/iwasm/aot/aot_runtime.c +++ b/core/iwasm/aot/aot_runtime.c @@ -792,16 +792,18 @@ memory_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent, uint32 max_page_count = wasm_runtime_get_max_mem(max_memory_pages, memory->mem_init_page_count, memory->mem_max_page_count); + uint32 default_max_pages; uint32 inc_page_count, global_idx; uint32 bytes_of_last_page, bytes_to_page_end; uint64 aux_heap_base, heap_offset = (uint64)num_bytes_per_page * init_page_count; uint64 memory_data_size, max_memory_data_size; uint8 *p = NULL, *global_addr; + bool is_memory64 = memory->memory_flags & MEMORY64_FLAG; bool is_shared_memory = false; #if WASM_ENABLE_SHARED_MEMORY != 0 - is_shared_memory = memory->memory_flags & 0x02 ? true : false; + is_shared_memory = memory->memory_flags & SHARED_MEMORY_FLAG ? 
true : false; /* Shared memory */ if (is_shared_memory && parent != NULL) { AOTMemoryInstance *shared_memory_instance; @@ -813,6 +815,16 @@ memory_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent, } #endif +#if WASM_ENABLE_MEMORY64 != 0 + if (is_memory64) { + default_max_pages = DEFAULT_MEM64_MAX_PAGES; + } + else +#endif + { + default_max_pages = DEFAULT_MAX_PAGES; + } + if (heap_size > 0 && module->malloc_func_index != (uint32)-1 && module->free_func_index != (uint32)-1) { /* Disable app heap, use malloc/free function exported @@ -893,14 +905,14 @@ memory_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent, } init_page_count += inc_page_count; max_page_count += inc_page_count; - if (init_page_count > DEFAULT_MAX_PAGES) { + if (init_page_count > default_max_pages) { set_error_buf(error_buf, error_buf_size, "failed to insert app heap into linear memory, " "try using `--heap-size=0` option"); return NULL; } - if (max_page_count > DEFAULT_MAX_PAGES) - max_page_count = DEFAULT_MAX_PAGES; + if (max_page_count > default_max_pages) + max_page_count = default_max_pages; } LOG_VERBOSE("Memory instantiate:"); @@ -912,11 +924,11 @@ memory_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent, heap_size); max_memory_data_size = (uint64)num_bytes_per_page * max_page_count; - bh_assert(max_memory_data_size <= MAX_LINEAR_MEMORY_SIZE); + bh_assert(max_memory_data_size <= GET_MAX_LINEAR_MEMORY_SIZE(is_memory64)); (void)max_memory_data_size; /* TODO: memory64 uses is_memory64 flag */ - if (wasm_allocate_linear_memory(&p, is_shared_memory, false, + if (wasm_allocate_linear_memory(&p, is_shared_memory, is_memory64, num_bytes_per_page, init_page_count, max_page_count, &memory_data_size) != BHT_OK) { @@ -930,6 +942,11 @@ memory_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent, memory_inst->cur_page_count = init_page_count; memory_inst->max_page_count = max_page_count; memory_inst->memory_data_size = memory_data_size; +#if WASM_ENABLE_MEMORY64 != 0 + if (is_memory64) { + memory_inst->is_memory64 = 1; + } +#endif /* Init memory info */ memory_inst->memory_data = p; @@ -993,11 +1010,12 @@ memories_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent, uint32 max_memory_pages, char *error_buf, uint32 error_buf_size) { - uint32 global_index, global_data_offset, base_offset, length; + uint32 global_index, global_data_offset, length; uint32 i, memory_count = module->memory_count; AOTMemoryInstance *memories, *memory_inst; AOTMemInitData *data_seg; uint64 total_size; + mem_offset_t base_offset; module_inst->memory_count = memory_count; total_size = sizeof(AOTMemoryInstance *) * (uint64)memory_count; @@ -1036,7 +1054,9 @@ memories_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent, initialized */ continue; - bh_assert(data_seg->offset.init_expr_type == INIT_EXPR_TYPE_I32_CONST + bh_assert(data_seg->offset.init_expr_type + == (memory_inst->is_memory64 ? 
INIT_EXPR_TYPE_I64_CONST + : INIT_EXPR_TYPE_I32_CONST) || data_seg->offset.init_expr_type == INIT_EXPR_TYPE_GET_GLOBAL); @@ -1057,11 +1077,28 @@ memories_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent, module->globals[global_index - module->import_global_count] .data_offset; - base_offset = - *(uint32 *)(module_inst->global_data + global_data_offset); +#if WASM_ENABLE_MEMORY64 != 0 + if (memory_inst->is_memory64) { + base_offset = + *(uint64 *)(module_inst->global_data + global_data_offset); + } + else +#endif + { + base_offset = + *(uint32 *)(module_inst->global_data + global_data_offset); + } } else { - base_offset = (uint32)data_seg->offset.u.i32; +#if WASM_ENABLE_MEMORY64 != 0 + if (memory_inst->is_memory64) { + base_offset = data_seg->offset.u.i64; + } + else +#endif + { + base_offset = data_seg->offset.u.u32; + } } /* Copy memory data */ @@ -1071,7 +1108,8 @@ memories_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent, /* Check memory data */ /* check offset since length might negative */ if (base_offset > memory_inst->memory_data_size) { - LOG_DEBUG("base_offset(%d) > memory_data_size(%" PRIu64 ")", + LOG_DEBUG("base_offset(%" PR_MEM_OFFSET + ") > memory_data_size(%" PRIu64 ")", base_offset, memory_inst->memory_data_size); #if WASM_ENABLE_REF_TYPES != 0 set_error_buf(error_buf, error_buf_size, @@ -1086,8 +1124,8 @@ memories_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent, /* check offset + length(could be zero) */ length = data_seg->byte_count; if (base_offset + length > memory_inst->memory_data_size) { - LOG_DEBUG("base_offset(%d) + length(%d) > memory_data_size(%" PRIu64 - ")", + LOG_DEBUG("base_offset(%" PR_MEM_OFFSET + ") + length(%d) > memory_data_size(%" PRIu64 ")", base_offset, length, memory_inst->memory_data_size); #if WASM_ENABLE_REF_TYPES != 0 set_error_buf(error_buf, error_buf_size, @@ -2334,22 +2372,44 @@ aot_copy_exception(AOTModuleInstance *module_inst, char *exception_buf) static bool execute_malloc_function(AOTModuleInstance *module_inst, WASMExecEnv *exec_env, AOTFunctionInstance *malloc_func, - AOTFunctionInstance *retain_func, uint32 size, - uint32 *p_result) + AOTFunctionInstance *retain_func, uint64 size, + uint64 *p_result) { #ifdef OS_ENABLE_HW_BOUND_CHECK WASMExecEnv *exec_env_tls = wasm_runtime_get_exec_env_tls(); #endif WASMExecEnv *exec_env_created = NULL; WASMModuleInstanceCommon *module_inst_old = NULL; - uint32 argv[2], argc; + union { + uint32 u32[3]; + uint64 u64; + } argv; + uint32 argc; bool ret; - argv[0] = size; - argc = 1; - if (retain_func) { - argv[1] = 0; +#if WASM_ENABLE_MEMORY64 != 0 + bool is_memory64 = module_inst->memories[0]->is_memory64; + if (is_memory64) { argc = 2; + PUT_I64_TO_ADDR(&argv.u64, size); + } + else +#endif + { + argc = 1; + argv.u32[0] = (uint32)size; + } + + /* if __retain is exported, then this module is compiled by + assemblyscript, the memory should be managed by as's runtime, + in this case we need to call the retain function after malloc + the memory */ + if (retain_func) { + /* the malloc function from assemblyscript is: + function __new(size: usize, id: u32) + id = 0 means this is an ArrayBuffer object */ + argv.u32[argc] = 0; + argc++; } if (exec_env) { @@ -2389,10 +2449,10 @@ execute_malloc_function(AOTModuleInstance *module_inst, WASMExecEnv *exec_env, } } - ret = aot_call_function(exec_env, malloc_func, argc, argv); + ret = aot_call_function(exec_env, malloc_func, argc, argv.u32); if (retain_func && ret) - ret = aot_call_function(exec_env, 
retain_func, 1, argv); + ret = aot_call_function(exec_env, retain_func, 1, argv.u32); if (module_inst_old) /* Restore the existing exec_env's module inst */ @@ -2401,24 +2461,46 @@ execute_malloc_function(AOTModuleInstance *module_inst, WASMExecEnv *exec_env, if (exec_env_created) wasm_exec_env_destroy(exec_env_created); - if (ret) - *p_result = argv[0]; + if (ret) { +#if WASM_ENABLE_MEMORY64 != 0 + if (is_memory64) + *p_result = GET_I64_FROM_ADDR(&argv.u64); + else +#endif + { + *p_result = argv.u32[0]; + } + } return ret; } static bool execute_free_function(AOTModuleInstance *module_inst, WASMExecEnv *exec_env, - AOTFunctionInstance *free_func, uint32 offset) + AOTFunctionInstance *free_func, uint64 offset) { #ifdef OS_ENABLE_HW_BOUND_CHECK WASMExecEnv *exec_env_tls = wasm_runtime_get_exec_env_tls(); #endif WASMExecEnv *exec_env_created = NULL; WASMModuleInstanceCommon *module_inst_old = NULL; - uint32 argv[2]; + union { + uint32 u32[2]; + uint64 u64; + } argv; + uint32 argc; bool ret; - argv[0] = offset; +#if WASM_ENABLE_MEMORY64 != 0 + if (module_inst->memories[0]->is_memory64) { + PUT_I64_TO_ADDR(&argv.u64, offset); + argc = 2; + } + else +#endif + { + argv.u32[0] = (uint32)offset; + argc = 1; + } if (exec_env) { #ifdef OS_ENABLE_HW_BOUND_CHECK @@ -2457,7 +2539,7 @@ execute_free_function(AOTModuleInstance *module_inst, WASMExecEnv *exec_env, } } - ret = aot_call_function(exec_env, free_func, 1, argv); + ret = aot_call_function(exec_env, free_func, argc, argv.u32); if (module_inst_old) /* Restore the existing exec_env's module inst */ @@ -2477,7 +2559,7 @@ aot_module_malloc_internal(AOTModuleInstance *module_inst, AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst); AOTModule *module = (AOTModule *)module_inst->module; uint8 *addr = NULL; - uint32 offset = 0; + uint64 offset = 0; /* TODO: Memory64 size check based on memory idx type */ bh_assert(size <= UINT32_MAX); @@ -2509,7 +2591,7 @@ aot_module_malloc_internal(AOTModuleInstance *module_inst, if (!malloc_func || !execute_malloc_function(module_inst, exec_env, malloc_func, - retain_func, (uint32)size, &offset)) { + retain_func, size, &offset)) { return 0; } addr = offset ? 
(uint8 *)memory_inst->memory_data + offset : NULL; @@ -2620,8 +2702,7 @@ aot_module_free_internal(AOTModuleInstance *module_inst, WASMExecEnv *exec_env, free_func = aot_lookup_function(module_inst, "__unpin"); if (free_func) - execute_free_function(module_inst, exec_env, free_func, - (uint32)ptr); + execute_free_function(module_inst, exec_env, free_func, ptr); } } } @@ -2983,7 +3064,7 @@ aot_sqrtf(float x) #if WASM_ENABLE_BULK_MEMORY != 0 bool aot_memory_init(AOTModuleInstance *module_inst, uint32 seg_index, uint32 offset, - uint32 len, uint32 dst) + uint32 len, size_t dst) { AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst); AOTModule *aot_module; @@ -3016,7 +3097,7 @@ aot_memory_init(AOTModuleInstance *module_inst, uint32 seg_index, uint32 offset, (WASMModuleInstanceCommon *)module_inst, (uint64)dst); SHARED_MEMORY_LOCK(memory_inst); - bh_memcpy_s(maddr, (uint32)(memory_inst->memory_data_size - dst), + bh_memcpy_s(maddr, CLAMP_U64_TO_U32(memory_inst->memory_data_size - dst), data + offset, len); SHARED_MEMORY_UNLOCK(memory_inst); return true; diff --git a/core/iwasm/aot/aot_runtime.h b/core/iwasm/aot/aot_runtime.h index 0dbf9a5e..79bdb1df 100644 --- a/core/iwasm/aot/aot_runtime.h +++ b/core/iwasm/aot/aot_runtime.h @@ -627,7 +627,7 @@ aot_sqrtf(float x); #if WASM_ENABLE_BULK_MEMORY != 0 bool aot_memory_init(AOTModuleInstance *module_inst, uint32 seg_index, uint32 offset, - uint32 len, uint32 dst); + uint32 len, size_t dst); bool aot_data_drop(AOTModuleInstance *module_inst, uint32 seg_index); diff --git a/core/iwasm/common/wasm_loader_common.c b/core/iwasm/common/wasm_loader_common.c new file mode 100644 index 00000000..ceb1cf67 --- /dev/null +++ b/core/iwasm/common/wasm_loader_common.c @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2024 Amazon Inc. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ +#include "wasm_loader_common.h" +#include "bh_log.h" +#include "../interpreter/wasm.h" + +static void +set_error_buf(char *error_buf, uint32 error_buf_size, const char *string, + bool is_aot) +{ + if (error_buf != NULL) { + snprintf(error_buf, error_buf_size, "%s module load failed: %s", + is_aot ? 
"AOT" : "WASM", string); + } +} + +bool +wasm_memory_check_flags(const uint8 mem_flag, char *error_buf, + uint32 error_buf_size, bool is_aot) +{ + /* Check whether certain features indicated by mem_flag are enabled in + * runtime */ + if (mem_flag > MAX_PAGE_COUNT_FLAG) { +#if WASM_ENABLE_SHARED_MEMORY == 0 + if (mem_flag & SHARED_MEMORY_FLAG) { + LOG_VERBOSE("shared memory flag was found, please enable shared " + "memory, lib-pthread or lib-wasi-threads"); + set_error_buf(error_buf, error_buf_size, "invalid limits flags", + is_aot); + return false; + } +#endif +#if WASM_ENABLE_MEMORY64 == 0 + if (mem_flag & MEMORY64_FLAG) { + LOG_VERBOSE("memory64 flag was found, please enable memory64"); + set_error_buf(error_buf, error_buf_size, "invalid limits flags", + is_aot); + return false; + } +#endif + } + + if (mem_flag > MAX_PAGE_COUNT_FLAG + SHARED_MEMORY_FLAG + MEMORY64_FLAG) { + set_error_buf(error_buf, error_buf_size, "invalid limits flags", + is_aot); + return false; + } + else if ((mem_flag & SHARED_MEMORY_FLAG) + && !(mem_flag & MAX_PAGE_COUNT_FLAG)) { + set_error_buf(error_buf, error_buf_size, + "shared memory must have maximum", is_aot); + return false; + } + + return true; +} diff --git a/core/iwasm/common/wasm_loader_common.h b/core/iwasm/common/wasm_loader_common.h new file mode 100644 index 00000000..05808176 --- /dev/null +++ b/core/iwasm/common/wasm_loader_common.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2024 Amazon Inc. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef _WASM_LOADER_COMMON_H +#define _WASM_LOADER_COMMON_H + +#include "platform_common.h" + +#ifdef __cplusplus +extern "C" { +#endif + +bool +wasm_memory_check_flags(const uint8 mem_flag, char *error_buf, + uint32 error_buf_size, bool is_aot); + +#ifdef __cplusplus +} +#endif + +#endif /* end of _WASM_LOADER_COMMON_H */ \ No newline at end of file diff --git a/core/iwasm/common/wasm_memory.c b/core/iwasm/common/wasm_memory.c index c49c1825..f47215f2 100644 --- a/core/iwasm/common/wasm_memory.c +++ b/core/iwasm/common/wasm_memory.c @@ -1005,15 +1005,7 @@ wasm_allocate_linear_memory(uint8 **data, bool is_shared_memory, page_size = os_getpagesize(); *memory_data_size = init_page_count * num_bytes_per_page; -#if WASM_ENABLE_MEMORY64 != 0 - if (is_memory64) { - bh_assert(*memory_data_size <= MAX_LINEAR_MEM64_MEMORY_SIZE); - } - else -#endif - { - bh_assert(*memory_data_size <= MAX_LINEAR_MEMORY_SIZE); - } + bh_assert(*memory_data_size <= GET_MAX_LINEAR_MEMORY_SIZE(is_memory64)); *memory_data_size = align_as_and_cast(*memory_data_size, page_size); if (map_size > 0) { diff --git a/core/iwasm/common/wasm_runtime_common.h b/core/iwasm/common/wasm_runtime_common.h index dc41f72d..f38eb717 100644 --- a/core/iwasm/common/wasm_runtime_common.h +++ b/core/iwasm/common/wasm_runtime_common.h @@ -362,6 +362,9 @@ LOAD_I16(void *addr) #define SHARED_MEMORY_UNLOCK(memory) (void)0 #endif +#define CLAMP_U64_TO_U32(value) \ + ((value) > UINT32_MAX ? 
UINT32_MAX : (uint32)(value)) + typedef struct WASMModuleCommon { /* Module type, for module loaded from WASM bytecode binary, this field is Wasm_Module_Bytecode, and this structure should diff --git a/core/iwasm/compilation/aot_compiler.c b/core/iwasm/compilation/aot_compiler.c index 694307ae..5c66e0fe 100644 --- a/core/iwasm/compilation/aot_compiler.c +++ b/core/iwasm/compilation/aot_compiler.c @@ -84,37 +84,46 @@ read_leb(const uint8 *buf, const uint8 *buf_end, uint32 *p_offset, } /* NOLINTNEXTLINE */ -#define read_leb_uint32(p, p_end, res) \ - do { \ - uint32 off = 0; \ - uint64 res64; \ - if (!read_leb(p, p_end, &off, 32, false, &res64)) \ - return false; \ - p += off; \ - res = (uint32)res64; \ +#define read_leb_generic(p, p_end, res, res_type, sign) \ + do { \ + uint32 off = 0; \ + uint64 res64; \ + if (!read_leb(p, p_end, &off, sizeof(res_type) << 3, sign, &res64)) \ + return false; \ + p += off; \ + res = (res_type)res64; \ } while (0) /* NOLINTNEXTLINE */ -#define read_leb_int32(p, p_end, res) \ - do { \ - uint32 off = 0; \ - uint64 res64; \ - if (!read_leb(p, p_end, &off, 32, true, &res64)) \ - return false; \ - p += off; \ - res = (int32)res64; \ - } while (0) +#define read_leb_int32(p, p_end, res) \ + read_leb_generic(p, p_end, res, int32, true) /* NOLINTNEXTLINE */ -#define read_leb_int64(p, p_end, res) \ - do { \ - uint32 off = 0; \ - uint64 res64; \ - if (!read_leb(p, p_end, &off, 64, true, &res64)) \ - return false; \ - p += off; \ - res = (int64)res64; \ +#define read_leb_int64(p, p_end, res) \ + read_leb_generic(p, p_end, res, int64, true) + +/* NOLINTNEXTLINE */ +#define read_leb_uint32(p, p_end, res) \ + read_leb_generic(p, p_end, res, uint32, false) + +/* NOLINTNEXTLINE */ +#define read_leb_uint64(p, p_end, res) \ + read_leb_generic(p, p_end, res, uint64, false) + +/* NOLINTNEXTLINE */ +#if WASM_ENABLE_MEMORY64 != 0 +#define read_leb_mem_offset(p, p_end, res) \ + do { \ + if (IS_MEMORY64) { \ + read_leb_uint64(p, p_end, res); \ + } \ + else { \ + read_leb_uint32(p, p_end, res); \ + } \ } while (0) +#else +#define read_leb_mem_offset read_leb_uint32 +#endif /** * Since wamrc uses a full feature Wasm loader, @@ -135,6 +144,13 @@ aot_validate_wasm(AOTCompContext *comp_ctx) } } +#if WASM_ENABLE_MEMORY64 != 0 + if (comp_ctx->pointer_size < sizeof(uint64) && IS_MEMORY64) { + aot_set_last_error("Compiling wasm64 to 32bit platform is not allowed"); + return false; + } +#endif + return true; } @@ -933,7 +949,8 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) uint16 result_count; uint32 br_depth, *br_depths, br_count; uint32 func_idx, type_idx, mem_idx, local_idx, global_idx, i; - uint32 bytes = 4, align, offset; + uint32 bytes = 4, align; + mem_offset_t offset; uint32 type_index; bool sign = true; int32 i32_const; @@ -1892,7 +1909,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) sign = (opcode == WASM_OP_I32_LOAD16_S) ? true : false; op_i32_load: read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_op_i32_load(comp_ctx, func_ctx, align, offset, bytes, sign, false)) return false; @@ -1918,7 +1935,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) sign = (opcode == WASM_OP_I64_LOAD32_S) ? 
true : false; op_i64_load: read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_op_i64_load(comp_ctx, func_ctx, align, offset, bytes, sign, false)) return false; @@ -1926,14 +1943,14 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) case WASM_OP_F32_LOAD: read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_op_f32_load(comp_ctx, func_ctx, align, offset)) return false; break; case WASM_OP_F64_LOAD: read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_op_f64_load(comp_ctx, func_ctx, align, offset)) return false; break; @@ -1948,7 +1965,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) bytes = 2; op_i32_store: read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_op_i32_store(comp_ctx, func_ctx, align, offset, bytes, false)) return false; @@ -1967,7 +1984,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) bytes = 4; op_i64_store: read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_op_i64_store(comp_ctx, func_ctx, align, offset, bytes, false)) return false; @@ -1975,7 +1992,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) case WASM_OP_F32_STORE: read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_op_f32_store(comp_ctx, func_ctx, align, offset)) return false; @@ -1983,7 +2000,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) case WASM_OP_F64_STORE: read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_op_f64_store(comp_ctx, func_ctx, align, offset)) return false; @@ -2540,7 +2557,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) if (opcode != WASM_OP_ATOMIC_FENCE) { read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); } switch (opcode) { case WASM_OP_ATOMIC_WAIT32: @@ -2705,7 +2722,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) case SIMD_v128_load: { read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_simd_v128_load(comp_ctx, func_ctx, align, offset)) return false; @@ -2720,7 +2737,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) case SIMD_v128_load32x2_u: { read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_simd_load_extend( comp_ctx, func_ctx, opcode, align, offset)) return false; @@ -2733,7 +2750,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) case SIMD_v128_load64_splat: { read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + 
read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_simd_load_splat(comp_ctx, func_ctx, opcode, align, offset)) return false; @@ -2743,7 +2760,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) case SIMD_v128_store: { read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_simd_v128_store(comp_ctx, func_ctx, align, offset)) return false; @@ -3006,7 +3023,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) case SIMD_v128_load64_lane: { read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_simd_load_lane(comp_ctx, func_ctx, opcode, align, offset, *frame_ip++)) @@ -3020,7 +3037,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) case SIMD_v128_store64_lane: { read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_simd_store_lane(comp_ctx, func_ctx, opcode, align, offset, *frame_ip++)) @@ -3032,7 +3049,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) case SIMD_v128_load64_zero: { read_leb_uint32(frame_ip, frame_ip_end, align); - read_leb_uint32(frame_ip, frame_ip_end, offset); + read_leb_mem_offset(frame_ip, frame_ip_end, offset); if (!aot_compile_simd_load_zero(comp_ctx, func_ctx, opcode, align, offset)) return false; diff --git a/core/iwasm/compilation/aot_compiler.h b/core/iwasm/compilation/aot_compiler.h index 08e9db7b..f9016ac6 100644 --- a/core/iwasm/compilation/aot_compiler.h +++ b/core/iwasm/compilation/aot_compiler.h @@ -519,6 +519,15 @@ set_local_gc_ref(AOTCompFrame *frame, int n, LLVMValueRef value, uint8 ref_type) wasm_runtime_free(aot_value); \ } while (0) +#if WASM_ENABLE_MEMORY64 != 0 +#define IS_MEMORY64 \ + (comp_ctx->comp_data->memories[0].memory_flags & MEMORY64_FLAG) +#define MEMORY64_COND_VALUE(VAL_IF_ENABLED, VAL_IF_DISABLED) \ + (IS_MEMORY64 ? 
VAL_IF_ENABLED : VAL_IF_DISABLED) +#else +#define MEMORY64_COND_VALUE(VAL_IF_ENABLED, VAL_IF_DISABLED) (VAL_IF_DISABLED) +#endif + #define POP_I32(v) POP(v, VALUE_TYPE_I32) #define POP_I64(v) POP(v, VALUE_TYPE_I64) #define POP_F32(v) POP(v, VALUE_TYPE_F32) @@ -527,6 +536,10 @@ set_local_gc_ref(AOTCompFrame *frame, int n, LLVMValueRef value, uint8 ref_type) #define POP_FUNCREF(v) POP(v, VALUE_TYPE_FUNCREF) #define POP_EXTERNREF(v) POP(v, VALUE_TYPE_EXTERNREF) #define POP_GC_REF(v) POP(v, VALUE_TYPE_GC_REF) +#define POP_MEM_OFFSET(v) \ + POP(v, MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32)) +#define POP_PAGE_COUNT(v) \ + POP(v, MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32)) #define POP_COND(llvm_value) \ do { \ @@ -590,6 +603,8 @@ set_local_gc_ref(AOTCompFrame *frame, int n, LLVMValueRef value, uint8 ref_type) #define PUSH_FUNCREF(v) PUSH(v, VALUE_TYPE_FUNCREF) #define PUSH_EXTERNREF(v) PUSH(v, VALUE_TYPE_EXTERNREF) #define PUSH_GC_REF(v) PUSH(v, VALUE_TYPE_GC_REF) +#define PUSH_PAGE_COUNT(v) \ + PUSH(v, MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32)) #define TO_LLVM_TYPE(wasm_type) \ wasm_type_to_llvm_type(comp_ctx, &comp_ctx->basic_types, wasm_type) diff --git a/core/iwasm/compilation/aot_emit_memory.c b/core/iwasm/compilation/aot_emit_memory.c index 4582cdd0..a506f2a7 100644 --- a/core/iwasm/compilation/aot_emit_memory.c +++ b/core/iwasm/compilation/aot_emit_memory.c @@ -38,6 +38,20 @@ #define SET_BUILD_POS(block) LLVMPositionBuilderAtEnd(comp_ctx->builder, block) +static bool +zero_extend_u64(AOTCompContext *comp_ctx, LLVMValueRef *value, const char *name) +{ + if (comp_ctx->pointer_size == sizeof(uint64)) { + /* zero extend to uint64 if the target is 64-bit */ + *value = LLVMBuildZExt(comp_ctx->builder, *value, I64_TYPE, name); + if (!*value) { + aot_set_last_error("llvm build zero extend failed."); + return false; + } + } + return true; +} + static LLVMValueRef get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 bytes) @@ -82,9 +96,10 @@ get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx); LLVMValueRef aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 offset, uint32 bytes, bool enable_segue) + mem_offset_t offset, uint32 bytes, bool enable_segue) { - LLVMValueRef offset_const = I32_CONST(offset); + LLVMValueRef offset_const = + MEMORY64_COND_VALUE(I64_CONST(offset), I32_CONST(offset)); LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp; LLVMValueRef mem_base_addr, mem_check_bound; LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder); @@ -94,17 +109,27 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, bool is_target_64bit, is_local_of_aot_value = false; #if WASM_ENABLE_SHARED_MEMORY != 0 bool is_shared_memory = - comp_ctx->comp_data->memories[0].memory_flags & 0x02; + comp_ctx->comp_data->memories[0].memory_flags & SHARED_MEMORY_FLAG; #endif is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64)) ? 
true : false; if (comp_ctx->is_indirect_mode - && aot_intrinsic_check_capability(comp_ctx, "i32.const")) { + && aot_intrinsic_check_capability( + comp_ctx, MEMORY64_COND_VALUE("i64.const", "i32.const"))) { WASMValue wasm_value; - wasm_value.i32 = offset; +#if WASM_ENABLE_MEMORY64 != 0 + if (IS_MEMORY64) { + wasm_value.i64 = offset; + } + else +#endif + { + wasm_value.i32 = (int32)offset; + } offset_const = aot_load_const_from_table( - comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_I32); + comp_ctx, func_ctx->native_symbol, &wasm_value, + MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32)); if (!offset_const) { return NULL; } @@ -139,7 +164,7 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, local_idx_of_aot_value = aot_value_top->local_idx; } - POP_I32(addr); + POP_MEM_OFFSET(addr); /* * Note: not throw the integer-overflow-exception here since it must @@ -158,7 +183,7 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, if (mem_offset + bytes <= mem_data_size) { /* inside memory space */ if (comp_ctx->pointer_size == sizeof(uint64)) - offset1 = I64_CONST((uint32)mem_offset); + offset1 = I64_CONST(mem_offset); else offset1 = I32_CONST((uint32)mem_offset); CHECK_LLVM_CONST(offset1); @@ -206,7 +231,8 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, if (!(mem_size = get_memory_curr_page_count(comp_ctx, func_ctx))) { goto fail; } - BUILD_ICMP(LLVMIntEQ, mem_size, I32_ZERO, cmp, "is_zero"); + BUILD_ICMP(LLVMIntEQ, mem_size, + MEMORY64_COND_VALUE(I64_ZERO, I32_ZERO), cmp, "is_zero"); ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ"); LLVMMoveBasicBlockAfter(check_succ, block_curr); if (!aot_emit_exception(comp_ctx, func_ctx, @@ -412,8 +438,8 @@ fail: bool aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset, uint32 bytes, bool sign, - bool atomic) + uint32 align, mem_offset_t offset, uint32 bytes, + bool sign, bool atomic) { LLVMValueRef maddr, value = NULL; LLVMTypeRef data_type; @@ -482,8 +508,8 @@ fail: bool aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset, uint32 bytes, bool sign, - bool atomic) + uint32 align, mem_offset_t offset, uint32 bytes, + bool sign, bool atomic) { LLVMValueRef maddr, value = NULL; LLVMTypeRef data_type; @@ -560,7 +586,7 @@ fail: bool aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset) + uint32 align, mem_offset_t offset) { LLVMValueRef maddr, value; bool enable_segue = comp_ctx->enable_segue_f32_load; @@ -583,7 +609,7 @@ fail: bool aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset) + uint32 align, mem_offset_t offset) { LLVMValueRef maddr, value; bool enable_segue = comp_ctx->enable_segue_f64_load; @@ -606,7 +632,8 @@ fail: bool aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset, uint32 bytes, bool atomic) + uint32 align, mem_offset_t offset, uint32 bytes, + bool atomic) { LLVMValueRef maddr, value; bool enable_segue = comp_ctx->enable_segue_i32_store; @@ -656,7 +683,8 @@ fail: bool aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset, uint32 bytes, bool atomic) + uint32 align, mem_offset_t offset, uint32 bytes, + bool atomic) { LLVMValueRef maddr, value; bool enable_segue = comp_ctx->enable_segue_i64_store; @@ -713,7 +741,7 @@ fail: bool 
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset) + uint32 align, mem_offset_t offset) { LLVMValueRef maddr, value; bool enable_segue = comp_ctx->enable_segue_f32_store; @@ -736,7 +764,7 @@ fail: bool aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset) + uint32 align, mem_offset_t offset) { LLVMValueRef maddr, value; bool enable_segue = comp_ctx->enable_segue_f64_store; @@ -774,7 +802,8 @@ get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx) } } - return mem_size; + return LLVMBuildIntCast(comp_ctx->builder, mem_size, + MEMORY64_COND_VALUE(I64_TYPE, I32_TYPE), ""); fail: return NULL; } @@ -785,7 +814,7 @@ aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx) LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx); if (mem_size) - PUSH_I32(mem_size); + PUSH_PAGE_COUNT(mem_size); return mem_size ? true : false; fail: return false; @@ -798,11 +827,14 @@ aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx) LLVMValueRef delta, param_values[2], ret_value, func, value; LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type; int32 func_index; +#if WASM_ENABLE_MEMORY64 != 0 + LLVMValueRef u32_max, u32_cmp_result; +#endif if (!mem_size) return false; - POP_I32(delta); + POP_PAGE_COUNT(delta); /* Function type of aot_enlarge_memory() */ param_types[0] = INT8_PTR_TYPE; @@ -854,7 +886,7 @@ aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx) /* Call function aot_enlarge_memory() */ param_values[0] = func_ctx->aot_inst; - param_values[1] = delta; + param_values[1] = LLVMBuildTrunc(comp_ctx->builder, delta, I32_TYPE, ""); if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func, param_values, 2, "call"))) { aot_set_last_error("llvm build call failed."); @@ -862,15 +894,26 @@ aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx) } BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret"); +#if WASM_ENABLE_MEMORY64 != 0 + if (IS_MEMORY64) { + if (!(u32_max = I64_CONST(UINT32_MAX))) { + aot_set_last_error("llvm build const failed"); + return false; + } + BUILD_ICMP(LLVMIntULE, delta, u32_max, u32_cmp_result, "page_size_cmp"); + BUILD_OP(And, ret_value, u32_cmp_result, ret_value, "and"); + } +#endif - /* ret_value = ret_value == true ? delta : pre_page_count */ - if (!(ret_value = LLVMBuildSelect(comp_ctx->builder, ret_value, mem_size, - I32_NEG_ONE, "mem_grow_ret"))) { + /* ret_value = ret_value == true ? 
pre_page_count : -1 */ + if (!(ret_value = LLVMBuildSelect( + comp_ctx->builder, ret_value, mem_size, + MEMORY64_COND_VALUE(I64_NEG_ONE, I32_NEG_ONE), "mem_grow_ret"))) { aot_set_last_error("llvm build select failed."); return false; } - PUSH_I32(ret_value); + PUSH_PAGE_COUNT(ret_value); return true; fail: return false; @@ -987,13 +1030,17 @@ aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, POP_I32(len); POP_I32(offset); - POP_I32(dst); + POP_MEM_OFFSET(dst); + + if (!zero_extend_u64(comp_ctx, &dst, "dst64")) { + return false; + } param_types[0] = INT8_PTR_TYPE; param_types[1] = I32_TYPE; param_types[2] = I32_TYPE; param_types[3] = I32_TYPE; - param_types[4] = I32_TYPE; + param_types[4] = SIZE_T_TYPE; ret_type = INT8_TYPE; if (comp_ctx->is_jit_mode) @@ -1080,9 +1127,9 @@ aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx) LLVMValueRef src, dst, src_addr, dst_addr, len, res; bool call_aot_memmove = false; - POP_I32(len); - POP_I32(src); - POP_I32(dst); + POP_MEM_OFFSET(len); + POP_MEM_OFFSET(src); + POP_MEM_OFFSET(dst); if (!(src_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, src, len))) return false; @@ -1090,13 +1137,8 @@ aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx) if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len))) return false; - if (comp_ctx->pointer_size == sizeof(uint64)) { - /* zero extend to uint64 if the target is 64-bit */ - len = LLVMBuildZExt(comp_ctx->builder, len, I64_TYPE, "len64"); - if (!len) { - aot_set_last_error("llvm build zero extend failed."); - return false; - } + if (!zero_extend_u64(comp_ctx, &len, "len64")) { + return false; } call_aot_memmove = comp_ctx->is_indirect_mode || comp_ctx->is_jit_mode; @@ -1174,20 +1216,15 @@ aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx) LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type; LLVMValueRef func, params[3]; - POP_I32(len); + POP_MEM_OFFSET(len); POP_I32(val); - POP_I32(dst); + POP_MEM_OFFSET(dst); if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len))) return false; - if (comp_ctx->pointer_size == sizeof(uint64)) { - /* zero extend to uint64 if the target is 64-bit */ - len = LLVMBuildZExt(comp_ctx->builder, len, I64_TYPE, "len64"); - if (!len) { - aot_set_last_error("llvm build zero extend failed."); - return false; - } + if (!zero_extend_u64(comp_ctx, &len, "len64")) { + return false; } param_types[0] = INT8_PTR_TYPE; @@ -1251,7 +1288,7 @@ fail: bool aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint8 atomic_op, uint8 op_type, uint32 align, - uint32 offset, uint32 bytes) + mem_offset_t offset, uint32 bytes) { LLVMValueRef maddr, value, result; bool enable_segue = (op_type == VALUE_TYPE_I32) @@ -1337,7 +1374,7 @@ fail: bool aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint8 op_type, - uint32 align, uint32 offset, uint32 bytes) + uint32 align, mem_offset_t offset, uint32 bytes) { LLVMValueRef maddr, value, expect, result; bool enable_segue = (op_type == VALUE_TYPE_I32) @@ -1442,7 +1479,7 @@ fail: bool aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint8 op_type, uint32 align, uint32 offset, + uint8 op_type, uint32 align, mem_offset_t offset, uint32 bytes) { LLVMValueRef maddr, value, timeout, expect, cmp; @@ -1534,7 +1571,7 @@ fail: bool aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align, - 
uint32 offset, uint32 bytes) + mem_offset_t offset, uint32 bytes) { LLVMValueRef maddr, value, count; LLVMValueRef param_values[3], ret_value, func; diff --git a/core/iwasm/compilation/aot_emit_memory.h b/core/iwasm/compilation/aot_emit_memory.h index e174aa3d..f1828f17 100644 --- a/core/iwasm/compilation/aot_emit_memory.h +++ b/core/iwasm/compilation/aot_emit_memory.h @@ -17,43 +17,43 @@ extern "C" { bool aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset, uint32 bytes, bool sign, - bool atomic); + uint32 align, mem_offset_t offset, uint32 bytes, + bool sign, bool atomic); bool aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset, uint32 bytes, bool sign, - bool atomic); + uint32 align, mem_offset_t offset, uint32 bytes, + bool sign, bool atomic); bool aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset); + uint32 align, mem_offset_t offset); bool aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset); + uint32 align, mem_offset_t offset); bool aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset, uint32 bytes, + uint32 align, mem_offset_t offset, uint32 bytes, bool atomic); bool aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset, uint32 bytes, + uint32 align, mem_offset_t offset, uint32 bytes, bool atomic); bool aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset); + uint32 align, mem_offset_t offset); bool aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset); + uint32 align, mem_offset_t offset); LLVMValueRef aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 offset, uint32 bytes, bool enable_segue); + mem_offset_t offset, uint32 bytes, bool enable_segue); bool aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx); @@ -89,22 +89,22 @@ aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx); bool aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint8 atomic_op, uint8 op_type, uint32 align, - uint32 offset, uint32 bytes); + mem_offset_t offset, uint32 bytes); bool aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint8 op_type, - uint32 align, uint32 offset, uint32 bytes); + uint32 align, mem_offset_t offset, uint32 bytes); bool aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint8 op_type, uint32 align, uint32 offset, + uint8 op_type, uint32 align, mem_offset_t offset, uint32 bytes); bool aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align, - uint32 offset, uint32 bytes); + mem_offset_t offset, uint32 bytes); bool aot_compiler_op_atomic_fence(AOTCompContext *comp_ctx, diff --git a/core/iwasm/compilation/simd/simd_load_store.c b/core/iwasm/compilation/simd/simd_load_store.c index 0e869727..45829f08 100644 --- a/core/iwasm/compilation/simd/simd_load_store.c +++ b/core/iwasm/compilation/simd/simd_load_store.c @@ -13,7 +13,7 @@ /* data_length in bytes */ static LLVMValueRef simd_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align, - uint32 offset, uint32 data_length, LLVMTypeRef ptr_type, + mem_offset_t offset, uint32 data_length, 
LLVMTypeRef ptr_type, LLVMTypeRef data_type, bool enable_segue) { LLVMValueRef maddr, data; @@ -42,7 +42,7 @@ simd_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align, bool aot_compile_simd_v128_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset) + uint32 align, mem_offset_t offset) { bool enable_segue = comp_ctx->enable_segue_v128_load; LLVMTypeRef v128_ptr_type = enable_segue ? V128_PTR_TYPE_GS : V128_PTR_TYPE; @@ -62,7 +62,7 @@ fail: bool aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint8 opcode, uint32 align, uint32 offset) + uint8 opcode, uint32 align, mem_offset_t offset) { LLVMValueRef sub_vector, result; uint32 opcode_index = opcode - SIMD_v128_load8x8_s; @@ -117,7 +117,7 @@ aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, bool aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint8 opcode, uint32 align, uint32 offset) + uint8 opcode, uint32 align, mem_offset_t offset) { uint32 opcode_index = opcode - SIMD_v128_load8_splat; LLVMValueRef element, result; @@ -173,7 +173,7 @@ aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, bool aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint8 opcode, uint32 align, uint32 offset, + uint8 opcode, uint32 align, mem_offset_t offset, uint8 lane_id) { LLVMValueRef element, vector; @@ -218,7 +218,7 @@ aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, bool aot_compile_simd_load_zero(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint8 opcode, uint32 align, uint32 offset) + uint8 opcode, uint32 align, mem_offset_t offset) { LLVMValueRef element, result, mask; uint32 opcode_index = opcode - SIMD_v128_load32_zero; @@ -308,7 +308,7 @@ simd_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align, bool aot_compile_simd_v128_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset) + uint32 align, mem_offset_t offset) { bool enable_segue = comp_ctx->enable_segue_v128_store; LLVMTypeRef v128_ptr_type = enable_segue ? 
V128_PTR_TYPE_GS : V128_PTR_TYPE; @@ -324,7 +324,7 @@ fail: bool aot_compile_simd_store_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint8 opcode, uint32 align, uint32 offset, + uint8 opcode, uint32 align, mem_offset_t offset, uint8 lane_id) { LLVMValueRef element, vector; diff --git a/core/iwasm/compilation/simd/simd_load_store.h b/core/iwasm/compilation/simd/simd_load_store.h index fd118ec1..7a98cbb6 100644 --- a/core/iwasm/compilation/simd/simd_load_store.h +++ b/core/iwasm/compilation/simd/simd_load_store.h @@ -14,32 +14,32 @@ extern "C" { bool aot_compile_simd_v128_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset); + uint32 align, mem_offset_t offset); bool aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint8 opcode, uint32 align, uint32 offset); + uint8 opcode, uint32 align, mem_offset_t offset); bool aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint8 opcode, uint32 align, uint32 offset); + uint8 opcode, uint32 align, mem_offset_t offset); bool aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint8 opcode, uint32 align, uint32 offset, + uint8 opcode, uint32 align, mem_offset_t offset, uint8 lane_id); bool aot_compile_simd_load_zero(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint8 opcode, uint32 align, uint32 offset); + uint8 opcode, uint32 align, mem_offset_t offset); bool aot_compile_simd_v128_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint32 align, uint32 offset); + uint32 align, mem_offset_t offset); bool aot_compile_simd_store_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, - uint8 opcode, uint32 align, uint32 offset, + uint8 opcode, uint32 align, mem_offset_t offset, uint8 lane_id); #ifdef __cplusplus diff --git a/core/iwasm/interpreter/wasm.h b/core/iwasm/interpreter/wasm.h index 5401153a..d56ef3b3 100644 --- a/core/iwasm/interpreter/wasm.h +++ b/core/iwasm/interpreter/wasm.h @@ -500,8 +500,10 @@ typedef struct WASMTable { #if WASM_ENABLE_MEMORY64 != 0 typedef uint64 mem_offset_t; +#define PR_MEM_OFFSET PRIu64 #else typedef uint32 mem_offset_t; +#define PR_MEM_OFFSET PRIu32 #endif typedef struct WASMMemory { diff --git a/core/iwasm/interpreter/wasm_loader.c b/core/iwasm/interpreter/wasm_loader.c index 8af0fd9f..fd447ec3 100644 --- a/core/iwasm/interpreter/wasm_loader.c +++ b/core/iwasm/interpreter/wasm_loader.c @@ -9,6 +9,7 @@ #include "wasm.h" #include "wasm_opcode.h" #include "wasm_runtime.h" +#include "wasm_loader_common.h" #include "../common/wasm_native.h" #include "../common/wasm_memory.h" #if WASM_ENABLE_GC != 0 @@ -2755,43 +2756,6 @@ check_memory_max_size(bool is_memory64, uint32 init_size, uint32 max_size, return true; } -static bool -check_memory_flag(const uint8 mem_flag, char *error_buf, uint32 error_buf_size) -{ - /* Check whether certain features indicated by mem_flag are enabled in - * runtime */ - if (mem_flag > MAX_PAGE_COUNT_FLAG) { -#if WASM_ENABLE_SHARED_MEMORY == 0 - if (mem_flag & SHARED_MEMORY_FLAG) { - LOG_VERBOSE("shared memory flag was found, please enable shared " - "memory, lib-pthread or lib-wasi-threads"); - set_error_buf(error_buf, error_buf_size, "invalid limits flags"); - return false; - } -#endif -#if WASM_ENABLE_MEMORY64 == 0 - if (mem_flag & MEMORY64_FLAG) { - LOG_VERBOSE("memory64 flag was found, please enable memory64"); - set_error_buf(error_buf, error_buf_size, "invalid limits flags"); - return false; - } -#endif - } - - if (mem_flag > 
MAX_PAGE_COUNT_FLAG + SHARED_MEMORY_FLAG + MEMORY64_FLAG) { - set_error_buf(error_buf, error_buf_size, "invalid limits flags"); - return false; - } - else if ((mem_flag & SHARED_MEMORY_FLAG) - && !(mem_flag & MAX_PAGE_COUNT_FLAG)) { - set_error_buf(error_buf, error_buf_size, - "shared memory must have maximum"); - return false; - } - - return true; -} - static bool load_memory_import(const uint8 **p_buf, const uint8 *buf_end, WASMModule *parent_module, const char *sub_module_name, @@ -2824,7 +2788,7 @@ load_memory_import(const uint8 **p_buf, const uint8 *buf_end, return false; } - if (!check_memory_flag(mem_flag, error_buf, error_buf_size)) { + if (!wasm_memory_check_flags(mem_flag, error_buf, error_buf_size, false)) { return false; } @@ -3226,7 +3190,8 @@ load_memory(const uint8 **p_buf, const uint8 *buf_end, WASMMemory *memory, return false; } - if (!check_memory_flag(memory->flags, error_buf, error_buf_size)) { + if (!wasm_memory_check_flags(memory->flags, error_buf, error_buf_size, + false)) { return false; } @@ -14762,9 +14727,9 @@ re_scan: goto fail; } - read_leb_uint32(p, p_end, mem_offset); /* offset */ + read_leb_mem_offset(p, p_end, mem_offset); /* offset */ - POP_AND_PUSH(VALUE_TYPE_I32, VALUE_TYPE_V128); + POP_AND_PUSH(mem_offset_type, VALUE_TYPE_V128); #if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0 func->has_memory_operations = true; #endif @@ -14781,10 +14746,10 @@ re_scan: goto fail; } - read_leb_uint32(p, p_end, mem_offset); /* offset */ + read_leb_mem_offset(p, p_end, mem_offset); /* offset */ POP_V128(); - POP_I32(); + POP_MEM_OFFSET(); #if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0 func->has_memory_operations = true; #endif @@ -14999,7 +14964,7 @@ re_scan: goto fail; } - read_leb_uint32(p, p_end, mem_offset); /* offset */ + read_leb_mem_offset(p, p_end, mem_offset); /* offset */ CHECK_BUF(p, p_end, 1); lane = read_uint8(p); @@ -15009,7 +14974,7 @@ re_scan: } POP_V128(); - POP_I32(); + POP_MEM_OFFSET(); if (opcode1 < SIMD_v128_store8_lane) { PUSH_V128(); } @@ -15030,9 +14995,9 @@ re_scan: goto fail; } - read_leb_uint32(p, p_end, mem_offset); /* offset */ + read_leb_mem_offset(p, p_end, mem_offset); /* offset */ - POP_AND_PUSH(VALUE_TYPE_I32, VALUE_TYPE_V128); + POP_AND_PUSH(mem_offset_type, VALUE_TYPE_V128); #if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0 func->has_memory_operations = true; #endif diff --git a/core/iwasm/interpreter/wasm_mini_loader.c b/core/iwasm/interpreter/wasm_mini_loader.c index ff37d818..efdb06bc 100644 --- a/core/iwasm/interpreter/wasm_mini_loader.c +++ b/core/iwasm/interpreter/wasm_mini_loader.c @@ -11,6 +11,7 @@ #include "wasm_runtime.h" #include "../common/wasm_native.h" #include "../common/wasm_memory.h" +#include "wasm_loader_common.h" #if WASM_ENABLE_FAST_JIT != 0 #include "../fast-jit/jit_compiler.h" #include "../fast-jit/jit_codecache.h" @@ -714,38 +715,6 @@ load_table_import(const uint8 **p_buf, const uint8 *buf_end, return true; } -static bool -check_memory_flag(const uint8 mem_flag) -{ - /* Check whether certain features indicated by mem_flag are enabled in - * runtime */ - if (mem_flag > MAX_PAGE_COUNT_FLAG) { -#if WASM_ENABLE_SHARED_MEMORY == 0 - if (mem_flag & SHARED_MEMORY_FLAG) { - LOG_VERBOSE("shared memory flag was found, please enable shared " - "memory, lib-pthread or lib-wasi-threads"); - return false; - } -#endif -#if WASM_ENABLE_MEMORY64 == 0 - if (mem_flag & MEMORY64_FLAG) { - LOG_VERBOSE("memory64 flag was found, please enable memory64"); - return false; - } -#endif - } - - if (mem_flag > 
MAX_PAGE_COUNT_FLAG + SHARED_MEMORY_FLAG + MEMORY64_FLAG) { - return false; - } - else if ((mem_flag & SHARED_MEMORY_FLAG) - && !(mem_flag & MAX_PAGE_COUNT_FLAG)) { - return false; - } - - return true; -} - static bool load_memory_import(const uint8 **p_buf, const uint8 *buf_end, WASMModule *parent_module, const char *sub_module_name, @@ -766,7 +735,9 @@ load_memory_import(const uint8 **p_buf, const uint8 *buf_end, uint32 declare_max_page_count = 0; read_leb_uint32(p, p_end, mem_flag); - bh_assert(check_memory_flag(mem_flag)); + if (!wasm_memory_check_flags(mem_flag, error_buf, error_buf_size, false)) { + return false; + } #if WASM_ENABLE_APP_FRAMEWORK == 0 is_memory64 = mem_flag & MEMORY64_FLAG; @@ -796,7 +767,6 @@ load_memory_import(const uint8 **p_buf, const uint8 *buf_end, memory->num_bytes_per_page = DEFAULT_NUM_BYTES_PER_PAGE; *p_buf = p; - (void)check_memory_flag; return true; } @@ -891,7 +861,10 @@ load_memory(const uint8 **p_buf, const uint8 *buf_end, WASMMemory *memory, read_leb_uint32(p, p_end, memory->flags); bh_assert(p - p_org <= 1); (void)p_org; - bh_assert(check_memory_flag(memory->flags)); + if (!wasm_memory_check_flags(memory->flags, error_buf, error_buf_size, + false)) { + return false; + } #if WASM_ENABLE_APP_FRAMEWORK == 0 is_memory64 = memory->flags & MEMORY64_FLAG; @@ -916,7 +889,6 @@ load_memory(const uint8 **p_buf, const uint8 *buf_end, WASMMemory *memory, memory->num_bytes_per_page = DEFAULT_NUM_BYTES_PER_PAGE; *p_buf = p; - (void)check_memory_flag; return true; } diff --git a/core/iwasm/interpreter/wasm_runtime.c b/core/iwasm/interpreter/wasm_runtime.c index c0e325b5..0a952224 100644 --- a/core/iwasm/interpreter/wasm_runtime.c +++ b/core/iwasm/interpreter/wasm_runtime.c @@ -1408,19 +1408,23 @@ execute_malloc_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env, #endif WASMExecEnv *exec_env_created = NULL; WASMModuleInstanceCommon *module_inst_old = NULL; - uint32 argv[3], argc; + union { + uint32 u32[3]; + uint64 u64; + } argv; + uint32 argc; bool ret; #if WASM_ENABLE_MEMORY64 != 0 bool is_memory64 = module_inst->memories[0]->is_memory64; if (is_memory64) { argc = 2; - PUT_I64_TO_ADDR(&argv[0], size); + PUT_I64_TO_ADDR(&argv.u64, size); } else #endif { argc = 1; - argv[0] = (uint32)size; + argv.u32[0] = (uint32)size; } /* if __retain is exported, then this module is compiled by @@ -1431,7 +1435,7 @@ execute_malloc_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env, /* the malloc function from assemblyscript is: function __new(size: usize, id: u32) id = 0 means this is an ArrayBuffer object */ - argv[argc] = 0; + argv.u32[argc] = 0; argc++; } @@ -1472,10 +1476,10 @@ execute_malloc_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env, } } - ret = wasm_call_function(exec_env, malloc_func, argc, argv); + ret = wasm_call_function(exec_env, malloc_func, argc, argv.u32); if (retain_func && ret) - ret = wasm_call_function(exec_env, retain_func, 1, argv); + ret = wasm_call_function(exec_env, retain_func, 1, argv.u32); if (module_inst_old) /* Restore the existing exec_env's module inst */ @@ -1487,11 +1491,11 @@ execute_malloc_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env, if (ret) { #if WASM_ENABLE_MEMORY64 != 0 if (is_memory64) - *p_result = GET_I64_FROM_ADDR(&argv[0]); + *p_result = GET_I64_FROM_ADDR(&argv.u64); else #endif { - *p_result = argv[0]; + *p_result = argv.u32[0]; } } return ret; @@ -1506,18 +1510,22 @@ execute_free_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env, #endif WASMExecEnv 
diff --git a/core/iwasm/interpreter/wasm_runtime.c b/core/iwasm/interpreter/wasm_runtime.c
index c0e325b5..0a952224 100644
--- a/core/iwasm/interpreter/wasm_runtime.c
+++ b/core/iwasm/interpreter/wasm_runtime.c
@@ -1408,19 +1408,23 @@ execute_malloc_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
 #endif
     WASMExecEnv *exec_env_created = NULL;
     WASMModuleInstanceCommon *module_inst_old = NULL;
-    uint32 argv[3], argc;
+    union {
+        uint32 u32[3];
+        uint64 u64;
+    } argv;
+    uint32 argc;
     bool ret;
 
 #if WASM_ENABLE_MEMORY64 != 0
     bool is_memory64 = module_inst->memories[0]->is_memory64;
     if (is_memory64) {
         argc = 2;
-        PUT_I64_TO_ADDR(&argv[0], size);
+        PUT_I64_TO_ADDR(&argv.u64, size);
     }
     else
 #endif
     {
         argc = 1;
-        argv[0] = (uint32)size;
+        argv.u32[0] = (uint32)size;
     }
 
     /* if __retain is exported, then this module is compiled by
@@ -1431,7 +1435,7 @@ execute_malloc_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
         /* the malloc function from assemblyscript is:
             function __new(size: usize, id: u32)
            id = 0 means this is an ArrayBuffer object */
-        argv[argc] = 0;
+        argv.u32[argc] = 0;
         argc++;
     }
 
@@ -1472,10 +1476,10 @@ execute_malloc_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
         }
     }
 
-    ret = wasm_call_function(exec_env, malloc_func, argc, argv);
+    ret = wasm_call_function(exec_env, malloc_func, argc, argv.u32);
 
     if (retain_func && ret)
-        ret = wasm_call_function(exec_env, retain_func, 1, argv);
+        ret = wasm_call_function(exec_env, retain_func, 1, argv.u32);
 
     if (module_inst_old)
         /* Restore the existing exec_env's module inst */
@@ -1487,11 +1491,11 @@ execute_malloc_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
     if (ret) {
 #if WASM_ENABLE_MEMORY64 != 0
         if (is_memory64)
-            *p_result = GET_I64_FROM_ADDR(&argv[0]);
+            *p_result = GET_I64_FROM_ADDR(&argv.u64);
         else
 #endif
         {
-            *p_result = argv[0];
+            *p_result = argv.u32[0];
         }
     }
     return ret;
@@ -1506,18 +1510,22 @@ execute_free_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
 #endif
     WASMExecEnv *exec_env_created = NULL;
     WASMModuleInstanceCommon *module_inst_old = NULL;
-    uint32 argv[2], argc;
+    union {
+        uint32 u32[2];
+        uint64 u64;
+    } argv;
+    uint32 argc;
     bool ret;
 
 #if WASM_ENABLE_MEMORY64 != 0
     if (module_inst->memories[0]->is_memory64) {
-        PUT_I64_TO_ADDR(&argv[0], offset);
+        PUT_I64_TO_ADDR(&argv.u64, offset);
         argc = 2;
     }
     else
 #endif
     {
-        argv[0] = (uint32)offset;
+        argv.u32[0] = (uint32)offset;
         argc = 1;
     }
 
@@ -1558,7 +1566,7 @@ execute_free_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
         }
     }
 
-    ret = wasm_call_function(exec_env, free_func, argc, argv);
+    ret = wasm_call_function(exec_env, free_func, argc, argv.u32);
 
     if (module_inst_old)
         /* Restore the existing exec_env's module inst */
@@ -4176,7 +4184,7 @@ fail:
 #if WASM_ENABLE_BULK_MEMORY != 0
 bool
 llvm_jit_memory_init(WASMModuleInstance *module_inst, uint32 seg_index,
-                     uint32 offset, uint32 len, uint32 dst)
+                     uint32 offset, uint32 len, size_t dst)
 {
     WASMMemoryInstance *memory_inst;
     WASMModule *module;
@@ -4211,7 +4219,7 @@ llvm_jit_memory_init(WASMModuleInstance *module_inst, uint32 seg_index,
         (WASMModuleInstanceCommon *)module_inst, (uint64)dst);
 
     SHARED_MEMORY_LOCK(memory_inst);
-    bh_memcpy_s(maddr, (uint32)(memory_inst->memory_data_size - dst),
+    bh_memcpy_s(maddr, CLAMP_U64_TO_U32(memory_inst->memory_data_size - dst),
                 data + offset, len);
     SHARED_MEMORY_UNLOCK(memory_inst);
     return true;
diff --git a/core/iwasm/interpreter/wasm_runtime.h b/core/iwasm/interpreter/wasm_runtime.h
index e60d4f7c..8ff34ee3 100644
--- a/core/iwasm/interpreter/wasm_runtime.h
+++ b/core/iwasm/interpreter/wasm_runtime.h
@@ -760,7 +760,7 @@ llvm_jit_invoke_native(WASMExecEnv *exec_env, uint32 func_idx, uint32 argc,
 #if WASM_ENABLE_BULK_MEMORY != 0
 bool
 llvm_jit_memory_init(WASMModuleInstance *module_inst, uint32 seg_index,
-                     uint32 offset, uint32 len, uint32 dst);
+                     uint32 offset, uint32 len, size_t dst);
 
 bool
 llvm_jit_data_drop(WASMModuleInstance *module_inst, uint32 seg_index);
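execute_malloc_function() and execute_free_function() above now pass a 64-bit size or offset through the first two uint32 argv cells when the instance's memory is memory64 (argc becomes 2 instead of 1). A self-contained sketch of that packing, using memcpy in place of WAMR's PUT_I64_TO_ADDR/GET_I64_FROM_ADDR macros:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Pack a 64-bit value into two consecutive uint32 argument cells and read it
 * back; memcpy keeps the sketch self-contained and alias-safe. */
static void
put_u64(uint32_t *cells, uint64_t value)
{
    memcpy(cells, &value, sizeof(value)); /* occupies cells[0] and cells[1] */
}

static uint64_t
get_u64(const uint32_t *cells)
{
    uint64_t value;
    memcpy(&value, cells, sizeof(value));
    return value;
}

int
main(void)
{
    union {
        uint32_t u32[3]; /* argv layout used by the interpreter call */
        uint64_t u64;
    } argv;
    uint64_t size = 0x1122334455667788ULL;

    /* memory64: the size occupies two cells, so argc would be 2 */
    put_u64(argv.u32, size);
    assert(get_u64(argv.u32) == size);

    /* 32-bit memory: one cell is enough, argc would be 1 */
    argv.u32[0] = (uint32_t)size;
    assert(argv.u32[0] == 0x55667788u);
    return 0;
}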
diff --git a/doc/build_wamr.md b/doc/build_wamr.md
index e8b6ad47..f1c32f44 100644
--- a/doc/build_wamr.md
+++ b/doc/build_wamr.md
@@ -79,7 +79,7 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM
 #### **Enable memory64 feature**
 - **WAMR_BUILD_MEMORY64**=1/0, default to disable if not set
 
-> Note: Currently, the memory64 feature is only supported in classic interpreter running mode.
+> Note: Currently, the memory64 feature is only supported in classic interpreter running mode and AOT mode.
 
 #### **Enable thread manager**
 - **WAMR_BUILD_THREAD_MGR**=1/0, default to disable if not set
diff --git a/product-mini/platforms/alios-things/aos.mk b/product-mini/platforms/alios-things/aos.mk
index 3f25cb98..947a4a91 100644
--- a/product-mini/platforms/alios-things/aos.mk
+++ b/product-mini/platforms/alios-things/aos.mk
@@ -111,6 +111,7 @@ $(NAME)_SOURCES := ${SHARED_ROOT}/platform/alios/alios_platform.c \
                    ${IWASM_ROOT}/common/wasm_runtime_common.c \
                    ${IWASM_ROOT}/common/wasm_native.c \
                    ${IWASM_ROOT}/common/wasm_exec_env.c \
+                   ${IWASM_ROOT}/common/wasm_loader_common.c \
                    ${IWASM_ROOT}/common/wasm_memory.c \
                    ${IWASM_ROOT}/common/wasm_c_api.c \
                    ${IWASM_ROOT}/common/arch/${INVOKE_NATIVE} \
diff --git a/product-mini/platforms/nuttx/wamr.mk b/product-mini/platforms/nuttx/wamr.mk
index 7aac0e35..75bd69be 100644
--- a/product-mini/platforms/nuttx/wamr.mk
+++ b/product-mini/platforms/nuttx/wamr.mk
@@ -451,6 +451,7 @@ CSRCS += nuttx_platform.c \
          wasm_runtime_common.c \
          wasm_native.c \
          wasm_exec_env.c \
+         wasm_loader_common.c \
          wasm_memory.c \
          wasm_c_api.c
 
diff --git a/tests/wamr-test-suites/spec-test-script/runtest.py b/tests/wamr-test-suites/spec-test-script/runtest.py
index 13229d97..3c32481f 100755
--- a/tests/wamr-test-suites/spec-test-script/runtest.py
+++ b/tests/wamr-test-suites/spec-test-script/runtest.py
@@ -1119,9 +1119,6 @@ def compile_wasm_to_aot(wasm_tempfile, aot_tempfile, runner, opts, r, output = '
         cmd.append("--enable-gc")
         cmd.append("--enable-tail-call")
 
-    if opts.memory64:
-        cmd.append("--enable-memory64")
-
     if output == 'object':
         cmd.append("--format=object")
     elif output == 'ir':
@@ -1134,9 +1131,10 @@
 
     # Bounds checks is disabled by default for 64-bit targets, to
     # use the hardware based bounds checks. But it is not supported
-    # in QEMU with NuttX.
-    # Enable bounds checks explicitly for all targets if running in QEMU.
-    if opts.qemu:
+    # in QEMU with NuttX and in memory64 mode.
+    # Enable bounds checks explicitly for all targets when running in QEMU
+    # or in memory64 mode.
+    if opts.qemu or opts.memory64:
         cmd.append("--bounds-checks=1")
 
     # RISCV64 requires -mcmodel=medany, which can be set by --size-level=1
diff --git a/tests/wamr-test-suites/spec-test-script/thread_proposal_remove_memory64_flag_case.patch b/tests/wamr-test-suites/spec-test-script/thread_proposal_remove_memory64_flag_case.patch
new file mode 100644
index 00000000..0ed07e3d
--- /dev/null
+++ b/tests/wamr-test-suites/spec-test-script/thread_proposal_remove_memory64_flag_case.patch
@@ -0,0 +1,20 @@
+diff --git a/test/core/binary.wast b/test/core/binary.wast
+index b9fa438c..08ecee27 100644
+--- a/test/core/binary.wast
++++ b/test/core/binary.wast
+@@ -621,15 +621,6 @@
+ )
+
+ ;; Malformed memory limits flag
+-(assert_malformed
+-  (module binary
+-    "\00asm" "\01\00\00\00"
+-    "\05\03\01"    ;; memory section with one entry
+-    "\04"          ;; malformed memory limits flag
+-    "\00"          ;; min 0
+-  )
+-  "malformed limits flags"
+-)
+ (assert_malformed
+   (module binary
+     "\00asm" "\01\00\00\00"
diff --git a/tests/wamr-test-suites/test_wamr.sh b/tests/wamr-test-suites/test_wamr.sh
index b9b298a5..0b27df46 100755
--- a/tests/wamr-test-suites/test_wamr.sh
+++ b/tests/wamr-test-suites/test_wamr.sh
@@ -409,6 +409,19 @@ function setup_wabt()
     fi
 }
 
+function compile_reference_interpreter()
+{
+    echo "compile the reference interpreter"
+    pushd interpreter
+    make
+    if [ $? -ne 0 ]
+    then
+        echo "Failed to compile the reference interpreter"
+        exit 1
+    fi
+    popd
+}
+
 # TODO: with iwasm only
 function spec_test()
 {
@@ -457,6 +470,7 @@ function spec_test()
 
         git apply ../../spec-test-script/thread_proposal_ignore_cases.patch
         git apply ../../spec-test-script/thread_proposal_fix_atomic_case.patch
+        git apply ../../spec-test-script/thread_proposal_remove_memory64_flag_case.patch
     fi
 
     if [ ${ENABLE_EH} == 1 ]; then
@@ -500,10 +514,7 @@ function spec_test()
             git apply ../../spec-test-script/gc_nuttx_tail_call.patch
         fi
 
-        echo "compile the reference intepreter"
-        pushd interpreter
-        make
-        popd
+        compile_reference_interpreter
     fi
 
     # update memory64 cases
@@ -519,14 +530,11 @@ function spec_test()
         git restore . && git clean -ffd .
        # Reset to commit: "Merge remote-tracking branch 'upstream/main' into merge2"
         git reset --hard 48e69f394869c55b7bbe14ac963c09f4605490b6
-        git checkout 044d0d2e77bdcbe891f7e0b9dd2ac01d56435f0b -- test/core/elem.wast
+        git checkout 044d0d2e77bdcbe891f7e0b9dd2ac01d56435f0b -- test/core/elem.wast test/core/data.wast
         git apply ../../spec-test-script/ignore_cases.patch
         git apply ../../spec-test-script/memory64.patch
 
-        echo "compile the reference intepreter"
-        pushd interpreter
-        make
-        popd
+        compile_reference_interpreter
     fi
 
     popd
diff --git a/wamr-compiler/CMakeLists.txt b/wamr-compiler/CMakeLists.txt
index 6a3f9752..245b4a03 100644
--- a/wamr-compiler/CMakeLists.txt
+++ b/wamr-compiler/CMakeLists.txt
@@ -46,6 +46,7 @@ add_definitions(-DWASM_ENABLE_DUMP_CALL_STACK=1)
 add_definitions(-DWASM_ENABLE_PERF_PROFILING=1)
 add_definitions(-DWASM_ENABLE_LOAD_CUSTOM_SECTION=1)
 add_definitions(-DWASM_ENABLE_MODULE_INST_CONTEXT=1)
+add_definitions(-DWASM_ENABLE_MEMORY64=1)
 add_definitions(-DWASM_ENABLE_GC=1)