Add support for multi-memory proposal in classic interpreter (#3742)

Implement the multi-memory proposal for the classic interpreter. The core spec (and bulk memory) opcodes are supported now; atomic opcodes and multi-memory export APIs will be added in the future.

PS: The multi-memory spec tests were patched heavily so that the linking test adapts to the multi-module implementation.
Wenyong Huang
2024-08-21 12:22:23 +08:00
committed by GitHub
parent f4383a9e62
commit 1329e1d3e1
22 changed files with 1658 additions and 262 deletions
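
For illustration only (this example is not part of the commit): runtime-internal code can now address a non-default memory through the two helpers added below; `module_inst` is assumed to be a valid, instantiated module with at least two memories.

    /* Sketch: fetch the second linear memory and grow it by 4 pages.
       Memory index 0 keeps going through the existing wasm_enlarge_memory(). */
    WASMMemoryInstance *mem1 = wasm_get_memory_with_idx(module_inst, 1);
    if (mem1 && !wasm_enlarge_memory_with_idx(module_inst, 4, 1)) {
        /* growth failed; a wasm-level memory.grow would observe -1 */
    }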


@@ -670,6 +670,16 @@ wasm_get_default_memory(WASMModuleInstance *module_inst)
     return NULL;
 }
 
+WASMMemoryInstance *
+wasm_get_memory_with_idx(WASMModuleInstance *module_inst, uint32 index)
+{
+    bh_assert(index < module_inst->memory_count);
+    if (module_inst->memories)
+        return module_inst->memories[index];
+    else
+        return NULL;
+}
+
 void
 wasm_runtime_set_mem_bound_check_bytes(WASMMemoryInstance *memory,
                                        uint64 memory_data_size)
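
Note that bh_assert typically compiles to a no-op in release builds, so the index must be validated before the call. A defensive wrapper, sketched with a hypothetical name (get_memory_checked is not part of this diff):

    static WASMMemoryInstance *
    get_memory_checked(WASMModuleInstance *inst, uint32 idx)
    {
        /* Explicit bounds check rather than relying on bh_assert. */
        if (idx >= inst->memory_count)
            return NULL;
        return wasm_get_memory_with_idx(inst, idx);
    }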
@@ -747,9 +757,14 @@ wasm_mmap_linear_memory(uint64_t map_size, uint64 commit_size)
 }
 
 bool
-wasm_enlarge_memory_internal(WASMModuleInstance *module, uint32 inc_page_count)
+wasm_enlarge_memory_internal(WASMModuleInstance *module, uint32 inc_page_count,
+                             uint32 memidx)
 {
+#if WASM_ENABLE_MULTI_MEMORY != 0
+    WASMMemoryInstance *memory = wasm_get_memory_with_idx(module, memidx);
+#else
     WASMMemoryInstance *memory = wasm_get_default_memory(module);
+#endif
     uint8 *memory_data_old, *memory_data_new, *heap_data_old;
     uint32 num_bytes_per_page, heap_size;
     uint32 cur_page_count, max_page_count, total_page_count;
@@ -960,7 +975,7 @@ wasm_enlarge_memory(WASMModuleInstance *module, uint32 inc_page_count)
     if (module->memory_count > 0)
         shared_memory_lock(module->memories[0]);
 #endif
-    ret = wasm_enlarge_memory_internal(module, inc_page_count);
+    ret = wasm_enlarge_memory_internal(module, inc_page_count, 0);
 #if WASM_ENABLE_SHARED_MEMORY != 0
     if (module->memory_count > 0)
         shared_memory_unlock(module->memories[0]);
@@ -969,6 +984,25 @@ wasm_enlarge_memory(WASMModuleInstance *module, uint32 inc_page_count)
     return ret;
 }
 
+bool
+wasm_enlarge_memory_with_idx(WASMModuleInstance *module, uint32 inc_page_count,
+                             uint32 memidx)
+{
+    bool ret = false;
+
+#if WASM_ENABLE_SHARED_MEMORY != 0
+    if (memidx < module->memory_count)
+        shared_memory_lock(module->memories[memidx]);
+#endif
+    ret = wasm_enlarge_memory_internal(module, inc_page_count, memidx);
+#if WASM_ENABLE_SHARED_MEMORY != 0
+    if (memidx < module->memory_count)
+        shared_memory_unlock(module->memories[memidx]);
+#endif
+    return ret;
+}
+
 void
 wasm_deallocate_linear_memory(WASMMemoryInstance *memory_inst)
 {
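
A sketch of how a memory.grow handler for a non-zero memory index could route through the new entry point; this is not the interpreter's actual opcode handler, and the locals `memidx` and `delta` (decoded from the instruction immediate and popped from the value stack) are assumed:

    WASMMemoryInstance *mem = wasm_get_memory_with_idx(module_inst, memidx);
    uint32 prev = mem ? mem->cur_page_count : 0;
    if (mem && wasm_enlarge_memory_with_idx(module_inst, delta, memidx))
        PUSH_I32(prev); /* success: memory.grow returns the old page count */
    else
        PUSH_I32(-1);   /* failure: memory.grow returns -1 */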


@@ -181,15 +181,36 @@ static RunningMode runtime_running_mode = Mode_Default;
    of signal handler */
 static os_thread_local_attribute WASMExecEnv *exec_env_tls = NULL;
 
+static bool
+is_sig_addr_in_guard_pages(void *sig_addr, WASMModuleInstance *module_inst)
+{
+    WASMMemoryInstance *memory_inst;
+    uint8 *mapped_mem_start_addr = NULL;
+    uint8 *mapped_mem_end_addr = NULL;
+    uint32 i;
+
+    for (i = 0; i < module_inst->memory_count; ++i) {
+        /* To be compatible with multi memory, get the ith memory instance */
+        memory_inst = wasm_get_memory_with_idx(module_inst, i);
+        mapped_mem_start_addr = memory_inst->memory_data;
+        mapped_mem_end_addr = memory_inst->memory_data + 8 * (uint64)BH_GB;
+        if (mapped_mem_start_addr <= (uint8 *)sig_addr
+            && (uint8 *)sig_addr < mapped_mem_end_addr) {
+            /* The address which causes segmentation fault is inside
+               the memory instance's guard regions */
+            return true;
+        }
+    }
+    return false;
+}
+
 #ifndef BH_PLATFORM_WINDOWS
 static void
 runtime_signal_handler(void *sig_addr)
 {
     WASMModuleInstance *module_inst;
-    WASMMemoryInstance *memory_inst;
     WASMJmpBuf *jmpbuf_node;
-    uint8 *mapped_mem_start_addr = NULL;
-    uint8 *mapped_mem_end_addr = NULL;
     uint32 page_size = os_getpagesize();
 #if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
     uint8 *stack_min_addr;
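
The 8 GB span checked above matches the hardware bound-check layout on 64-bit targets: roughly 4 GB of addressable linear memory plus a guard region of the same size, so any base + 32-bit index + 32-bit static offset lands inside the per-memory reservation and faults instead of touching unrelated mappings. A worked bound, for illustration:

    /* Worst-case wasm effective address relative to the base:
       0xFFFFFFFF (index) + 0xFFFFFFFF (static offset) = 0x1FFFFFFFE,
       which is below 8 * BH_GB, so the fault stays in the reservation. */
    uint64 worst_case = (uint64)UINT32_MAX + (uint64)UINT32_MAX;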
@@ -201,23 +222,13 @@ runtime_signal_handler(void *sig_addr)
         && (jmpbuf_node = exec_env_tls->jmpbuf_stack_top)) {
         /* Get mapped mem info of current instance */
         module_inst = (WASMModuleInstance *)exec_env_tls->module_inst;
-        /* Get the default memory instance */
-        memory_inst = wasm_get_default_memory(module_inst);
-        if (memory_inst) {
-            mapped_mem_start_addr = memory_inst->memory_data;
-            mapped_mem_end_addr = memory_inst->memory_data + 8 * (uint64)BH_GB;
-        }
 
 #if WASM_DISABLE_STACK_HW_BOUND_CHECK == 0
         /* Get stack info of current thread */
         stack_min_addr = os_thread_get_stack_boundary();
 #endif
 
-        if (memory_inst
-            && (mapped_mem_start_addr <= (uint8 *)sig_addr
-                && (uint8 *)sig_addr < mapped_mem_end_addr)) {
-            /* The address which causes segmentation fault is inside
-               the memory instance's guard regions */
+        if (is_sig_addr_in_guard_pages(sig_addr, module_inst)) {
             wasm_set_exception(module_inst, "out of bounds memory access");
             os_longjmp(jmpbuf_node->jmpbuf, 1);
         }
@@ -340,16 +351,7 @@ runtime_exception_handler(EXCEPTION_POINTERS *exce_info)
         && (jmpbuf_node = exec_env_tls->jmpbuf_stack_top)) {
         module_inst = (WASMModuleInstance *)exec_env_tls->module_inst;
         if (ExceptionRecord->ExceptionCode == EXCEPTION_ACCESS_VIOLATION) {
-            /* Get the default memory instance */
-            memory_inst = wasm_get_default_memory(module_inst);
-            if (memory_inst) {
-                mapped_mem_start_addr = memory_inst->memory_data;
-                mapped_mem_end_addr =
-                    memory_inst->memory_data + 8 * (uint64)BH_GB;
-            }
-
-            if (memory_inst && mapped_mem_start_addr <= (uint8 *)sig_addr
-                && (uint8 *)sig_addr < mapped_mem_end_addr) {
+            if (is_sig_addr_in_guard_pages(sig_addr, module_inst)) {
                 /* The address which causes segmentation fault is inside
                    the memory instance's guard regions.
                    Set exception and let the wasm func continue to run, when
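
Both handlers now classify faults with the same per-memory walk, which matters once a module has more than one memory: an out-of-bounds access on memory 1 faults in memory 1's guard region, which the old default-memory-only check would not have recognized. A sketch of the kind of access that reaches these handlers (illustrative only, assuming hardware bound checks are enabled and `addr`/`offset` come from the instruction):

    /* A bound-check-free interpreter load: if addr + offset exceeds the
       memory's data size, the dereference faults inside that memory's
       8 GB reservation and the handlers above turn the fault into an
       "out of bounds memory access" wasm exception. */
    uint8 *maddr = memory_inst->memory_data + (uint64)addr + offset;
    uint32 value = *(uint32 *)maddr;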