Implement Fast JIT multi-threading feature (#2134)

- Translate all the opcodes of the threads spec proposal for Fast JIT
- Add the atomic flag for Fast JIT load/store IRs to support atomic load/store
- Add new atomic related Fast JIT IRs and translate them in the codegen
- Add suspend_flags checks in branch opcodes and before/after function calls
- Modify CI to enable Fast JIT multi-threading test

Co-authored-by: TianlongLiang <tianlong.liang@intel.com>
This commit is contained in:
Wenyong Huang
2023-04-20 10:09:34 +08:00
committed by GitHub
parent dfca21d239
commit 7e9bf9cdf5
15 changed files with 2290 additions and 168 deletions

View File

@ -904,6 +904,42 @@ check_copy_arities(const JitBlock *block_dst, JitFrame *jit_frame)
}
}
#if WASM_ENABLE_THREAD_MGR != 0
/**
 * Emit IR that checks exec_env->suspend_flags and, when the terminate
 * bit (bit 0) is set, returns 0 from the JIT-ed function so the thread
 * manager can stop this thread at the check point.
 *
 * The check is emitted into the current basic block; a new basic block
 * holding the RETURN is created as the branch target, and the current
 * basic block is restored before returning.
 *
 * @param cc the compilation context
 * @return true on success, false if the terminate block can't be created
 */
bool
jit_check_suspend_flags(JitCompContext *cc)
{
    JitReg exec_env, suspend_flags, terminate_flag, offset;
    JitBasicBlock *terminate_block, *cur_basic_block;
    JitFrame *jit_frame = cc->jit_frame;

    cur_basic_block = cc->cur_basic_block;
    terminate_block = jit_cc_new_basic_block(cc, 0);
    if (!terminate_block) {
        return false;
    }

    /* Commit register-cached locals/operands back to the wasm frame so
       the frame is consistent if we leave via terminate_block */
    gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);

    exec_env = cc->exec_env_reg;
    suspend_flags = jit_cc_new_reg_I32(cc);
    terminate_flag = jit_cc_new_reg_I32(cc);

    /* terminate_flag = exec_env->suspend_flags & 0x1 (terminate request) */
    offset = jit_cc_new_const_I32(cc, offsetof(WASMExecEnv, suspend_flags));
    GEN_INSN(LDI32, suspend_flags, exec_env, offset);
    GEN_INSN(AND, terminate_flag, suspend_flags, NEW_CONST(I32, 1));
    /* branch to terminate_block when the terminate bit is set */
    GEN_INSN(CMP, cc->cmp_reg, terminate_flag, NEW_CONST(I32, 0));
    GEN_INSN(BNE, cc->cmp_reg, jit_basic_block_label(terminate_block), 0);

    /* terminate_block: return 0 from the JIT-ed function */
    cc->cur_basic_block = terminate_block;
    GEN_INSN(RETURN, NEW_CONST(I32, 0));

    /* resume emitting into the original basic block */
    cc->cur_basic_block = cur_basic_block;
    return true;
}
#endif
static bool
handle_op_br(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip)
{
@ -986,6 +1022,13 @@ fail:
/* Translate the wasm `br` opcode: branch unconditionally to the block at
   depth br_depth, then continue translation at the next reachable block. */
bool
jit_compile_op_br(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip)
{
#if WASM_ENABLE_THREAD_MGR != 0
    /* Branches can form loop back-edges, so insert a suspend check point
       here to let other threads terminate this one */
    if (!jit_check_suspend_flags(cc))
        return false;
#endif

    if (!handle_op_br(cc, br_depth, p_frame_ip))
        return false;
    return handle_next_reachable_block(cc, p_frame_ip);
}
@ -1105,6 +1148,12 @@ jit_compile_op_br_if(JitCompContext *cc, uint32 br_depth,
jit_insn_delete(insn_select);
}
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
return false;
#endif
SET_BUILDER_POS(if_basic_block);
SET_BB_BEGIN_BCIP(if_basic_block, *p_frame_ip - 1);
@ -1144,6 +1193,12 @@ jit_compile_op_br_table(JitCompContext *cc, uint32 *br_depths, uint32 br_count,
uint32 i = 0;
JitOpndLookupSwitch *opnd = NULL;
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
return false;
#endif
cur_basic_block = cc->cur_basic_block;
POP_I32(value);

View File

@ -5,6 +5,7 @@
#include "jit_emit_function.h"
#include "jit_emit_exception.h"
#include "jit_emit_control.h"
#include "../jit_frontend.h"
#include "../jit_codegen.h"
#include "../../interpreter/wasm_runtime.h"
@ -232,6 +233,12 @@ jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
bool is_pointer_arg;
bool return_value = false;
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
goto fail;
#endif
if (func_idx < wasm_module->import_function_count) {
/* The function to call is an import function */
func_import = &wasm_module->import_functions[func_idx].u.function;
@ -275,6 +282,12 @@ jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
goto fail;
}
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
goto fail;
#endif
return true;
}
@ -416,6 +429,12 @@ jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
}
}
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
goto fail;
#endif
/* Clear part of memory regs and table regs as their values
may be changed in the function call */
if (cc->cur_wasm_module->possible_memory_grow)
@ -540,6 +559,12 @@ jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
GEN_INSN(STI32, func_idx, cc->exec_env_reg,
NEW_CONST(I32, offsetof(WASMExecEnv, jit_cache) + 4));
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
goto fail;
#endif
block_import = jit_cc_new_basic_block(cc, 0);
block_nonimport = jit_cc_new_basic_block(cc, 0);
func_return = jit_cc_new_basic_block(cc, 0);
@ -742,6 +767,12 @@ jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
goto fail;
}
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
goto fail;
#endif
/* Clear part of memory regs and table regs as their values
may be changed in the function call */
if (cc->cur_wasm_module->possible_memory_grow)

View File

@ -9,6 +9,7 @@
#include "../jit_frontend.h"
#include "../jit_codegen.h"
#include "../../interpreter/wasm_runtime.h"
#include "jit_emit_control.h"
#ifndef OS_ENABLE_HW_BOUND_CHECK
static JitReg
@ -60,6 +61,14 @@ fail:
}
#endif
#if WASM_ENABLE_SHARED_MEMORY != 0
/* Mark an already-emitted load/store IR instruction as atomic by setting
   bit 0 of its flags_u8, so the codegen can emit an atomic access for it. */
static void
set_load_or_store_atomic(JitInsn *load_or_store_inst)
{
    /* bit 0 of flags_u8 == "atomic access" */
    load_or_store_inst->flags_u8 = load_or_store_inst->flags_u8 | 0x1;
}
#endif
#if UINTPTR_MAX == UINT64_MAX
static JitReg
check_and_seek_on_64bit_platform(JitCompContext *cc, JitReg addr, JitReg offset,
@ -177,23 +186,36 @@ fail:
return 0;
}
/* Emit IR that traps with EXCE_UNALIGNED_ATOMIC when any of the low
 * `align` bits of the effective offset `offset1` are set, i.e. when the
 * atomic access is not aligned to its natural (2^align byte) boundary.
 * Expands in a context where `cc` and `align` are in scope and a `fail:`
 * label is available; `goto fail` is taken when exception emission fails.
 * The 64-bit variant masks with an I64 constant, the 32-bit one with I32. */
#if UINTPTR_MAX == UINT64_MAX
#define CHECK_ALIGNMENT(offset1)                                        \
    do {                                                                \
        /* low `align` bits must all be zero */                         \
        JitReg align_mask = NEW_CONST(I64, ((uint64)1 << align) - 1);   \
        JitReg AND_res = jit_cc_new_reg_I64(cc);                        \
        GEN_INSN(AND, AND_res, offset1, align_mask);                    \
        GEN_INSN(CMP, cc->cmp_reg, AND_res, NEW_CONST(I64, 0));         \
        if (!jit_emit_exception(cc, EXCE_UNALIGNED_ATOMIC, JIT_OP_BNE,  \
                                cc->cmp_reg, NULL))                     \
            goto fail;                                                  \
    } while (0)
#else
#define CHECK_ALIGNMENT(offset1)                                        \
    do {                                                                \
        /* low `align` bits must all be zero */                         \
        JitReg align_mask = NEW_CONST(I32, (1 << align) - 1);           \
        JitReg AND_res = jit_cc_new_reg_I32(cc);                        \
        GEN_INSN(AND, AND_res, offset1, align_mask);                    \
        GEN_INSN(CMP, cc->cmp_reg, AND_res, NEW_CONST(I32, 0));         \
        if (!jit_emit_exception(cc, EXCE_UNALIGNED_ATOMIC, JIT_OP_BNE,  \
                                cc->cmp_reg, NULL))                     \
            goto fail;                                                  \
    } while (0)
#endif
bool
jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool sign, bool atomic)
{
JitReg addr, offset1, value, memory_data;
JitInsn *load_insn = NULL;
POP_I32(addr);
@ -201,6 +223,11 @@ jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
if (!offset1) {
goto fail;
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic) {
CHECK_ALIGNMENT(offset1);
}
#endif
memory_data = get_memory_data_reg(cc->jit_frame, 0);
@ -209,30 +236,30 @@ jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
case 1:
{
if (sign) {
GEN_INSN(LDI8, value, memory_data, offset1);
load_insn = GEN_INSN(LDI8, value, memory_data, offset1);
}
else {
GEN_INSN(LDU8, value, memory_data, offset1);
load_insn = GEN_INSN(LDU8, value, memory_data, offset1);
}
break;
}
case 2:
{
if (sign) {
GEN_INSN(LDI16, value, memory_data, offset1);
load_insn = GEN_INSN(LDI16, value, memory_data, offset1);
}
else {
GEN_INSN(LDU16, value, memory_data, offset1);
load_insn = GEN_INSN(LDU16, value, memory_data, offset1);
}
break;
}
case 4:
{
if (sign) {
GEN_INSN(LDI32, value, memory_data, offset1);
load_insn = GEN_INSN(LDI32, value, memory_data, offset1);
}
else {
GEN_INSN(LDU32, value, memory_data, offset1);
load_insn = GEN_INSN(LDU32, value, memory_data, offset1);
}
break;
}
@ -243,6 +270,13 @@ jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
}
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic && load_insn)
set_load_or_store_atomic(load_insn);
#else
(void)load_insn;
#endif
PUSH_I32(value);
return true;
fail:
@ -254,6 +288,7 @@ jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool sign, bool atomic)
{
JitReg addr, offset1, value, memory_data;
JitInsn *load_insn = NULL;
POP_I32(addr);
@ -261,6 +296,11 @@ jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
if (!offset1) {
goto fail;
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic) {
CHECK_ALIGNMENT(offset1);
}
#endif
memory_data = get_memory_data_reg(cc->jit_frame, 0);
@ -269,40 +309,40 @@ jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
case 1:
{
if (sign) {
GEN_INSN(LDI8, value, memory_data, offset1);
load_insn = GEN_INSN(LDI8, value, memory_data, offset1);
}
else {
GEN_INSN(LDU8, value, memory_data, offset1);
load_insn = GEN_INSN(LDU8, value, memory_data, offset1);
}
break;
}
case 2:
{
if (sign) {
GEN_INSN(LDI16, value, memory_data, offset1);
load_insn = GEN_INSN(LDI16, value, memory_data, offset1);
}
else {
GEN_INSN(LDU16, value, memory_data, offset1);
load_insn = GEN_INSN(LDU16, value, memory_data, offset1);
}
break;
}
case 4:
{
if (sign) {
GEN_INSN(LDI32, value, memory_data, offset1);
load_insn = GEN_INSN(LDI32, value, memory_data, offset1);
}
else {
GEN_INSN(LDU32, value, memory_data, offset1);
load_insn = GEN_INSN(LDU32, value, memory_data, offset1);
}
break;
}
case 8:
{
if (sign) {
GEN_INSN(LDI64, value, memory_data, offset1);
load_insn = GEN_INSN(LDI64, value, memory_data, offset1);
}
else {
GEN_INSN(LDU64, value, memory_data, offset1);
load_insn = GEN_INSN(LDU64, value, memory_data, offset1);
}
break;
}
@ -313,6 +353,13 @@ jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
}
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic && load_insn)
set_load_or_store_atomic(load_insn);
#else
(void)load_insn;
#endif
PUSH_I64(value);
return true;
fail:
@ -370,6 +417,7 @@ jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool atomic)
{
JitReg value, addr, offset1, memory_data;
JitInsn *store_insn = NULL;
POP_I32(value);
POP_I32(addr);
@ -378,23 +426,28 @@ jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
if (!offset1) {
goto fail;
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic) {
CHECK_ALIGNMENT(offset1);
}
#endif
memory_data = get_memory_data_reg(cc->jit_frame, 0);
switch (bytes) {
case 1:
{
GEN_INSN(STI8, value, memory_data, offset1);
store_insn = GEN_INSN(STI8, value, memory_data, offset1);
break;
}
case 2:
{
GEN_INSN(STI16, value, memory_data, offset1);
store_insn = GEN_INSN(STI16, value, memory_data, offset1);
break;
}
case 4:
{
GEN_INSN(STI32, value, memory_data, offset1);
store_insn = GEN_INSN(STI32, value, memory_data, offset1);
break;
}
default:
@ -403,6 +456,12 @@ jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
goto fail;
}
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic && store_insn)
set_load_or_store_atomic(store_insn);
#else
(void)store_insn;
#endif
return true;
fail:
@ -414,6 +473,7 @@ jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool atomic)
{
JitReg value, addr, offset1, memory_data;
JitInsn *store_insn = NULL;
POP_I64(value);
POP_I32(addr);
@ -422,6 +482,11 @@ jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
if (!offset1) {
goto fail;
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic) {
CHECK_ALIGNMENT(offset1);
}
#endif
if (jit_reg_is_const(value) && bytes < 8) {
value = NEW_CONST(I32, (int32)jit_cc_get_const_I64(cc, value));
@ -432,22 +497,22 @@ jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
switch (bytes) {
case 1:
{
GEN_INSN(STI8, value, memory_data, offset1);
store_insn = GEN_INSN(STI8, value, memory_data, offset1);
break;
}
case 2:
{
GEN_INSN(STI16, value, memory_data, offset1);
store_insn = GEN_INSN(STI16, value, memory_data, offset1);
break;
}
case 4:
{
GEN_INSN(STI32, value, memory_data, offset1);
store_insn = GEN_INSN(STI32, value, memory_data, offset1);
break;
}
case 8:
{
GEN_INSN(STI64, value, memory_data, offset1);
store_insn = GEN_INSN(STI64, value, memory_data, offset1);
break;
}
default:
@ -456,6 +521,12 @@ jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
goto fail;
}
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic && store_insn)
set_load_or_store_atomic(store_insn);
#else
(void)store_insn;
#endif
return true;
fail:
@ -774,10 +845,153 @@ fail:
#endif
#if WASM_ENABLE_SHARED_MEMORY != 0
#define GEN_AT_RMW_INSN(op, op_type, bytes, result, value, memory_data, \
offset1) \
do { \
switch (bytes) { \
case 1: \
{ \
insn = GEN_INSN(AT_##op##U8, result, value, memory_data, \
offset1); \
break; \
} \
case 2: \
{ \
insn = GEN_INSN(AT_##op##U16, result, value, memory_data, \
offset1); \
break; \
} \
case 4: \
{ \
if (op_type == VALUE_TYPE_I32) \
insn = GEN_INSN(AT_##op##I32, result, value, memory_data, \
offset1); \
else \
insn = GEN_INSN(AT_##op##U32, result, value, memory_data, \
offset1); \
break; \
} \
case 8: \
{ \
insn = GEN_INSN(AT_##op##I64, result, value, memory_data, \
offset1); \
break; \
} \
default: \
{ \
bh_assert(0); \
goto fail; \
} \
} \
} while (0)
/**
 * Translate a wasm atomic read-modify-write opcode
 * (i32/i64.atomic.rmw[8|16|32][_u].{add,sub,and,or,xor,xchg}).
 *
 * Pops the operand value and the address, bounds-checks and computes the
 * memory offset via check_and_seek(), emits an alignment trap check, then
 * emits the matching AT_* IR and pushes the result (per wasm semantics
 * this is the value previously held in memory).
 *
 * @param cc the compilation context
 * @param atomic_op one of the AtomicRMWBinOp* operation kinds
 * @param op_type VALUE_TYPE_I32 or VALUE_TYPE_I64
 * @param align alignment immediate of the opcode (used as a shift count)
 * @param offset constant memory offset immediate of the opcode
 * @param bytes access width in bytes (1/2/4/8)
 * @return true on success, false on failure (also on non-x86-64 targets,
 *         where atomic rmw is not implemented)
 */
bool
jit_compile_op_atomic_rmw(JitCompContext *cc, uint8 atomic_op, uint8 op_type,
                          uint32 align, uint32 offset, uint32 bytes)
{
    JitReg addr, offset1, memory_data, value, result, eax_hreg, rax_hreg,
        ebx_hreg, rbx_hreg;
    JitInsn *insn = NULL;
    bool is_i32 = op_type == VALUE_TYPE_I32;
    /* and/or/xor are lowered through a cmpxchg loop and need extra hard
       registers locked below */
    bool is_logical_op = atomic_op == AtomicRMWBinOpAnd
                         || atomic_op == AtomicRMWBinOpOr
                         || atomic_op == AtomicRMWBinOpXor;

    /* currently we only implement atomic rmw on x86-64 target */
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    /* For atomic logical binary ops, it implicitly uses rax in cmpxchg
     * instruction and implicitly uses rbx for storing temp value in the
     * generated loop */
    eax_hreg = jit_codegen_get_hreg_by_name("eax");
    rax_hreg = jit_codegen_get_hreg_by_name("rax");
    ebx_hreg = jit_codegen_get_hreg_by_name("ebx");
    rbx_hreg = jit_codegen_get_hreg_by_name("rbx");

    bh_assert(op_type == VALUE_TYPE_I32 || op_type == VALUE_TYPE_I64);
    if (op_type == VALUE_TYPE_I32) {
        POP_I32(value);
    }
    else {
        POP_I64(value);
    }

    POP_I32(addr);
    /* bounds-check the access and compute the in-memory offset */
    offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1) {
        goto fail;
    }
    /* atomic accesses must be naturally aligned, otherwise trap */
    CHECK_ALIGNMENT(offset1);

    memory_data = get_memory_data_reg(cc->jit_frame, 0);

    if (op_type == VALUE_TYPE_I32)
        result = jit_cc_new_reg_I32(cc);
    else
        result = jit_cc_new_reg_I64(cc);

    switch (atomic_op) {
        case AtomicRMWBinOpAdd:
        {
            GEN_AT_RMW_INSN(ADD, op_type, bytes, result, value, memory_data,
                            offset1);
            break;
        }
        case AtomicRMWBinOpSub:
        {
            GEN_AT_RMW_INSN(SUB, op_type, bytes, result, value, memory_data,
                            offset1);
            break;
        }
        case AtomicRMWBinOpAnd:
        {
            GEN_AT_RMW_INSN(AND, op_type, bytes, result, value, memory_data,
                            offset1);
            break;
        }
        case AtomicRMWBinOpOr:
        {
            GEN_AT_RMW_INSN(OR, op_type, bytes, result, value, memory_data,
                            offset1);
            break;
        }
        case AtomicRMWBinOpXor:
        {
            GEN_AT_RMW_INSN(XOR, op_type, bytes, result, value, memory_data,
                            offset1);
            break;
        }
        case AtomicRMWBinOpXchg:
        {
            GEN_AT_RMW_INSN(XCHG, op_type, bytes, result, value, memory_data,
                            offset1);
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }

    /* NOTE(review): insn is only NULL-checked for the logical ops;
       presumably failures of GEN_AT_RMW_INSN for add/sub/xchg surface
       elsewhere -- confirm */
    if (is_logical_op
        && (!insn
            || !jit_lock_reg_in_insn(cc, insn, is_i32 ? eax_hreg : rax_hreg)
            || !jit_lock_reg_in_insn(cc, insn, is_i32 ? ebx_hreg : rbx_hreg))) {
        jit_set_last_error(
            cc, "generate atomic logical insn or lock ra&rb hreg failed");
        goto fail;
    }

    if (op_type == VALUE_TYPE_I32)
        PUSH_I32(result);
    else
        PUSH_I64(result);

    return true;
#endif /* defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) */
fail:
    /* reached on error, and unconditionally on unsupported targets */
    return false;
}
@ -785,6 +999,93 @@ bool
/* Translate a wasm atomic compare-exchange opcode
 * (i32/i64.atomic.rmw[8|16|32][_u].cmpxchg).
 *
 * Pops the replacement value, the expected value and the address;
 * bounds-checks and computes the memory offset, emits an alignment trap
 * check, then emits an AT_CMPXCHG* IR and pushes the value read from
 * memory.  Returns true on success, false on failure (also on
 * non-x86-64 targets, where cmpxchg is not implemented). */
jit_compile_op_atomic_cmpxchg(JitCompContext *cc, uint8 op_type, uint32 align,
                              uint32 offset, uint32 bytes)
{
    JitReg addr, offset1, memory_data, value, expect, result;
    bool is_i32 = op_type == VALUE_TYPE_I32;

    /* currently we only implement atomic cmpxchg on x86-64 target */
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    /* cmpxchg will use register al/ax/eax/rax to store parameter expected
     * value, and the read result will also be stored to al/ax/eax/rax */
    JitReg eax_hreg = jit_codegen_get_hreg_by_name("eax");
    JitReg rax_hreg = jit_codegen_get_hreg_by_name("rax");
    JitInsn *insn = NULL;

    bh_assert(op_type == VALUE_TYPE_I32 || op_type == VALUE_TYPE_I64);
    if (is_i32) {
        POP_I32(value);
        POP_I32(expect);
        result = jit_cc_new_reg_I32(cc);
    }
    else {
        POP_I64(value);
        POP_I64(expect);
        result = jit_cc_new_reg_I64(cc);
    }
    POP_I32(addr);

    /* bounds-check the access and compute the in-memory offset */
    offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1) {
        goto fail;
    }
    /* atomic accesses must be naturally aligned, otherwise trap */
    CHECK_ALIGNMENT(offset1);

    memory_data = get_memory_data_reg(cc->jit_frame, 0);

    /* load the expected value into the hreg cmpxchg implicitly reads */
    GEN_INSN(MOV, is_i32 ? eax_hreg : rax_hreg, expect);
    switch (bytes) {
        case 1:
        {
            insn = GEN_INSN(AT_CMPXCHGU8, value, is_i32 ? eax_hreg : rax_hreg,
                            memory_data, offset1);
            break;
        }
        case 2:
        {
            insn = GEN_INSN(AT_CMPXCHGU16, value, is_i32 ? eax_hreg : rax_hreg,
                            memory_data, offset1);
            break;
        }
        case 4:
        {
            /* i32 operand uses the I32 IR; i64 operand on a 4-byte access
               uses the U32 variant */
            if (op_type == VALUE_TYPE_I32)
                insn =
                    GEN_INSN(AT_CMPXCHGI32, value, is_i32 ? eax_hreg : rax_hreg,
                             memory_data, offset1);
            else
                insn =
                    GEN_INSN(AT_CMPXCHGU32, value, is_i32 ? eax_hreg : rax_hreg,
                             memory_data, offset1);
            break;
        }
        case 8:
        {
            insn = GEN_INSN(AT_CMPXCHGI64, value, is_i32 ? eax_hreg : rax_hreg,
                            memory_data, offset1);
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }
    /* pin eax/rax to the cmpxchg instruction for the register allocator */
    if (!insn
        || !jit_lock_reg_in_insn(cc, insn, is_i32 ? eax_hreg : rax_hreg)) {
        jit_set_last_error(cc, "generate cmpxchg insn or lock ra hreg failed");
        goto fail;
    }
    /* the value read from memory comes back in eax/rax */
    GEN_INSN(MOV, result, is_i32 ? eax_hreg : rax_hreg);

    if (is_i32)
        PUSH_I32(result);
    else
        PUSH_I64(result);
    return true;
#endif /* defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) */
fail:
    /* reached on error, and unconditionally on unsupported targets */
    return false;
}
@ -812,8 +1113,10 @@ jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
JitReg offset1 = check_and_seek(cc, addr, offset, bytes);
if (!offset1)
goto fail;
JitReg maddr = jit_cc_new_reg_I64(cc);
CHECK_ALIGNMENT(maddr, memory_data, offset1);
CHECK_ALIGNMENT(offset1);
JitReg maddr = jit_cc_new_reg_ptr(cc);
GEN_INSN(ADD, maddr, memory_data, offset1);
// Prepare `wasm_runtime_atomic_wait` arguments
JitReg res = jit_cc_new_reg_I32(cc);
@ -835,6 +1138,12 @@ jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
goto fail;
PUSH_I32(res);
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
goto fail;
#endif
return true;
fail:
return false;
@ -854,8 +1163,10 @@ jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
JitReg offset1 = check_and_seek(cc, addr, offset, bytes);
if (!offset1)
goto fail;
JitReg maddr = jit_cc_new_reg_I64(cc);
CHECK_ALIGNMENT(maddr, memory_data, offset1);
CHECK_ALIGNMENT(offset1);
JitReg maddr = jit_cc_new_reg_ptr(cc);
GEN_INSN(ADD, maddr, memory_data, offset1);
// Prepare `wasm_runtime_atomic_notify` arguments
JitReg res = jit_cc_new_reg_I32(cc);
@ -879,4 +1190,11 @@ jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
fail:
return false;
}
/* Translate the wasm atomic.fence opcode by emitting a FENCE IR; the
   barrier semantics are realized by the codegen when lowering FENCE. */
bool
jit_compiler_op_atomic_fence(JitCompContext *cc)
{
    GEN_INSN(FENCE);
    return true;
}
#endif

View File

@ -80,6 +80,9 @@ jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
bool
jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes);
/* Translate the wasm atomic.fence opcode (emits a FENCE IR) */
bool
jit_compiler_op_atomic_fence(JitCompContext *cc);
#endif
#ifdef __cplusplus