implement atomic opcode in AOT/JIT (#329)

Xu Jun
2020-08-03 11:30:26 +08:00
committed by GitHub
parent cc05f8fb1c
commit 29e45e1527
20 changed files with 1447 additions and 158 deletions

View File

@@ -86,6 +86,38 @@ read_leb(const uint8 *buf, const uint8 *buf_end,
res = (int64)res64; \
} while (0)
#define COMPILE_ATOMIC_RMW(OP, NAME) \
case WASM_OP_ATOMIC_RMW_I32_##NAME: \
bytes = 4; \
op_type = VALUE_TYPE_I32; \
goto OP_ATOMIC_##OP; \
case WASM_OP_ATOMIC_RMW_I64_##NAME: \
bytes = 8; \
op_type = VALUE_TYPE_I64; \
goto OP_ATOMIC_##OP; \
case WASM_OP_ATOMIC_RMW_I32_##NAME##8_U: \
bytes = 1; \
op_type = VALUE_TYPE_I32; \
goto OP_ATOMIC_##OP; \
case WASM_OP_ATOMIC_RMW_I32_##NAME##16_U: \
bytes = 2; \
op_type = VALUE_TYPE_I32; \
goto OP_ATOMIC_##OP; \
case WASM_OP_ATOMIC_RMW_I64_##NAME##8_U: \
bytes = 1; \
op_type = VALUE_TYPE_I64; \
goto OP_ATOMIC_##OP; \
case WASM_OP_ATOMIC_RMW_I64_##NAME##16_U: \
bytes = 2; \
op_type = VALUE_TYPE_I64; \
goto OP_ATOMIC_##OP; \
case WASM_OP_ATOMIC_RMW_I64_##NAME##32_U: \
bytes = 4; \
op_type = VALUE_TYPE_I64; \
OP_ATOMIC_##OP: \
bin_op = LLVMAtomicRMWBinOp##OP; \
goto build_atomic_rmw;
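
For reference, a sketch of one expansion: COMPILE_ATOMIC_RMW(Add, ADD) produces seven case labels that record the access width and the wasm value type, then fall into a shared label that selects the LLVM atomicrmw bin op before jumping to build_atomic_rmw. Illustrative expansion only, not part of the diff:

case WASM_OP_ATOMIC_RMW_I32_ADD:
    bytes = 4;
    op_type = VALUE_TYPE_I32;
    goto OP_ATOMIC_Add;
/* ...five more width/type cases... */
case WASM_OP_ATOMIC_RMW_I64_ADD32_U:
    bytes = 4;
    op_type = VALUE_TYPE_I64;
OP_ATOMIC_Add:
    bin_op = LLVMAtomicRMWBinOpAdd;
    goto build_atomic_rmw;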
static bool
aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
{
@@ -286,7 +318,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i32_load(comp_ctx, func_ctx, align, offset,
bytes, sign))
bytes, sign, false))
return false;
break;
@@ -312,7 +344,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i64_load(comp_ctx, func_ctx, align, offset,
bytes, sign))
bytes, sign, false))
return false;
break;
@@ -341,7 +373,8 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
op_i32_store:
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i32_store(comp_ctx, func_ctx, align, offset, bytes))
if (!aot_compile_op_i32_store(comp_ctx, func_ctx, align,
offset, bytes, false))
return false;
break;
@@ -359,7 +392,8 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
op_i64_store:
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i64_store(comp_ctx, func_ctx, align, offset, bytes))
if (!aot_compile_op_i64_store(comp_ctx, func_ctx, align,
offset, bytes, false))
return false;
break;
@@ -810,7 +844,152 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
default:
break;
}
break;
}
#if WASM_ENABLE_SHARED_MEMORY != 0
case WASM_OP_ATOMIC_PREFIX:
{
uint8 bin_op, op_type;
if (frame_ip < frame_ip_end) {
opcode = *frame_ip++;
}
if (opcode != WASM_OP_ATOMIC_FENCE) {
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
}
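/* Editorial note: every 0xFE-prefixed atomic opcode except
   atomic.fence is followed by a memarg -- an alignment exponent
   and a byte offset, each a LEB128-encoded u32 -- which is why
   only WASM_OP_ATOMIC_FENCE skips these two reads. */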
switch (opcode) {
case WASM_OP_ATOMIC_WAIT32:
if (!aot_compile_op_atomic_wait(comp_ctx, func_ctx, VALUE_TYPE_I32,
align, offset, 4))
return false;
break;
case WASM_OP_ATOMIC_WAIT64:
if (!aot_compile_op_atomic_wait(comp_ctx, func_ctx, VALUE_TYPE_I64,
align, offset, 8))
return false;
break;
case WASM_OP_ATOMIC_NOTIFY:
if (!aot_compiler_op_atomic_notify(comp_ctx, func_ctx, align,
offset, bytes))
return false;
break;
case WASM_OP_ATOMIC_I32_LOAD:
bytes = 4;
goto op_atomic_i32_load;
case WASM_OP_ATOMIC_I32_LOAD8_U:
bytes = 1;
goto op_atomic_i32_load;
case WASM_OP_ATOMIC_I32_LOAD16_U:
bytes = 2;
op_atomic_i32_load:
if (!aot_compile_op_i32_load(comp_ctx, func_ctx, align,
offset, bytes, sign, true))
return false;
break;
case WASM_OP_ATOMIC_I64_LOAD:
bytes = 8;
goto op_atomic_i64_load;
case WASM_OP_ATOMIC_I64_LOAD8_U:
bytes = 1;
goto op_atomic_i64_load;
case WASM_OP_ATOMIC_I64_LOAD16_U:
bytes = 2;
goto op_atomic_i64_load;
case WASM_OP_ATOMIC_I64_LOAD32_U:
bytes = 4;
op_atomic_i64_load:
if (!aot_compile_op_i64_load(comp_ctx, func_ctx, align,
offset, bytes, sign, true))
return false;
break;
case WASM_OP_ATOMIC_I32_STORE:
bytes = 4;
goto op_atomic_i32_store;
case WASM_OP_ATOMIC_I32_STORE8:
bytes = 1;
goto op_atomic_i32_store;
case WASM_OP_ATOMIC_I32_STORE16:
bytes = 2;
op_atomic_i32_store:
if (!aot_compile_op_i32_store(comp_ctx, func_ctx, align,
offset, bytes, true))
return false;
break;
case WASM_OP_ATOMIC_I64_STORE:
bytes = 8;
goto op_atomic_i64_store;
case WASM_OP_ATOMIC_I64_STORE8:
bytes = 1;
goto op_atomic_i64_store;
case WASM_OP_ATOMIC_I64_STORE16:
bytes = 2;
goto op_atomic_i64_store;
case WASM_OP_ATOMIC_I64_STORE32:
bytes = 4;
op_atomic_i64_store:
if (!aot_compile_op_i64_store(comp_ctx, func_ctx, align,
offset, bytes, true))
return false;
break;
case WASM_OP_ATOMIC_RMW_I32_CMPXCHG:
bytes = 4;
op_type = VALUE_TYPE_I32;
goto op_atomic_cmpxchg;
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG:
bytes = 8;
op_type = VALUE_TYPE_I64;
goto op_atomic_cmpxchg;
case WASM_OP_ATOMIC_RMW_I32_CMPXCHG8_U:
bytes = 1;
op_type = VALUE_TYPE_I32;
goto op_atomic_cmpxchg;
case WASM_OP_ATOMIC_RMW_I32_CMPXCHG16_U:
bytes = 2;
op_type = VALUE_TYPE_I32;
goto op_atomic_cmpxchg;
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG8_U:
bytes = 1;
op_type = VALUE_TYPE_I64;
goto op_atomic_cmpxchg;
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG16_U:
bytes = 2;
op_type = VALUE_TYPE_I64;
goto op_atomic_cmpxchg;
case WASM_OP_ATOMIC_RMW_I64_CMPXCHG32_U:
bytes = 4;
op_type = VALUE_TYPE_I64;
op_atomic_cmpxchg:
if (!aot_compile_op_atomic_cmpxchg(comp_ctx, func_ctx,
op_type, align,
offset, bytes))
return false;
break;
COMPILE_ATOMIC_RMW(Add, ADD);
COMPILE_ATOMIC_RMW(Sub, SUB);
COMPILE_ATOMIC_RMW(And, AND);
COMPILE_ATOMIC_RMW(Or, OR);
COMPILE_ATOMIC_RMW(Xor, XOR);
COMPILE_ATOMIC_RMW(Xchg, XCHG);
build_atomic_rmw:
if (!aot_compile_op_atomic_rmw(comp_ctx, func_ctx,
bin_op, op_type,
align, offset, bytes))
return false;
break;
default:
break;
}
break;
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */
default:
break;

View File

@@ -18,7 +18,8 @@ static char *exce_block_names[] = {
"exce_undefined_element", /* EXCE_UNDEFINED_ELEMENT */
"exce_uninit_element", /* EXCE_UNINITIALIZED_ELEMENT */
"exce_call_unlinked", /* EXCE_CALL_UNLINKED_IMPORT_FUNC */
"exce_native_stack_overflow" /* EXCE_NATIVE_STACK_OVERFLOW */
"exce_native_stack_overflow", /* EXCE_NATIVE_STACK_OVERFLOW */
"exce_unaligned_atomic" /* EXCE_UNALIGNED_ATOMIC */
};
bool

View File

@@ -205,7 +205,7 @@ fail:
LLVMSetAlignment(value, 1); \
} while (0)
#define BUILD_TRUNC(data_type) do { \
#define BUILD_TRUNC(value, data_type) do { \
if (!(value = LLVMBuildTrunc(comp_ctx->builder, value, \
data_type, "val_trunc"))){ \
aot_set_last_error("llvm build trunc failed."); \
@@ -238,9 +238,79 @@
} \
} while (0)
#if WASM_ENABLE_SHARED_MEMORY != 0
bool
check_memory_alignment(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
LLVMValueRef addr, uint32 align)
{
LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
LLVMBasicBlockRef check_align_succ;
LLVMValueRef align_mask = I32_CONST(((uint32)1 << align) - 1);
LLVMValueRef res;
CHECK_LLVM_CONST(align_mask);
/* Convert pointer to int */
if (!(addr = LLVMBuildPtrToInt(comp_ctx->builder, addr,
I32_TYPE, "address"))) {
aot_set_last_error("llvm build ptr to int failed.");
goto fail;
}
/* The memory address should be aligned */
BUILD_OP(And, addr, align_mask, res, "and");
BUILD_ICMP(LLVMIntNE, res, I32_ZERO, res, "cmp");
/* Add basic blocks */
ADD_BASIC_BLOCK(check_align_succ, "check_align_succ");
LLVMMoveBasicBlockAfter(check_align_succ, block_curr);
if (!aot_emit_exception(comp_ctx, func_ctx,
EXCE_UNALIGNED_ATOMIC,
true, res, check_align_succ)) {
goto fail;
}
SET_BUILD_POS(check_align_succ);
return true;
fail:
return false;
}
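
check_memory_alignment emits an inline guard before every atomic access: the linear-memory address is masked with (1 << align) - 1, and a non-zero remainder branches into the EXCE_UNALIGNED_ATOMIC exception block added above. The generated code behaves roughly like this C sketch (illustrative; aot_raise_unaligned_atomic is a hypothetical stand-in for the emitted trap path):

#include <stdint.h>

extern void aot_raise_unaligned_atomic(void); /* hypothetical trap hook */

static inline void check_align(uintptr_t addr, uint32_t align)
{
    /* align is an exponent: a 4-byte access has align == 2 */
    if ((addr & (((uintptr_t)1 << align) - 1)) != 0)
        aot_raise_unaligned_atomic(); /* EXCE_UNALIGNED_ATOMIC */
}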
#define BUILD_ATOMIC_LOAD(align) do { \
if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
goto fail; \
} \
if (!(value = LLVMBuildLoad(comp_ctx->builder, maddr, \
"data"))) { \
aot_set_last_error("llvm build load failed."); \
goto fail; \
} \
LLVMSetAlignment(value, 1 << align); \
LLVMSetVolatile(value, true); \
LLVMSetOrdering(value, LLVMAtomicOrderingSequentiallyConsistent); \
} while (0)
#define BUILD_ATOMIC_STORE(align) do { \
LLVMValueRef res; \
if (!(check_memory_alignment(comp_ctx, func_ctx, maddr, align))) { \
goto fail; \
} \
if (!(res = LLVMBuildStore(comp_ctx->builder, value, maddr))) { \
aot_set_last_error("llvm build store failed."); \
goto fail; \
} \
LLVMSetAlignment(res, 1 << align); \
LLVMSetVolatile(res, true); \
LLVMSetOrdering(res, LLVMAtomicOrderingSequentiallyConsistent); \
} while (0)
#endif
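
Both macros pin the access to LLVMAtomicOrderingSequentiallyConsistent and mark it volatile, matching the seq-cst semantics the wasm threads proposal gives atomic accesses. As a rough C11 analogy (illustrative helper, not the emitted code), an atomic 16-bit i32 load plus the zero extension done by the callers behaves like:

#include <stdatomic.h>
#include <stdint.h>

static uint32_t i32_atomic_load16_u(const _Atomic uint16_t *maddr)
{
    /* seq_cst load of the narrow cell, then zero-extend to i32 */
    return (uint32_t)atomic_load_explicit(maddr, memory_order_seq_cst);
}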
bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign)
uint32 align, uint32 offset, uint32 bytes,
bool sign, bool atomic)
{
LLVMValueRef maddr, value = NULL;
@@ -250,7 +320,12 @@ aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
switch (bytes) {
case 4:
BUILD_PTR_CAST(INT32_PTR_TYPE);
BUILD_LOAD();
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic)
BUILD_ATOMIC_LOAD(align);
else
#endif
BUILD_LOAD();
break;
case 2:
case 1:
@@ -258,11 +333,20 @@ aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
BUILD_PTR_CAST(INT16_PTR_TYPE);
else
BUILD_PTR_CAST(INT8_PTR_TYPE);
BUILD_LOAD();
if (sign)
BUILD_SIGN_EXT(I32_TYPE);
else
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic) {
BUILD_ATOMIC_LOAD(align);
BUILD_ZERO_EXT(I32_TYPE);
}
else
#endif
{
BUILD_LOAD();
if (sign)
BUILD_SIGN_EXT(I32_TYPE);
else
BUILD_ZERO_EXT(I32_TYPE);
}
break;
default:
bh_assert(0);
@@ -277,7 +361,8 @@ fail:
bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign)
uint32 align, uint32 offset, uint32 bytes,
bool sign, bool atomic)
{
LLVMValueRef maddr, value = NULL;
@@ -287,7 +372,12 @@ aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
switch (bytes) {
case 8:
BUILD_PTR_CAST(INT64_PTR_TYPE);
BUILD_LOAD();
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic)
BUILD_ATOMIC_LOAD(align);
else
#endif
BUILD_LOAD();
break;
case 4:
case 2:
@@ -298,11 +388,20 @@ aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
BUILD_PTR_CAST(INT16_PTR_TYPE);
else
BUILD_PTR_CAST(INT8_PTR_TYPE);
BUILD_LOAD();
if (sign)
BUILD_SIGN_EXT(I64_TYPE);
else
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic) {
BUILD_ATOMIC_LOAD(align);
BUILD_ZERO_EXT(I64_TYPE);
}
else
#endif
{
BUILD_LOAD();
if (sign)
BUILD_SIGN_EXT(I64_TYPE);
else
BUILD_ZERO_EXT(I64_TYPE);
}
break;
default:
bh_assert(0);
@@ -351,7 +450,7 @@ fail:
bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes)
uint32 align, uint32 offset, uint32 bytes, bool atomic)
{
LLVMValueRef maddr, value;
@@ -366,18 +465,23 @@ aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
break;
case 2:
BUILD_PTR_CAST(INT16_PTR_TYPE);
BUILD_TRUNC(INT16_TYPE);
BUILD_TRUNC(value, INT16_TYPE);
break;
case 1:
BUILD_PTR_CAST(INT8_PTR_TYPE);
BUILD_TRUNC(INT8_TYPE);
BUILD_TRUNC(value, INT8_TYPE);
break;
default:
bh_assert(0);
break;
}
BUILD_STORE();
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic)
BUILD_ATOMIC_STORE(align);
else
#endif
BUILD_STORE();
return true;
fail:
return false;
@@ -385,7 +489,7 @@ fail:
bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes)
uint32 align, uint32 offset, uint32 bytes, bool atomic)
{
LLVMValueRef maddr, value;
@@ -400,22 +504,27 @@ aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
break;
case 4:
BUILD_PTR_CAST(INT32_PTR_TYPE);
BUILD_TRUNC(I32_TYPE);
BUILD_TRUNC(value, I32_TYPE);
break;
case 2:
BUILD_PTR_CAST(INT16_PTR_TYPE);
BUILD_TRUNC(INT16_TYPE);
BUILD_TRUNC(value, INT16_TYPE);
break;
case 1:
BUILD_PTR_CAST(INT8_PTR_TYPE);
BUILD_TRUNC(INT8_TYPE);
BUILD_TRUNC(value, INT8_TYPE);
break;
default:
bh_assert(0);
break;
}
BUILD_STORE();
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic)
BUILD_ATOMIC_STORE(align);
else
#endif
BUILD_STORE();
return true;
fail:
return false;
@@ -603,6 +712,36 @@ fail:
return false;
}
#define GET_AOT_FUNCTION(name, argc) do { \
if (!(func_type = LLVMFunctionType(ret_type, param_types, \
argc, false))) { \
aot_set_last_error("llvm add function type failed."); \
return false; \
} \
if (comp_ctx->is_jit_mode) { \
/* JIT mode, call the function directly */ \
if (!(func_ptr_type = LLVMPointerType(func_type, 0))) { \
aot_set_last_error("llvm add pointer type failed."); \
return false; \
} \
if (!(value = I64_CONST((uint64)(uintptr_t)name)) \
|| !(func = LLVMConstIntToPtr(value, func_ptr_type))) { \
aot_set_last_error("create LLVM value failed."); \
return false; \
} \
} \
else { \
char *func_name = #name; \
/* AOT mode, declare the function */ \
if (!(func = LLVMGetNamedFunction(comp_ctx->module, func_name)) \
&& !(func = LLVMAddFunction(comp_ctx->module, \
func_name, func_type))) { \
aot_set_last_error("llvm add function failed."); \
return false; \
} \
} \
} while (0)
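
GET_AOT_FUNCTION resolves a runtime helper in either execution mode: in JIT mode the helper's host address is embedded as an integer constant and cast to a function pointer, so generated code calls straight into the running process; in AOT mode a named function is declared (or reused) in the module and the platform linker binds it later. A minimal usage sketch, assuming the surrounding compiler context (comp_ctx, func_ctx) and a hypothetical helper name:

/* Declare int32 some_runtime_helper(void *inst) and call it */
LLVMTypeRef param_types[1], ret_type, func_type, func_ptr_type;
LLVMValueRef param_values[1], value, func, ret_value;

param_types[0] = INT8_PTR_TYPE;
ret_type = I32_TYPE;
GET_AOT_FUNCTION(some_runtime_helper, 1); /* hypothetical helper */

param_values[0] = func_ctx->aot_inst;
if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
                                param_values, 1, "call"))) {
    aot_set_last_error("llvm build call failed.");
    return false;
}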
#if WASM_ENABLE_BULK_MEMORY != 0
static LLVMValueRef
@@ -691,36 +830,6 @@ fail:
return NULL;
}
#define GET_AOT_FUNCTION(name, argc) do { \
if (!(func_type = LLVMFunctionType(ret_type, param_types, \
argc, false))) { \
aot_set_last_error("llvm add function type failed."); \
return false; \
} \
if (comp_ctx->is_jit_mode) { \
/* JIT mode, call the function directly */ \
if (!(func_ptr_type = LLVMPointerType(func_type, 0))) { \
aot_set_last_error("llvm add pointer type failed."); \
return false; \
} \
if (!(value = I64_CONST((uint64)(uintptr_t)name)) \
|| !(func = LLVMConstIntToPtr(value, func_ptr_type))) { \
aot_set_last_error("create LLVM value failed."); \
return false; \
} \
} \
else { \
char *func_name = #name; \
/* AOT mode, declare the function */ \
if (!(func = LLVMGetNamedFunction(comp_ctx->module, func_name)) \
&& !(func = LLVMAddFunction(comp_ctx->module, \
func_name, func_type))) { \
aot_set_last_error("llvm add function failed."); \
return false; \
} \
} \
} while (0)
bool
aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 seg_index)
@@ -810,6 +919,7 @@ aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
seg = I32_CONST(seg_index);
CHECK_LLVM_CONST(seg);
param_types[0] = INT8_PTR_TYPE;
param_types[1] = I32_TYPE;
@@ -825,7 +935,10 @@ aot_compile_op_data_drop(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
aot_set_last_error("llvm build call failed.");
return false;
}
return true;
fail:
return false;
}
bool
@@ -879,4 +992,308 @@ aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
fail:
return false;
}
#endif /* WASM_ENABLE_BULK_MEMORY */
#endif /* end of WASM_ENABLE_BULK_MEMORY */
#if WASM_ENABLE_SHARED_MEMORY != 0
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 atomic_op, uint8 op_type,
uint32 align, uint32 offset,
uint32 bytes)
{
LLVMValueRef maddr, value, result;
if (op_type == VALUE_TYPE_I32)
POP_I32(value);
else
POP_I64(value);
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
return false;
if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
return false;
switch (bytes) {
case 8:
BUILD_PTR_CAST(INT64_PTR_TYPE);
break;
case 4:
BUILD_PTR_CAST(INT32_PTR_TYPE);
if (op_type == VALUE_TYPE_I64)
BUILD_TRUNC(value, I32_TYPE);
break;
case 2:
BUILD_PTR_CAST(INT16_PTR_TYPE);
BUILD_TRUNC(value, INT16_TYPE);
break;
case 1:
BUILD_PTR_CAST(INT8_PTR_TYPE);
BUILD_TRUNC(value, INT8_TYPE);
break;
default:
bh_assert(0);
break;
}
if (!(result =
LLVMBuildAtomicRMW(comp_ctx->builder,
atomic_op, maddr, value,
LLVMAtomicOrderingSequentiallyConsistent, false))) {
goto fail;
}
LLVMSetVolatile(result, true);
if (op_type == VALUE_TYPE_I32) {
if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
I32_TYPE, "result_i32"))) {
goto fail;
}
PUSH_I32(result);
}
else {
if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
I64_TYPE, "result_i64"))) {
goto fail;
}
PUSH_I64(result);
}
return true;
fail:
return false;
}
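
LLVMBuildAtomicRMW returns the value that was in memory before the operation, which is exactly what the wasm RMW instructions leave on the stack; the zero extension then widens the narrow result back to the operand type. A C11 analogy for i32.atomic.rmw.add (illustrative helper, not part of the commit):

#include <stdatomic.h>
#include <stdint.h>

static uint32_t i32_atomic_rmw_add(_Atomic uint32_t *maddr, uint32_t v)
{
    /* seq_cst fetch-add; returns the old value, like atomicrmw */
    return atomic_fetch_add_explicit(maddr, v, memory_order_seq_cst);
}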
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 op_type, uint32 align,
uint32 offset, uint32 bytes)
{
LLVMValueRef maddr, value, expect, result;
if (op_type == VALUE_TYPE_I32) {
POP_I32(value);
POP_I32(expect);
}
else {
POP_I64(value);
POP_I64(expect);
}
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
return false;
if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
return false;
switch (bytes) {
case 8:
BUILD_PTR_CAST(INT64_PTR_TYPE);
break;
case 4:
BUILD_PTR_CAST(INT32_PTR_TYPE);
if (op_type == VALUE_TYPE_I64) {
BUILD_TRUNC(value, I32_TYPE);
BUILD_TRUNC(expect, I32_TYPE);
}
break;
case 2:
BUILD_PTR_CAST(INT16_PTR_TYPE);
BUILD_TRUNC(value, INT16_TYPE);
BUILD_TRUNC(expect, INT16_TYPE);
break;
case 1:
BUILD_PTR_CAST(INT8_PTR_TYPE);
BUILD_TRUNC(value, INT8_TYPE);
BUILD_TRUNC(expect, INT8_TYPE);
break;
default:
bh_assert(0);
break;
}
if (!(result =
LLVMBuildAtomicCmpXchg(comp_ctx->builder, maddr, expect, value,
LLVMAtomicOrderingSequentiallyConsistent,
LLVMAtomicOrderingSequentiallyConsistent,
false))) {
goto fail;
}
LLVMSetVolatile(result, true);
/* CmpXchg returns an {iN, i1} structure; we need to
extract the previous_value from the structure */
if (!(result =
LLVMBuildExtractValue(comp_ctx->builder,
result, 0, "previous_value"))) {
goto fail;
}
if (op_type == VALUE_TYPE_I32) {
if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
I32_TYPE, "result_i32"))) {
goto fail;
}
PUSH_I32(result);
}
else {
if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
I64_TYPE, "result_i64"))) {
goto fail;
}
PUSH_I64(result);
}
return true;
fail:
return false;
}
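
Unlike C11's compare-exchange, which reports success as a bool, wasm's cmpxchg leaves the value observed at the address on the stack; hence the LLVMBuildExtractValue call pulling field 0 out of the {iN, i1} pair. A C11 analogy (illustrative helper only):

#include <stdatomic.h>
#include <stdint.h>

static uint32_t i32_atomic_cmpxchg(_Atomic uint32_t *maddr,
                                   uint32_t expect, uint32_t value)
{
    /* On failure C11 writes the observed value into expect;
       on success expect already holds the previous value. */
    (void)atomic_compare_exchange_strong_explicit(
        maddr, &expect, value,
        memory_order_seq_cst, memory_order_seq_cst);
    return expect; /* the wasm result: the previous memory value */
}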
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 op_type, uint32 align,
uint32 offset, uint32 bytes)
{
LLVMValueRef maddr, value, timeout, expect, cmp;
LLVMValueRef param_values[5], ret_value, func, is_wait64;
LLVMTypeRef param_types[5], ret_type, func_type, func_ptr_type;
LLVMBasicBlockRef wait_fail, wait_success;
LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
POP_I64(timeout);
if (op_type == VALUE_TYPE_I32) {
POP_I32(expect);
is_wait64 = I8_CONST(false);
if (!(expect =
LLVMBuildZExt(comp_ctx->builder, expect,
I64_TYPE, "expect_i64"))) {
goto fail;
}
}
else {
POP_I64(expect);
is_wait64 = I8_CONST(true);
}
CHECK_LLVM_CONST(is_wait64);
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
return false;
if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
return false;
param_types[0] = INT8_PTR_TYPE;
param_types[1] = INT8_PTR_TYPE;
param_types[2] = I64_TYPE;
param_types[3] = I64_TYPE;
param_types[4] = INT8_TYPE;
ret_type = I32_TYPE;
GET_AOT_FUNCTION(wasm_runtime_atomic_wait, 5);
/* Call function wasm_runtime_atomic_wait() */
param_values[0] = func_ctx->aot_inst;
param_values[1] = maddr;
param_values[2] = expect;
param_values[3] = timeout;
param_values[4] = is_wait64;
if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
param_values, 5, "call"))) {
aot_set_last_error("llvm build call failed.");
return false;
}
BUILD_ICMP(LLVMIntSGT, ret_value, I32_ZERO, cmp, "atomic_wait_ret");
ADD_BASIC_BLOCK(wait_fail, "atomic_wait_fail");
ADD_BASIC_BLOCK(wait_success, "wait_success");
LLVMMoveBasicBlockAfter(wait_fail, block_curr);
LLVMMoveBasicBlockAfter(wait_success, block_curr);
if (!LLVMBuildCondBr(comp_ctx->builder, cmp,
wait_success, wait_fail)) {
aot_set_last_error("llvm build cond br failed.");
goto fail;
}
/* If atomic wait failed, return from this function
so the runtime can catch the exception */
LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
if (aot_func_type->result_count) {
switch (aot_func_type->types[aot_func_type->param_count]) {
case VALUE_TYPE_I32:
LLVMBuildRet(comp_ctx->builder, I32_ZERO);
break;
case VALUE_TYPE_I64:
LLVMBuildRet(comp_ctx->builder, I64_ZERO);
break;
case VALUE_TYPE_F32:
LLVMBuildRet(comp_ctx->builder, F32_ZERO);
break;
case VALUE_TYPE_F64:
LLVMBuildRet(comp_ctx->builder, F64_ZERO);
break;
}
}
else {
LLVMBuildRetVoid(comp_ctx->builder);
}
LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);
PUSH_I32(ret_value);
return true;
fail:
return false;
}
bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes)
{
LLVMValueRef maddr, value, count;
LLVMValueRef param_values[3], ret_value, func;
LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
POP_I32(count);
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
return false;
if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
return false;
param_types[0] = INT8_PTR_TYPE;
param_types[1] = INT8_PTR_TYPE;
param_types[2] = I32_TYPE;
ret_type = I32_TYPE;
GET_AOT_FUNCTION(wasm_runtime_atomic_notify, 3);
/* Call function wasm_runtime_atomic_notify() */
param_values[0] = func_ctx->aot_inst;
param_values[1] = maddr;
param_values[2] = count;
if (!(ret_value = LLVMBuildCall(comp_ctx->builder, func,
param_values, 3, "call"))) {
aot_set_last_error("llvm build call failed.");
return false;
}
PUSH_I32(ret_value);
return true;
fail:
return false;
}
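
From the param_types/ret_type setup in the two handlers above, the runtime helpers are declared with roughly these shapes (reconstructed; parameter names are editorial guesses, only the LLVM-level types are given by the diff):

/* i8*, i8*, i64, i64, i8 -> i32 */
int32 wasm_runtime_atomic_wait(void *module_inst, void *address,
                               uint64 expect, uint64 timeout,
                               uint8 is_wait64);

/* i8*, i8*, i32 -> i32 */
uint32 wasm_runtime_atomic_notify(void *module_inst, void *address,
                                  uint32 count);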
#endif /* end of WASM_ENABLE_SHARED_MEMORY */

View File

@@ -7,6 +7,9 @@
#define _AOT_EMIT_MEMORY_H_
#include "aot_compiler.h"
#if WASM_ENABLE_SHARED_MEMORY != 0
#include "wasm_shared_memory.h"
#endif
#ifdef __cplusplus
extern "C" {
@@ -14,11 +17,13 @@ extern "C" {
bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign);
uint32 align, uint32 offset, uint32 bytes,
bool sign, bool atomic);
bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign);
uint32 align, uint32 offset, uint32 bytes,
bool sign, bool atomic);
bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
@@ -30,11 +35,11 @@ aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes);
uint32 align, uint32 offset, uint32 bytes, bool atomic);
bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes);
uint32 align, uint32 offset, uint32 bytes, bool atomic);
bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
@@ -66,6 +71,31 @@ bool
aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
#endif
#if WASM_ENABLE_SHARED_MEMORY != 0
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 atomic_op, uint8 op_type,
uint32 align, uint32 offset,
uint32 bytes);
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 op_type, uint32 align,
uint32 offset, uint32 bytes);
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 op_type, uint32 align,
uint32 offset, uint32 bytes);
bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes);
#endif
#ifdef __cplusplus
} /* end of extern "C" */
#endif