Marcin Kolny
2024-05-13 04:03:38 +01:00
committed by GitHub
parent c85bada2a9
commit fe5e7a9981
26 changed files with 521 additions and 294 deletions

View File

@@ -84,37 +84,46 @@ read_leb(const uint8 *buf, const uint8 *buf_end, uint32 *p_offset,
}
/* NOLINTNEXTLINE */
#define read_leb_uint32(p, p_end, res) \
do { \
uint32 off = 0; \
uint64 res64; \
if (!read_leb(p, p_end, &off, 32, false, &res64)) \
return false; \
p += off; \
res = (uint32)res64; \
#define read_leb_generic(p, p_end, res, res_type, sign) \
do { \
uint32 off = 0; \
uint64 res64; \
if (!read_leb(p, p_end, &off, sizeof(res_type) << 3, sign, &res64)) \
return false; \
p += off; \
res = (res_type)res64; \
} while (0)
/* NOLINTNEXTLINE */
#define read_leb_int32(p, p_end, res) \
do { \
uint32 off = 0; \
uint64 res64; \
if (!read_leb(p, p_end, &off, 32, true, &res64)) \
return false; \
p += off; \
res = (int32)res64; \
} while (0)
#define read_leb_int32(p, p_end, res) \
read_leb_generic(p, p_end, res, int32, true)
/* NOLINTNEXTLINE */
#define read_leb_int64(p, p_end, res) \
do { \
uint32 off = 0; \
uint64 res64; \
if (!read_leb(p, p_end, &off, 64, true, &res64)) \
return false; \
p += off; \
res = (int64)res64; \
#define read_leb_int64(p, p_end, res) \
read_leb_generic(p, p_end, res, int64, true)
/* NOLINTNEXTLINE */
#define read_leb_uint32(p, p_end, res) \
read_leb_generic(p, p_end, res, uint32, false)
/* NOLINTNEXTLINE */
#define read_leb_uint64(p, p_end, res) \
read_leb_generic(p, p_end, res, uint64, false)
/* NOLINTNEXTLINE */
#if WASM_ENABLE_MEMORY64 != 0
#define read_leb_mem_offset(p, p_end, res) \
do { \
if (IS_MEMORY64) { \
read_leb_uint64(p, p_end, res); \
} \
else { \
read_leb_uint32(p, p_end, res); \
} \
} while (0)
#else
#define read_leb_mem_offset read_leb_uint32
#endif
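
The refactor above folds the four hand-written LEB decode macros into a single `read_leb_generic`, which derives the decode width from the result type (`sizeof(res_type) << 3`) and casts the 64-bit intermediate back down; `read_leb_mem_offset` then picks the 32-bit or 64-bit unsigned decoder per module, since memory64 encodes memarg offsets as u64 LEBs. For reference, a minimal standalone model of the bounded unsigned decode that `read_leb` performs (a hypothetical helper for illustration, not WAMR's implementation, which also validates unused bits and handles signed encodings):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Decode an unsigned LEB128 value of at most `maxbits` bits (32 or 64,
 * i.e. sizeof(res_type) << 3 in the macro above), advancing *p_offset. */
static bool
leb_decode_unsigned(const uint8_t *buf, const uint8_t *buf_end,
                    uint32_t *p_offset, uint32_t maxbits, uint64_t *result)
{
    uint64_t value = 0;
    uint32_t shift = 0;

    /* a u32 LEB may occupy at most 5 bytes, a u64 at most 10 */
    while (buf + *p_offset < buf_end && shift < maxbits + 7) {
        uint8_t byte = buf[(*p_offset)++];
        value |= (uint64_t)(byte & 0x7f) << shift;
        shift += 7;
        if (!(byte & 0x80)) { /* continuation bit clear: done */
            *result = value;
            return true;
        }
    }
    return false; /* truncated buffer or overlong encoding */
}

int main(void)
{
    const uint8_t enc[] = { 0xe5, 0x8e, 0x26 }; /* LEB128 for 624485 */
    uint32_t off = 0;
    uint64_t v;
    if (leb_decode_unsigned(enc, enc + sizeof(enc), &off, 32, &v))
        printf("decoded %llu from %u bytes\n", (unsigned long long)v, off);
    return 0;
}
```
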
/**
* Since wamrc uses a full feature Wasm loader,
@@ -135,6 +144,13 @@ aot_validate_wasm(AOTCompContext *comp_ctx)
}
}
#if WASM_ENABLE_MEMORY64 != 0
if (comp_ctx->pointer_size < sizeof(uint64) && IS_MEMORY64) {
aot_set_last_error("Compiling wasm64 to 32bit platform is not allowed");
return false;
}
#endif
return true;
}
@@ -933,7 +949,8 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
uint16 result_count;
uint32 br_depth, *br_depths, br_count;
uint32 func_idx, type_idx, mem_idx, local_idx, global_idx, i;
uint32 bytes = 4, align, offset;
uint32 bytes = 4, align;
mem_offset_t offset;
uint32 type_index;
bool sign = true;
int32 i32_const;
@@ -1892,7 +1909,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
sign = (opcode == WASM_OP_I32_LOAD16_S) ? true : false;
op_i32_load:
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i32_load(comp_ctx, func_ctx, align, offset,
bytes, sign, false))
return false;
@@ -1918,7 +1935,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
sign = (opcode == WASM_OP_I64_LOAD32_S) ? true : false;
op_i64_load:
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i64_load(comp_ctx, func_ctx, align, offset,
bytes, sign, false))
return false;
@@ -1926,14 +1943,14 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case WASM_OP_F32_LOAD:
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_f32_load(comp_ctx, func_ctx, align, offset))
return false;
break;
case WASM_OP_F64_LOAD:
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_f64_load(comp_ctx, func_ctx, align, offset))
return false;
break;
@@ -1948,7 +1965,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
bytes = 2;
op_i32_store:
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i32_store(comp_ctx, func_ctx, align, offset,
bytes, false))
return false;
@@ -1967,7 +1984,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
bytes = 4;
op_i64_store:
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i64_store(comp_ctx, func_ctx, align, offset,
bytes, false))
return false;
@@ -1975,7 +1992,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case WASM_OP_F32_STORE:
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_f32_store(comp_ctx, func_ctx, align,
offset))
return false;
@@ -1983,7 +2000,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case WASM_OP_F64_STORE:
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_f64_store(comp_ctx, func_ctx, align,
offset))
return false;
@@ -2540,7 +2557,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
if (opcode != WASM_OP_ATOMIC_FENCE) {
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
}
switch (opcode) {
case WASM_OP_ATOMIC_WAIT32:
@@ -2705,7 +2722,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_load:
{
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_v128_load(comp_ctx, func_ctx,
align, offset))
return false;
@@ -2720,7 +2737,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_load32x2_u:
{
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_load_extend(
comp_ctx, func_ctx, opcode, align, offset))
return false;
@@ -2733,7 +2750,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_load64_splat:
{
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_load_splat(comp_ctx, func_ctx,
opcode, align, offset))
return false;
@@ -2743,7 +2760,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_store:
{
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_v128_store(comp_ctx, func_ctx,
align, offset))
return false;
@@ -3006,7 +3023,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_load64_lane:
{
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_load_lane(comp_ctx, func_ctx,
opcode, align, offset,
*frame_ip++))
@@ -3020,7 +3037,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_store64_lane:
{
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_store_lane(comp_ctx, func_ctx,
opcode, align, offset,
*frame_ip++))
@@ -3032,7 +3049,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_load64_zero:
{
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_load_zero(comp_ctx, func_ctx,
opcode, align, offset))
return false;

View File

@@ -519,6 +519,15 @@ set_local_gc_ref(AOTCompFrame *frame, int n, LLVMValueRef value, uint8 ref_type)
wasm_runtime_free(aot_value); \
} while (0)
#if WASM_ENABLE_MEMORY64 != 0
#define IS_MEMORY64 \
(comp_ctx->comp_data->memories[0].memory_flags & MEMORY64_FLAG)
#define MEMORY64_COND_VALUE(VAL_IF_ENABLED, VAL_IF_DISABLED) \
(IS_MEMORY64 ? VAL_IF_ENABLED : VAL_IF_DISABLED)
#else
#define MEMORY64_COND_VALUE(VAL_IF_ENABLED, VAL_IF_DISABLED) (VAL_IF_DISABLED)
#endif
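
`MEMORY64_COND_VALUE` keeps every call site identical whether or not the feature is compiled in: with `WASM_ENABLE_MEMORY64` off, the preprocessor collapses it to its second argument; with it on, it becomes a runtime test of the module's memory flags. A standalone sketch of the pattern (the names and the 0x04 flag bit follow the memory64 proposal's binary format but are assumptions here, and unlike WAMR's macro, which reads `comp_ctx` directly, this one takes the flags as a parameter):

```c
#include <stdio.h>

#define FEATURE_MEMORY64 1 /* stand-in for WASM_ENABLE_MEMORY64 */
#define MEM64_FLAG 0x04    /* assumed flag bit marking a 64-bit memory */

#if FEATURE_MEMORY64 != 0
/* feature compiled in: decide per module, at run time */
#define MEM64_COND_VALUE(flags, if_on, if_off) \
    (((flags)&MEM64_FLAG) ? (if_on) : (if_off))
#else
/* feature compiled out: the test vanishes at preprocessing time */
#define MEM64_COND_VALUE(flags, if_on, if_off) (if_off)
#endif

int main(void)
{
    printf("wasm32 module pops offsets as %s\n",
           MEM64_COND_VALUE(0x00, "i64", "i32"));
    printf("wasm64 module pops offsets as %s\n",
           MEM64_COND_VALUE(MEM64_FLAG, "i64", "i32"));
    return 0;
}
```

This is why `POP_MEM_OFFSET`, `POP_PAGE_COUNT`, and `PUSH_PAGE_COUNT` below need no `#if` guards of their own.
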
#define POP_I32(v) POP(v, VALUE_TYPE_I32)
#define POP_I64(v) POP(v, VALUE_TYPE_I64)
#define POP_F32(v) POP(v, VALUE_TYPE_F32)
@@ -527,6 +536,10 @@ set_local_gc_ref(AOTCompFrame *frame, int n, LLVMValueRef value, uint8 ref_type)
#define POP_FUNCREF(v) POP(v, VALUE_TYPE_FUNCREF)
#define POP_EXTERNREF(v) POP(v, VALUE_TYPE_EXTERNREF)
#define POP_GC_REF(v) POP(v, VALUE_TYPE_GC_REF)
#define POP_MEM_OFFSET(v) \
POP(v, MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32))
#define POP_PAGE_COUNT(v) \
POP(v, MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32))
#define POP_COND(llvm_value) \
do { \
@@ -590,6 +603,8 @@ set_local_gc_ref(AOTCompFrame *frame, int n, LLVMValueRef value, uint8 ref_type)
#define PUSH_FUNCREF(v) PUSH(v, VALUE_TYPE_FUNCREF)
#define PUSH_EXTERNREF(v) PUSH(v, VALUE_TYPE_EXTERNREF)
#define PUSH_GC_REF(v) PUSH(v, VALUE_TYPE_GC_REF)
#define PUSH_PAGE_COUNT(v) \
PUSH(v, MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32))
#define TO_LLVM_TYPE(wasm_type) \
wasm_type_to_llvm_type(comp_ctx, &comp_ctx->basic_types, wasm_type)

View File

@@ -38,6 +38,20 @@
#define SET_BUILD_POS(block) LLVMPositionBuilderAtEnd(comp_ctx->builder, block)
static bool
zero_extend_u64(AOTCompContext *comp_ctx, LLVMValueRef *value, const char *name)
{
if (comp_ctx->pointer_size == sizeof(uint64)) {
/* zero extend to uint64 if the target is 64-bit */
*value = LLVMBuildZExt(comp_ctx->builder, *value, I64_TYPE, name);
if (!*value) {
aot_set_last_error("llvm build zero extend failed.");
return false;
}
}
return true;
}
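
`zero_extend_u64` factors out a widening step that later hunks in this diff apply to the `dst` and `len` operands of memory.init, memory.copy, and memory.fill, replacing two previously inlined copies. The choice of zero- over sign-extension matters because wasm memory offsets and lengths are unsigned; a standalone C illustration of the difference the helper preserves:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t len32 = 0xFFFFFFFFu; /* largest wasm32 length */

    uint64_t zext = (uint64_t)len32;        /* what LLVMBuildZExt does */
    int64_t sext = (int64_t)(int32_t)len32; /* what a sign extend would do */

    printf("zero extend: %llu\n", (unsigned long long)zext); /* 4294967295 */
    printf("sign extend: %lld\n", (long long)sext);          /* -1 */
    return 0;
}
```
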
static LLVMValueRef
get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 bytes)
@@ -82,9 +96,10 @@ get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 offset, uint32 bytes, bool enable_segue)
mem_offset_t offset, uint32 bytes, bool enable_segue)
{
LLVMValueRef offset_const = I32_CONST(offset);
LLVMValueRef offset_const =
MEMORY64_COND_VALUE(I64_CONST(offset), I32_CONST(offset));
LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
LLVMValueRef mem_base_addr, mem_check_bound;
LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
@@ -94,17 +109,27 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
bool is_target_64bit, is_local_of_aot_value = false;
#if WASM_ENABLE_SHARED_MEMORY != 0
bool is_shared_memory =
comp_ctx->comp_data->memories[0].memory_flags & 0x02;
comp_ctx->comp_data->memories[0].memory_flags & SHARED_MEMORY_FLAG;
#endif
is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64)) ? true : false;
if (comp_ctx->is_indirect_mode
&& aot_intrinsic_check_capability(comp_ctx, "i32.const")) {
&& aot_intrinsic_check_capability(
comp_ctx, MEMORY64_COND_VALUE("i64.const", "i32.const"))) {
WASMValue wasm_value;
wasm_value.i32 = offset;
#if WASM_ENABLE_MEMORY64 != 0
if (IS_MEMORY64) {
wasm_value.i64 = offset;
}
else
#endif
{
wasm_value.i32 = (int32)offset;
}
offset_const = aot_load_const_from_table(
comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_I32);
comp_ctx, func_ctx->native_symbol, &wasm_value,
MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32));
if (!offset_const) {
return NULL;
}
@@ -139,7 +164,7 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
local_idx_of_aot_value = aot_value_top->local_idx;
}
POP_I32(addr);
POP_MEM_OFFSET(addr);
/*
* Note: not throw the integer-overflow-exception here since it must
@@ -158,7 +183,7 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
if (mem_offset + bytes <= mem_data_size) {
/* inside memory space */
if (comp_ctx->pointer_size == sizeof(uint64))
offset1 = I64_CONST((uint32)mem_offset);
offset1 = I64_CONST(mem_offset);
else
offset1 = I32_CONST((uint32)mem_offset);
CHECK_LLVM_CONST(offset1);
@@ -206,7 +231,8 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
if (!(mem_size = get_memory_curr_page_count(comp_ctx, func_ctx))) {
goto fail;
}
BUILD_ICMP(LLVMIntEQ, mem_size, I32_ZERO, cmp, "is_zero");
BUILD_ICMP(LLVMIntEQ, mem_size,
MEMORY64_COND_VALUE(I64_ZERO, I32_ZERO), cmp, "is_zero");
ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
LLVMMoveBasicBlockAfter(check_succ, block_curr);
if (!aot_emit_exception(comp_ctx, func_ctx,
@@ -412,8 +438,8 @@ fail:
bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign,
bool atomic)
uint32 align, mem_offset_t offset, uint32 bytes,
bool sign, bool atomic)
{
LLVMValueRef maddr, value = NULL;
LLVMTypeRef data_type;
@@ -482,8 +508,8 @@ fail:
bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign,
bool atomic)
uint32 align, mem_offset_t offset, uint32 bytes,
bool sign, bool atomic)
{
LLVMValueRef maddr, value = NULL;
LLVMTypeRef data_type;
@@ -560,7 +586,7 @@ fail:
bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset)
uint32 align, mem_offset_t offset)
{
LLVMValueRef maddr, value;
bool enable_segue = comp_ctx->enable_segue_f32_load;
@@ -583,7 +609,7 @@ fail:
bool
aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset)
uint32 align, mem_offset_t offset)
{
LLVMValueRef maddr, value;
bool enable_segue = comp_ctx->enable_segue_f64_load;
@@ -606,7 +632,8 @@ fail:
bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool atomic)
uint32 align, mem_offset_t offset, uint32 bytes,
bool atomic)
{
LLVMValueRef maddr, value;
bool enable_segue = comp_ctx->enable_segue_i32_store;
@@ -656,7 +683,8 @@ fail:
bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool atomic)
uint32 align, mem_offset_t offset, uint32 bytes,
bool atomic)
{
LLVMValueRef maddr, value;
bool enable_segue = comp_ctx->enable_segue_i64_store;
@@ -713,7 +741,7 @@ fail:
bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset)
uint32 align, mem_offset_t offset)
{
LLVMValueRef maddr, value;
bool enable_segue = comp_ctx->enable_segue_f32_store;
@@ -736,7 +764,7 @@ fail:
bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset)
uint32 align, mem_offset_t offset)
{
LLVMValueRef maddr, value;
bool enable_segue = comp_ctx->enable_segue_f64_store;
@@ -774,7 +802,8 @@ get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
}
}
return mem_size;
return LLVMBuildIntCast(comp_ctx->builder, mem_size,
MEMORY64_COND_VALUE(I64_TYPE, I32_TYPE), "");
fail:
return NULL;
}
@@ -785,7 +814,7 @@ aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
if (mem_size)
PUSH_I32(mem_size);
PUSH_PAGE_COUNT(mem_size);
return mem_size ? true : false;
fail:
return false;
@@ -798,11 +827,14 @@ aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
LLVMValueRef delta, param_values[2], ret_value, func, value;
LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
int32 func_index;
#if WASM_ENABLE_MEMORY64 != 0
LLVMValueRef u32_max, u32_cmp_result;
#endif
if (!mem_size)
return false;
POP_I32(delta);
POP_PAGE_COUNT(delta);
/* Function type of aot_enlarge_memory() */
param_types[0] = INT8_PTR_TYPE;
@@ -854,7 +886,7 @@ aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
/* Call function aot_enlarge_memory() */
param_values[0] = func_ctx->aot_inst;
param_values[1] = delta;
param_values[1] = LLVMBuildTrunc(comp_ctx->builder, delta, I32_TYPE, "");
if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
param_values, 2, "call"))) {
aot_set_last_error("llvm build call failed.");
@@ -862,15 +894,26 @@ aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
}
BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");
#if WASM_ENABLE_MEMORY64 != 0
if (IS_MEMORY64) {
if (!(u32_max = I64_CONST(UINT32_MAX))) {
aot_set_last_error("llvm build const failed");
return false;
}
BUILD_ICMP(LLVMIntULE, delta, u32_max, u32_cmp_result, "page_size_cmp");
BUILD_OP(And, ret_value, u32_cmp_result, ret_value, "and");
}
#endif
/* ret_value = ret_value == true ? delta : pre_page_count */
if (!(ret_value = LLVMBuildSelect(comp_ctx->builder, ret_value, mem_size,
I32_NEG_ONE, "mem_grow_ret"))) {
/* ret_value = ret_value == true ? pre_page_count : -1 */
if (!(ret_value = LLVMBuildSelect(
comp_ctx->builder, ret_value, mem_size,
MEMORY64_COND_VALUE(I64_NEG_ONE, I32_NEG_ONE), "mem_grow_ret"))) {
aot_set_last_error("llvm build select failed.");
return false;
}
PUSH_I32(ret_value);
PUSH_PAGE_COUNT(ret_value);
return true;
fail:
return false;
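
One subtlety in the memory.grow lowering above: the runtime's `aot_enlarge_memory()` still takes a 32-bit page count, so a memory64 module's i64 `delta` is truncated for the call (`LLVMBuildTrunc`) and a separate `delta <= UINT32_MAX` comparison is ANDed into the success flag, so any request wider than 32 bits reports failure. A standalone C model of the value the compiled code computes (`enlarge()` stands in for the runtime call; illustrative only):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool
enlarge(uint32_t inc_pages) /* stand-in for aot_enlarge_memory() */
{
    (void)inc_pages;
    return true;
}

static int64_t
memory64_grow(uint64_t prev_page_count, uint64_t delta)
{
    bool ok = enlarge((uint32_t)delta); /* the LLVMBuildTrunc'd argument */
    ok = ok && (delta <= UINT32_MAX);   /* the extra memory64 guard */
    return ok ? (int64_t)prev_page_count : -1; /* the LLVMBuildSelect */
}

int main(void)
{
    printf("%lld\n", (long long)memory64_grow(10, 1));          /* 10 */
    printf("%lld\n", (long long)memory64_grow(10, 1ULL << 33)); /* -1 */
    return 0;
}
```
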
@@ -987,13 +1030,17 @@ aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
POP_I32(len);
POP_I32(offset);
POP_I32(dst);
POP_MEM_OFFSET(dst);
if (!zero_extend_u64(comp_ctx, &dst, "dst64")) {
return false;
}
param_types[0] = INT8_PTR_TYPE;
param_types[1] = I32_TYPE;
param_types[2] = I32_TYPE;
param_types[3] = I32_TYPE;
param_types[4] = I32_TYPE;
param_types[4] = SIZE_T_TYPE;
ret_type = INT8_TYPE;
if (comp_ctx->is_jit_mode)
@@ -1080,9 +1127,9 @@ aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
LLVMValueRef src, dst, src_addr, dst_addr, len, res;
bool call_aot_memmove = false;
POP_I32(len);
POP_I32(src);
POP_I32(dst);
POP_MEM_OFFSET(len);
POP_MEM_OFFSET(src);
POP_MEM_OFFSET(dst);
if (!(src_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
return false;
@@ -1090,13 +1137,8 @@ aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
return false;
if (comp_ctx->pointer_size == sizeof(uint64)) {
/* zero extend to uint64 if the target is 64-bit */
len = LLVMBuildZExt(comp_ctx->builder, len, I64_TYPE, "len64");
if (!len) {
aot_set_last_error("llvm build zero extend failed.");
return false;
}
if (!zero_extend_u64(comp_ctx, &len, "len64")) {
return false;
}
call_aot_memmove = comp_ctx->is_indirect_mode || comp_ctx->is_jit_mode;
@@ -1174,20 +1216,15 @@ aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
LLVMValueRef func, params[3];
POP_I32(len);
POP_MEM_OFFSET(len);
POP_I32(val);
POP_I32(dst);
POP_MEM_OFFSET(dst);
if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
return false;
if (comp_ctx->pointer_size == sizeof(uint64)) {
/* zero extend to uint64 if the target is 64-bit */
len = LLVMBuildZExt(comp_ctx->builder, len, I64_TYPE, "len64");
if (!len) {
aot_set_last_error("llvm build zero extend failed.");
return false;
}
if (!zero_extend_u64(comp_ctx, &len, "len64")) {
return false;
}
param_types[0] = INT8_PTR_TYPE;
@@ -1251,7 +1288,7 @@ fail:
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 atomic_op, uint8 op_type, uint32 align,
uint32 offset, uint32 bytes)
mem_offset_t offset, uint32 bytes)
{
LLVMValueRef maddr, value, result;
bool enable_segue = (op_type == VALUE_TYPE_I32)
@@ -1337,7 +1374,7 @@ fail:
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx, uint8 op_type,
uint32 align, uint32 offset, uint32 bytes)
uint32 align, mem_offset_t offset, uint32 bytes)
{
LLVMValueRef maddr, value, expect, result;
bool enable_segue = (op_type == VALUE_TYPE_I32)
@@ -1442,7 +1479,7 @@ fail:
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 op_type, uint32 align, uint32 offset,
uint8 op_type, uint32 align, mem_offset_t offset,
uint32 bytes)
{
LLVMValueRef maddr, value, timeout, expect, cmp;
@@ -1534,7 +1571,7 @@ fail:
bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx, uint32 align,
uint32 offset, uint32 bytes)
mem_offset_t offset, uint32 bytes)
{
LLVMValueRef maddr, value, count;
LLVMValueRef param_values[3], ret_value, func;

View File

@@ -17,43 +17,43 @@ extern "C" {
bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign,
bool atomic);
uint32 align, mem_offset_t offset, uint32 bytes,
bool sign, bool atomic);
bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign,
bool atomic);
uint32 align, mem_offset_t offset, uint32 bytes,
bool sign, bool atomic);
bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset);
uint32 align, mem_offset_t offset);
bool
aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset);
uint32 align, mem_offset_t offset);
bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes,
uint32 align, mem_offset_t offset, uint32 bytes,
bool atomic);
bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes,
uint32 align, mem_offset_t offset, uint32 bytes,
bool atomic);
bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset);
uint32 align, mem_offset_t offset);
bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset);
uint32 align, mem_offset_t offset);
LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 offset, uint32 bytes, bool enable_segue);
mem_offset_t offset, uint32 bytes, bool enable_segue);
bool
aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
@@ -89,22 +89,22 @@ aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 atomic_op, uint8 op_type, uint32 align,
uint32 offset, uint32 bytes);
mem_offset_t offset, uint32 bytes);
bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx, uint8 op_type,
uint32 align, uint32 offset, uint32 bytes);
uint32 align, mem_offset_t offset, uint32 bytes);
bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 op_type, uint32 align, uint32 offset,
uint8 op_type, uint32 align, mem_offset_t offset,
uint32 bytes);
bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx, uint32 align,
uint32 offset, uint32 bytes);
mem_offset_t offset, uint32 bytes);
bool
aot_compiler_op_atomic_fence(AOTCompContext *comp_ctx,

View File

@@ -13,7 +13,7 @@
/* data_length in bytes */
static LLVMValueRef
simd_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align,
uint32 offset, uint32 data_length, LLVMTypeRef ptr_type,
mem_offset_t offset, uint32 data_length, LLVMTypeRef ptr_type,
LLVMTypeRef data_type, bool enable_segue)
{
LLVMValueRef maddr, data;
@@ -42,7 +42,7 @@ simd_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align,
bool
aot_compile_simd_v128_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset)
uint32 align, mem_offset_t offset)
{
bool enable_segue = comp_ctx->enable_segue_v128_load;
LLVMTypeRef v128_ptr_type = enable_segue ? V128_PTR_TYPE_GS : V128_PTR_TYPE;
@@ -62,7 +62,7 @@ fail:
bool
aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset)
uint8 opcode, uint32 align, mem_offset_t offset)
{
LLVMValueRef sub_vector, result;
uint32 opcode_index = opcode - SIMD_v128_load8x8_s;
@@ -117,7 +117,7 @@ aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
bool
aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset)
uint8 opcode, uint32 align, mem_offset_t offset)
{
uint32 opcode_index = opcode - SIMD_v128_load8_splat;
LLVMValueRef element, result;
@@ -173,7 +173,7 @@ aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
bool
aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset,
uint8 opcode, uint32 align, mem_offset_t offset,
uint8 lane_id)
{
LLVMValueRef element, vector;
@@ -218,7 +218,7 @@ aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
bool
aot_compile_simd_load_zero(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset)
uint8 opcode, uint32 align, mem_offset_t offset)
{
LLVMValueRef element, result, mask;
uint32 opcode_index = opcode - SIMD_v128_load32_zero;
@@ -308,7 +308,7 @@ simd_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align,
bool
aot_compile_simd_v128_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset)
uint32 align, mem_offset_t offset)
{
bool enable_segue = comp_ctx->enable_segue_v128_store;
LLVMTypeRef v128_ptr_type = enable_segue ? V128_PTR_TYPE_GS : V128_PTR_TYPE;
@@ -324,7 +324,7 @@ fail:
bool
aot_compile_simd_store_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset,
uint8 opcode, uint32 align, mem_offset_t offset,
uint8 lane_id)
{
LLVMValueRef element, vector;

View File

@@ -14,32 +14,32 @@ extern "C" {
bool
aot_compile_simd_v128_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset);
uint32 align, mem_offset_t offset);
bool
aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset);
uint8 opcode, uint32 align, mem_offset_t offset);
bool
aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset);
uint8 opcode, uint32 align, mem_offset_t offset);
bool
aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset,
uint8 opcode, uint32 align, mem_offset_t offset,
uint8 lane_id);
bool
aot_compile_simd_load_zero(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset);
uint8 opcode, uint32 align, mem_offset_t offset);
bool
aot_compile_simd_v128_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset);
uint32 align, mem_offset_t offset);
bool
aot_compile_simd_store_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset,
uint8 opcode, uint32 align, mem_offset_t offset,
uint8 lane_id);
#ifdef __cplusplus