Import SIMD feature and add some workload samples (#438)

This commit is contained in:
Wenyong Huang
2020-11-05 18:15:15 +08:00
committed by GitHub
parent 667282eea9
commit a3074df21b
84 changed files with 7780 additions and 318 deletions

View File

@ -14,6 +14,15 @@ aot_get_last_error()
return aot_error[0] == '\0' ? "" : aot_error;
}
void
aot_set_last_error_v(const char *format, ...)
{
va_list args;
va_start(args, format);
vsnprintf(aot_error, sizeof(aot_error), format, args);
va_end(args);
}
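/* Note: vsnprintf() bounds the write to sizeof(aot_error) and always
 * NUL-terminates, so an oversized message is truncated rather than
 * overflowing the static error buffer. */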
void
aot_set_last_error(const char *error)
{

View File

@ -230,6 +230,20 @@ aot_get_last_error();
void
aot_set_last_error(const char *error);
void
aot_set_last_error_v(const char *format, ...);
#if BH_DEBUG == 1
#define HANDLE_FAILURE(callee) do { \
aot_set_last_error_v("call %s failed in %s:%d", (callee),\
__FUNCTION__, __LINE__); \
} while (0)
#else
#define HANDLE_FAILURE(callee) do { \
aot_set_last_error_v("call %s failed", (callee)); \
} while (0)
#endif
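/* Illustrative usage (hypothetical call site, not part of this diff):
 *
 *   if (!(val = LLVMBuildAdd(comp_ctx->builder, lhs, rhs, "sum"))) {
 *       HANDLE_FAILURE("LLVMBuildAdd");
 *       goto fail;
 *   }
 *
 * In debug builds (BH_DEBUG == 1) the recorded error also carries the
 * enclosing function name and line via __FUNCTION__/__LINE__. */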
#ifdef __cplusplus
} /* end of extern "C" */
#endif

View File

@ -14,6 +14,18 @@
#include "aot_emit_control.h"
#include "aot_emit_function.h"
#include "aot_emit_parametric.h"
#include "simd/simd_access_lanes.h"
#include "simd/simd_bitmask_extracts.h"
#include "simd/simd_bit_shifts.h"
#include "simd/simd_bitwise_ops.h"
#include "simd/simd_bool_reductions.h"
#include "simd/simd_comparisons.h"
#include "simd/simd_construct_values.h"
#include "simd/simd_conversions.h"
#include "simd/simd_floating_point.h"
#include "simd/simd_int_arith.h"
#include "simd/simd_load_store.h"
#include "simd/simd_sat_int_arith.h"
#include "../aot/aot_runtime.h"
#include "../interpreter/wasm_opcode.h"
#include <errno.h>
@ -163,6 +175,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
|| value_type == VALUE_TYPE_I64
|| value_type == VALUE_TYPE_F32
|| value_type == VALUE_TYPE_F64
|| value_type == VALUE_TYPE_V128
|| value_type == VALUE_TYPE_VOID) {
param_count = 0;
param_types = NULL;
@ -280,12 +293,12 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case WASM_OP_DROP:
if (!aot_compile_op_drop(comp_ctx, func_ctx, true))
return false;
break;
case WASM_OP_DROP_64:
if (!aot_compile_op_drop(comp_ctx, func_ctx, false))
return false;
break;
case WASM_OP_SELECT:
@ -761,22 +774,22 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case WASM_OP_I32_REINTERPRET_F32:
if (!aot_compile_op_i32_reinterpret_f32(comp_ctx, func_ctx))
return false;
break;
case WASM_OP_I64_REINTERPRET_F64:
if (!aot_compile_op_i64_reinterpret_f64(comp_ctx, func_ctx))
return false;
break;
case WASM_OP_F32_REINTERPRET_I32:
if (!aot_compile_op_f32_reinterpret_i32(comp_ctx, func_ctx))
return false;
break;
case WASM_OP_F64_REINTERPRET_I64:
if (!aot_compile_op_f64_reinterpret_i64(comp_ctx, func_ctx))
return false;
break;
case WASM_OP_I32_EXTEND8_S:
@ -1019,6 +1032,722 @@ build_atomic_rmw:
}
#endif /* end of WASM_ENABLE_SHARED_MEMORY */
#if WASM_ENABLE_SIMD != 0
case WASM_OP_SIMD_PREFIX:
{
if (!comp_ctx->enable_simd) {
aot_set_last_error(
    "current build does not support SIMD instructions");
return false;
}
opcode = *frame_ip++;
switch (opcode) {
case SIMD_v128_load:
{
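/* Like the scalar loads, v128.load carries a memarg immediate:
   an alignment hint and a byte offset, each a LEB128-encoded u32. */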
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_v128_load(comp_ctx, func_ctx, align, offset))
return false;
break;
}
case SIMD_i16x8_load8x8_s:
case SIMD_i16x8_load8x8_u:
case SIMD_i32x4_load16x4_s:
case SIMD_i32x4_load16x4_u:
case SIMD_i64x2_load32x2_s:
case SIMD_i64x2_load32x2_u:
{
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_load_extend(comp_ctx, func_ctx,
opcode, align, offset))
return false;
break;
}
case SIMD_v8x16_load_splat:
case SIMD_v16x8_load_splat:
case SIMD_v32x4_load_splat:
case SIMD_v64x2_load_splat:
{
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_load_splat(comp_ctx, func_ctx,
opcode, align, offset))
return false;
break;
}
case SIMD_v128_store:
{
read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_v128_store(comp_ctx, func_ctx, align, offset))
return false;
break;
}
case SIMD_v128_const:
{
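/* v128.const is followed by a 16-byte literal immediate; it is read
   in place and then skipped. The same 16-byte layout applies to the
   v8x16.shuffle lane-index immediate below. */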
if (!aot_compile_simd_v128_const(comp_ctx, func_ctx, frame_ip))
return false;
frame_ip += 16;
break;
}
case SIMD_v8x16_shuffle:
{
if (!aot_compile_simd_shuffle(comp_ctx, func_ctx, frame_ip))
return false;
frame_ip += 16;
break;
}
case SIMD_v8x16_swizzle:
{
if (!aot_compile_simd_swizzle(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i8x16_splat:
case SIMD_i16x8_splat:
case SIMD_i32x4_splat:
case SIMD_i64x2_splat:
case SIMD_f32x4_splat:
case SIMD_f64x2_splat:
{
if (!aot_compile_simd_splat(comp_ctx, func_ctx, opcode))
return false;
break;
}
case SIMD_i8x16_extract_lane_s:
{
if (!aot_compile_simd_extract_i8x16(comp_ctx, func_ctx, *frame_ip++,
true))
return false;
break;
}
case SIMD_i8x16_extract_lane_u:
{
if (!aot_compile_simd_extract_i8x16(comp_ctx, func_ctx, *frame_ip++,
false))
return false;
break;
}
case SIMD_i16x8_extract_lane_s:
{
if (!aot_compile_simd_extract_i16x8(comp_ctx, func_ctx, *frame_ip++,
true))
return false;
break;
}
case SIMD_i16x8_extract_lane_u:
{
if (!aot_compile_simd_extract_i16x8(comp_ctx, func_ctx, *frame_ip++,
false))
return false;
break;
}
case SIMD_i32x4_extract_lane:
{
if (!aot_compile_simd_extract_i32x4(comp_ctx, func_ctx, *frame_ip++))
return false;
break;
}
case SIMD_i64x2_extract_lane:
{
if (!aot_compile_simd_extract_i64x2(comp_ctx, func_ctx, *frame_ip++))
return false;
break;
}
case SIMD_f32x4_extract_lane:
{
if (!aot_compile_simd_extract_f32x4(comp_ctx, func_ctx, *frame_ip++))
return false;
break;
}
case SIMD_f64x2_extract_lane:
{
if (!aot_compile_simd_extract_f64x2(comp_ctx, func_ctx, *frame_ip++))
return false;
break;
}
case SIMD_i8x16_replace_lane:
{
if (!aot_compile_simd_replace_i8x16(comp_ctx, func_ctx, *frame_ip++))
return false;
break;
}
case SIMD_i16x8_replace_lane:
{
if (!aot_compile_simd_replace_i16x8(comp_ctx, func_ctx, *frame_ip++))
return false;
break;
}
case SIMD_i32x4_replace_lane:
{
if (!aot_compile_simd_replace_i32x4(comp_ctx, func_ctx, *frame_ip++))
return false;
break;
}
case SIMD_i64x2_replace_lane:
{
if (!aot_compile_simd_replace_i64x2(comp_ctx, func_ctx, *frame_ip++))
return false;
break;
}
case SIMD_f32x4_replace_lane:
{
if (!aot_compile_simd_replace_f32x4(comp_ctx, func_ctx, *frame_ip++))
return false;
break;
}
case SIMD_f64x2_replace_lane:
{
if (!aot_compile_simd_replace_f64x2(comp_ctx, func_ctx, *frame_ip++))
return false;
break;
}
case SIMD_i8x16_eq:
case SIMD_i8x16_ne:
case SIMD_i8x16_lt_s:
case SIMD_i8x16_lt_u:
case SIMD_i8x16_gt_s:
case SIMD_i8x16_gt_u:
case SIMD_i8x16_le_s:
case SIMD_i8x16_le_u:
case SIMD_i8x16_ge_s:
case SIMD_i8x16_ge_u:
{
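/* The comparison opcodes are laid out contiguously in the same order
   as the condition enum, so the condition is derived by offsetting
   from INT_EQ (and from FLOAT_EQ for the float cases below). */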
if (!aot_compile_simd_i8x16_compare(comp_ctx, func_ctx,
INT_EQ + opcode - SIMD_i8x16_eq))
return false;
break;
}
case SIMD_i16x8_eq:
case SIMD_i16x8_ne:
case SIMD_i16x8_lt_s:
case SIMD_i16x8_lt_u:
case SIMD_i16x8_gt_s:
case SIMD_i16x8_gt_u:
case SIMD_i16x8_le_s:
case SIMD_i16x8_le_u:
case SIMD_i16x8_ge_s:
case SIMD_i16x8_ge_u:
{
if (!aot_compile_simd_i16x8_compare(comp_ctx, func_ctx,
INT_EQ + opcode - SIMD_i16x8_eq))
return false;
break;
}
case SIMD_i32x4_eq:
case SIMD_i32x4_ne:
case SIMD_i32x4_lt_s:
case SIMD_i32x4_lt_u:
case SIMD_i32x4_gt_s:
case SIMD_i32x4_gt_u:
case SIMD_i32x4_le_s:
case SIMD_i32x4_le_u:
case SIMD_i32x4_ge_s:
case SIMD_i32x4_ge_u:
{
if (!aot_compile_simd_i32x4_compare(comp_ctx, func_ctx,
INT_EQ + opcode - SIMD_i32x4_eq))
return false;
break;
}
case SIMD_f32x4_eq:
case SIMD_f32x4_ne:
case SIMD_f32x4_lt:
case SIMD_f32x4_gt:
case SIMD_f32x4_le:
case SIMD_f32x4_ge:
{
if (!aot_compile_simd_f32x4_compare(comp_ctx, func_ctx,
FLOAT_EQ + opcode - SIMD_f32x4_eq))
return false;
break;
}
case SIMD_f64x2_eq:
case SIMD_f64x2_ne:
case SIMD_f64x2_lt:
case SIMD_f64x2_gt:
case SIMD_f64x2_le:
case SIMD_f64x2_ge:
{
if (!aot_compile_simd_f64x2_compare(comp_ctx, func_ctx,
FLOAT_EQ + opcode - SIMD_f64x2_eq))
return false;
break;
}
case SIMD_v128_not:
case SIMD_v128_and:
case SIMD_v128_andnot:
case SIMD_v128_or:
case SIMD_v128_xor:
case SIMD_v128_bitselect:
{
if (!aot_compile_simd_v128_bitwise(comp_ctx, func_ctx,
V128_NOT + opcode - SIMD_v128_not))
return false;
break;
}
case SIMD_i8x16_add:
case SIMD_i8x16_sub:
{
V128Arithmetic arith_op = (opcode == SIMD_i8x16_add)
? V128_ADD : V128_SUB;
if (!aot_compile_simd_i8x16_arith(comp_ctx, func_ctx, arith_op))
return false;
break;
}
case SIMD_i16x8_add:
case SIMD_i16x8_sub:
case SIMD_i16x8_mul:
{
V128Arithmetic arith_op = V128_ADD;
if (opcode == SIMD_i16x8_sub)
arith_op = V128_SUB;
else if (opcode == SIMD_i16x8_mul)
arith_op = V128_MUL;
if (!aot_compile_simd_i16x8_arith(comp_ctx, func_ctx, arith_op))
return false;
break;
}
case SIMD_i32x4_add:
case SIMD_i32x4_sub:
case SIMD_i32x4_mul:
{
V128Arithmetic arith_op = V128_ADD;
if (opcode == SIMD_i32x4_sub)
arith_op = V128_SUB;
else if (opcode == SIMD_i32x4_mul)
arith_op = V128_MUL;
if (!aot_compile_simd_i32x4_arith(comp_ctx, func_ctx, arith_op))
return false;
break;
}
case SIMD_i64x2_add:
case SIMD_i64x2_sub:
case SIMD_i64x2_mul:
{
V128Arithmetic arith_op = V128_ADD;
if (opcode == SIMD_i64x2_sub)
arith_op = V128_SUB;
else if (opcode == SIMD_i64x2_mul)
arith_op = V128_MUL;
if (!aot_compile_simd_i64x2_arith(comp_ctx, func_ctx, arith_op))
return false;
break;
}
case SIMD_i8x16_neg:
{
if (!aot_compile_simd_i8x16_neg(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i16x8_neg:
{
if (!aot_compile_simd_i16x8_neg(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i32x4_neg:
{
if (!aot_compile_simd_i32x4_neg(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i64x2_neg:
{
if (!aot_compile_simd_i64x2_neg(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i8x16_add_saturate_s:
case SIMD_i8x16_add_saturate_u:
{
if (!aot_compile_simd_i8x16_saturate(comp_ctx, func_ctx, V128_ADD,
opcode == SIMD_i8x16_add_saturate_s
? true : false))
return false;
break;
}
case SIMD_i8x16_sub_saturate_s:
case SIMD_i8x16_sub_saturate_u:
{
if (!aot_compile_simd_i8x16_saturate(comp_ctx, func_ctx, V128_SUB,
opcode == SIMD_i8x16_sub_saturate_s
? true : false))
return false;
break;
}
case SIMD_i16x8_add_saturate_s:
case SIMD_i16x8_add_saturate_u:
{
if (!aot_compile_simd_i16x8_saturate(comp_ctx, func_ctx, V128_ADD,
opcode == SIMD_i16x8_add_saturate_s
? true : false))
return false;
break;
}
case SIMD_i16x8_sub_saturate_s:
case SIMD_i16x8_sub_saturate_u:
{
if (!aot_compile_simd_i16x8_saturate(comp_ctx, func_ctx, V128_SUB,
opcode == SIMD_i16x8_sub_saturate_s
? true : false))
return false;
break;
}
case SIMD_i8x16_min_s:
case SIMD_i8x16_min_u:
{
if (!aot_compile_simd_i8x16_cmp(comp_ctx, func_ctx, V128_MIN,
opcode == SIMD_i8x16_min_s
? true : false))
return false;
break;
}
case SIMD_i8x16_max_s:
case SIMD_i8x16_max_u:
{
if (!aot_compile_simd_i8x16_cmp(comp_ctx, func_ctx, V128_MAX,
opcode == SIMD_i8x16_max_s
? true : false))
return false;
break;
}
case SIMD_i16x8_min_s:
case SIMD_i16x8_min_u:
{
if (!aot_compile_simd_i16x8_cmp(comp_ctx, func_ctx, V128_MIN,
opcode == SIMD_i16x8_min_s
? true : false))
return false;
break;
}
case SIMD_i16x8_max_s:
case SIMD_i16x8_max_u:
{
if (!aot_compile_simd_i16x8_cmp(comp_ctx, func_ctx, V128_MAX,
opcode == SIMD_i16x8_max_s
? true : false))
return false;
break;
}
case SIMD_i32x4_min_s:
case SIMD_i32x4_min_u:
{
if (!aot_compile_simd_i32x4_cmp(comp_ctx, func_ctx, V128_MIN,
opcode == SIMD_i32x4_min_s
? true : false))
return false;
break;
}
case SIMD_i32x4_max_s:
case SIMD_i32x4_max_u:
{
if (!aot_compile_simd_i32x4_cmp(comp_ctx, func_ctx, V128_MAX,
opcode == SIMD_i32x4_max_s
? true : false))
return false;
break;
}
case SIMD_i8x16_abs:
{
if (!aot_compile_simd_i8x16_abs(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i16x8_abs:
{
if (!aot_compile_simd_i16x8_abs(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i32x4_abs:
{
if (!aot_compile_simd_i32x4_abs(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i8x16_avgr_u:
{
if (!aot_compile_simd_i8x16_avgr_u(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i16x8_avgr_u:
{
if (!aot_compile_simd_i16x8_avgr_u(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i8x16_any_true:
{
if (!aot_compile_simd_i8x16_any_true(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i16x8_any_true:
{
if (!aot_compile_simd_i16x8_any_true(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i32x4_any_true:
{
if (!aot_compile_simd_i32x4_any_true(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i8x16_all_true:
{
if (!aot_compile_simd_i8x16_all_true(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i16x8_all_true:
{
if (!aot_compile_simd_i16x8_all_true(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i32x4_all_true:
{
if (!aot_compile_simd_i32x4_all_true(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i8x16_bitmask:
{
if (!aot_compile_simd_i8x16_bitmask(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i16x8_bitmask:
{
if (!aot_compile_simd_i16x8_bitmask(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i32x4_bitmask:
{
if (!aot_compile_simd_i32x4_bitmask(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_i8x16_shl:
case SIMD_i8x16_shr_s:
case SIMD_i8x16_shr_u:
{
if (!aot_compile_simd_i8x16_shift(comp_ctx, func_ctx,
INT_SHL + opcode - SIMD_i8x16_shl))
return false;
break;
}
case SIMD_i16x8_shl:
case SIMD_i16x8_shr_s:
case SIMD_i16x8_shr_u:
{
if (!aot_compile_simd_i16x8_shift(comp_ctx, func_ctx,
INT_SHL + opcode - SIMD_i16x8_shl))
return false;
break;
}
case SIMD_i32x4_shl:
case SIMD_i32x4_shr_s:
case SIMD_i32x4_shr_u:
{
if (!aot_compile_simd_i32x4_shift(comp_ctx, func_ctx,
INT_SHL + opcode - SIMD_i32x4_shl))
return false;
break;
}
case SIMD_i64x2_shl:
case SIMD_i64x2_shr_s:
case SIMD_i64x2_shr_u:
{
if (!aot_compile_simd_i64x2_shift(comp_ctx, func_ctx,
INT_SHL + opcode - SIMD_i64x2_shl))
return false;
break;
}
case SIMD_i8x16_narrow_i16x8_s:
case SIMD_i8x16_narrow_i16x8_u:
{
bool is_signed = (opcode == SIMD_i8x16_narrow_i16x8_s)
? true : false;
if (!aot_compile_simd_i8x16_narrow_i16x8(comp_ctx, func_ctx,
is_signed))
return false;
break;
}
case SIMD_i16x8_narrow_i32x4_s:
case SIMD_i16x8_narrow_i32x4_u:
{
bool is_signed = (opcode == SIMD_i16x8_narrow_i32x4_s)
? true : false;
if (!aot_compile_simd_i16x8_narrow_i32x4(comp_ctx, func_ctx,
is_signed))
return false;
break;
}
case SIMD_i16x8_widen_low_i8x16_s:
case SIMD_i16x8_widen_high_i8x16_s:
{
bool is_low = (opcode == SIMD_i16x8_widen_low_i8x16_s)
? true : false;
if (!aot_compile_simd_i16x8_widen_i8x16(comp_ctx, func_ctx,
is_low, true))
return false;
break;
}
case SIMD_i16x8_widen_low_i8x16_u:
case SIMD_i16x8_widen_high_i8x16_u:
{
bool is_low = (opcode == SIMD_i16x8_widen_low_i8x16_u)
? true : false;
if (!aot_compile_simd_i16x8_widen_i8x16(comp_ctx, func_ctx,
is_low, false))
return false;
break;
}
case SIMD_i32x4_widen_low_i16x8_s:
case SIMD_i32x4_widen_high_i16x8_s:
{
bool is_low = (opcode == SIMD_i32x4_widen_low_i16x8_s)
? true : false;
if (!aot_compile_simd_i32x4_widen_i16x8(comp_ctx, func_ctx,
is_low, true))
return false;
break;
}
case SIMD_i32x4_widen_low_i16x8_u:
case SIMD_i32x4_widen_high_i16x8_u:
{
bool is_low = (opcode == SIMD_i32x4_widen_low_i16x8_u)
? true : false;
if (!aot_compile_simd_i32x4_widen_i16x8(comp_ctx, func_ctx,
is_low, false))
return false;
break;
}
case SIMD_i32x4_trunc_sat_f32x4_s:
case SIMD_i32x4_trunc_sat_f32x4_u:
{
bool is_signed = (opcode == SIMD_i32x4_trunc_sat_f32x4_s)
? true : false;
if (!aot_compile_simd_i32x4_trunc_sat_f32x4(comp_ctx, func_ctx,
is_signed))
return false;
break;
}
case SIMD_f32x4_convert_i32x4_s:
case SIMD_f32x4_convert_i32x4_u:
{
bool is_signed = (opcode == SIMD_f32x4_convert_i32x4_s)
? true : false;
if (!aot_compile_simd_f32x4_convert_i32x4(comp_ctx, func_ctx,
is_signed))
return false;
break;
}
case SIMD_f32x4_add:
case SIMD_f32x4_sub:
case SIMD_f32x4_mul:
case SIMD_f32x4_div:
case SIMD_f32x4_min:
case SIMD_f32x4_max:
{
if (!aot_compile_simd_f32x4_arith(comp_ctx, func_ctx,
FLOAT_ADD + opcode - SIMD_f32x4_add))
return false;
break;
}
case SIMD_f64x2_add:
case SIMD_f64x2_sub:
case SIMD_f64x2_mul:
case SIMD_f64x2_div:
case SIMD_f64x2_min:
case SIMD_f64x2_max:
{
if (!aot_compile_simd_f64x2_arith(comp_ctx, func_ctx,
FLOAT_ADD + opcode - SIMD_f64x2_add))
return false;
break;
}
case SIMD_f32x4_neg:
{
if (!aot_compile_simd_f32x4_neg(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_f64x2_neg:
{
if (!aot_compile_simd_f64x2_neg(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_f32x4_abs:
{
if (!aot_compile_simd_f32x4_abs(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_f64x2_abs:
{
if (!aot_compile_simd_f64x2_abs(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_f32x4_sqrt:
{
if (!aot_compile_simd_f32x4_sqrt(comp_ctx, func_ctx))
return false;
break;
}
case SIMD_f64x2_sqrt:
{
if (!aot_compile_simd_f64x2_sqrt(comp_ctx, func_ctx))
return false;
break;
}
default:
break;
}
break;
}
#endif /* end of WASM_ENABLE_SIMD */
default:
aot_set_last_error("unsupported opcode");
break;

View File

@ -46,12 +46,35 @@ typedef enum IntArithmetic {
INT_REM_U
} IntArithmetic;
typedef enum V128Arithmetic {
V128_ADD = 0,
V128_ADD_SATURATE_S,
V128_ADD_SATURATE_U,
V128_SUB,
V128_SUB_SATURATE_S,
V128_SUB_SATURATE_U,
V128_MUL,
V128_DIV,
V128_NEG,
V128_MIN,
V128_MAX,
} V128Arithmetic;
typedef enum IntBitwise {
INT_AND = 0,
INT_OR,
INT_XOR,
} IntBitwise;
typedef enum V128Bitwise {
V128_NOT,
V128_AND,
V128_ANDNOT,
V128_OR,
V128_XOR,
V128_BITSELECT
} V128Bitwise;
typedef enum IntShift {
INT_SHL = 0,
INT_SHR_S,
@ -123,6 +146,7 @@ typedef enum FloatArithmetic {
#define POP_I64(v) POP(v, VALUE_TYPE_I64)
#define POP_F32(v) POP(v, VALUE_TYPE_F32)
#define POP_F64(v) POP(v, VALUE_TYPE_F64)
#define POP_V128(v) POP(v, VALUE_TYPE_V128)
#define POP_COND(llvm_value) do { \
AOTValue *aot_value; \
@ -172,6 +196,7 @@ typedef enum FloatArithmetic {
#define PUSH_I64(v) PUSH(v, VALUE_TYPE_I64)
#define PUSH_F32(v) PUSH(v, VALUE_TYPE_F32)
#define PUSH_F64(v) PUSH(v, VALUE_TYPE_F64)
#define PUSH_V128(v) PUSH(v, VALUE_TYPE_V128)
#define PUSH_COND(v) PUSH(v, VALUE_TYPE_I1)
#define TO_LLVM_TYPE(wasm_type) \
@ -218,6 +243,36 @@ typedef enum FloatArithmetic {
#define I64_63 (comp_ctx->llvm_consts.i64_63)
#define I64_64 (comp_ctx->llvm_consts.i64_64)
#define V128_TYPE comp_ctx->basic_types.v128_type
#define V128_PTR_TYPE comp_ctx->basic_types.v128_ptr_type
#define V128_i8x16_TYPE comp_ctx->basic_types.i8x16_vec_type
#define V128_i16x8_TYPE comp_ctx->basic_types.i16x8_vec_type
#define V128_i32x4_TYPE comp_ctx->basic_types.i32x4_vec_type
#define V128_i64x2_TYPE comp_ctx->basic_types.i64x2_vec_type
#define V128_f32x4_TYPE comp_ctx->basic_types.f32x4_vec_type
#define V128_f64x2_TYPE comp_ctx->basic_types.f64x2_vec_type
#define V128_ZERO (comp_ctx->llvm_consts.v128_zero)
#define V128_i8x16_ZERO (comp_ctx->llvm_consts.i8x16_vec_zero)
#define V128_i16x8_ZERO (comp_ctx->llvm_consts.i16x8_vec_zero)
#define V128_i32x4_ZERO (comp_ctx->llvm_consts.i32x4_vec_zero)
#define V128_i64x2_ZERO (comp_ctx->llvm_consts.i64x2_vec_zero)
#define V128_f32x4_ZERO (comp_ctx->llvm_consts.f32x4_vec_zero)
#define V128_f64x2_ZERO (comp_ctx->llvm_consts.f64x2_vec_zero)
#define TO_V128_i8x16(v) LLVMBuildBitCast(comp_ctx->builder, v, \
V128_i8x16_TYPE, "i8x16_val")
#define TO_V128_i16x8(v) LLVMBuildBitCast(comp_ctx->builder, v, \
V128_i16x8_TYPE, "i16x8_val")
#define TO_V128_i32x4(v) LLVMBuildBitCast(comp_ctx->builder, v, \
V128_i32x4_TYPE, "i32x4_val")
#define TO_V128_i64x2(v) LLVMBuildBitCast(comp_ctx->builder, v, \
V128_i64x2_TYPE, "i64x2_val")
#define TO_V128_f32x4(v) LLVMBuildBitCast(comp_ctx->builder, v, \
V128_f32x4_TYPE, "f32x4_val")
#define TO_V128_f64x2(v) LLVMBuildBitCast(comp_ctx->builder, v, \
V128_f64x2_TYPE, "f64x2_val")
#define CHECK_LLVM_CONST(v) do { \
if (!v) { \
aot_set_last_error("create llvm const failed."); \

View File

@ -299,9 +299,14 @@ get_import_global_info_size(AOTCompData *comp_data)
static uint32
get_global_size(AOTGlobal *global)
{
/* type (1 byte) + is_mutable (1 byte)
   + init expr type (2 bytes) + init expr value (8 bytes) */
return sizeof(uint8) * 2 + sizeof(uint16) + sizeof(uint64);
if (global->init_expr.init_expr_type != INIT_EXPR_TYPE_V128_CONST)
    /* type (1 byte) + is_mutable (1 byte)
       + init expr type (2 bytes) + init expr value (8 bytes) */
    return sizeof(uint8) * 2 + sizeof(uint16) + sizeof(uint64);
else
    /* type (1 byte) + is_mutable (1 byte)
       + init expr type (2 bytes) + v128 value (16 bytes) */
return sizeof(uint8) * 2 + sizeof(uint16) + sizeof(uint64) * 2;
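/* i.e. 12 bytes for a scalar init expr vs 20 bytes for a v128 const
   (1 + 1 + 2 + 8 = 12; 1 + 1 + 2 + 16 = 20) */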
}
static uint32
@ -800,10 +805,28 @@ exchange_uint32(uint8 *p_data)
static void
exchange_uint64(uint8 *pData)
{
uint32 value;
value = *(uint32 *)pData;
*(uint32 *)pData = *(uint32 *)(pData + 4);
*(uint32 *)(pData + 4) = value;
exchange_uint32(pData);
exchange_uint32(pData + 4);
}
static void
exchange_uint128(uint8 *pData)
{
/* swap the high and low 64-bit halves */
uint64 value = *(uint64*)pData;
*(uint64*)pData = *(uint64*)(pData + 8);
*(uint64*)(pData + 8) = value;
/* then byte-swap each 64-bit half */
exchange_uint64(pData);
exchange_uint64(pData + 8);
}
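/* Net effect: a full 16-byte reversal, i.e. the v128 value is
 * converted between little-endian and big-endian byte order in place. */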
static union {
int a;
char b;
@ -851,6 +874,17 @@ static union {
offset += (uint32)sizeof(uint64); \
} while (0)
#define EMIT_V128(v) do { \
    uint64 *t = (uint64*)v.i64x2; \
    CHECK_BUF(16); \
    /* serialize the two 64-bit words in little-endian order */ \
    if (!is_little_endian()) \
        exchange_uint128((uint8 *)t); \
    PUT_U64_TO_ADDR(buf + offset, t[0]); \
    offset += (uint32)sizeof(uint64); \
    PUT_U64_TO_ADDR(buf + offset, t[1]); \
    offset += (uint32)sizeof(uint64); \
} while (0)
#define EMIT_BUF(v, len) do { \
CHECK_BUF(len); \
memcpy(buf + offset, v, len); \
@ -1093,7 +1127,10 @@ aot_emit_global_info(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
EMIT_U8(global->type);
EMIT_U8(global->is_mutable);
EMIT_U16(global->init_expr.init_expr_type);
EMIT_U64(global->init_expr.u.i64);
if (global->init_expr.init_expr_type != INIT_EXPR_TYPE_V128_CONST)
EMIT_U64(global->init_expr.u.i64);
else
EMIT_V128(global->init_expr.u.v128);
}
if (offset - *p_offset != get_global_info_size(comp_data)) {

View File

@ -96,11 +96,17 @@ format_block_name(char *name, uint32 name_size,
} \
} while (0)
#define ADD_TO_RESULT_PHIS(block, value, idx) do { \
LLVMBasicBlockRef block_curr = CURR_BLOCK(); \
LLVMAddIncoming(block->result_phis[idx], \
&value, &block_curr, 1); \
} while (0)
#define ADD_TO_RESULT_PHIS(block, value, idx) do { \
LLVMBasicBlockRef block_curr = CURR_BLOCK(); \
LLVMTypeRef phi_ty = LLVMTypeOf(block->result_phis[idx]); \
LLVMTypeRef value_ty = LLVMTypeOf(value); \
bh_assert(LLVMGetTypeKind(phi_ty) == LLVMGetTypeKind(value_ty)); \
bh_assert(LLVMGetTypeContext(phi_ty) \
== LLVMGetTypeContext(value_ty)); \
LLVMAddIncoming(block->result_phis[idx], &value, &block_curr, 1); \
(void)phi_ty; \
(void)value_ty; \
} while (0)
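/* The added asserts catch a phi/incoming type mismatch (e.g. a v128
   value flowing into an i64 phi) at the point of insertion, instead of
   surfacing later as a harder-to-trace LLVM verifier error. */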
#define BUILD_ICMP(op, left, right, res, name) do { \
if (!(res = LLVMBuildICmp(comp_ctx->builder, op, \
@ -686,24 +692,8 @@ check_suspend_flags(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
/* Move builder to terminate block */
SET_BUILDER_POS(terminate_block);
if (aot_func_type->result_count) {
switch (aot_func_type->types[aot_func_type->param_count]) {
case VALUE_TYPE_I32:
LLVMBuildRet(comp_ctx->builder, I32_ZERO);
break;
case VALUE_TYPE_I64:
LLVMBuildRet(comp_ctx->builder, I64_ZERO);
break;
case VALUE_TYPE_F32:
LLVMBuildRet(comp_ctx->builder, F32_ZERO);
break;
case VALUE_TYPE_F64:
LLVMBuildRet(comp_ctx->builder, F64_ZERO);
break;
}
}
else {
LLVMBuildRetVoid(comp_ctx->builder);
if (!aot_build_zero_function_ret(comp_ctx, aot_func_type)) {
goto fail;
}
/* Move builder to terminate block */

View File

@ -53,10 +53,8 @@ aot_emit_exception(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
func_ctx->got_exception_block);
/* Create exception id phi */
if (!(func_ctx->exception_id_phi =
LLVMBuildPhi(comp_ctx->builder,
comp_ctx->basic_types.int32_type,
"exception_id_phi"))) {
if (!(func_ctx->exception_id_phi = LLVMBuildPhi(
comp_ctx->builder, I32_TYPE, "exception_id_phi"))) {
aot_set_last_error("llvm build phi failed.");
return false;
}
@ -110,24 +108,8 @@ aot_emit_exception(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
/* Create return IR */
AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
if (aot_func_type->result_count) {
switch (aot_func_type->types[aot_func_type->param_count]) {
case VALUE_TYPE_I32:
LLVMBuildRet(comp_ctx->builder, I32_ZERO);
break;
case VALUE_TYPE_I64:
LLVMBuildRet(comp_ctx->builder, I64_ZERO);
break;
case VALUE_TYPE_F32:
LLVMBuildRet(comp_ctx->builder, F32_ZERO);
break;
case VALUE_TYPE_F64:
LLVMBuildRet(comp_ctx->builder, F64_ZERO);
break;
}
}
else {
LLVMBuildRetVoid(comp_ctx->builder);
if (!aot_build_zero_function_ret(comp_ctx, aot_func_type)) {
return false;
}
/* Resume the builder position */

View File

@ -25,24 +25,8 @@ create_func_return_block(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
/* Create return IR */
LLVMPositionBuilderAtEnd(comp_ctx->builder, func_ctx->func_return_block);
if (aot_func_type->result_count) {
switch (aot_func_type->types[aot_func_type->param_count]) {
case VALUE_TYPE_I32:
LLVMBuildRet(comp_ctx->builder, I32_ZERO);
break;
case VALUE_TYPE_I64:
LLVMBuildRet(comp_ctx->builder, I64_ZERO);
break;
case VALUE_TYPE_F32:
LLVMBuildRet(comp_ctx->builder, F32_ZERO);
break;
case VALUE_TYPE_F64:
LLVMBuildRet(comp_ctx->builder, F64_ZERO);
break;
}
}
else {
LLVMBuildRetVoid(comp_ctx->builder);
if (!aot_build_zero_function_ret(comp_ctx, aot_func_type)) {
return false;
}
}

View File

@ -53,6 +53,9 @@ get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
case 8:
mem_check_bound = func_ctx->mem_info[0].mem_bound_check_8bytes;
break;
case 16:
mem_check_bound = func_ctx->mem_info[0].mem_bound_check_16bytes;
break;
default:
bh_assert(0);
return NULL;
@ -73,9 +76,9 @@ get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
static LLVMValueRef
get_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
static LLVMValueRef
check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 offset, uint32 bytes)
LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 offset, uint32 bytes)
{
LLVMValueRef offset_const = I32_CONST(offset);
LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
@ -348,7 +351,7 @@ aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
{
LLVMValueRef maddr, value = NULL;
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
return false;
switch (bytes) {
@ -400,7 +403,7 @@ aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
{
LLVMValueRef maddr, value = NULL;
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
return false;
switch (bytes) {
@ -454,7 +457,7 @@ aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
{
LLVMValueRef maddr, value;
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
return false;
BUILD_PTR_CAST(F32_PTR_TYPE);
@ -471,7 +474,7 @@ aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
{
LLVMValueRef maddr, value;
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
return false;
BUILD_PTR_CAST(F64_PTR_TYPE);
@ -490,7 +493,7 @@ aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
POP_I32(value);
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
return false;
switch (bytes) {
@ -529,7 +532,7 @@ aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
POP_I64(value);
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
return false;
switch (bytes) {
@ -572,7 +575,7 @@ aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
POP_F32(value);
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 4)))
return false;
BUILD_PTR_CAST(F32_PTR_TYPE);
@ -590,7 +593,7 @@ aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
POP_F64(value);
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 8)))
return false;
BUILD_PTR_CAST(F64_PTR_TYPE);
@ -877,24 +880,8 @@ aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
/* If memory.init failed, return this function
so the runtime can catch the exception */
LLVMPositionBuilderAtEnd(comp_ctx->builder, mem_init_fail);
if (aot_func_type->result_count) {
switch (aot_func_type->types[aot_func_type->param_count]) {
case VALUE_TYPE_I32:
LLVMBuildRet(comp_ctx->builder, I32_ZERO);
break;
case VALUE_TYPE_I64:
LLVMBuildRet(comp_ctx->builder, I64_ZERO);
break;
case VALUE_TYPE_F32:
LLVMBuildRet(comp_ctx->builder, F32_ZERO);
break;
case VALUE_TYPE_F64:
LLVMBuildRet(comp_ctx->builder, F64_ZERO);
break;
}
}
else {
LLVMBuildRetVoid(comp_ctx->builder);
if (!aot_build_zero_function_ret(comp_ctx, aot_func_type)) {
goto fail;
}
LLVMPositionBuilderAtEnd(comp_ctx->builder, init_success);
@ -1002,7 +989,7 @@ aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx,
else
POP_I64(value);
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
return false;
if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
@ -1076,7 +1063,7 @@ aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
POP_I64(expect);
}
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
return false;
if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
@ -1175,7 +1162,7 @@ aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
CHECK_LLVM_CONST(is_wait64);
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
return false;
if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))
@ -1219,24 +1206,8 @@ aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
/* If atomic wait failed, return this function
so the runtime can catch the exception */
LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_fail);
if (aot_func_type->result_count) {
switch (aot_func_type->types[aot_func_type->param_count]) {
case VALUE_TYPE_I32:
LLVMBuildRet(comp_ctx->builder, I32_ZERO);
break;
case VALUE_TYPE_I64:
LLVMBuildRet(comp_ctx->builder, I64_ZERO);
break;
case VALUE_TYPE_F32:
LLVMBuildRet(comp_ctx->builder, F32_ZERO);
break;
case VALUE_TYPE_F64:
LLVMBuildRet(comp_ctx->builder, F64_ZERO);
break;
}
}
else {
LLVMBuildRetVoid(comp_ctx->builder);
if (!aot_build_zero_function_ret(comp_ctx, aot_func_type)) {
goto fail;
}
LLVMPositionBuilderAtEnd(comp_ctx->builder, wait_success);
@ -1259,7 +1230,7 @@ aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
POP_I32(count);
if (!(maddr = check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, bytes)))
return false;
if (!check_memory_alignment(comp_ctx, func_ctx, maddr, align))

View File

@ -49,6 +49,10 @@ bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset);
LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 offset, uint32 bytes);
bool
aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);

View File

@ -135,110 +135,6 @@
} while (0)
static LLVMValueRef
__call_llvm_intrinsic(AOTCompContext *comp_ctx,
const char *name,
LLVMTypeRef ret_type,
LLVMTypeRef *param_types,
int param_count,
LLVMValueRef *param_values)
{
LLVMValueRef func, ret;
LLVMTypeRef func_type;
/* Declare llvm intrinsic function if necessary */
if (!(func = LLVMGetNamedFunction(comp_ctx->module, name))) {
if (!(func_type =
LLVMFunctionType(ret_type, param_types, (uint32)param_count, false))) {
aot_set_last_error("create LLVM function type failed.");
return NULL;
}
if (!(func = LLVMAddFunction(comp_ctx->module, name, func_type))) {
aot_set_last_error("add LLVM function failed.");
return NULL;
}
}
/* Call the LLVM intrinsic function */
if (!(ret = LLVMBuildCall(comp_ctx->builder, func, param_values,
(uint32)param_count, "call"))) {
aot_set_last_error("llvm build call failed.");
return NULL;
}
return ret;
}
static LLVMValueRef
call_llvm_intrinsic(AOTCompContext *comp_ctx,
const char *name,
LLVMTypeRef ret_type,
LLVMTypeRef *param_types,
int param_count,
...)
{
LLVMValueRef *param_values, ret;
va_list argptr;
uint64 total_size;
int i = 0;
/* Create param values */
total_size = sizeof(LLVMValueRef) * (uint64)param_count;
if (total_size >= UINT32_MAX
|| !(param_values = wasm_runtime_malloc((uint32)total_size))) {
aot_set_last_error("allocate memory for param values failed.");
return false;
}
/* Load each param value */
va_start(argptr, param_count);
while (i < param_count)
param_values[i++] = va_arg(argptr, LLVMValueRef);
va_end(argptr);
ret = __call_llvm_intrinsic(comp_ctx, name, ret_type,
param_types, param_count,
param_values);
wasm_runtime_free(param_values);
return ret;
}
static LLVMValueRef
call_llvm_intrinsic_v(AOTCompContext *comp_ctx,
const char *name,
LLVMTypeRef ret_type,
LLVMTypeRef *param_types,
int param_count,
va_list param_value_list)
{
LLVMValueRef *param_values, ret;
uint64 total_size;
int i = 0;
/* Create param values */
total_size = sizeof(LLVMValueRef) * (uint64)param_count;
if (total_size >= UINT32_MAX
|| !(param_values = wasm_runtime_malloc((uint32)total_size))) {
aot_set_last_error("allocate memory for param values failed.");
return false;
}
/* Load each param value */
while (i < param_count)
param_values[i++] = va_arg(param_value_list, LLVMValueRef);
ret = __call_llvm_intrinsic(comp_ctx, name, ret_type,
param_types, param_count,
param_values);
wasm_runtime_free(param_values);
return ret;
}
/* Call llvm constrained floating-point intrinsic */
static LLVMValueRef
call_llvm_float_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
@ -255,12 +151,8 @@ call_llvm_float_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
va_start(param_value_list, intrinsic);
ret = call_llvm_intrinsic_v(comp_ctx,
intrinsic,
ret_type,
param_types,
4,
param_value_list);
ret = aot_call_llvm_intrinsic_v(comp_ctx, intrinsic, ret_type, param_types,
4, param_value_list);
va_end(param_value_list);
@ -283,12 +175,8 @@ call_llvm_libm_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
va_start(param_value_list, intrinsic);
ret = call_llvm_intrinsic_v(comp_ctx,
intrinsic,
ret_type,
param_types,
3,
param_value_list);
ret = aot_call_llvm_intrinsic_v(comp_ctx, intrinsic, ret_type, param_types,
3, param_value_list);
va_end(param_value_list);
@ -342,13 +230,8 @@ compile_op_float_min_max(AOTCompContext *comp_ctx,
return NULL;
}
if (!(cmp = call_llvm_intrinsic(comp_ctx,
intrinsic,
ret_type,
param_types,
2,
left,
right)))
if (!(cmp = aot_call_llvm_intrinsic(comp_ctx, intrinsic, ret_type,
param_types, 2, left, right)))
return NULL;
if (!(cmp = LLVMBuildSelect(comp_ctx->builder,
@ -406,21 +289,21 @@ aot_compile_int_bit_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
/* Call the LLVM intrinsic function */
if (type < POP_CNT32)
DEF_INT_UNARY_OP(call_llvm_intrinsic(comp_ctx,
bit_cnt_llvm_intrinsic[type],
ret_type,
param_types,
2,
operand,
zero_undef),
DEF_INT_UNARY_OP(aot_call_llvm_intrinsic(comp_ctx,
bit_cnt_llvm_intrinsic[type],
ret_type,
param_types,
2,
operand,
zero_undef),
NULL);
else
DEF_INT_UNARY_OP(call_llvm_intrinsic(comp_ctx,
bit_cnt_llvm_intrinsic[type],
ret_type,
param_types,
1,
operand),
DEF_INT_UNARY_OP(aot_call_llvm_intrinsic(comp_ctx,
bit_cnt_llvm_intrinsic[type],
ret_type,
param_types,
1,
operand),
NULL);
return true;
@ -1032,12 +915,8 @@ call_llvm_float_math_intrinsic(AOTCompContext *comp_ctx,
va_start(param_value_list, intrinsic);
ret = call_llvm_intrinsic_v(comp_ctx,
intrinsic,
ret_type,
&param_type,
1,
param_value_list);
ret = aot_call_llvm_intrinsic_v(comp_ctx, intrinsic, ret_type, &param_type,
1, param_value_list);
va_end(param_value_list);
@ -1133,14 +1012,14 @@ compile_float_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
param_types[0] = param_types[1] = ret_type = is_f32 ? F32_TYPE : F64_TYPE;
DEF_FP_BINARY_OP(call_llvm_intrinsic(comp_ctx,
is_f32 ? "llvm.copysign.f32" :
"llvm.copysign.f64",
ret_type,
param_types,
2,
left,
right),
DEF_FP_BINARY_OP(aot_call_llvm_intrinsic(comp_ctx,
is_f32 ? "llvm.copysign.f32" :
"llvm.copysign.f64",
ret_type,
param_types,
2,
left,
right),
NULL);
return true;

View File

@ -46,7 +46,8 @@ pop_value_from_wasm_stack(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
wasm_runtime_free(aot_value);
if ((is_32
&& (type != VALUE_TYPE_I32 && type != VALUE_TYPE_F32))
&& (type != VALUE_TYPE_I32 && type != VALUE_TYPE_F32
&& type != VALUE_TYPE_V128))
|| (!is_32
&& (type != VALUE_TYPE_I64 && type != VALUE_TYPE_F64))) {
aot_set_last_error("invalid WASM stack data type.");

View File

@ -116,7 +116,7 @@ compile_global(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
+ sizeof(AOTMemoryInstance) * comp_ctx->comp_data->memory_count;
uint32 global_offset;
uint8 global_type;
LLVMValueRef offset, global_ptr, global;
LLVMValueRef offset, global_ptr, global, res;
LLVMTypeRef ptr_type = NULL;
bh_assert(global_idx < import_global_count + comp_data->global_count);
@ -153,6 +153,9 @@ compile_global(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
case VALUE_TYPE_F64:
ptr_type = comp_ctx->basic_types.float64_ptr_type;
break;
case VALUE_TYPE_V128:
ptr_type = comp_ctx->basic_types.v128_ptr_type;
break;
default:
bh_assert(0);
break;
@ -170,14 +173,19 @@ compile_global(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
aot_set_last_error("llvm build load failed.");
return false;
}
/* All globals' data is 4-byte aligned */
LLVMSetAlignment(global, 4);
PUSH(global, global_type);
}
else {
POP(global, global_type);
if (!LLVMBuildStore(comp_ctx->builder, global, global_ptr)) {
if (!(res = LLVMBuildStore(comp_ctx->builder,
global, global_ptr))) {
aot_set_last_error("llvm build store failed.");
return false;
}
/* All globals' data is 4-byte aligned */
LLVMSetAlignment(res, 4);
}
return true;

View File

@ -21,6 +21,10 @@ wasm_type_to_llvm_type(AOTLLVMTypes *llvm_types, uint8 wasm_type)
return llvm_types->float32_type;
case VALUE_TYPE_F64:
return llvm_types->float64_type;
#if WASM_ENABLE_SIMD != 0
case VALUE_TYPE_V128:
return llvm_types->i64x2_vec_type;
#endif
case VALUE_TYPE_VOID:
return llvm_types->void_type;
}
@ -444,6 +448,31 @@ create_memory_info(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
}
}
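/* Mirror the 1/2/4/8-byte bound checks with a 16-byte variant;
   aot_check_memory_overflow() selects it for v128 accesses
   (bytes == 16). */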
offset = I32_CONST(offsetof(AOTMemoryInstance, mem_bound_check_16bytes)
- offsetof(AOTMemoryInstance, memory_data.ptr));
if (!(func_ctx->mem_info[0].mem_bound_check_16bytes =
LLVMBuildInBoundsGEP(comp_ctx->builder, mem_info_base,
&offset, 1, "bound_check_16bytes_offset"))) {
aot_set_last_error("llvm build in bounds gep failed");
return false;
}
if (!(func_ctx->mem_info[0].mem_bound_check_16bytes =
LLVMBuildBitCast(comp_ctx->builder,
func_ctx->mem_info[0].mem_bound_check_16bytes,
bound_check_type, "bound_check_16bytes_ptr"))) {
aot_set_last_error("llvm build bit cast failed");
return false;
}
if (mem_space_unchanged) {
if (!(func_ctx->mem_info[0].mem_bound_check_16bytes =
LLVMBuildLoad(comp_ctx->builder,
func_ctx->mem_info[0].mem_bound_check_16bytes,
"bound_check_16bytes"))) {
aot_set_last_error("llvm build load failed");
return false;
}
}
return true;
}
@ -676,6 +705,11 @@ aot_create_func_context(AOTCompData *comp_data, AOTCompContext *comp_ctx,
case VALUE_TYPE_F64:
local_value = F64_ZERO;
break;
#if WASM_ENABLE_SIMD != 0
case VALUE_TYPE_V128:
local_value = V128_ZERO;
break;
#endif
default:
bh_assert(0);
break;
@ -814,23 +848,55 @@ aot_set_llvm_basic_types(AOTLLVMTypes *basic_types, LLVMContextRef context)
basic_types->float32_ptr_type = LLVMPointerType(basic_types->float32_type, 0);
basic_types->float64_ptr_type = LLVMPointerType(basic_types->float64_type, 0);
basic_types->i8x16_vec_type = LLVMVectorType(basic_types->int8_type, 16);
basic_types->i16x8_vec_type = LLVMVectorType(basic_types->int16_type, 8);
basic_types->i32x4_vec_type = LLVMVectorType(basic_types->int32_type, 4);
basic_types->i64x2_vec_type = LLVMVectorType(basic_types->int64_type, 2);
basic_types->f32x4_vec_type = LLVMVectorType(basic_types->float32_type, 4);
basic_types->f64x2_vec_type = LLVMVectorType(basic_types->float64_type, 2);
basic_types->v128_type = basic_types->i64x2_vec_type;
basic_types->v128_ptr_type = LLVMPointerType(basic_types->v128_type, 0);
return (basic_types->int8_ptr_type
&& basic_types->int16_ptr_type
&& basic_types->int32_ptr_type
&& basic_types->int64_ptr_type
&& basic_types->float32_ptr_type
&& basic_types->float64_ptr_type
&& basic_types->i8x16_vec_type
&& basic_types->i16x8_vec_type
&& basic_types->i32x4_vec_type
&& basic_types->i64x2_vec_type
&& basic_types->f32x4_vec_type
&& basic_types->f64x2_vec_type
&& basic_types->meta_data_type) ? true : false;
}
static bool
aot_create_llvm_consts(AOTLLVMConsts *consts, AOTCompContext *comp_ctx)
{
LLVMValueRef i64_consts[2];
consts->i8_zero = I8_CONST(0);
consts->i32_zero = I32_CONST(0);
consts->i64_zero = I64_CONST(0);
consts->f32_zero = F32_CONST(0);
consts->f64_zero = F64_CONST(0);
if (consts->i64_zero) {
i64_consts[0] = i64_consts[1] = consts->i64_zero;
consts->v128_zero = consts->i64x2_vec_zero =
LLVMConstVector(i64_consts, 2);
if (consts->i64x2_vec_zero) {
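/* the all-zero bit pattern is lane-shape independent, so bitcasts
   of the zero <2 x i64> yield the zero vector of every other shape */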
consts->i8x16_vec_zero = TO_V128_i8x16(consts->i64x2_vec_zero);
consts->i16x8_vec_zero = TO_V128_i16x8(consts->i64x2_vec_zero);
consts->i32x4_vec_zero = TO_V128_i32x4(consts->i64x2_vec_zero);
consts->f32x4_vec_zero = TO_V128_f32x4(consts->i64x2_vec_zero);
consts->f64x2_vec_zero = TO_V128_f64x2(consts->i64x2_vec_zero);
}
}
consts->i32_one = I32_CONST(1);
consts->i32_two = I32_CONST(2);
consts->i32_three = I32_CONST(3);
@ -850,6 +916,12 @@ aot_create_llvm_consts(AOTLLVMConsts *consts, AOTCompContext *comp_ctx)
&& consts->i64_zero
&& consts->f32_zero
&& consts->f64_zero
&& consts->i8x16_vec_zero
&& consts->i16x8_vec_zero
&& consts->i32x4_vec_zero
&& consts->i64x2_vec_zero
&& consts->f32x4_vec_zero
&& consts->f64x2_vec_zero
&& consts->i32_one
&& consts->i32_two
&& consts->i32_three
@ -1014,7 +1086,7 @@ aot_create_comp_context(AOTCompData *comp_data,
/*LLVMTypeRef elem_types[8];*/
struct LLVMMCJITCompilerOptions jit_options;
LLVMTargetRef target;
char *triple = NULL, *triple_jit = NULL, *triple_norm, *arch, *abi;
char *triple = NULL, *triple_norm, *arch, *abi;
char *cpu = NULL, *features, buf[128];
char *triple_norm_new = NULL, *cpu_new = NULL;
char *err = NULL, *fp_round= "round.tonearest", *fp_exce = "fpexcept.strict";
@ -1065,7 +1137,12 @@ aot_create_comp_context(AOTCompData *comp_data,
if (option->enable_tail_call)
comp_ctx->enable_tail_call = true;
if (option->enable_simd)
comp_ctx->enable_simd = true;
if (option->is_jit_mode) {
char *triple_jit = NULL;
/* Create LLVM execution engine */
LLVMInitializeMCJITCompilerOptions(&jit_options, sizeof(jit_options));
jit_options.OptLevel = LLVMCodeGenLevelAggressive;
@ -1186,7 +1263,8 @@ aot_create_comp_context(AOTCompData *comp_data,
if (!cpu)
cpu = "";
}
else { /* triple is NULL, cpu isn't NULL */
else {
/* triple is NULL, cpu isn't NULL */
snprintf(buf, sizeof(buf),
"target isn't specified for cpu %s.", cpu);
aot_set_last_error(buf);
@ -1283,6 +1361,23 @@ aot_create_comp_context(AOTCompData *comp_data,
}
}
if (option->enable_simd) {
char *tmp;
bool ret;
if (!(tmp = LLVMGetTargetMachineCPU(comp_ctx->target_machine))) {
aot_set_last_error("get CPU from Target Machine fail");
goto fail;
}
ret = aot_check_simd_compatibility(comp_ctx->target_arch, tmp);
LLVMDisposeMessage(tmp);
if (!ret) {
aot_set_last_error("SIMD compatibility check failed");
goto fail;
}
}
if (!(target_data_ref =
LLVMCreateTargetDataLayout(comp_ctx->target_machine))) {
aot_set_last_error("create LLVM target data layout failed.");
@ -1349,11 +1444,13 @@ aot_create_comp_context(AOTCompData *comp_data,
fail:
if (triple_norm_new)
LLVMDisposeMessage(triple_norm_new);
if (cpu_new)
LLVMDisposeMessage(cpu_new);
if (!ret)
aot_destroy_comp_context(comp_ctx);
return ret;
}
@ -1567,3 +1664,144 @@ aot_checked_addr_list_destroy(AOTFuncContext *func_ctx)
func_ctx->checked_addr_list = NULL;
}
bool
aot_build_zero_function_ret(AOTCompContext *comp_ctx,
AOTFuncType *func_type)
{
LLVMValueRef ret = NULL;
if (func_type->result_count) {
switch (func_type->types[func_type->param_count]) {
case VALUE_TYPE_I32:
ret = LLVMBuildRet(comp_ctx->builder, I32_ZERO);
break;
case VALUE_TYPE_I64:
ret = LLVMBuildRet(comp_ctx->builder, I64_ZERO);
break;
case VALUE_TYPE_F32:
ret = LLVMBuildRet(comp_ctx->builder, F32_ZERO);
break;
case VALUE_TYPE_F64:
ret = LLVMBuildRet(comp_ctx->builder, F64_ZERO);
break;
#if WASM_ENABLE_SIMD != 0
case VALUE_TYPE_V128:
ret = LLVMBuildRet(comp_ctx->builder, V128_ZERO);
break;
#endif
default:
bh_assert(0);
}
}
else {
ret = LLVMBuildRetVoid(comp_ctx->builder);
}
if (!ret) {
aot_set_last_error("llvm build ret failed.");
return false;
}
return true;
}
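/* Typical call site (this helper replaces the duplicated per-type
 * switches removed in the hunks above):
 *
 *   if (!aot_build_zero_function_ret(comp_ctx, aot_func_type))
 *       goto fail;
 */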
static LLVMValueRef
__call_llvm_intrinsic(const AOTCompContext *comp_ctx,
const char *name,
LLVMTypeRef ret_type,
LLVMTypeRef *param_types,
int param_count,
LLVMValueRef *param_values)
{
LLVMValueRef func, ret;
LLVMTypeRef func_type;
/* Declare llvm intrinsic function if necessary */
if (!(func = LLVMGetNamedFunction(comp_ctx->module, name))) {
if (!(func_type = LLVMFunctionType(ret_type, param_types,
(uint32)param_count, false))) {
aot_set_last_error("create LLVM function type failed.");
return NULL;
}
if (!(func = LLVMAddFunction(comp_ctx->module, name, func_type))) {
aot_set_last_error("add LLVM function failed.");
return NULL;
}
}
/* Call the LLVM intrinsic function */
if (!(ret = LLVMBuildCall(comp_ctx->builder, func, param_values,
(uint32)param_count, "call"))) {
aot_set_last_error("llvm build call failed.");
return NULL;
}
return ret;
}
LLVMValueRef
aot_call_llvm_intrinsic(const AOTCompContext *comp_ctx,
const char *name,
LLVMTypeRef ret_type,
LLVMTypeRef *param_types,
int param_count,
...)
{
LLVMValueRef *param_values, ret;
va_list argptr;
uint64 total_size;
int i = 0;
/* Create param values */
total_size = sizeof(LLVMValueRef) * (uint64)param_count;
if (total_size >= UINT32_MAX
|| !(param_values = wasm_runtime_malloc((uint32)total_size))) {
aot_set_last_error("allocate memory for param values failed.");
return false;
}
/* Load each param value */
va_start(argptr, param_count);
while (i < param_count)
param_values[i++] = va_arg(argptr, LLVMValueRef);
va_end(argptr);
ret = __call_llvm_intrinsic(comp_ctx, name, ret_type, param_types,
param_count, param_values);
wasm_runtime_free(param_values);
return ret;
}
LLVMValueRef
aot_call_llvm_intrinsic_v(const AOTCompContext *comp_ctx,
const char *name,
LLVMTypeRef ret_type,
LLVMTypeRef *param_types,
int param_count,
va_list param_value_list)
{
LLVMValueRef *param_values, ret;
uint64 total_size;
int i = 0;
/* Create param values */
total_size = sizeof(LLVMValueRef) * (uint64)param_count;
if (total_size >= UINT32_MAX
|| !(param_values = wasm_runtime_malloc((uint32)total_size))) {
aot_set_last_error("allocate memory for param values failed.");
return false;
}
/* Load each param value */
while (i < param_count)
param_values[i++] = va_arg(param_value_list, LLVMValueRef);
ret = __call_llvm_intrinsic(comp_ctx, name, ret_type, param_types,
param_count, param_values);
wasm_runtime_free(param_values);
return ret;
}
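/* Example (the copysign lowering above): declare the intrinsic on
 * first use and emit the call in one step:
 *
 *   res = aot_call_llvm_intrinsic(comp_ctx, "llvm.copysign.f64",
 *                                 ret_type, param_types, 2,
 *                                 left, right);
 */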

View File

@ -106,6 +106,7 @@ typedef struct AOTMemInfo {
LLVMValueRef mem_bound_check_2bytes;
LLVMValueRef mem_bound_check_4bytes;
LLVMValueRef mem_bound_check_8bytes;
LLVMValueRef mem_bound_check_16bytes;
} AOTMemInfo;
typedef struct AOTFuncContext {
@ -152,6 +153,15 @@ typedef struct AOTLLVMTypes {
LLVMTypeRef float32_ptr_type;
LLVMTypeRef float64_ptr_type;
LLVMTypeRef v128_type;
LLVMTypeRef v128_ptr_type;
LLVMTypeRef i8x16_vec_type;
LLVMTypeRef i16x8_vec_type;
LLVMTypeRef i32x4_vec_type;
LLVMTypeRef i64x2_vec_type;
LLVMTypeRef f32x4_vec_type;
LLVMTypeRef f64x2_vec_type;
LLVMTypeRef meta_data_type;
} AOTLLVMTypes;
@ -161,6 +171,13 @@ typedef struct AOTLLVMConsts {
LLVMValueRef i64_zero;
LLVMValueRef f32_zero;
LLVMValueRef f64_zero;
LLVMValueRef v128_zero;
LLVMValueRef i8x16_vec_zero;
LLVMValueRef i16x8_vec_zero;
LLVMValueRef i32x4_vec_zero;
LLVMValueRef i64x2_vec_zero;
LLVMValueRef f32x4_vec_zero;
LLVMValueRef f64x2_vec_zero;
LLVMValueRef i32_one;
LLVMValueRef i32_two;
LLVMValueRef i32_three;
@ -201,6 +218,9 @@ typedef struct AOTCompContext {
/* Boundary Check */
bool enable_bound_check;
/* 128-bit SIMD */
bool enable_simd;
/* Thread Manager */
bool enable_thread_mgr;
@ -248,6 +268,7 @@ typedef struct AOTCompOption{
bool enable_bulk_memory;
bool enable_thread_mgr;
bool enable_tail_call;
bool enable_simd;
bool is_sgx_platform;
uint32 opt_level;
uint32 size_level;
@ -309,6 +330,29 @@ aot_checked_addr_list_find(AOTFuncContext *func_ctx,
void
aot_checked_addr_list_destroy(AOTFuncContext *func_ctx);
bool
aot_build_zero_function_ret(AOTCompContext *comp_ctx,
AOTFuncType *func_type);
LLVMValueRef
aot_call_llvm_intrinsic(const AOTCompContext *comp_ctx,
const char *name,
LLVMTypeRef ret_type,
LLVMTypeRef *param_types,
int param_count,
...);
LLVMValueRef
aot_call_llvm_intrinsic_v(const AOTCompContext *comp_ctx,
const char *name,
LLVMTypeRef ret_type,
LLVMTypeRef *param_types,
int param_count,
va_list param_value_list);
bool
aot_check_simd_compatibility(const char *arch_c_str, const char *cpu_c_str);
#ifdef __cplusplus
} /* end of extern "C" */
#endif

View File

@ -0,0 +1,381 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "simd_access_lanes.h"
#include "simd_common.h"
#include "../aot_emit_exception.h"
#include "../../aot/aot_runtime.h"
static LLVMValueRef
build_intx16_vector(const AOTCompContext *comp_ctx,
const LLVMTypeRef element_type,
const int *element_value)
{
LLVMValueRef vector, elements[16];
unsigned i;
for (i = 0; i < 16; i++) {
if (!(elements[i] =
LLVMConstInt(element_type, element_value[i], true))) {
HANDLE_FAILURE("LLVMConstInst");
goto fail;
}
}
if (!(vector = LLVMConstVector(elements, 16))) {
HANDLE_FAILURE("LLVMConstVector");
goto fail;
}
return vector;
fail:
return NULL;
}
bool
aot_compile_simd_shuffle(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
const uint8 *frame_ip)
{
LLVMValueRef vec1, vec2, mask, result;
uint8 imm[16] = { 0 };
int values[16];
unsigned i;
wasm_runtime_read_v128(frame_ip, (uint64 *)imm, (uint64 *)(imm + 8));
for (i = 0; i < 16; i++) {
values[i] = imm[i];
}
if (!(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i8x16_TYPE,
"vec2"))) {
goto fail;
}
if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i8x16_TYPE,
"vec1"))) {
goto fail;
}
/* build a vector <16 x i32> */
if (!(mask = build_intx16_vector(comp_ctx, I32_TYPE, values))) {
goto fail;
}
if (!(result = LLVMBuildShuffleVector(comp_ctx->builder, vec1, vec2, mask,
"new_vector"))) {
HANDLE_FAILURE("LLVMBuildShuffleVector");
goto fail;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
// TODO: instructions for other CPUs
/* shufflevector is not an option, since it requires *mask as a const */
bool
aot_compile_simd_swizzle(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
LLVMValueRef vector, mask, max_lanes, condition, mask_lanes, result;
LLVMTypeRef param_types[2];
int max_lane_id[16] = { 16, 16, 16, 16, 16, 16, 16, 16,
16, 16, 16, 16, 16, 16, 16, 16 },
mask_lane_id[16] = { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80,
0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };
if (!(mask = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i8x16_TYPE,
"mask"))) {
goto fail;
}
if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
V128_i8x16_TYPE, "vec"))) {
goto fail;
}
/* icmp uge <16 x i8> mask, <16, 16, 16, 16, ...> */
if (!(max_lanes = build_intx16_vector(comp_ctx, INT8_TYPE, max_lane_id))) {
goto fail;
}
if (!(condition = LLVMBuildICmp(comp_ctx->builder, LLVMIntUGE, mask,
max_lanes, "compare_with_16"))) {
HANDLE_FAILURE("LLVMBuldICmp");
goto fail;
}
/* if the highest bit of a mask byte is set, no element is picked from the vector */
/* select <16 x i1> %condition, <16 x i8> <0x80, 0x80, ...>, <16 x i8> %mask */
if (!(mask_lanes =
build_intx16_vector(comp_ctx, INT8_TYPE, mask_lane_id))) {
goto fail;
}
if (!(mask = LLVMBuildSelect(comp_ctx->builder, condition, mask_lanes,
mask, "mask"))) {
HANDLE_FAILURE("LLVMBuildSelect");
goto fail;
}
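/* x86 SSSE3 pshufb zeroes a destination byte whenever bit 7 of the
   corresponding mask byte is set, so rewriting out-of-range lane
   indices (>= 16) to 0x80 above yields the zero lanes that
   v8x16.swizzle requires. */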
param_types[0] = V128_i8x16_TYPE;
param_types[1] = V128_i8x16_TYPE;
if (!(result = aot_call_llvm_intrinsic(
comp_ctx, "llvm.x86.ssse3.pshuf.b.128", V128_i8x16_TYPE,
param_types, 2, vector, mask))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
static bool
aot_compile_simd_extract(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id,
bool need_extend,
bool is_signed,
LLVMTypeRef vector_type,
LLVMTypeRef result_type,
unsigned aot_value_type)
{
LLVMValueRef vector, idx, result;
if (!(idx = I8_CONST(lane_id))) {
HANDLE_FAILURE("LLVMConstInt");
goto fail;
}
/* bitcast <2 x i64> %0 to <vector_type> */
if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"vec"))) {
goto fail;
}
/* extractelement <vector_type> %vector, i8 lane_id */
if (!(result = LLVMBuildExtractElement(comp_ctx->builder, vector, idx,
"element"))) {
HANDLE_FAILURE("LLVMBuildExtractElement");
goto fail;
}
if (need_extend) {
if (is_signed) {
/* sext <element_type> %element to <result_type> */
if (!(result = LLVMBuildSExt(comp_ctx->builder, result,
result_type, "ret"))) {
HANDLE_FAILURE("LLVMBuildSExt");
goto fail;
}
}
else {
/* zext <element_type> %element to <result_type> */
if (!(result = LLVMBuildZExt(comp_ctx->builder, result,
result_type, "ret"))) {
HANDLE_FAILURE("LLVMBuildZExt");
goto fail;
}
}
}
PUSH(result, aot_value_type);
return true;
fail:
return false;
}
bool
aot_compile_simd_extract_i8x16(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id,
bool is_signed)
{
return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, true,
is_signed, V128_i8x16_TYPE, I32_TYPE,
VALUE_TYPE_I32);
}
bool
aot_compile_simd_extract_i16x8(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id,
bool is_signed)
{
return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, true,
is_signed, V128_i16x8_TYPE, I32_TYPE,
VALUE_TYPE_I32);
}
bool
aot_compile_simd_extract_i32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id)
{
return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, false, false,
V128_i32x4_TYPE, I32_TYPE, VALUE_TYPE_I32);
}
bool
aot_compile_simd_extract_i64x2(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id)
{
return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, false, false,
V128_i64x2_TYPE, I64_TYPE, VALUE_TYPE_I64);
}
bool
aot_compile_simd_extract_f32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id)
{
return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, false, false,
V128_f32x4_TYPE, F32_TYPE, VALUE_TYPE_F32);
}
bool
aot_compile_simd_extract_f64x2(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id)
{
return aot_compile_simd_extract(comp_ctx, func_ctx, lane_id, false, false,
V128_f64x2_TYPE, F64_TYPE, VALUE_TYPE_F64);
}
static bool
aot_compile_simd_replace(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id,
unsigned new_value_type,
LLVMTypeRef vector_type,
bool need_reduce,
LLVMTypeRef element_type)
{
LLVMValueRef vector, new_value, idx, result;
POP(new_value, new_value_type);
if (!(idx = I8_CONST(lane_id))) {
HANDLE_FAILURE("LLVMConstInt");
goto fail;
}
/* bitcast <2 x i64> %0 to <vector_type> */
if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"vec"))) {
goto fail;
}
/* bitcast <new_value_type> to <element_type> */
if (need_reduce) {
if (!(new_value = LLVMBuildTrunc(comp_ctx->builder, new_value,
element_type, "element"))) {
HANDLE_FAILURE("LLVMBuildTrunc");
goto fail;
}
}
/* insertelement <vector_type> %vector, <element_type> %element, i8 idx */
if (!(result = LLVMBuildInsertElement(comp_ctx->builder, vector, new_value,
idx, "new_vector"))) {
HANDLE_FAILURE("LLVMBuildInsertElement");
goto fail;
}
/* bitcast <vector_type> %result to <2 x i64> */
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_replace_i8x16(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id)
{
return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id,
VALUE_TYPE_I32, V128_i8x16_TYPE, true,
INT8_TYPE);
}
bool
aot_compile_simd_replace_i16x8(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id)
{
return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id,
VALUE_TYPE_I32, V128_i16x8_TYPE, true,
INT16_TYPE);
}
bool
aot_compile_simd_replace_i32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id)
{
return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id,
VALUE_TYPE_I32, V128_i32x4_TYPE, false,
I32_TYPE);
}
bool
aot_compile_simd_replace_i64x2(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id)
{
return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id,
VALUE_TYPE_I64, V128_i64x2_TYPE, false,
I64_TYPE);
}
bool
aot_compile_simd_replace_f32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id)
{
return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id,
VALUE_TYPE_F32, V128_f32x4_TYPE, false,
F32_TYPE);
}
bool
aot_compile_simd_replace_f64x2(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id)
{
return aot_compile_simd_replace(comp_ctx, func_ctx, lane_id,
VALUE_TYPE_F64, V128_f64x2_TYPE, false,
F64_TYPE);
}

View File

@ -0,0 +1,89 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SIMD_ACCESS_LANES_H_
#define _SIMD_ACCESS_LANES_H_
#include "../aot_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
aot_compile_simd_shuffle(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
const uint8 *frame_ip);
bool
aot_compile_simd_swizzle(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_simd_extract_i8x16(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id,
bool is_signed);
bool
aot_compile_simd_extract_i16x8(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id,
bool is_signed);
bool
aot_compile_simd_extract_i32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id);
bool
aot_compile_simd_extract_i64x2(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id);
bool
aot_compile_simd_extract_f32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id);
bool
aot_compile_simd_extract_f64x2(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id);
bool
aot_compile_simd_replace_i8x16(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id);
bool
aot_compile_simd_replace_i16x8(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id);
bool
aot_compile_simd_replace_i32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id);
bool
aot_compile_simd_replace_i64x2(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id);
bool
aot_compile_simd_replace_f32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id);
bool
aot_compile_simd_replace_f64x2(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 lane_id);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _SIMD_ACCESS_LANES_H_ */

View File

@ -0,0 +1,164 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "simd_bit_shifts.h"
#include "simd_common.h"
#include "../aot_emit_exception.h"
#include "../../aot/aot_runtime.h"
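/* Shift every lane by the popped i32 count. Following the WASM SIMD
spec, the count is first reduced modulo the lane width, then splat
into a vector so one LLVM shl/ashr/lshr shifts all lanes at once. */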
static bool
simd_shift(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntShift shift_op,
LLVMTypeRef vector_type,
LLVMTypeRef element_type,
unsigned lane_width)
{
LLVMValueRef vector, offset, width, undef, zeros, result;
LLVMTypeRef zeros_type;
POP_I32(offset);
if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"vec"))) {
goto fail;
}
if (!(width = LLVMConstInt(I32_TYPE, lane_width, true))) {
HANDLE_FAILURE("LLVMConstInt");
goto fail;
}
if (!(offset =
LLVMBuildURem(comp_ctx->builder, offset, width, "remainder"))) {
HANDLE_FAILURE("LLVMBuildURem");
goto fail;
}
if (I64_TYPE == element_type) {
if (!(offset = LLVMBuildZExt(comp_ctx->builder, offset, element_type,
"offset_scalar"))) {
HANDLE_FAILURE("LLVMBuildZExt");
goto fail;
}
}
else {
if (!(offset = LLVMBuildTruncOrBitCast(
comp_ctx->builder, offset, element_type, "offset_scalar"))) {
HANDLE_FAILURE("LLVMBuildTrunc");
goto fail;
}
}
/* create a vector with offset */
if (!(undef = LLVMGetUndef(vector_type))) {
HANDLE_FAILURE("LLVMGetUndef");
goto fail;
}
if (!(zeros_type = LLVMVectorType(I32_TYPE, 128 / lane_width))) {
HANDLE_FAILURE("LVMVectorType");
goto fail;
}
if (!(zeros = LLVMConstNull(zeros_type))) {
HANDLE_FAILURE("LLVMConstNull");
goto fail;
}
if (!(offset = LLVMBuildInsertElement(comp_ctx->builder, undef, offset,
I32_ZERO, "base_vector"))) {
HANDLE_FAILURE("LLVMBuildInsertElement");
goto fail;
}
if (!(offset = LLVMBuildShuffleVector(comp_ctx->builder, offset, undef,
zeros, "offset_vector"))) {
HANDLE_FAILURE("LLVMBuildShuffleVector");
goto fail;
}
switch (shift_op) {
case INT_SHL:
{
if (!(result =
LLVMBuildShl(comp_ctx->builder, vector, offset, "shl"))) {
HANDLE_FAILURE("LLVMBuildShl");
goto fail;
}
break;
}
case INT_SHR_S:
{
if (!(result = LLVMBuildAShr(comp_ctx->builder, vector, offset,
"ashr"))) {
HANDLE_FAILURE("LLVMBuildAShr");
goto fail;
}
break;
}
case INT_SHR_U:
{
if (!(result = LLVMBuildLShr(comp_ctx->builder, vector, offset,
"lshr"))) {
HANDLE_FAILURE("LLVMBuildLShr");
goto fail;
}
break;
}
default:
{
bh_assert(0);
goto fail;
}
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"result"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_i8x16_shift(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntShift shift_op)
{
return simd_shift(comp_ctx, func_ctx, shift_op, V128_i8x16_TYPE, INT8_TYPE,
8);
}
bool
aot_compile_simd_i16x8_shift(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntShift shift_op)
{
return simd_shift(comp_ctx, func_ctx, shift_op, V128_i16x8_TYPE,
INT16_TYPE, 16);
}
bool
aot_compile_simd_i32x4_shift(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntShift shift_op)
{
return simd_shift(comp_ctx, func_ctx, shift_op, V128_i32x4_TYPE, I32_TYPE,
32);
}
bool
aot_compile_simd_i64x2_shift(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntShift shift_op)
{
return simd_shift(comp_ctx, func_ctx, shift_op, V128_i64x2_TYPE, I64_TYPE,
64);
}

View File

@ -0,0 +1,39 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SIMD_BIT_SHIFTS_H_
#define _SIMD_BIT_SHIFTS_H_
#include "../aot_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
aot_compile_simd_i8x16_shift(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntShift shift_op);
bool
aot_compile_simd_i16x8_shift(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntShift shift_op);
bool
aot_compile_simd_i32x4_shift(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntShift shift_op);
bool
aot_compile_simd_i64x2_shift(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntShift shift_op);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _SIMD_BIT_SHIFTS_H_ */

View File

@ -0,0 +1,109 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "simd_bitmask_extracts.h"
#include "simd_common.h"
#include "../aot_emit_exception.h"
#include "../../aot/aot_runtime.h"
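/* Build the i32 bitmask: sign-extend each lane to i32, select the
per-lane bit constant (1 << lane) for lanes whose sign bit is set,
then OR-reduce the selected constants with an LLVM vector-reduction
intrinsic. */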
static bool
simd_build_bitmask(const AOTCompContext *comp_ctx,
const AOTFuncContext *func_ctx,
uint8 length,
LLVMTypeRef vector_type,
LLVMTypeRef element_type,
const char *intrinsic)
{
LLVMValueRef vector, zeros, mask, mask_elements[16], cond, result;
LLVMTypeRef param_types[1], vector_ext_type;
const uint32 numbers[16] = { 0x1, 0x2, 0x4, 0x8, 0x10, 0x20,
0x40, 0x80, 0x100, 0x200, 0x400, 0x800,
0x1000, 0x2000, 0x4000, 0x8000 };
uint8 i;
if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"vec"))) {
goto fail;
}
if (!(vector_ext_type = LLVMVectorType(I32_TYPE, length))) {
HANDLE_FAILURE("LLVMVectorType");
goto fail;
}
if (!(vector = LLVMBuildSExt(comp_ctx->builder, vector, vector_ext_type,
"vec_ext"))) {
HANDLE_FAILURE("LLVMBuildSExt");
goto fail;
}
if (!(zeros = LLVMConstNull(vector_ext_type))) {
HANDLE_FAILURE("LLVMConstNull");
goto fail;
}
for (i = 0; i < 16; i++) {
if (!(mask_elements[i] = LLVMConstInt(I32_TYPE, numbers[i], false))) {
HANDLE_FAILURE("LLVMConstInt");
goto fail;
}
}
if (!(mask = LLVMConstVector(mask_elements, length))) {
HANDLE_FAILURE("LLVMConstVector");
goto fail;
}
if (!(cond = LLVMBuildICmp(comp_ctx->builder, LLVMIntSLT, vector, zeros,
"lt_zero"))) {
HANDLE_FAILURE("LLVMBuildICmp");
goto fail;
}
if (!(result =
LLVMBuildSelect(comp_ctx->builder, cond, mask, zeros, "select"))) {
HANDLE_FAILURE("LLVMBuildSelect");
goto fail;
}
param_types[0] = vector_ext_type;
if (!(result = aot_call_llvm_intrinsic(comp_ctx, intrinsic, I32_TYPE,
param_types, 1, result))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;
}
PUSH_I32(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_i8x16_bitmask(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx)
{
return simd_build_bitmask(comp_ctx, func_ctx, 16, V128_i8x16_TYPE,
INT8_TYPE,
"llvm.experimental.vector.reduce.or.v16i32");
}
bool
aot_compile_simd_i16x8_bitmask(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx)
{
return simd_build_bitmask(comp_ctx, func_ctx, 8, V128_i16x8_TYPE,
INT16_TYPE,
"llvm.experimental.vector.reduce.or.v8i32");
}
bool
aot_compile_simd_i32x4_bitmask(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx)
{
return simd_build_bitmask(comp_ctx, func_ctx, 4, V128_i32x4_TYPE, I32_TYPE,
"llvm.experimental.vector.reduce.or.v4i32");
}

View File

@ -0,0 +1,29 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SIMD_BITMASK_EXTRACTS_H_
#define _SIMD_BITMASK_EXTRACTS_H_
#include "../aot_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
aot_compile_simd_i8x16_bitmask(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_simd_i16x8_bitmask(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_simd_i32x4_bitmask(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _SIMD_BITMASK_EXTRACTS_H_ */

View File

@ -0,0 +1,146 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "simd_bitwise_ops.h"
#include "../aot_emit_exception.h"
#include "../../aot/aot_runtime.h"
static bool
v128_bitwise_two_component(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Bitwise bitwise_op)
{
LLVMValueRef vector1, vector2, result;
POP_V128(vector2);
POP_V128(vector1);
switch (bitwise_op) {
case V128_AND:
if (!(result = LLVMBuildAnd(comp_ctx->builder, vector1, vector2,
"and"))) {
HANDLE_FAILURE("LLVMBuildAnd");
goto fail;
}
break;
case V128_OR:
if (!(result =
LLVMBuildOr(comp_ctx->builder, vector1, vector2, "or"))) {
HANDLE_FAILURE("LLVMBuildAnd");
goto fail;
}
break;
case V128_XOR:
if (!(result = LLVMBuildXor(comp_ctx->builder, vector1, vector2,
"xor"))) {
HANDLE_FAILURE("LLVMBuildAnd");
goto fail;
}
break;
case V128_ANDNOT:
{
/* v128.and(a, v128.not(b)) */
if (!(vector2 = LLVMBuildNot(comp_ctx->builder, vector2, "not"))) {
HANDLE_FAILURE("LLVMBuildNot");
goto fail;
}
if (!(result = LLVMBuildAnd(comp_ctx->builder, vector1, vector2,
"and"))) {
HANDLE_FAILURE("LLVMBuildAnd");
goto fail;
}
break;
}
default:
bh_assert(0);
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
static bool
v128_bitwise_not(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
LLVMValueRef vector, result;
POP_V128(vector);
if (!(result = LLVMBuildNot(comp_ctx->builder, vector, "not"))) {
HANDLE_FAILURE("LLVMBuildNot");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
/* v128.or(v128.and(v1, c), v128.and(v2, v128.not(c))) */
static bool
v128_bitwise_bit_select(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
LLVMValueRef vector1, vector2, vector3, result;
POP_V128(vector3);
POP_V128(vector2);
POP_V128(vector1);
if (!(vector1 =
LLVMBuildAnd(comp_ctx->builder, vector1, vector3, "a_and_c"))) {
HANDLE_FAILURE("LLVMBuildAdd");
goto fail;
}
if (!(vector3 = LLVMBuildNot(comp_ctx->builder, vector3, "not_c"))) {
HANDLE_FAILURE("LLVMBuildNot");
goto fail;
}
if (!(vector2 =
LLVMBuildAnd(comp_ctx->builder, vector2, vector3, "b_and_c"))) {
HANDLE_FAILURE("LLVMBuildAdd");
goto fail;
}
if (!(result =
LLVMBuildOr(comp_ctx->builder, vector1, vector2, "a_or_b"))) {
HANDLE_FAILURE("LLVMBuildOr");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_v128_bitwise(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Bitwise bitwise_op)
{
switch (bitwise_op) {
case V128_AND:
case V128_OR:
case V128_XOR:
case V128_ANDNOT:
return v128_bitwise_two_component(comp_ctx, func_ctx, bitwise_op);
case V128_NOT:
return v128_bitwise_not(comp_ctx, func_ctx);
case V128_BITSELECT:
return v128_bitwise_bit_select(comp_ctx, func_ctx);
default:
bh_assert(0);
return false;
}
}

View File

@ -0,0 +1,24 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SIMD_BITWISE_OPS_H_
#define _SIMD_BITWISE_OPS_H_
#include "../aot_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
aot_compile_simd_v128_bitwise(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Bitwise bitwise_op);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _SIMD_BITWISE_OPS_H_ */

View File

@ -0,0 +1,183 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "simd_bool_reductions.h"
#include "simd_common.h"
#include "../aot_emit_exception.h"
#include "../../aot/aot_runtime.h"
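/* any_true: compare every lane against zero, zero-extend the i1
results back to lane width, add-reduce them, and return whether the
sum is non-zero. */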
static bool
simd_any_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
LLVMTypeRef vector_type,
LLVMTypeRef element_type,
const char *intrinsic)
{
LLVMValueRef vector, zeros, non_zero, result;
if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"vec"))) {
goto fail;
}
if (!(zeros = LLVMConstNull(vector_type))) {
HANDLE_FAILURE("LLVMConstNull");
goto fail;
}
/* icmp ne <N x iX> %vector, zeroinitializer */
if (!(non_zero = LLVMBuildICmp(comp_ctx->builder, LLVMIntNE, vector, zeros,
"non_zero"))) {
HANDLE_FAILURE("LLVMBuildICmp");
goto fail;
}
/* zext <N x i1> to <N x iX> */
if (!(non_zero = LLVMBuildZExt(comp_ctx->builder, non_zero, vector_type,
"non_zero_ex"))) {
HANDLE_FAILURE("LLVMBuildZExt");
goto fail;
}
if (!(result = aot_call_llvm_intrinsic(comp_ctx, intrinsic, element_type,
&vector_type, 1, non_zero))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;
}
if (!(zeros = LLVMConstNull(element_type))) {
HANDLE_FAILURE("LLVMConstNull");
goto fail;
}
if (!(result = LLVMBuildICmp(comp_ctx->builder, LLVMIntNE, result, zeros,
"gt_zero"))) {
HANDLE_FAILURE("LLVMBuildICmp");
goto fail;
}
if (!(result =
LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE, "ret"))) {
HANDLE_FAILURE("LLVMBuildZExt");
goto fail;
}
PUSH_I32(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_i8x16_any_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx)
{
return simd_any_true(comp_ctx, func_ctx, V128_i8x16_TYPE, INT8_TYPE,
"llvm.experimental.vector.reduce.add.v16i8");
}
bool
aot_compile_simd_i16x8_any_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx)
{
return simd_any_true(comp_ctx, func_ctx, V128_i16x8_TYPE, INT16_TYPE,
"llvm.experimental.vector.reduce.add.v8i16");
}
bool
aot_compile_simd_i32x4_any_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx)
{
return simd_any_true(comp_ctx, func_ctx, V128_i32x4_TYPE, I32_TYPE,
"llvm.experimental.vector.reduce.add.v4i32");
}
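/* all_true: add-reduce the (lane == 0) flags; all lanes are non-zero
exactly when the resulting count of zero lanes is zero. */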
static bool
simd_all_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
LLVMTypeRef vector_type,
LLVMTypeRef element_type,
const char *intrinsic)
{
LLVMValueRef vector, zeros, is_zero, result;
if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"vec"))) {
goto fail;
}
if (!(zeros = LLVMConstNull(vector_type))) {
HANDLE_FAILURE("LLVMConstNull");
goto fail;
}
/* icmp eq <N x iX> %vector, zeroinitializer */
if (!(is_zero = LLVMBuildICmp(comp_ctx->builder, LLVMIntEQ, vector, zeros,
"is_zero"))) {
HANDLE_FAILURE("LLVMBuildICmp");
goto fail;
}
/* zext <N x i1> to <N x iX> */
if (!(is_zero = LLVMBuildZExt(comp_ctx->builder, is_zero, vector_type,
"is_zero_ex"))) {
HANDLE_FAILURE("LLVMBuildZExt");
goto fail;
}
if (!(result = aot_call_llvm_intrinsic(comp_ctx, intrinsic, element_type,
&vector_type, 1, is_zero))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;
}
if (!(zeros = LLVMConstNull(element_type))) {
HANDLE_FAILURE("LLVMConstNull");
goto fail;
}
if (!(result = LLVMBuildICmp(comp_ctx->builder, LLVMIntEQ, result, zeros,
"none"))) {
HANDLE_FAILURE("LLVMBuildICmp");
goto fail;
}
if (!(result =
LLVMBuildZExt(comp_ctx->builder, result, I32_TYPE, "ret"))) {
HANDLE_FAILURE("LLVMBuildZExt");
goto fail;
}
PUSH_I32(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_i8x16_all_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx)
{
return simd_all_true(comp_ctx, func_ctx, V128_i8x16_TYPE, INT8_TYPE,
"llvm.experimental.vector.reduce.add.v16i8");
}
bool
aot_compile_simd_i16x8_all_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx)
{
return simd_all_true(comp_ctx, func_ctx, V128_i16x8_TYPE, INT16_TYPE,
"llvm.experimental.vector.reduce.add.v8i16");
}
bool
aot_compile_simd_i32x4_all_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx)
{
return simd_all_true(comp_ctx, func_ctx, V128_i32x4_TYPE, I32_TYPE,
"llvm.experimental.vector.reduce.add.v4i32");
}

View File

@ -0,0 +1,43 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SIMD_BOOL_REDUCTIONS_H_
#define _SIMD_BOOL_REDUCTIONS_H_
#include "../aot_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
aot_compile_simd_i8x16_any_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx);
bool
aot_compile_simd_i16x8_any_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx);
bool
aot_compile_simd_i32x4_any_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx);
bool
aot_compile_simd_i8x16_all_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx);
bool
aot_compile_simd_i16x8_all_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx);
bool
aot_compile_simd_i32x4_all_true(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _SIMD_BOOL_REDUCTIONS_H_ */

View File

@ -0,0 +1,47 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "simd_common.h"
LLVMValueRef
simd_pop_v128_and_bitcast(const AOTCompContext *comp_ctx,
const AOTFuncContext *func_ctx,
LLVMTypeRef vec_type,
const char *name)
{
LLVMValueRef number;
POP_V128(number);
if (!(number =
LLVMBuildBitCast(comp_ctx->builder, number, vec_type, name))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
return number;
fail:
return NULL;
}
bool
simd_bitcast_and_push_v128(const AOTCompContext *comp_ctx,
const AOTFuncContext *func_ctx,
LLVMValueRef vector,
const char *name)
{
if (!(vector = LLVMBuildBitCast(comp_ctx->builder, vector, V128_i64x2_TYPE,
name))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
/* push result into the stack */
PUSH_V128(vector);
return true;
fail:
return false;
}

View File

@ -0,0 +1,23 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SIMD_COMMON_H_
#define _SIMD_COMMON_H_
#include "../aot_compiler.h"
LLVMValueRef
simd_pop_v128_and_bitcast(const AOTCompContext *comp_ctx,
const AOTFuncContext *func_ctx,
LLVMTypeRef vec_type,
const char *name);
bool
simd_bitcast_and_push_v128(const AOTCompContext *comp_ctx,
const AOTFuncContext *func_ctx,
LLVMValueRef vector,
const char *name);
#endif /* _SIMD_COMMON_H_ */

View File

@ -0,0 +1,231 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "simd_comparisons.h"
#include "simd_common.h"
#include "../aot_emit_exception.h"
#include "../../aot/aot_runtime.h"
static bool
float_cond_2_predicate(FloatCond cond, LLVMRealPredicate *out)
{
switch (cond) {
case FLOAT_EQ:
*out = LLVMRealOEQ;
break;
case FLOAT_NE:
*out = LLVMRealUNE;
break;
case FLOAT_LT:
*out = LLVMRealOLT;
break;
case FLOAT_GT:
*out = LLVMRealOGT;
break;
case FLOAT_LE:
*out = LLVMRealOLE;
break;
case FLOAT_GE:
*out = LLVMRealOGE;
break;
default:
bh_assert(0);
goto fail;
}
return true;
fail:
return false;
}
static bool
int_cond_2_predicate(IntCond cond, LLVMIntPredicate *out)
{
switch (cond) {
case INT_EQZ:
case INT_EQ:
*out = LLVMIntEQ;
break;
case INT_NE:
*out = LLVMIntNE;
break;
case INT_LT_S:
*out = LLVMIntSLT;
break;
case INT_LT_U:
*out = LLVMIntULT;
break;
case INT_GT_S:
*out = LLVMIntSGT;
break;
case INT_GT_U:
*out = LLVMIntUGT;
break;
case INT_LE_S:
*out = LLVMIntSLE;
break;
case INT_LE_U:
*out = LLVMIntULE;
break;
case INT_GE_S:
*out = LLVMIntSGE;
break;
case INT_GE_U:
*out = LLVMIntUGE;
break;
default:
bh_assert(0);
goto fail;
}
return true;
fail:
return false;
}
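/* Vector compares: icmp/fcmp produces <N x i1>; sign-extend that mask
to full lane width so true lanes become all-ones, then bitcast to
<2 x i64>. */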
static bool
integer_vector_compare(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntCond cond,
LLVMTypeRef vector_type)
{
LLVMValueRef vec1, vec2, result;
LLVMIntPredicate int_pred;
if (!(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"vec2"))) {
goto fail;
}
if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"vec1"))) {
goto fail;
}
if (!int_cond_2_predicate(cond, &int_pred)) {
HANDLE_FAILURE("int_cond_2_predicate");
goto fail;
}
/* icmp <N x iX> %vec1, %vec2 */
if (!(result =
LLVMBuildICmp(comp_ctx->builder, int_pred, vec1, vec2, "cmp"))) {
HANDLE_FAILURE("LLVMBuildICmp");
goto fail;
}
/* sext <N x i1> %result to <N x iX> */
if (!(result =
LLVMBuildSExt(comp_ctx->builder, result, vector_type, "ext"))) {
HANDLE_FAILURE("LLVMBuildSExt");
goto fail;
}
/* bitcast <N x iX> %result to <2 x i64> */
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"result"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_i8x16_compare(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntCond cond)
{
return integer_vector_compare(comp_ctx, func_ctx, cond, V128_i8x16_TYPE);
}
bool
aot_compile_simd_i16x8_compare(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntCond cond)
{
return integer_vector_compare(comp_ctx, func_ctx, cond, V128_i16x8_TYPE);
}
bool
aot_compile_simd_i32x4_compare(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntCond cond)
{
return integer_vector_compare(comp_ctx, func_ctx, cond, V128_i32x4_TYPE);
}
static bool
float_vector_compare(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
FloatCond cond,
LLVMTypeRef vector_type,
LLVMTypeRef result_type)
{
LLVMValueRef vec1, vec2, result;
LLVMRealPredicate real_pred;
if (!(vec2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"vec2"))) {
goto fail;
}
if (!(vec1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"vec1"))) {
goto fail;
}
if (!float_cond_2_predicate(cond, &real_pred)) {
HANDLE_FAILURE("float_cond_2_predicate");
goto fail;
}
/* fcmp <N x fX> %vec1, %vec2 */
if (!(result =
LLVMBuildFCmp(comp_ctx->builder, real_pred, vec1, vec2, "cmp"))) {
HANDLE_FAILURE("LLVMBuildFCmp");
goto fail;
}
/* sext <N x i1> %result to <N x iX> */
if (!(result =
LLVMBuildSExt(comp_ctx->builder, result, result_type, "ext"))) {
HANDLE_FAILURE("LLVMBuildSExt");
goto fail;
}
/* bitcast <N x iX> %result to <2 x i64> */
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"result"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_f32x4_compare(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
FloatCond cond)
{
return float_vector_compare(comp_ctx, func_ctx, cond, V128_f32x4_TYPE,
V128_i32x4_TYPE);
}
bool
aot_compile_simd_f64x2_compare(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
FloatCond cond)
{
return float_vector_compare(comp_ctx, func_ctx, cond, V128_f64x2_TYPE,
V128_i64x2_TYPE);
}

View File

@ -0,0 +1,44 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SIMD_COMPARISONS_H_
#define _SIMD_COMPARISONS_H_
#include "../aot_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
aot_compile_simd_i8x16_compare(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntCond cond);
bool
aot_compile_simd_i16x8_compare(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntCond cond);
bool
aot_compile_simd_i32x4_compare(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
IntCond cond);
bool
aot_compile_simd_f32x4_compare(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
FloatCond cond);
bool
aot_compile_simd_f64x2_compare(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
FloatCond cond);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _SIMD_COMPARISONS_H_ */

View File

@ -0,0 +1,190 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "simd_construct_values.h"
#include "../aot_emit_exception.h"
#include "../interpreter/wasm_opcode.h"
#include "../../aot/aot_runtime.h"
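/* v128.const: read the 16-byte immediate as two i64 halves and
assemble a <2 x i64> with two insertelement instructions. */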
bool
aot_compile_simd_v128_const(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
const uint8 *imm_bytes)
{
uint64 imm1, imm2;
LLVMValueRef undef, first_long, agg1, second_long, agg2;
wasm_runtime_read_v128(imm_bytes, &imm1, &imm2);
if (!(undef = LLVMGetUndef(V128_i64x2_TYPE))) {
HANDLE_FAILURE("LLVMGetUndef");
goto fail;
}
/* %agg1 = insertelement <2 x i64> undef, i64 ${imm1}, i32 0 */
if (!(first_long = I64_CONST(imm1))) {
HANDLE_FAILURE("LLVMConstInt");
goto fail;
}
if (!(agg1 = LLVMBuildInsertElement(comp_ctx->builder, undef, first_long,
I32_ZERO, "agg1"))) {
HANDLE_FAILURE("LLVMBuildInsertElement");
goto fail;
}
/* %agg2 = insertelement <2 x i64> %agg1, i64 ${imm2}, i32 1 */
if (!(second_long = I64_CONST(imm2))) {
HANDLE_FAILURE("LLVMGetUndef");
goto fail;
}
if (!(agg2 = LLVMBuildInsertElement(comp_ctx->builder, agg1, second_long,
I32_ONE, "agg2"))) {
HANDLE_FAILURE("LLVMBuildInsertElement");
goto fail;
}
PUSH_V128(agg2);
return true;
fail:
return false;
}
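/* splat: pop the scalar (truncating i32 to i8/i16 for narrow lanes),
insert it at lane 0 of an undef vector, then broadcast it with a
shufflevector whose mask is zeroinitializer. */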
bool
aot_compile_simd_splat(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 splat_opcode)
{
LLVMValueRef value, undef, base, mask, new_vector, result;
LLVMTypeRef all_zero_ty;
switch (splat_opcode) {
case SIMD_i8x16_splat:
{
LLVMValueRef input;
POP_I32(input);
/* trunc i32 %input to i8 */
if (!(value = LLVMBuildTrunc(comp_ctx->builder, input, INT8_TYPE,
"trunc"))) {
HANDLE_FAILURE("LLVMBuildTrunc");
goto fail;
}
undef = LLVMGetUndef(V128_i8x16_TYPE);
if (!(all_zero_ty = LLVMVectorType(I32_TYPE, 16))) {
HANDLE_FAILURE("LLVMVectorType");
goto fail;
}
break;
}
case SIMD_i16x8_splat:
{
LLVMValueRef input;
POP_I32(input);
/* trunc i32 %input to i16 */
if (!(value = LLVMBuildTrunc(comp_ctx->builder, input, INT16_TYPE,
"trunc"))) {
HANDLE_FAILURE("LLVMBuildTrunc");
goto fail;
}
undef = LLVMGetUndef(V128_i16x8_TYPE);
if (!(all_zero_ty = LLVMVectorType(I32_TYPE, 8))) {
HANDLE_FAILURE("LLVMVectorType");
goto fail;
}
break;
}
case SIMD_i32x4_splat:
{
POP_I32(value);
undef = LLVMGetUndef(V128_i32x4_TYPE);
if (!(all_zero_ty = LLVMVectorType(I32_TYPE, 4))) {
HANDLE_FAILURE("LLVMVectorType");
goto fail;
}
break;
}
case SIMD_i64x2_splat:
{
POP(value, VALUE_TYPE_I64);
undef = LLVMGetUndef(V128_i64x2_TYPE);
if (!(all_zero_ty = LLVMVectorType(I32_TYPE, 2))) {
HANDLE_FAILURE("LLVMVectorType");
goto fail;
}
break;
}
case SIMD_f32x4_splat:
{
POP(value, VALUE_TYPE_F32);
undef = LLVMGetUndef(V128_f32x4_TYPE);
if (!(all_zero_ty = LLVMVectorType(I32_TYPE, 4))) {
HANDLE_FAILURE("LLVMVectorType");
goto fail;
}
break;
}
case SIMD_f64x2_splat:
{
POP(value, VALUE_TYPE_F64);
undef = LLVMGetUndef(V128_f64x2_TYPE);
if (!(all_zero_ty = LLVMVectorType(I32_TYPE, 2))) {
HANDLE_FAILURE("LLVMVectorType");
goto fail;
}
break;
}
default:
{
bh_assert(0);
goto fail;
}
}
if (!undef) {
HANDLE_FAILURE("LVMGetUndef");
goto fail;
}
/* insertelement <n x ty> undef, ty %value, i32 0 */
if (!(base = LLVMBuildInsertElement(comp_ctx->builder, undef, value,
I32_ZERO, "base"))) {
HANDLE_FAILURE("LLVMBuildInsertElement");
goto fail;
}
/* <n x i32> zeroinitializer */
if (!(mask = LLVMConstNull(all_zero_ty))) {
HANDLE_FAILURE("LLVMConstNull");
goto fail;
}
/* shufflevector <ty1> %base, <ty2> undef, <n x i32> zeroinitializer */
if (!(new_vector = LLVMBuildShuffleVector(comp_ctx->builder, base, undef,
mask, "new_vector"))) {
HANDLE_FAILURE("LLVMBuildShuffleVector");
goto fail;
}
/* bitcast <ty> <value> to <2 x i64> */
if (!(result = LLVMBuildBitCast(comp_ctx->builder, new_vector,
V128_i64x2_TYPE, "ret"))) {
HANDLE_FAILURE("LLVMBuidlCast");
goto fail;
}
/* push result into the stack */
PUSH_V128(result);
return true;
fail:
return false;
}

View File

@ -0,0 +1,29 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SIMD_CONSTRUCT_VALUES_H_
#define _SIMD_CONSTRUCT_VALUES_H_
#include "../aot_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
aot_compile_simd_v128_const(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
const uint8 *imm_bytes);
bool
aot_compile_simd_splat(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 splat_opcode);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _SIMD_CONSTRUCT_VALUES_H_ */

View File

@ -0,0 +1,422 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "simd_conversions.h"
#include "simd_common.h"
#include "../aot_emit_exception.h"
#include "../aot_emit_numberic.h"
#include "../../aot/aot_runtime.h"
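/* Integer narrowing is lowered to the x86 saturating pack intrinsics
(packsswb/packuswb for 16->8, packssdw/packusdw for 32->16), chosen
by signedness. */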
static bool
simd_integer_narrow(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_signed,
LLVMTypeRef in_vector_type,
LLVMTypeRef out_vector_type,
const char *intrinsic)
{
LLVMValueRef vector1, vector2, result;
LLVMTypeRef param_types[2] = { in_vector_type, in_vector_type };
if (!(vector2 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
in_vector_type, "vec2"))) {
goto fail;
}
if (!(vector1 = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
in_vector_type, "vec1"))) {
goto fail;
}
if (!(result =
aot_call_llvm_intrinsic(comp_ctx, intrinsic, out_vector_type,
param_types, 2, vector1, vector2))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_i8x16_narrow_i16x8(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_signed)
{
return simd_integer_narrow(
comp_ctx, func_ctx, is_signed, V128_i16x8_TYPE, V128_i8x16_TYPE,
is_signed ? "llvm.x86.sse2.packsswb.128" : "llvm.x86.sse2.packuswb.128");
}
bool
aot_compile_simd_i16x8_narrow_i32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_signed)
{
return simd_integer_narrow(
comp_ctx, func_ctx, is_signed, V128_i32x4_TYPE, V128_i16x8_TYPE,
is_signed ? "llvm.x86.sse2.packssdw.128" : "llvm.x86.sse41.packusdw");
}
bool
aot_compile_simd_i16x8_widen_i8x16(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_low_half,
bool is_signed)
{
LLVMValueRef vector, undef, mask_high[8], mask_low[8], mask, shuffled,
result;
uint8 mask_high_value[8] = { 0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf },
mask_low_value[8] = { 0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7 }, i;
if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
V128_i8x16_TYPE, "vec"))) {
goto fail;
}
if (!(undef = LLVMGetUndef(V128_i8x16_TYPE))) {
HANDLE_FAILURE("LLVMGetUndef");
goto fail;
}
/* create a mask */
for (i = 0; i < 8; i++) {
mask_high[i] = LLVMConstInt(I32_TYPE, mask_high_value[i], true);
mask_low[i] = LLVMConstInt(I32_TYPE, mask_low_value[i], true);
}
mask = is_low_half ? LLVMConstVector(mask_low, 8)
: LLVMConstVector(mask_high, 8);
if (!mask) {
HANDLE_FAILURE("LLVMConstVector");
goto fail;
}
/* retrieve the low or high half */
if (!(shuffled = LLVMBuildShuffleVector(comp_ctx->builder, vector, undef,
mask, "shuffled"))) {
HANDLE_FAILURE("LLVMBuildShuffleVector");
goto fail;
}
if (is_signed) {
if (!(result = LLVMBuildSExt(comp_ctx->builder, shuffled,
V128_i16x8_TYPE, "ext"))) {
HANDLE_FAILURE("LLVMBuildSExt");
goto fail;
}
}
else {
if (!(result = LLVMBuildZExt(comp_ctx->builder, shuffled,
V128_i16x8_TYPE, "ext"))) {
HANDLE_FAILURE("LLVMBuildZExt");
goto fail;
}
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_i32x4_widen_i16x8(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_low_half,
bool is_signed)
{
LLVMValueRef vector, undef, mask_high[4], mask_low[4], mask, shuffled,
result;
uint8 mask_high_value[4] = { 0x4, 0x5, 0x6, 0x7 },
mask_low_value[4] = { 0x0, 0x1, 0x2, 0x3 }, i;
if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
V128_i16x8_TYPE, "vec"))) {
goto fail;
}
if (!(undef = LLVMGetUndef(V128_i16x8_TYPE))) {
HANDLE_FAILURE("LLVMGetUndef");
goto fail;
}
/* create a mask */
for (i = 0; i < 4; i++) {
mask_high[i] = LLVMConstInt(I32_TYPE, mask_high_value[i], true);
mask_low[i] = LLVMConstInt(I32_TYPE, mask_low_value[i], true);
}
mask = is_low_half ? LLVMConstVector(mask_low, 4)
: LLVMConstVector(mask_high, 4);
if (!mask) {
HANDLE_FAILURE("LLVMConstVector");
goto fail;
}
/* retrieve the low or high half */
if (!(shuffled = LLVMBuildShuffleVector(comp_ctx->builder, vector, undef,
mask, "shuffled"))) {
HANDLE_FAILURE("LLVMBuildShuffleVector");
goto fail;
}
if (is_signed) {
if (!(result = LLVMBuildSExt(comp_ctx->builder, shuffled,
V128_i32x4_TYPE, "ext"))) {
HANDLE_FAILURE("LLVMBuildSExt");
goto fail;
}
}
else {
if (!(result = LLVMBuildZExt(comp_ctx->builder, shuffled,
V128_i32x4_TYPE, "ext"))) {
HANDLE_FAILURE("LLVMBuildZExt");
goto fail;
}
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
static LLVMValueRef
simd_build_const_f32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
float f)
{
LLVMValueRef elements[4], vector;
if (!(elements[0] = LLVMConstReal(F32_TYPE, f))) {
HANDLE_FAILURE("LLVMConstInt");
goto fail;
}
elements[1] = elements[2] = elements[3] = elements[0];
if (!(vector = LLVMConstVector(elements, 4))) {
HANDLE_FAILURE("LLVMConstVector");
goto fail;
}
return vector;
fail:
return NULL;
}
static LLVMValueRef
simd_build_const_i32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint64 integer,
bool is_signed)
{
LLVMValueRef elements[4], vector;
if (!(elements[0] = LLVMConstInt(I32_TYPE, integer, is_signed))) {
HANDLE_FAILURE("LLVMConstInt");
goto fail;
}
elements[1] = elements[2] = elements[3] = elements[0];
if (!(vector = LLVMConstVector(elements, 4))) {
HANDLE_FAILURE("LLVMConstVector");
goto fail;
}
return vector;
fail:
return NULL;
}
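/* i32x4.trunc_sat_f32x4: a plain fptosi/fptoui is undefined on
out-of-range input, so clamp explicitly: compare against the float
min/max bounds, convert, then select the integer min/max for
out-of-range lanes and zero for NaN lanes. */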
bool
aot_compile_simd_i32x4_trunc_sat_f32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_signed)
{
LLVMValueRef vector, zeros, not_nan, max_float_v, min_float_v, is_ge_max,
is_le_min, result, max_int_v, min_int_v;
uint32 max_ui = 0xFFffFFff, min_ui = 0x0;
int32 max_si = 0x7FFFffff, min_si = 0x80000000;
float max_f_ui = 4294967296.0f, min_f_ui = 0.0f, max_f_si = 2147483647.0f,
min_f_si = -2147483648.0f;
if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
V128_f32x4_TYPE, "vec"))) {
goto fail;
}
if (!(zeros = LLVMConstNull(V128_f32x4_TYPE))) {
HANDLE_FAILURE("LLVMConstNull");
goto fail;
}
if (is_signed) {
if (!(max_float_v =
simd_build_const_f32x4(comp_ctx, func_ctx, max_f_si))) {
goto fail;
}
if (!(min_float_v =
simd_build_const_f32x4(comp_ctx, func_ctx, min_f_si))) {
goto fail;
}
if (!(max_int_v =
simd_build_const_i32x4(comp_ctx, func_ctx, max_si, true))) {
goto fail;
}
if (!(min_int_v =
simd_build_const_i32x4(comp_ctx, func_ctx, min_si, true))) {
goto fail;
}
}
else {
if (!(max_float_v =
simd_build_const_f32x4(comp_ctx, func_ctx, max_f_ui))) {
goto fail;
}
if (!(min_float_v =
simd_build_const_f32x4(comp_ctx, func_ctx, min_f_ui))) {
goto fail;
}
if (!(max_int_v =
simd_build_const_i32x4(comp_ctx, func_ctx, max_ui, false))) {
goto fail;
}
if (!(min_int_v =
simd_build_const_i32x4(comp_ctx, func_ctx, min_ui, false))) {
goto fail;
}
}
/* LLVMRealORD is true only when neither operand is NaN */
if (!(not_nan = LLVMBuildFCmp(comp_ctx->builder, LLVMRealORD, vector, zeros,
"not_nan"))) {
HANDLE_FAILURE("LLVMBuildFCmp");
goto fail;
}
if (!(is_le_min = LLVMBuildFCmp(comp_ctx->builder, LLVMRealOLE, vector,
min_float_v, "le_min"))) {
HANDLE_FAILURE("LLVMBuildFCmp");
goto fail;
}
if (!(is_ge_max = LLVMBuildFCmp(comp_ctx->builder, LLVMRealOGE, vector,
max_float_v, "ge_max"))) {
HANDLE_FAILURE("LLVMBuildFCmp");
goto fail;
}
if (is_signed) {
if (!(result = LLVMBuildFPToSI(comp_ctx->builder, vector,
V128_i32x4_TYPE, "truncated"))) {
HANDLE_FAILURE("LLVMBuildSIToFP");
goto fail;
}
}
else {
if (!(result = LLVMBuildFPToUI(comp_ctx->builder, vector,
V128_i32x4_TYPE, "truncated"))) {
HANDLE_FAILURE("LLVMBuildUIToFP");
goto fail;
}
}
if (!(result = LLVMBuildSelect(comp_ctx->builder, is_ge_max, max_int_v,
result, "sat_w_max"))) {
HANDLE_FAILURE("LLVMBuildSelect");
goto fail;
}
if (!(result = LLVMBuildSelect(comp_ctx->builder, is_le_min, min_int_v,
result, "sat_w_min"))) {
HANDLE_FAILURE("LLVMBuildSelect");
goto fail;
}
if (!(result = LLVMBuildSelect(comp_ctx->builder, not_nan, result,
V128_i32x4_ZERO, "sat_w_nan"))) {
HANDLE_FAILURE("LLVMBuildSelect");
goto fail;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_f32x4_convert_i32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_signed)
{
LLVMValueRef vector, result;
if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
V128_i32x4_TYPE, "vec"))) {
goto fail;
}
if (is_signed) {
if (!(result = LLVMBuildSIToFP(comp_ctx->builder, vector,
V128_f32x4_TYPE, "converted"))) {
HANDLE_FAILURE("LLVMBuildSIToFP");
goto fail;
}
}
else {
if (!(result = LLVMBuildUIToFP(comp_ctx->builder, vector,
V128_f32x4_TYPE, "converted"))) {
HANDLE_FAILURE("LLVMBuildSIToFP");
goto fail;
}
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}

View File

@ -0,0 +1,51 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SIMD_CONVERSIONS_H_
#define _SIMD_CONVERSIONS_H_
#include "../aot_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
aot_compile_simd_i8x16_narrow_i16x8(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_signed);
bool
aot_compile_simd_i16x8_narrow_i32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_signed);
bool
aot_compile_simd_i16x8_widen_i8x16(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_low,
bool is_signed);
bool
aot_compile_simd_i32x4_widen_i16x8(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_low,
bool is_signed);
bool
aot_compile_simd_i32x4_trunc_sat_f32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_signed);
bool
aot_compile_simd_f32x4_convert_i32x4(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_signed);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _SIMD_CONVERSIONS_H_ */

View File

@ -0,0 +1,273 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "simd_floating_point.h"
#include "simd_common.h"
#include "../aot_emit_exception.h"
#include "../aot_emit_numberic.h"
#include "../../aot/aot_runtime.h"
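/* f32x4/f64x2 min/max: an unordered fcmp (ULT for min, UGT for max)
plus a select; the unordered predicate makes the left operand win
whenever a NaN is involved. */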
static LLVMValueRef
simd_v128_float_cmp(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
FloatArithmetic arith_op,
LLVMValueRef lhs,
LLVMValueRef rhs)
{
LLVMValueRef result;
LLVMRealPredicate op;
op = FLOAT_MIN == arith_op ? LLVMRealULT : LLVMRealUGT;
if (!(result = LLVMBuildFCmp(comp_ctx->builder, op, lhs, rhs, "cmp"))) {
HANDLE_FAILURE("LLVMBuildFCmp");
goto fail;
}
if (!(result =
LLVMBuildSelect(comp_ctx->builder, result, lhs, rhs, "select"))) {
HANDLE_FAILURE("LLVMBuildSelect");
goto fail;
}
return result;
fail:
return NULL;
}
static bool
simd_v128_float_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
FloatArithmetic arith_op,
LLVMTypeRef vector_type)
{
LLVMValueRef lhs, rhs, result;
if (!(rhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"rhs"))) {
goto fail;
}
if (!(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"lhs"))) {
goto fail;
}
switch (arith_op) {
case FLOAT_ADD:
if (!(result =
LLVMBuildFAdd(comp_ctx->builder, lhs, rhs, "sum"))) {
HANDLE_FAILURE("LLVMBuildFAdd");
goto fail;
}
break;
case FLOAT_SUB:
if (!(result = LLVMBuildFSub(comp_ctx->builder, lhs, rhs,
"difference"))) {
HANDLE_FAILURE("LLVMBuildFSub");
goto fail;
}
break;
case FLOAT_MUL:
if (!(result =
LLVMBuildFMul(comp_ctx->builder, lhs, rhs, "product"))) {
HANDLE_FAILURE("LLVMBuildFMul");
goto fail;
}
break;
case FLOAT_DIV:
if (!(result =
LLVMBuildFDiv(comp_ctx->builder, lhs, rhs, "quotient"))) {
HANDLE_FAILURE("LLVMBuildFDiv");
goto fail;
}
break;
case FLOAT_MIN:
if (!(result = simd_v128_float_cmp(comp_ctx, func_ctx, FLOAT_MIN,
lhs, rhs))) {
goto fail;
}
break;
case FLOAT_MAX:
if (!(result = simd_v128_float_cmp(comp_ctx, func_ctx, FLOAT_MAX,
lhs, rhs))) {
goto fail;
}
break;
default:
result = NULL;
bh_assert(0);
break;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
/* push result into the stack */
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_f32x4_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
FloatArithmetic arith_op)
{
return simd_v128_float_arith(comp_ctx, func_ctx, arith_op,
V128_f32x4_TYPE);
}
bool
aot_compile_simd_f64x2_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
FloatArithmetic arith_op)
{
return simd_v128_float_arith(comp_ctx, func_ctx, arith_op,
V128_f64x2_TYPE);
}
static bool
simd_v128_float_neg(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
LLVMTypeRef vector_type)
{
LLVMValueRef number, result;
if (!(number = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"number"))) {
goto fail;
}
if (!(result = LLVMBuildFNeg(comp_ctx->builder, number, "neg"))) {
HANDLE_FAILURE("LLVMBuildFNeg");
goto fail;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
/* push result into the stack */
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_f32x4_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
return simd_v128_float_neg(comp_ctx, func_ctx, V128_f32x4_TYPE);
}
bool
aot_compile_simd_f64x2_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
return simd_v128_float_neg(comp_ctx, func_ctx, V128_f64x2_TYPE);
}
static bool
simd_v128_float_abs(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
LLVMTypeRef vector_type,
const char *intrinsic)
{
LLVMValueRef vector, result;
LLVMTypeRef param_types[1] = { vector_type };
if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"vec"))) {
goto fail;
}
if (!(result = aot_call_llvm_intrinsic(comp_ctx, intrinsic, vector_type,
param_types, 1, vector))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
/* push result into the stack */
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_f32x4_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
return simd_v128_float_abs(comp_ctx, func_ctx, V128_f32x4_TYPE,
"llvm.fabs.v4f32");
}
bool
aot_compile_simd_f64x2_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
return simd_v128_float_abs(comp_ctx, func_ctx, V128_f64x2_TYPE,
"llvm.fabs.v2f64");
}
static bool
simd_v128_float_sqrt(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
LLVMTypeRef vector_type,
const char *intrinsic)
{
LLVMValueRef number, result;
LLVMTypeRef param_types[1] = { vector_type };
if (!(number = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"number"))) {
goto fail;
}
if (!(result = aot_call_llvm_intrinsic(comp_ctx, intrinsic, vector_type,
param_types, 1, number))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
/* push result into the stack */
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_f32x4_sqrt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
return simd_v128_float_sqrt(comp_ctx, func_ctx, V128_f32x4_TYPE,
"llvm.sqrt.v4f32");
}
bool
aot_compile_simd_f64x2_sqrt(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
return simd_v128_float_sqrt(comp_ctx, func_ctx, V128_f64x2_TYPE,
"llvm.sqrt.v2f64");
}

View File

@ -0,0 +1,49 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SIMD_FLOATING_POINT_H_
#define _SIMD_FLOATING_POINT_H_
#include "../aot_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
aot_compile_simd_f32x4_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
FloatArithmetic arith_op);
bool
aot_compile_simd_f64x2_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
FloatArithmetic arith_op);
bool
aot_compile_simd_f32x4_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_simd_f64x2_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_simd_f32x4_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_simd_f64x2_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_simd_f32x4_sqrt(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx);
bool
aot_compile_simd_f64x2_sqrt(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _SIMD_FLOATING_POINT_H_ */

View File

@ -0,0 +1,207 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "simd_int_arith.h"
#include "simd_common.h"
#include "../aot_emit_exception.h"
#include "../../aot/aot_runtime.h"
static bool
simd_v128_integer_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op,
LLVMValueRef lhs,
LLVMValueRef rhs)
{
LLVMValueRef result;
switch (arith_op) {
case V128_ADD:
if (!(result = LLVMBuildAdd(comp_ctx->builder, lhs, rhs, "sum"))) {
HANDLE_FAILURE("LLVMBuildAdd");
goto fail;
}
break;
case V128_SUB:
if (!(result =
LLVMBuildSub(comp_ctx->builder, lhs, rhs, "difference"))) {
HANDLE_FAILURE("LLVMBuildSub");
goto fail;
}
break;
case V128_MUL:
if (!(result =
LLVMBuildMul(comp_ctx->builder, lhs, rhs, "product"))) {
HANDLE_FAILURE("LLVMBuildMul");
goto fail;
}
break;
case V128_NEG:
if (!(result = LLVMBuildNeg(comp_ctx->builder, lhs, "neg"))) {
HANDLE_FAILURE("LLVMBuildNeg");
goto fail;
}
break;
default:
result = NULL;
bh_assert(0);
break;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
/* push result into the stack */
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_i8x16_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op)
{
LLVMValueRef lhs, rhs;
if (!(rhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i8x16_TYPE,
"rhs"))) {
goto fail;
}
if (!(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i8x16_TYPE,
"lhs"))) {
goto fail;
}
return simd_v128_integer_arith(comp_ctx, func_ctx, arith_op, lhs, rhs);
fail:
return false;
}
bool
aot_compile_simd_i16x8_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op)
{
LLVMValueRef lhs, rhs;
if (!(rhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i16x8_TYPE,
"rhs"))) {
goto fail;
}
if (!(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i16x8_TYPE,
"lhs"))) {
goto fail;
}
return simd_v128_integer_arith(comp_ctx, func_ctx, arith_op, lhs, rhs);
fail:
return false;
}
bool
aot_compile_simd_i32x4_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op)
{
LLVMValueRef lhs, rhs;
if (!(rhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i32x4_TYPE,
"rhs"))) {
goto fail;
}
if (!(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, V128_i32x4_TYPE,
"lhs"))) {
goto fail;
}
return simd_v128_integer_arith(comp_ctx, func_ctx, arith_op, lhs, rhs);
fail:
return false;
}
bool
aot_compile_simd_i64x2_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op)
{
LLVMValueRef lhs, rhs;
POP_V128(rhs);
POP_V128(lhs);
return simd_v128_integer_arith(comp_ctx, func_ctx, arith_op, lhs, rhs);
fail:
return false;
}
bool
aot_compile_simd_i8x16_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
LLVMValueRef number;
if (!(number = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
V128_i8x16_TYPE, "number"))) {
goto fail;
}
return simd_v128_integer_arith(comp_ctx, func_ctx, V128_NEG, number, NULL);
fail:
return false;
}
bool
aot_compile_simd_i16x8_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
LLVMValueRef number;
if (!(number = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
V128_i16x8_TYPE, "number"))) {
goto fail;
}
return simd_v128_integer_arith(comp_ctx, func_ctx, V128_NEG, number, NULL);
fail:
return false;
}
bool
aot_compile_simd_i32x4_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
LLVMValueRef number;
if (!(number = simd_pop_v128_and_bitcast(comp_ctx, func_ctx,
V128_i32x4_TYPE, "number"))) {
goto fail;
}
return simd_v128_integer_arith(comp_ctx, func_ctx, V128_NEG, number, NULL);
fail:
return false;
}
bool
aot_compile_simd_i64x2_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
LLVMValueRef number;
POP_V128(number);
return simd_v128_integer_arith(comp_ctx, func_ctx, V128_NEG, number, NULL);
fail:
return false;
}

View File

@ -0,0 +1,51 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SIMD_INT_ARITH_H_
#define _SIMD_INT_ARITH_H_
#include "../aot_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
aot_compile_simd_i8x16_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic cond);
bool
aot_compile_simd_i16x8_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic cond);
bool
aot_compile_simd_i32x4_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic cond);
bool
aot_compile_simd_i64x2_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic cond);
bool
aot_compile_simd_i8x16_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_simd_i16x8_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_simd_i32x4_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_simd_i64x2_neg(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _SIMD_INT_ARITH_H_ */

View File

@ -0,0 +1,301 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "simd_load_store.h"
#include "../aot_emit_exception.h"
#include "../aot_emit_memory.h"
#include "../../aot/aot_runtime.h"
#include "../../interpreter/wasm_opcode.h"
/* data_length in bytes */
static LLVMValueRef
simd_load(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint32 align,
uint32 offset,
uint32 data_length,
LLVMTypeRef ptr_type)
{
LLVMValueRef maddr, data;
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset,
data_length))) {
HANDLE_FAILURE("aot_check_memory_overflow");
goto fail;
}
if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, ptr_type,
"data_ptr"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
if (!(data = LLVMBuildLoad(comp_ctx->builder, maddr, "data"))) {
HANDLE_FAILURE("LLVMBuildLoad");
goto fail;
}
LLVMSetAlignment(data, 1);
return data;
fail:
return NULL;
}
/* broadcast the scalar element into every lane of the target vector */
static LLVMValueRef
simd_splat(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
LLVMValueRef element,
LLVMTypeRef vector_type,
unsigned lane_count)
{
LLVMValueRef undef, zeros, vector;
LLVMTypeRef zeros_type;
if (!(undef = LLVMGetUndef(vector_type))) {
HANDLE_FAILURE("LLVMGetUndef");
goto fail;
}
if (!(zeros_type = LLVMVectorType(I32_TYPE, lane_count))) {
HANDLE_FAILURE("LVMVectorType");
goto fail;
}
if (!(zeros = LLVMConstNull(zeros_type))) {
HANDLE_FAILURE("LLVMConstNull");
goto fail;
}
if (!(vector = LLVMBuildInsertElement(comp_ctx->builder, undef, element,
I32_ZERO, "base"))) {
HANDLE_FAILURE("LLVMBuildInsertElement");
goto fail;
}
if (!(vector = LLVMBuildShuffleVector(comp_ctx->builder, vector, undef,
zeros, "vector"))) {
HANDLE_FAILURE("LLVMBuildShuffleVector");
goto fail;
}
return vector;
fail:
return NULL;
}
bool
aot_compile_simd_v128_load(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint32 align,
uint32 offset)
{
LLVMValueRef result;
if (!(result =
simd_load(comp_ctx, func_ctx, align, offset, 16, V128_PTR_TYPE))) {
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
bool
aot_compile_simd_v128_store(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint32 align,
uint32 offset)
{
LLVMValueRef maddr, value, result;
POP_V128(value);
if (!(maddr = aot_check_memory_overflow(comp_ctx, func_ctx, offset, 16)))
return false;
if (!(maddr = LLVMBuildBitCast(comp_ctx->builder, maddr, V128_PTR_TYPE,
"data_ptr"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
if (!(result = LLVMBuildStore(comp_ctx->builder, value, maddr))) {
HANDLE_FAILURE("LLVMBuildStore");
goto fail;
}
LLVMSetAlignment(result, 1);
return true;
fail:
return false;
}
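/* load_extend: load a 64-bit sub-vector (8x i8, 4x i16 or 2x i32)
from memory, then sign- or zero-extend every lane to twice its
width. */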
bool
aot_compile_simd_load_extend(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 load_opcode,
uint32 align,
uint32 offset)
{
LLVMValueRef sub_vector, result;
LLVMTypeRef sub_vector_type, vector_type;
bool is_signed;
uint32 data_length;
switch (load_opcode) {
case SIMD_i16x8_load8x8_s:
case SIMD_i16x8_load8x8_u:
{
data_length = 8;
vector_type = V128_i16x8_TYPE;
is_signed = (load_opcode == SIMD_i16x8_load8x8_s);
if (!(sub_vector_type = LLVMVectorType(INT8_TYPE, 8))) {
HANDLE_FAILURE("LLVMVectorType");
goto fail;
}
break;
}
case SIMD_i32x4_load16x4_s:
case SIMD_i32x4_load16x4_u:
{
data_length = 8;
vector_type = V128_i32x4_TYPE;
is_signed = (load_opcode == SIMD_i32x4_load16x4_s);
if (!(sub_vector_type = LLVMVectorType(INT16_TYPE, 4))) {
HANDLE_FAILURE("LLVMVectorType");
goto fail;
}
break;
}
case SIMD_i64x2_load32x2_s:
case SIMD_i64x2_load32x2_u:
{
data_length = 8;
vector_type = V128_i64x2_TYPE;
is_signed = (load_opcode == SIMD_i64x2_load32x2_s);
if (!(sub_vector_type = LLVMVectorType(I32_TYPE, 2))) {
HANDLE_FAILURE("LLVMVectorType");
goto fail;
}
break;
}
default:
{
bh_assert(0);
goto fail;
}
}
/* to vector ptr type */
if (!(sub_vector_type = LLVMPointerType(sub_vector_type, 0))) {
HANDLE_FAILURE("LLVMPointerType");
goto fail;
}
if (!(sub_vector = simd_load(comp_ctx, func_ctx, align, offset,
data_length, sub_vector_type))) {
goto fail;
}
if (is_signed) {
if (!(result = LLVMBuildSExt(comp_ctx->builder, sub_vector,
vector_type, "vector"))) {
HANDLE_FAILURE("LLVMBuildSExt");
goto fail;
}
}
else {
if (!(result = LLVMBuildZExt(comp_ctx->builder, sub_vector,
vector_type, "vector"))) {
HANDLE_FAILURE("LLVMBuildZExt");
goto fail;
}
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"result"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
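/*
 * Scalar reference for the load-extend shape above (illustrative sketch
 * of the i16x8.load8x8_s case; the helper name is hypothetical): an
 * 8-byte load yields eight narrow lanes, and LLVMBuildSExt (or
 * LLVMBuildZExt for the _u variants) widens each one.
 */
static void
ref_i16x8_load8x8_s(const int8 bytes[8], int16 out[8])
{
    unsigned i;
    for (i = 0; i < 8; i++)
        out[i] = (int16)bytes[i]; /* sign-extend each lane to 16 bits */
}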
bool
aot_compile_simd_load_splat(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 load_opcode,
uint32 align,
uint32 offset)
{
LLVMValueRef element, result;
LLVMTypeRef element_ptr_type, vector_type;
unsigned data_length, lane_count;
switch (load_opcode) {
case SIMD_v8x16_load_splat:
data_length = 1;
lane_count = 16;
element_ptr_type = INT8_PTR_TYPE;
vector_type = V128_i8x16_TYPE;
break;
case SIMD_v16x8_load_splat:
data_length = 2;
lane_count = 8;
element_ptr_type = INT16_PTR_TYPE;
vector_type = V128_i16x8_TYPE;
break;
case SIMD_v32x4_load_splat:
data_length = 4;
lane_count = 4;
element_ptr_type = INT32_PTR_TYPE;
vector_type = V128_i32x4_TYPE;
break;
case SIMD_v64x2_load_splat:
data_length = 8;
lane_count = 2;
element_ptr_type = INT64_PTR_TYPE;
vector_type = V128_i64x2_TYPE;
break;
default:
bh_assert(0);
goto fail;
}
if (!(element = simd_load(comp_ctx, func_ctx, align, offset, data_length,
element_ptr_type))) {
goto fail;
}
if (!(result = simd_splat(comp_ctx, func_ctx, element, vector_type,
lane_count))) {
goto fail;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"result"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
PUSH_V128(result);
return true;
fail:
return false;
}
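/*
 * Scalar reference for load_splat (illustrative sketch of the v32x4
 * case; the helper name is hypothetical): one element load from linear
 * memory, then the same broadcast that simd_splat() builds in IR.
 */
static void
ref_v32x4_load_splat(const uint32 *addr, uint32 out[4])
{
    uint32 element = *addr; /* single 32-bit load */
    unsigned lane;
    for (lane = 0; lane < 4; lane++)
        out[lane] = element;
}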

View File

@ -0,0 +1,45 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SIMD_LOAD_STORE_H_
#define _SIMD_LOAD_STORE_H_
#include "../aot_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
aot_compile_simd_v128_load(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint32 align,
uint32 offset);
bool
aot_compile_simd_v128_store(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint32 align,
uint32 offset);
bool
aot_compile_simd_load_extend(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 load_opcode,
uint32 align,
uint32 offset);
bool
aot_compile_simd_load_splat(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
uint8 load_opcode,
uint32 align,
uint32 offset);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _SIMD_LOAD_STORE_H_ */

View File

@ -0,0 +1,367 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "simd_sat_int_arith.h"
#include "simd_common.h"
#include "../aot_emit_exception.h"
#include "../../aot/aot_runtime.h"
static bool
simd_v128_integer_arith(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
LLVMTypeRef vector_type,
char *intrinsics_s_u[2],
bool is_signed)
{
LLVMValueRef lhs, rhs, result;
LLVMTypeRef param_types[2];
if (!(rhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"rhs"))) {
goto fail;
}
if (!(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"lhs"))) {
goto fail;
}
param_types[0] = vector_type;
param_types[1] = vector_type;
if (!(result = aot_call_llvm_intrinsic(
comp_ctx, is_signed ? intrinsics_s_u[0] : intrinsics_s_u[1],
vector_type, param_types, 2, lhs, rhs))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
    /* push the result onto the stack */
PUSH_V128(result);
return true;
fail:
return false;
}
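/*
 * Scalar reference for the saturating intrinsics selected by the callers
 * below (illustrative sketch of llvm.sadd.sat on one i8 lane; the helper
 * name is hypothetical): the sum is computed in a wider type, then
 * clamped to the lane's range instead of wrapping.
 */
static int8
ref_sadd_sat_i8(int8 a, int8 b)
{
    int32 sum = (int32)a + (int32)b;
    if (sum > 127)
        sum = 127; /* clamp to INT8_MAX */
    if (sum < -128)
        sum = -128; /* clamp to INT8_MIN */
    return (int8)sum;
}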
bool
aot_compile_simd_i8x16_saturate(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op,
bool is_signed)
{
char *intrinsics[2] = { 0 };
bool result = false;
switch (arith_op) {
case V128_ADD:
intrinsics[0] = "llvm.sadd.sat.v16i8";
intrinsics[1] = "llvm.uadd.sat.v16i8";
result = simd_v128_integer_arith(
comp_ctx, func_ctx, V128_i8x16_TYPE, intrinsics, is_signed);
break;
case V128_SUB:
intrinsics[0] = "llvm.ssub.sat.v16i8";
intrinsics[1] = "llvm.usub.sat.v16i8";
result = simd_v128_integer_arith(
comp_ctx, func_ctx, V128_i8x16_TYPE, intrinsics, is_signed);
break;
default:
bh_assert(0);
break;
}
return result;
}
bool
aot_compile_simd_i16x8_saturate(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op,
bool is_signed)
{
char *intrinsics[2] = { 0 };
bool result = false;
switch (arith_op) {
case V128_ADD:
intrinsics[0] = "llvm.sadd.sat.v8i16";
intrinsics[1] = "llvm.uadd.sat.v8i16";
result = simd_v128_integer_arith(
comp_ctx, func_ctx, V128_i16x8_TYPE, intrinsics, is_signed);
break;
case V128_SUB:
intrinsics[0] = "llvm.ssub.sat.v8i16";
intrinsics[1] = "llvm.usub.sat.v8i16";
result = simd_v128_integer_arith(
comp_ctx, func_ctx, V128_i16x8_TYPE, intrinsics, is_signed);
break;
default:
bh_assert(0);
break;
}
return result;
}
static bool
simd_v128_cmp(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
LLVMTypeRef vector_type,
V128Arithmetic arith_op,
bool is_signed)
{
LLVMValueRef lhs, rhs, result;
LLVMIntPredicate op;
if (!(rhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"rhs"))) {
goto fail;
}
if (!(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"lhs"))) {
goto fail;
}
if (V128_MIN == arith_op) {
op = is_signed ? LLVMIntSLT : LLVMIntULT;
}
else {
op = is_signed ? LLVMIntSGT : LLVMIntUGT;
}
if (!(result = LLVMBuildICmp(comp_ctx->builder, op, lhs, rhs, "cmp"))) {
HANDLE_FAILURE("LLVMBuildICmp");
goto fail;
}
if (!(result =
LLVMBuildSelect(comp_ctx->builder, result, lhs, rhs, "select"))) {
HANDLE_FAILURE("LLVMBuildSelect");
goto fail;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
    /* push the result onto the stack */
PUSH_V128(result);
return true;
fail:
return false;
}
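/*
 * Despite the _cmp naming, the icmp + select pair above implements
 * lane-wise min/max. Scalar reference (illustrative sketch, signed min
 * on one i8 lane; the helper name is hypothetical):
 */
static int8
ref_min_s_i8(int8 lhs, int8 rhs)
{
    /* LLVMIntSLT compare, then select(cond, lhs, rhs) */
    return lhs < rhs ? lhs : rhs;
}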
bool
aot_compile_simd_i8x16_cmp(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op,
bool is_signed)
{
return simd_v128_cmp(comp_ctx, func_ctx, V128_i8x16_TYPE, arith_op,
is_signed);
}
bool
aot_compile_simd_i16x8_cmp(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op,
bool is_signed)
{
return simd_v128_cmp(comp_ctx, func_ctx, V128_i16x8_TYPE, arith_op,
is_signed);
}
bool
aot_compile_simd_i32x4_cmp(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op,
bool is_signed)
{
return simd_v128_cmp(comp_ctx, func_ctx, V128_i32x4_TYPE, arith_op,
is_signed);
}
static bool
simd_v128_abs(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
LLVMTypeRef vector_type)
{
LLVMValueRef vector, negs, zeros, cond, result;
if (!(vector = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"vec"))) {
goto fail;
}
if (!(negs = LLVMBuildNeg(comp_ctx->builder, vector, "neg"))) {
HANDLE_FAILURE("LLVMBuildNeg");
goto fail;
}
if (!(zeros = LLVMConstNull(vector_type))) {
HANDLE_FAILURE("LLVMConstNull");
goto fail;
}
if (!(cond = LLVMBuildICmp(comp_ctx->builder, LLVMIntSGE, vector, zeros,
"ge_zero"))) {
HANDLE_FAILURE("LLVMBuildICmp");
goto fail;
}
if (!(result = LLVMBuildSelect(comp_ctx->builder, cond, vector, negs,
"select"))) {
HANDLE_FAILURE("LLVMBuildSelect");
goto fail;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
    /* push the result onto the stack */
PUSH_V128(result);
return true;
fail:
return false;
}
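/*
 * Scalar reference for the compare-and-select abs above (illustrative
 * sketch on one i32 lane; the helper name is hypothetical). As in wasm,
 * the minimum lane value maps to itself because negation wraps.
 */
static int32
ref_abs_i32(int32 v)
{
    /* negate through unsigned so INT32_MIN wraps back to itself */
    return v >= 0 ? v : (int32)(0u - (uint32)v);
}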
bool
aot_compile_simd_i8x16_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
return simd_v128_abs(comp_ctx, func_ctx, V128_i8x16_TYPE);
}
bool
aot_compile_simd_i16x8_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
return simd_v128_abs(comp_ctx, func_ctx, V128_i16x8_TYPE);
}
bool
aot_compile_simd_i32x4_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
return simd_v128_abs(comp_ctx, func_ctx, V128_i32x4_TYPE);
}
/* lane-wise unsigned rounding average: (v1 + v2 + 1) / 2 */
static bool
simd_v128_avg(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
              LLVMTypeRef vector_type,
              unsigned lane_count)
{
LLVMValueRef lhs, rhs, undef, zeros, ones, result;
LLVMTypeRef ext_type;
if (!(rhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"rhs"))) {
goto fail;
}
if (!(lhs = simd_pop_v128_and_bitcast(comp_ctx, func_ctx, vector_type,
"lhs"))) {
goto fail;
}
    if (!(ext_type = LLVMVectorType(I32_TYPE, lane_count))) {
HANDLE_FAILURE("LLVMVectorType");
goto fail;
}
if (!(lhs = LLVMBuildZExt(comp_ctx->builder, lhs, ext_type, "left_ext"))) {
HANDLE_FAILURE("LLVMBuildZExt");
goto fail;
}
if (!(rhs =
LLVMBuildZExt(comp_ctx->builder, rhs, ext_type, "right_ext"))) {
HANDLE_FAILURE("LLVMBuildZExt");
goto fail;
}
if (!(undef = LLVMGetUndef(ext_type))) {
HANDLE_FAILURE("LLVMGetUndef");
goto fail;
}
if (!(zeros = LLVMConstNull(ext_type))) {
HANDLE_FAILURE("LLVMConstNull");
goto fail;
}
if (!(ones = LLVMConstInt(I32_TYPE, 1, true))) {
HANDLE_FAILURE("LLVMConstInt");
goto fail;
}
if (!(ones = LLVMBuildInsertElement(comp_ctx->builder, undef, ones,
I32_ZERO, "base_ones"))) {
HANDLE_FAILURE("LLVMBuildInsertElement");
goto fail;
}
if (!(ones = LLVMBuildShuffleVector(comp_ctx->builder, ones, undef, zeros,
"ones"))) {
HANDLE_FAILURE("LLVMBuildShuffleVector");
goto fail;
}
if (!(result = LLVMBuildAdd(comp_ctx->builder, lhs, rhs, "a_add_b"))) {
HANDLE_FAILURE("LLVMBuildAdd");
goto fail;
}
if (!(result = LLVMBuildAdd(comp_ctx->builder, result, ones, "plus_1"))) {
HANDLE_FAILURE("LLVMBuildAdd");
goto fail;
}
if (!(result = LLVMBuildLShr(comp_ctx->builder, result, ones, "avg"))) {
HANDLE_FAILURE("LLVMBuildLShr");
goto fail;
}
if (!(result = LLVMBuildTrunc(comp_ctx->builder, result, vector_type,
"avg_trunc"))) {
HANDLE_FAILURE("LLVMBuildTrunc");
goto fail;
}
if (!(result = LLVMBuildBitCast(comp_ctx->builder, result, V128_i64x2_TYPE,
"ret"))) {
HANDLE_FAILURE("LLVMBuildBitCast");
goto fail;
}
    /* push the result onto the stack */
PUSH_V128(result);
return true;
fail:
return false;
}
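/*
 * Scalar reference for the rounding average above (illustrative sketch
 * on one u8 lane; the helper name is hypothetical): widening to i32
 * before the add is what keeps the +1 rounding bias from overflowing.
 */
static uint8
ref_avgr_u_i8(uint8 a, uint8 b)
{
    return (uint8)(((uint32)a + (uint32)b + 1) >> 1);
}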
bool
aot_compile_simd_i8x16_avgr_u(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx)
{
    return simd_v128_avg(comp_ctx, func_ctx, V128_i8x16_TYPE, 16);
}
bool
aot_compile_simd_i16x8_avgr_u(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx)
{
    return simd_v128_avg(comp_ctx, func_ctx, V128_i16x8_TYPE, 8);
}

View File

@ -0,0 +1,66 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _SIMD_SAT_INT_ARITH_H_
#define _SIMD_SAT_INT_ARITH_H_
#include "../aot_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
aot_compile_simd_i8x16_saturate(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op,
bool is_signed);
bool
aot_compile_simd_i16x8_saturate(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op,
bool is_signed);
bool
aot_compile_simd_i8x16_cmp(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op,
bool is_signed);
bool
aot_compile_simd_i16x8_cmp(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op,
bool is_signed);
bool
aot_compile_simd_i32x4_cmp(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
V128Arithmetic arith_op,
bool is_signed);
bool
aot_compile_simd_i8x16_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_simd_i16x8_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_simd_i32x4_abs(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool
aot_compile_simd_i8x16_avgr_u(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx);
bool
aot_compile_simd_i16x8_avgr_u(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _SIMD_SAT_INT_ARITH_H_ */