Implement XIP feature and enable ARC target support (#694)

Implement the XIP (execution in place) feature for AOT mode, which allows the AOT code to run directly from the AOT file, without mapping executable memory for the AOT code or applying relocations to the text section. Developers can generate the AOT file with wamrc using the "--enable-indirect-mode --disable-llvm-intrinsics" flags and then run it with iwasm using the "--xip" flag. Known issue: the text section may still contain relocations that access ".rodata"-like sections.

Also enable ARC target support for both interpreter mode and AOT mode.
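
A minimal usage sketch (module names below are placeholders, not part of this commit; only the flags come from the description above):

wamrc --enable-indirect-mode --disable-llvm-intrinsics -o app.aot app.wasm
iwasm --xip app.aot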

Signed-off-by: Wenyong Huang <wenyong.huang@intel.com>
Wenyong Huang
2021-08-12 17:44:39 +08:00
committed by GitHub
parent 8fd89bd415
commit db695fada4
44 changed files with 2613 additions and 263 deletions

View File

@ -0,0 +1,407 @@
/*
* Copyright (C) 2021 XiaoMi Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "aot_intrinsic.h"
typedef struct {
const char *llvm_intrinsic;
const char *native_intrinsic;
uint64 flag;
} aot_intrinsic;
static const aot_intrinsic g_intrinsic_mapping[] = {
{ "llvm.experimental.constrained.fadd.f32", "aot_intrinsic_fadd_f32",
AOT_INTRINSIC_FLAG_F32_FADD },
{ "llvm.experimental.constrained.fadd.f64", "aot_intrinsic_fadd_f64",
AOT_INTRINSIC_FLAG_F64_FADD },
{ "llvm.experimental.constrained.fsub.f32", "aot_intrinsic_fsub_f32",
AOT_INTRINSIC_FLAG_F32_FSUB },
{ "llvm.experimental.constrained.fsub.f64", "aot_intrinsic_fsub_f64",
AOT_INTRINSIC_FLAG_F64_FSUB },
{ "llvm.experimental.constrained.fmul.f32", "aot_intrinsic_fmul_f32",
AOT_INTRINSIC_FLAG_F32_FMUL },
{ "llvm.experimental.constrained.fmul.f64", "aot_intrinsic_fmul_f64",
AOT_INTRINSIC_FLAG_F64_FMUL },
{ "llvm.experimental.constrained.fdiv.f32", "aot_intrinsic_fdiv_f32",
AOT_INTRINSIC_FLAG_F32_FDIV },
{ "llvm.experimental.constrained.fdiv.f64", "aot_intrinsic_fdiv_f64",
AOT_INTRINSIC_FLAG_F64_FDIV },
{ "llvm.fabs.f32", "aot_intrinsic_fabs_f32", AOT_INTRINSIC_FLAG_F32_FABS },
{ "llvm.fabs.f64", "aot_intrinsic_fabs_f64", AOT_INTRINSIC_FLAG_F64_FABS },
{ "llvm.ceil.f32", "aot_intrinsic_ceil_f32", AOT_INTRINSIC_FLAG_F32_CEIL },
{ "llvm.ceil.f64", "aot_intrinsic_ceil_f64", AOT_INTRINSIC_FLAG_F64_CEIL },
{ "llvm.floor.f32", "aot_intrinsic_floor_f32",
AOT_INTRINSIC_FLAG_F32_FLOOR },
{ "llvm.floor.f64", "aot_intrinsic_floor_f64",
AOT_INTRINSIC_FLAG_F64_FLOOR },
{ "llvm.trunc.f32", "aot_intrinsic_trunc_f32",
AOT_INTRINSIC_FLAG_F32_TRUNC },
{ "llvm.trunc.f64", "aot_intrinsic_trunc_f64",
AOT_INTRINSIC_FLAG_F64_TRUNC },
{ "llvm.rint.f32", "aot_intrinsic_rint_f32", AOT_INTRINSIC_FLAG_F32_RINT },
{ "llvm.rint.f64", "aot_intrinsic_rint_f64", AOT_INTRINSIC_FLAG_F64_RINT },
{ "llvm.sqrt.f32", "aot_intrinsic_sqrt_f32", AOT_INTRINSIC_FLAG_F32_SQRT },
{ "llvm.sqrt.f64", "aot_intrinsic_sqrt_f64", AOT_INTRINSIC_FLAG_F64_SQRT },
{ "llvm.copysign.f32", "aot_intrinsic_copysign_f32",
AOT_INTRINSIC_FLAG_F32_COPYSIGN },
{ "llvm.copysign.f64", "aot_intrinsic_copysign_f64",
AOT_INTRINSIC_FLAG_F64_COPYSIGN },
{ "llvm.minnum.f32", "aot_intrinsic_fmin_f32", AOT_INTRINSIC_FLAG_F32_MIN },
{ "llvm.minnum.f64", "aot_intrinsic_fmin_f64", AOT_INTRINSIC_FLAG_F64_MIN },
{ "llvm.maxnum.f32", "aot_intrinsic_fmax_f32", AOT_INTRINSIC_FLAG_F32_MAX },
{ "llvm.maxnum.f64", "aot_intrinsic_fmax_f64", AOT_INTRINSIC_FLAG_F64_MAX },
{ "llvm.ctlz.i32", "aot_intrinsic_clz_i32", AOT_INTRINSIC_FLAG_I32_CLZ },
{ "llvm.ctlz.i64", "aot_intrinsic_clz_i64", AOT_INTRINSIC_FLAG_I64_CLZ },
{ "llvm.cttz.i32", "aot_intrinsic_ctz_i32", AOT_INTRINSIC_FLAG_I32_CTZ },
{ "llvm.cttz.i64", "aot_intrinsic_ctz_i64", AOT_INTRINSIC_FLAG_I64_CTZ },
{ "llvm.ctpop.i32", "aot_intrinsic_popcnt_i32", AOT_INTRINSIC_FLAG_I32_POPCNT },
{ "llvm.ctpop.i64", "aot_intrinsic_popcnt_i64", AOT_INTRINSIC_FLAG_I64_POPCNT },
};
static const uint32 g_intrinsic_count =
sizeof(g_intrinsic_mapping) / sizeof(aot_intrinsic);
float32
aot_intrinsic_fadd_f32(float32 a, float32 b)
{
return a + b;
}
float64
aot_intrinsic_fadd_f64(float64 a, float64 b)
{
return a + b;
}
float32
aot_intrinsic_fsub_f32(float32 a, float32 b)
{
return a - b;
}
float64
aot_intrinsic_fsub_f64(float64 a, float64 b)
{
return a - b;
}
float32
aot_intrinsic_fmul_f32(float32 a, float32 b)
{
return a * b;
}
float64
aot_intrinsic_fmul_f64(float64 a, float64 b)
{
return a * b;
}
float32
aot_intrinsic_fdiv_f32(float32 a, float32 b)
{
return a / b;
}
float64
aot_intrinsic_fdiv_f64(float64 a, float64 b)
{
return a / b;
}
float32
aot_intrinsic_fabs_f32(float32 a)
{
return (float32)fabs(a);
}
float64
aot_intrinsic_fabs_f64(float64 a)
{
return fabs(a);
}
float32
aot_intrinsic_ceil_f32(float32 a)
{
return (float32)ceilf(a);
}
float64
aot_intrinsic_ceil_f64(float64 a)
{
return ceil(a);
}
float32
aot_intrinsic_floor_f32(float32 a)
{
return (float32)floorf(a);
}
float64
aot_intrinsic_floor_f64(float64 a)
{
return floor(a);
}
float32
aot_intrinsic_trunc_f32(float32 a)
{
return (float32)trunc(a);
}
float64
aot_intrinsic_trunc_f64(float64 a)
{
return trunc(a);
}
float32
aot_intrinsic_rint_f32(float32 a)
{
return (float32)rint(a);
}
float64
aot_intrinsic_rint_f64(float64 a)
{
return rint(a);
}
float32
aot_intrinsic_sqrt_f32(float32 a)
{
return (float32)sqrt(a);
}
float64
aot_intrinsic_sqrt_f64(float64 a)
{
return sqrt(a);
}
float32
aot_intrinsic_copysign_f32(float32 a, float32 b)
{
return signbit(b) ? (float32)-fabs(a) : (float32)fabs(a);
}
float64
aot_intrinsic_copysign_f64(float64 a, float64 b)
{
return signbit(b) ? -fabs(a) : fabs(a);
}
float32
aot_intrinsic_fmin_f32(float32 a, float32 b)
{
if (isnan(a))
return a;
else if (isnan(b))
return b;
else
return (float32)fmin(a, b);
}
float64
aot_intrinsic_fmin_f64(float64 a, float64 b)
{
float64 c = fmin(a, b);
if (c == 0 && a == b)
return signbit(a) ? a : b;
return c;
}
float32
aot_intrinsic_fmax_f32(float32 a, float32 b)
{
if (isnan(a))
return a;
else if (isnan(b))
return b;
else
return (float32)fmax(a, b);
}
float64
aot_intrinsic_fmax_f64(float64 a, float64 b)
{
float64 c = fmax(a, b);
if (c == 0 && a == b)
return signbit(a) ? b : a;
return c;
}
uint32
aot_intrinsic_clz_i32(uint32 type)
{
uint32 num = 0;
if (type == 0)
return 32;
while (!(type & 0x80000000)) {
num++;
type <<= 1;
}
return num;
}
uint32
aot_intrinsic_clz_i64(uint64 type)
{
uint32 num = 0;
if (type == 0)
return 64;
while (!(type & 0x8000000000000000LL)) {
num++;
type <<= 1;
}
return num;
}
uint32
aot_intrinsic_ctz_i32(uint32 type)
{
uint32 num = 0;
if (type == 0)
return 32;
while (!(type & 1)) {
num++;
type >>= 1;
}
return num;
}
uint32
aot_intrinsic_ctz_i64(uint64 type)
{
uint32 num = 0;
if (type == 0)
return 64;
while (!(type & 1)) {
num++;
type >>= 1;
}
return num;
}
uint32
aot_intrinsic_popcnt_i32(uint32 u)
{
uint32 ret = 0;
while (u) {
u = (u & (u - 1));
ret++;
}
return ret;
}
uint32
aot_intrinsic_popcnt_i64(uint64 u)
{
uint32 ret = 0;
while (u) {
u = (u & (u - 1));
ret++;
}
return ret;
}
const char *
aot_intrinsic_get_symbol(const char *llvm_intrinsic)
{
uint32 cnt;
for (cnt = 0; cnt < g_intrinsic_count; cnt++) {
if (!strcmp(llvm_intrinsic, g_intrinsic_mapping[cnt].llvm_intrinsic)) {
return g_intrinsic_mapping[cnt].native_intrinsic;
}
}
return NULL;
}
#if WASM_ENABLE_WAMR_COMPILER != 0 || WASM_ENABLE_JIT != 0
static void
add_intrinsic_capability(AOTCompContext *comp_ctx, uint64 flag)
{
uint64 group = AOT_INTRINSIC_GET_GROUP_FROM_FLAG(flag);
if (group < sizeof(comp_ctx->flags) / sizeof(uint64)) {
comp_ctx->flags[group] |= flag;
}
else {
bh_log(BH_LOG_LEVEL_WARNING, __FILE__, __LINE__,
"intrinsic exceeds max limit.");
}
}
static void
add_f32_common_intrinsics_for_thumb2_fpu(AOTCompContext *comp_ctx)
{
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F32_FABS);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F32_FADD);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F32_FSUB);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F32_FMUL);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F32_FDIV);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F32_SQRT);
}
static void
add_f64_common_intrinsics_for_thumb2_fpu(AOTCompContext *comp_ctx)
{
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F64_FABS);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F64_FADD);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F64_FSUB);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F64_FMUL);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F64_FDIV);
add_intrinsic_capability(comp_ctx, AOT_INTRINSIC_FLAG_F64_SQRT);
}
bool
aot_intrinsic_check_capability(const AOTCompContext *comp_ctx,
const char *llvm_intrinsic)
{
uint32 cnt;
uint64 flag;
uint64 group;
for (cnt = 0; cnt < g_intrinsic_count; cnt++) {
if (!strcmp(llvm_intrinsic, g_intrinsic_mapping[cnt].llvm_intrinsic)) {
flag = g_intrinsic_mapping[cnt].flag;
group = AOT_INTRINSIC_GET_GROUP_FROM_FLAG(flag);
flag &= AOT_INTRINSIC_FLAG_MASK;
if (group < sizeof(comp_ctx->flags) / sizeof(uint64)) {
if (comp_ctx->flags[group] & flag) {
return true;
}
}
else {
bh_log(BH_LOG_LEVEL_WARNING, __FILE__, __LINE__,
"intrinsic exceeds max limit.");
}
}
}
return false;
}
void
aot_intrinsic_fill_capability_flags(AOTCompContext *comp_ctx)
{
memset(comp_ctx->flags, 0, sizeof(comp_ctx->flags));
if (!comp_ctx->target_cpu)
return;
if (!strncmp(comp_ctx->target_arch, "thumb", 5)) {
if (!strcmp(comp_ctx->target_cpu, "cortex-m4")) {
add_f32_common_intrinsics_for_thumb2_fpu(comp_ctx);
}
else if (!strcmp(comp_ctx->target_cpu, "cortex-m7")) {
add_f32_common_intrinsics_for_thumb2_fpu(comp_ctx);
add_f64_common_intrinsics_for_thumb2_fpu(comp_ctx);
}
}
}
#endif /* WASM_ENABLE_WAMR_COMPILER != 0 || WASM_ENABLE_JIT != 0 */
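
Note: the mapping table above ties each LLVM intrinsic name to a runtime helper defined in this file. A minimal usage sketch (illustrative only, not part of this commit):

/* Sketch: resolve the runtime helper that replaces an LLVM intrinsic
 * the target cannot lower itself. */
#include <stdio.h>
#include "aot_intrinsic.h"

static void
print_intrinsic_mapping(void)
{
    const char *sym = aot_intrinsic_get_symbol("llvm.sqrt.f32");
    /* Prints "aot_intrinsic_sqrt_f32"; in indirect mode wamrc records this
     * name in the AOT file's native-symbol custom section and the loader
     * resolves it at load time through get_target_symbol_map(). */
    printf("%s\n", sym ? sym : "(no mapping)");
}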

View File

@ -0,0 +1,180 @@
/*
* Copyright (C) 2021 XiaoMi Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _AOT_INTRINSIC_H
#define _AOT_INTRINSIC_H
#if WASM_ENABLE_WAMR_COMPILER != 0
#include "aot_llvm.h"
#endif
#include "aot_runtime.h"
#ifdef __cplusplus
extern "C" {
#endif
#define AOT_INTRINSIC_GROUPS 2
/* Use uint64 as flag container:
* - The upper 16 bits are the intrinsic group number
* - The lower 48 bits are the intrinsic capability mask
*/
#define AOT_INTRINSIC_FLAG(group, value) \
((((uint64)(group & 0xffffLL)) << 48) | (uint64)value)
#define AOT_INTRINSIC_FLAG_MASK (0x0000ffffffffffffLL)
#define AOT_INTRINSIC_GET_GROUP_FROM_FLAG(flag) \
((((uint64)flag) >> 48) & 0xffffLL)
#define AOT_INTRINSIC_FLAG_F32_FADD AOT_INTRINSIC_FLAG(0, 0x000000000001)
#define AOT_INTRINSIC_FLAG_F32_FSUB AOT_INTRINSIC_FLAG(0, 0x000000000002)
#define AOT_INTRINSIC_FLAG_F32_FMUL AOT_INTRINSIC_FLAG(0, 0x000000000004)
#define AOT_INTRINSIC_FLAG_F32_FDIV AOT_INTRINSIC_FLAG(0, 0x000000000008)
#define AOT_INTRINSIC_FLAG_F32_FABS AOT_INTRINSIC_FLAG(0, 0x000000000010)
#define AOT_INTRINSIC_FLAG_F32_CEIL AOT_INTRINSIC_FLAG(0, 0x000000000020)
#define AOT_INTRINSIC_FLAG_F32_FLOOR AOT_INTRINSIC_FLAG(0, 0x000000000040)
#define AOT_INTRINSIC_FLAG_F32_TRUNC AOT_INTRINSIC_FLAG(0, 0x000000000080)
#define AOT_INTRINSIC_FLAG_F32_RINT AOT_INTRINSIC_FLAG(0, 0x000000000100)
#define AOT_INTRINSIC_FLAG_F32_SQRT AOT_INTRINSIC_FLAG(0, 0x000000000200)
#define AOT_INTRINSIC_FLAG_F32_COPYSIGN AOT_INTRINSIC_FLAG(0, 0x000000000400)
#define AOT_INTRINSIC_FLAG_F32_MIN AOT_INTRINSIC_FLAG(0, 0x000000000800)
#define AOT_INTRINSIC_FLAG_F32_MAX AOT_INTRINSIC_FLAG(0, 0x000000001000)
#define AOT_INTRINSIC_FLAG_I32_CLZ AOT_INTRINSIC_FLAG(0, 0x000000002000)
#define AOT_INTRINSIC_FLAG_I32_CTZ AOT_INTRINSIC_FLAG(0, 0x000000004000)
#define AOT_INTRINSIC_FLAG_I32_POPCNT AOT_INTRINSIC_FLAG(0, 0x000000008000)
#define AOT_INTRINSIC_FLAG_F64_FADD AOT_INTRINSIC_FLAG(1, 0x000000000001)
#define AOT_INTRINSIC_FLAG_F64_FSUB AOT_INTRINSIC_FLAG(1, 0x000000000002)
#define AOT_INTRINSIC_FLAG_F64_FMUL AOT_INTRINSIC_FLAG(1, 0x000000000004)
#define AOT_INTRINSIC_FLAG_F64_FDIV AOT_INTRINSIC_FLAG(1, 0x000000000008)
#define AOT_INTRINSIC_FLAG_F64_FABS AOT_INTRINSIC_FLAG(1, 0x000000000010)
#define AOT_INTRINSIC_FLAG_F64_CEIL AOT_INTRINSIC_FLAG(1, 0x000000000020)
#define AOT_INTRINSIC_FLAG_F64_FLOOR AOT_INTRINSIC_FLAG(1, 0x000000000040)
#define AOT_INTRINSIC_FLAG_F64_TRUNC AOT_INTRINSIC_FLAG(1, 0x000000000080)
#define AOT_INTRINSIC_FLAG_F64_RINT AOT_INTRINSIC_FLAG(1, 0x000000000100)
#define AOT_INTRINSIC_FLAG_F64_SQRT AOT_INTRINSIC_FLAG(1, 0x000000000200)
#define AOT_INTRINSIC_FLAG_F64_COPYSIGN AOT_INTRINSIC_FLAG(1, 0x000000000400)
#define AOT_INTRINSIC_FLAG_F64_MIN AOT_INTRINSIC_FLAG(1, 0x000000000800)
#define AOT_INTRINSIC_FLAG_F64_MAX AOT_INTRINSIC_FLAG(1, 0x000000001000)
#define AOT_INTRINSIC_FLAG_I64_CLZ AOT_INTRINSIC_FLAG(1, 0x000000002000)
#define AOT_INTRINSIC_FLAG_I64_CTZ AOT_INTRINSIC_FLAG(1, 0x000000004000)
#define AOT_INTRINSIC_FLAG_I64_POPCNT AOT_INTRINSIC_FLAG(1, 0x000000008000)
float32
aot_intrinsic_fadd_f32(float32 a, float32 b);
float64
aot_intrinsic_fadd_f64(float64 a, float64 b);
float32
aot_intrinsic_fsub_f32(float32 a, float32 b);
float64
aot_intrinsic_fsub_f64(float64 a, float64 b);
float32
aot_intrinsic_fmul_f32(float32 a, float32 b);
float64
aot_intrinsic_fmul_f64(float64 a, float64 b);
float32
aot_intrinsic_fdiv_f32(float32 a, float32 b);
float64
aot_intrinsic_fdiv_f64(float64 a, float64 b);
float32
aot_intrinsic_fabs_f32(float32 a);
float64
aot_intrinsic_fabs_f64(float64 a);
float32
aot_intrinsic_ceil_f32(float32 a);
float64
aot_intrinsic_ceil_f64(float64 a);
float32
aot_intrinsic_floor_f32(float32 a);
float64
aot_intrinsic_floor_f64(float64 a);
float32
aot_intrinsic_trunc_f32(float32 a);
float64
aot_intrinsic_trunc_f64(float64 a);
float32
aot_intrinsic_rint_f32(float32 a);
float64
aot_intrinsic_rint_f64(float64 a);
float32
aot_intrinsic_sqrt_f32(float32 a);
float64
aot_intrinsic_sqrt_f64(float64 a);
float32
aot_intrinsic_copysign_f32(float32 a, float32 b);
float64
aot_intrinsic_copysign_f64(float64 a, float64 b);
float32
aot_intrinsic_fmin_f32(float32 a, float32 b);
float64
aot_intrinsic_fmin_f64(float64 a, float64 b);
float32
aot_intrinsic_fmax_f32(float32 a, float32 b);
float64
aot_intrinsic_fmax_f64(float64 a, float64 b);
uint32
aot_intrinsic_clz_i32(uint32 type);
uint32
aot_intrinsic_clz_i64(uint64 type);
uint32
aot_intrinsic_ctz_i32(uint32 type);
uint32
aot_intrinsic_ctz_i64(uint64 type);
uint32
aot_intrinsic_popcnt_i32(uint32 u);
uint32
aot_intrinsic_popcnt_i64(uint64 u);
const char *
aot_intrinsic_get_symbol(const char *llvm_intrinsic);
#if WASM_ENABLE_WAMR_COMPILER != 0 || WASM_ENABLE_JIT != 0
bool
aot_intrinsic_check_capability(const AOTCompContext *comp_ctx,
const char *llvm_intrinsic);
void
aot_intrinsic_fill_capability_flags(AOTCompContext *comp_ctx);
#endif
#ifdef __cplusplus
}
#endif
#endif /* end of _AOT_INTRINSIC_H */
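
Note: the flag macros above pack a group index into the upper 16 bits and a capability bit into the lower 48 bits of a uint64. A minimal decoding sketch that mirrors the check in aot_intrinsic_check_capability() (illustrative only, not part of this commit):

#include <stdbool.h>
#include "aot_intrinsic.h"

/* Sketch: true when the per-group capability flags mark this intrinsic
 * as supported (flags would typically be comp_ctx->flags). */
static bool
capability_enabled(const uint64 *flags, uint64 flag)
{
    uint64 group = AOT_INTRINSIC_GET_GROUP_FROM_FLAG(flag); /* upper 16 bits */
    uint64 bit = flag & AOT_INTRINSIC_FLAG_MASK;            /* lower 48 bits */

    return group < AOT_INTRINSIC_GROUPS && (flags[group] & bit) != 0;
}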

View File

@ -181,6 +181,8 @@ GET_U64_FROM_ADDR(uint32 *addr)
#define E_MACHINE_IA_64 50 /* Intel Merced */
#define E_MACHINE_MIPS_X 51 /* Stanford MIPS-X */
#define E_MACHINE_X86_64 62 /* AMD x86-64 architecture */
#define E_MACHINE_ARC_COMPACT 93 /* ARC International ARCompact */
#define E_MACHINE_ARC_COMPACT2 195 /* Synopsys ARCompact V2 */
#define E_MACHINE_XTENSA 94 /* Tensilica Xtensa Architecture */
#define E_MACHINE_RISCV 243 /* RISC-V 32/64 */
#define E_MACHINE_WIN_X86_64 0x8664 /* Windows x86-64 architecture */
@ -261,6 +263,10 @@ get_aot_file_target(AOTTargetInfo *target_info,
case E_MACHINE_RISCV:
machine_type = "riscv";
break;
case E_MACHINE_ARC_COMPACT:
case E_MACHINE_ARC_COMPACT2:
machine_type = "arc";
break;
default:
set_error_buf_v(error_buf, error_buf_size,
"unknown machine type %d",
@ -368,6 +374,90 @@ fail:
return false;
}
static void *
get_native_symbol_by_name(const char *name)
{
void *func = NULL;
uint32 symnum = 0;
SymbolMap *sym = NULL;
sym = get_target_symbol_map(&symnum);
while (symnum--) {
if (strcmp(sym->symbol_name, name) == 0) {
func = sym->symbol_addr;
break;
}
sym++;
}
return func;
}
static bool
load_native_symbol_section(const uint8 *buf, const uint8 *buf_end,
AOTModule *module,
char *error_buf, uint32 error_buf_size)
{
const uint8 *p = buf, *p_end = buf_end;
uint32 cnt;
int32 i;
const char *symbol;
read_uint32(p, p_end, cnt);
module->native_symbol_count = cnt;
if (cnt > 0) {
module->native_symbol_list = wasm_runtime_malloc(cnt * sizeof(void *));
if (module->native_symbol_list == NULL) {
set_error_buf(error_buf, error_buf_size,
"malloc native symbol list failed");
goto fail;
}
for (i = cnt - 1; i >= 0; i--) {
read_string(p, p_end, symbol);
module->native_symbol_list[i] = get_native_symbol_by_name(symbol);
if (module->native_symbol_list[i] == NULL) {
set_error_buf_v(error_buf, error_buf_size,
"missing native symbol: %s", symbol);
goto fail;
}
}
}
return true;
fail:
return false;
}
static bool
load_custom_section(const uint8 *buf, const uint8 *buf_end,
AOTModule *module,
char *error_buf, uint32 error_buf_size)
{
const uint8 *p = buf, *p_end = buf_end;
uint32 sub_section_type;
read_uint32(p, p_end, sub_section_type);
buf = p;
switch (sub_section_type) {
case AOT_CUSTOM_SECTION_NATIVE_SYMBOL:
if (!load_native_symbol_section(buf, buf_end, module,
error_buf, error_buf_size))
goto fail;
break;
default:
break;
}
return true;
fail:
return false;
}
static void
destroy_import_memories(AOTImportMemory *import_memories,
bool is_jit_mode)
@ -1167,8 +1257,8 @@ load_text_section(const uint8 *buf, const uint8 *buf_end,
module->code = (void*)(buf + module->literal_size);
module->code_size = (uint32)(buf_end - (uint8*)module->code);
if (module->code_size > 0) {
plt_base = (uint8*)buf_end - get_plt_table_size();
if ((module->code_size > 0) && (module->native_symbol_count == 0)) {
plt_base = (uint8 *)buf_end - get_plt_table_size();
init_plt_table(plt_base);
}
return true;
@ -1920,6 +2010,13 @@ load_relocation_section(const uint8 *buf, const uint8 *buf_end,
|| !strcmp(group->section_name, ".text")
#endif
) {
if (module->native_symbol_count > 0) {
set_error_buf(error_buf, error_buf_size,
"cannot apply relocation to text section "
"for aot file generated with "
"\"--enable-indirect-mode\" flag");
goto fail;
}
if (!do_text_relocation(module, group, error_buf, error_buf_size))
goto fail;
}
@ -1993,7 +2090,8 @@ load_from_sections(AOTModule *module, AOTSection *sections,
if ((last_section_type == (uint32)-1
&& section_type != AOT_SECTION_TYPE_TARGET_INFO)
|| (last_section_type != (uint32)-1
&& section_type != last_section_type + 1)) {
&& (section_type != last_section_type + 1
&& section_type != AOT_SECTION_TYPE_CUSTOM))) {
set_error_buf(error_buf, error_buf_size,
"invalid section order");
return false;
@ -2030,6 +2128,11 @@ load_from_sections(AOTModule *module, AOTSection *sections,
error_buf, error_buf_size))
return false;
break;
case AOT_SECTION_TYPE_CUSTOM:
if (!load_custom_section(buf, buf_end, module,
error_buf, error_buf_size))
return false;
break;
default:
set_error_buf(error_buf, error_buf_size,
"invalid aot section type");
@ -2039,7 +2142,8 @@ load_from_sections(AOTModule *module, AOTSection *sections,
section = section->next;
}
if (last_section_type != AOT_SECTION_TYPE_RELOCATION) {
if (last_section_type != AOT_SECTION_TYPE_RELOCATION
&& last_section_type != AOT_SECTION_TYPE_CUSTOM) {
set_error_buf(error_buf, error_buf_size,
"section missing");
return false;
@ -2208,21 +2312,69 @@ destroy_sections(AOTSection *section_list, bool destroy_aot_text)
}
static bool
create_sections(const uint8 *buf, uint32 size,
resolve_native_symbols(const uint8 *buf, uint32 size, uint32 *p_count,
char *error_buf, uint32 error_buf_size)
{
const uint8 *p = buf, *p_end = buf + size;
uint32 section_type;
uint32 section_size = 0;
p += 8;
while (p < p_end) {
read_uint32(p, p_end, section_type);
if (section_type <= AOT_SECTION_TYPE_SIGANATURE
|| section_type == AOT_SECTION_TYPE_CUSTOM) {
read_uint32(p, p_end, section_size);
CHECK_BUF(p, p_end, section_size);
if (section_type == AOT_SECTION_TYPE_CUSTOM) {
read_uint32(p, p_end, section_type);
if (section_type == AOT_CUSTOM_SECTION_NATIVE_SYMBOL) {
/* Read the count of native symbol */
read_uint32(p, p_end, *p_count);
return true;
}
p -= sizeof(uint32);
}
}
else if (section_type > AOT_SECTION_TYPE_SIGANATURE) {
set_error_buf(error_buf, error_buf_size,
"resolve native symbol failed");
break;
}
p += section_size;
}
return true;
fail:
return false;
}
static bool
create_sections(AOTModule *module,
const uint8 *buf, uint32 size,
AOTSection **p_section_list,
char *error_buf, uint32 error_buf_size)
{
AOTSection *section_list = NULL, *section_list_end = NULL, *section;
const uint8 *p = buf, *p_end = buf + size;
bool destory_aot_text = false;
uint32 native_symbol_count = 0;
uint32 section_type;
uint32 section_size;
uint64 total_size;
uint8 *aot_text;
if (!resolve_native_symbols(buf, size, &native_symbol_count,
error_buf, error_buf_size)) {
goto fail;
}
module->native_symbol_count = native_symbol_count;
p += 8;
while (p < p_end) {
read_uint32(p, p_end, section_type);
if (section_type < AOT_SECTION_TYPE_SIGANATURE) {
if (section_type < AOT_SECTION_TYPE_SIGANATURE
|| section_type == AOT_SECTION_TYPE_CUSTOM) {
read_uint32(p, p_end, section_size);
CHECK_BUF(p, p_end, section_size);
@ -2238,7 +2390,7 @@ create_sections(const uint8 *buf, uint32 size,
section->section_body_size = section_size;
if (section_type == AOT_SECTION_TYPE_TEXT) {
if (section_size > 0) {
if ((section_size > 0) && (native_symbol_count == 0)) {
int map_prot = MMAP_PROT_READ | MMAP_PROT_WRITE
| MMAP_PROT_EXEC;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) \
@ -2270,6 +2422,7 @@ create_sections(const uint8 *buf, uint32 size,
bh_memcpy_s(aot_text, (uint32)total_size,
section->section_body, (uint32)section_size);
section->section_body = aot_text;
destory_aot_text = true;
if ((uint32)total_size > section->section_body_size) {
memset(aot_text + (uint32)section_size,
@ -2277,8 +2430,6 @@ create_sections(const uint8 *buf, uint32 size,
section->section_body_size = (uint32)total_size;
}
}
else
section->section_body = NULL;
}
if (!section_list)
@ -2307,7 +2458,7 @@ create_sections(const uint8 *buf, uint32 size,
return true;
fail:
if (section_list)
destroy_sections(section_list, true);
destroy_sections(section_list, destory_aot_text);
return false;
}
@ -2333,14 +2484,16 @@ load(const uint8 *buf, uint32 size, AOTModule *module,
return false;
}
if (!create_sections(buf, size, &section_list, error_buf, error_buf_size))
if (!create_sections(module, buf, size, &section_list,
error_buf, error_buf_size))
return false;
ret = load_from_sections(module, section_list, error_buf, error_buf_size);
if (!ret) {
/* If load_from_sections() fails, then aot text is destroyed
in destroy_sections() */
destroy_sections(section_list, true);
destroy_sections(section_list,
module->native_symbol_count == 0 ? true : false);
/* aot_unload() won't destroy aot text again */
module->code = NULL;
}
@ -2616,6 +2769,9 @@ aot_unload(AOTModule *module)
module->mem_init_data_count,
module->is_jit_mode);
if (module->native_symbol_list)
wasm_runtime_free(module->native_symbol_list);
if (module->import_tables)
destroy_import_tables(module->import_tables,
module->is_jit_mode);
@ -2658,7 +2814,7 @@ aot_unload(AOTModule *module)
if (module->const_str_set)
bh_hash_map_destroy(module->const_str_set);
if (module->code) {
if (module->code && (module->native_symbol_count == 0)) {
/* The layout is: literal size + literal + code (with plt table) */
uint8 *mmap_addr = module->literal - sizeof(uint32);
uint32 total_size = sizeof(uint32)

View File

@ -4,6 +4,7 @@
*/
#include "aot_runtime.h"
#include "aot_intrinsic.h"
typedef struct {
const char *symbol_name;
@ -48,6 +49,40 @@ typedef struct {
#define REG_AOT_TRACE_SYM()
#endif
#define REG_INTRINSIC_SYM() \
REG_SYM(aot_intrinsic_fabs_f32), \
REG_SYM(aot_intrinsic_fabs_f64), \
REG_SYM(aot_intrinsic_floor_f32), \
REG_SYM(aot_intrinsic_floor_f64), \
REG_SYM(aot_intrinsic_ceil_f32), \
REG_SYM(aot_intrinsic_ceil_f64), \
REG_SYM(aot_intrinsic_trunc_f32), \
REG_SYM(aot_intrinsic_trunc_f64), \
REG_SYM(aot_intrinsic_rint_f32), \
REG_SYM(aot_intrinsic_rint_f64), \
REG_SYM(aot_intrinsic_sqrt_f32), \
REG_SYM(aot_intrinsic_sqrt_f64), \
REG_SYM(aot_intrinsic_copysign_f32), \
REG_SYM(aot_intrinsic_copysign_f64), \
REG_SYM(aot_intrinsic_fadd_f32), \
REG_SYM(aot_intrinsic_fadd_f64), \
REG_SYM(aot_intrinsic_fsub_f32), \
REG_SYM(aot_intrinsic_fsub_f64), \
REG_SYM(aot_intrinsic_fmul_f32), \
REG_SYM(aot_intrinsic_fmul_f64), \
REG_SYM(aot_intrinsic_fdiv_f32), \
REG_SYM(aot_intrinsic_fdiv_f64), \
REG_SYM(aot_intrinsic_fmin_f32), \
REG_SYM(aot_intrinsic_fmin_f64), \
REG_SYM(aot_intrinsic_fmax_f32), \
REG_SYM(aot_intrinsic_fmax_f64), \
REG_SYM(aot_intrinsic_clz_i32), \
REG_SYM(aot_intrinsic_clz_i64), \
REG_SYM(aot_intrinsic_ctz_i32), \
REG_SYM(aot_intrinsic_ctz_i64), \
REG_SYM(aot_intrinsic_popcnt_i32), \
REG_SYM(aot_intrinsic_popcnt_i64), \
#define REG_COMMON_SYMBOLS \
REG_SYM(aot_set_exception_with_id), \
REG_SYM(aot_invoke_native), \
@ -71,7 +106,8 @@ typedef struct {
REG_BULK_MEMORY_SYM() \
REG_ATOMIC_WAIT_SYM() \
REG_REF_TYPES_SYM() \
REG_AOT_TRACE_SYM()
REG_AOT_TRACE_SYM() \
REG_INTRINSIC_SYM() \
#define CHECK_RELOC_OFFSET(data_size) do { \
if (!check_reloc_offset(target_section_size, reloc_offset, data_size, \

View File

@ -2233,7 +2233,7 @@ aot_invoke_native(WASMExecEnv *exec_env, uint32 func_idx,
AOTImportFunc *import_func;
const char *signature;
void *attachment;
char buf[128];
char buf[96];
bh_assert(func_idx < aot_module->import_func_count);
@ -2282,7 +2282,7 @@ aot_call_indirect(WASMExecEnv *exec_env,
AOTImportFunc *import_func;
const char *signature = NULL;
void *attachment = NULL;
char buf[128];
char buf[96];
bool ret;
/* this function is called from native code, so exec_env->handle and

View File

@ -40,14 +40,20 @@ typedef enum AOTExceptionID {
typedef enum AOTSectionType {
AOT_SECTION_TYPE_TARGET_INFO = 0,
AOT_SECTION_TYPE_INIT_DATA,
AOT_SECTION_TYPE_TEXT,
AOT_SECTION_TYPE_FUNCTION,
AOT_SECTION_TYPE_EXPORT,
AOT_SECTION_TYPE_RELOCATION,
AOT_SECTION_TYPE_SIGANATURE
AOT_SECTION_TYPE_INIT_DATA = 1,
AOT_SECTION_TYPE_TEXT = 2,
AOT_SECTION_TYPE_FUNCTION = 3,
AOT_SECTION_TYPE_EXPORT = 4,
AOT_SECTION_TYPE_RELOCATION = 5,
AOT_SECTION_TYPE_SIGANATURE = 6,
AOT_SECTION_TYPE_CUSTOM = 100,
} AOTSectionType;
typedef enum AOTCustomSectionType {
AOT_CUSTOM_SECTION_NATIVE_SYMBOL = 1,
AOT_CUSTOM_SECTION_ACCESS_CONTROL = 2,
} AOTCustomSectionType;
typedef struct AOTObjectDataSection {
char *name;
uint8 *data;
@ -125,6 +131,10 @@ typedef struct AOTModule {
uint32 mem_init_data_count;
AOTMemInitData **mem_init_data_list;
/* native symbol */
uint32 native_symbol_count;
void **native_symbol_list;
/* import tables */
uint32 import_table_count;
AOTImportTable *import_tables;

View File

@ -0,0 +1,237 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "aot_reloc.h"
#define R_ARC_S21H_PCREL 14
#define R_ARC_S21W_PCREL 15
#define R_ARC_S25H_PCREL 16
#define R_ARC_S25W_PCREL 17
#define R_ARC_32 4
#define R_ARC_32_ME 27
void __st_r13_to_r15();
void __st_r13_to_r16();
void __st_r13_to_r17();
void __st_r13_to_r18();
void __st_r13_to_r19();
void __st_r13_to_r20();
void __st_r13_to_r21();
void __st_r13_to_r22();
void __st_r13_to_r23();
void __st_r13_to_r24();
void __st_r13_to_r25();
void __ld_r13_to_r15();
void __ld_r13_to_r16();
void __ld_r13_to_r17();
void __ld_r13_to_r18();
void __ld_r13_to_r19();
void __ld_r13_to_r20();
void __ld_r13_to_r21();
void __ld_r13_to_r22();
void __ld_r13_to_r23();
void __ld_r13_to_r24();
void __ld_r13_to_r25();
void __adddf3();
void __addsf3();
void __divdf3();
void __divdi3();
void __divsf3();
void __divsi3();
void __eqsf2();
void __extendsfdf2();
void __fixdfsi();
void __floatsidf();
void __floatsisf();
void __gedf2();
void __gtdf2();
void __ledf2();
void __lesf2();
void __ltdf2();
void __muldf3();
void __mulsf3();
void __subdf3();
void __subsf3();
void __truncdfsf2();
void __unorddf2();
static SymbolMap target_sym_map[] = {
REG_COMMON_SYMBOLS
REG_SYM(__st_r13_to_r15),
REG_SYM(__st_r13_to_r16),
REG_SYM(__st_r13_to_r17),
REG_SYM(__st_r13_to_r18),
REG_SYM(__st_r13_to_r19),
REG_SYM(__st_r13_to_r20),
REG_SYM(__st_r13_to_r21),
REG_SYM(__st_r13_to_r22),
REG_SYM(__st_r13_to_r23),
REG_SYM(__st_r13_to_r24),
REG_SYM(__st_r13_to_r25),
REG_SYM(__ld_r13_to_r15),
REG_SYM(__ld_r13_to_r16),
REG_SYM(__ld_r13_to_r17),
REG_SYM(__ld_r13_to_r18),
REG_SYM(__ld_r13_to_r19),
REG_SYM(__ld_r13_to_r20),
REG_SYM(__ld_r13_to_r21),
REG_SYM(__ld_r13_to_r22),
REG_SYM(__ld_r13_to_r23),
REG_SYM(__ld_r13_to_r24),
REG_SYM(__ld_r13_to_r25),
REG_SYM(__adddf3),
REG_SYM(__addsf3),
REG_SYM(__divdf3),
REG_SYM(__divdi3),
REG_SYM(__divsf3),
REG_SYM(__divsi3),
REG_SYM(__eqsf2),
REG_SYM(__extendsfdf2),
REG_SYM(__fixdfsi),
REG_SYM(__floatsidf),
REG_SYM(__floatsisf),
REG_SYM(__gedf2),
REG_SYM(__gtdf2),
REG_SYM(__ledf2),
REG_SYM(__lesf2),
REG_SYM(__ltdf2),
REG_SYM(__muldf3),
REG_SYM(__mulsf3),
REG_SYM(__subdf3),
REG_SYM(__subsf3),
REG_SYM(__truncdfsf2),
REG_SYM(__unorddf2),
};
static void
set_error_buf(char *error_buf, uint32 error_buf_size, const char *string)
{
if (error_buf != NULL)
snprintf(error_buf, error_buf_size, "%s", string);
}
SymbolMap *
get_target_symbol_map(uint32 *sym_num)
{
*sym_num = sizeof(target_sym_map) / sizeof(SymbolMap);
return target_sym_map;
}
void
get_current_target(char *target_buf, uint32 target_buf_size)
{
snprintf(target_buf, target_buf_size, "arc");
}
uint32
get_plt_table_size()
{
return 0;
}
void
init_plt_table(uint8 *plt)
{
(void)plt;
}
static bool
check_reloc_offset(uint32 target_section_size,
uint64 reloc_offset, uint32 reloc_data_size,
char *error_buf, uint32 error_buf_size)
{
if (!(reloc_offset < (uint64)target_section_size
&& reloc_offset + reloc_data_size <= (uint64)target_section_size)) {
set_error_buf(error_buf, error_buf_size,
"AOT module load failed: invalid relocation offset.");
return false;
}
return true;
}
static uint32
middle_endian_convert(uint32 insn)
{
return ((insn & 0xFFFF0000) >> 16) | ((insn & 0x0000FFFF) << 16);
}
bool
apply_relocation(AOTModule *module,
uint8 *target_section_addr, uint32 target_section_size,
uint64 reloc_offset, uint64 reloc_addend,
uint32 reloc_type, void *symbol_addr, int32 symbol_index,
char *error_buf, uint32 error_buf_size)
{
switch (reloc_type) {
case R_ARC_S25W_PCREL:
{
uint32 insn = LOAD_I32(target_section_addr + reloc_offset);
int32 addend, value;
uintptr_t S, A, P;
CHECK_RELOC_OFFSET(sizeof(void*));
/* Convert from middle endian */
insn = middle_endian_convert(insn);
addend = ((insn << 28) >> 28) << 10;
/* Extract the next 10 bits from Position 6 to 15 in insn */
addend |= ((insn << 16) >> 22);
addend = addend << 9;
/* Extract the remaining 9 bits from Position 18 to 26 in insn */
addend |= ((insn << 5) >> 23);
/* Fill in 2 bits to get the 25 bit Offset Value */
addend = addend << 2;
/* (S + A) - P */
S = (uintptr_t)(uint8*)symbol_addr;
A = (uintptr_t)reloc_addend;
P = (uintptr_t)(target_section_addr + reloc_offset);
P &= (uintptr_t)~3;
value = (int32)(S + A + addend - P);
insn = insn & 0xf8030030;
insn |= ((((value >> 2) & 0x1ff) << 18)
| (((value >> 2) & 0x7fe00) >> 3)
| (((value >> 2) & 0x780000) >> 19));
/* Convert to middle endian */
insn = middle_endian_convert(insn);
STORE_U32(target_section_addr + reloc_offset, insn);
break;
}
case R_ARC_32:
case R_ARC_32_ME:
{
uint32 insn;
CHECK_RELOC_OFFSET(sizeof(void*));
/* (S + A) */
insn = (uint32)(uintptr_t)
((uint8*)symbol_addr + reloc_addend);
if (reloc_type == R_ARC_32_ME)
/* Convert to middle endian */
insn = middle_endian_convert(insn);
STORE_U32(target_section_addr + reloc_offset, insn);
break;
}
default:
{
if (error_buf != NULL)
snprintf(error_buf, error_buf_size,
"Load relocation section failed: "
"invalid relocation type %d.",
reloc_type);
return false;
}
}
return true;
}
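
Note: ARCompact stores 32-bit values in "middle endian" order, i.e. with the two 16-bit halves swapped, which is why R_ARC_32_ME and the PC-relative relocation above convert through middle_endian_convert(). A quick worked example (illustrative only, not part of this commit):

#include <assert.h>
#include <stdint.h>

static void
middle_endian_example(void)
{
    uint32_t value = 0xAABBCCDDu;
    /* Same transform as middle_endian_convert() above. */
    uint32_t swapped = ((value & 0xFFFF0000u) >> 16) | ((value & 0x0000FFFFu) << 16);

    /* R_ARC_32 stores 0xAABBCCDD as-is; R_ARC_32_ME stores 0xCCDDAABB. */
    assert(swapped == 0xCCDDAABBu);
}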

View File

@ -25,6 +25,8 @@ elseif (WAMR_BUILD_TARGET STREQUAL "XTENSA")
set (arch_source ${IWASM_AOT_DIR}/arch/aot_reloc_xtensa.c)
elseif (WAMR_BUILD_TARGET MATCHES "RISCV*")
set (arch_source ${IWASM_AOT_DIR}/arch/aot_reloc_riscv.c)
elseif (WAMR_BUILD_TARGET STREQUAL "ARC")
set (arch_source ${IWASM_AOT_DIR}/arch/aot_reloc_arc.c)
else ()
message (FATAL_ERROR "Build target isn't set")
endif ()

View File

@ -0,0 +1,69 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
.text
.align 2
#ifndef BH_PLATFORM_DARWIN
.globl invokeNative
.type invokeNative, function
invokeNative:
#else
.globl _invokeNative
_invokeNative:
#endif /* end of BH_PLATFORM_DARWIN */
/*
* Arguments passed in:
* r0: function ptr
* r1: argv
* r2: nstacks
* ARC ABI:
* r0-r7: function arguments, caller-saved
* r8-r12: temp registers, caller-saved
*/
push_s blink /* push return addr */
st.aw fp, [sp, -4] /* push fp */
mov fp, sp /* fp = sp */
mov r8, r0 /* r8 = func_ptr */
mov r9, r1 /* r9 = argv */
mov r10, r2 /* r10 = nstacks */
ld r0, [r9, 0] /* r0 = argv[0] */
ld r1, [r9, 4] /* r1 = argv[1] */
ld r2, [r9, 8] /* r2 = argv[2] */
ld r3, [r9, 12] /* r3 = argv[3] */
ld r4, [r9, 16] /* r4 = argv[4] */
ld r5, [r9, 20] /* r5 = argv[5] */
ld r6, [r9, 24] /* r6 = argv[6] */
ld r7, [r9, 28] /* r7 = argv[7] */
add r9, r9, 32 /* r9 = stack_args */
breq r10, 0, call_func /* if (r10 == 0) goto call_func */
asl r11, r10, 2 /* r11 = nstacks * 4 */
sub sp, sp, r11 /* sp = sp - nstacks * 4 */
and sp, sp, ~7 /* make sp 8-byte aligned */
mov r11, sp /* r11 = sp */
loop_stack_args:
breq r10, 0, call_func /* if (r10 == 0) goto call_func */
ld r12, [r9] /* r12 = stack_args[i] */
st r12, [r11] /* stack[i] = r12 */
add r9, r9, 4 /* r9 = r9 + 4 */
add r11, r11, 4 /* r11 = r11 + 4 */
sub r10, r10, 1 /* r10 = r10 - 1 */
j loop_stack_args
call_func:
jl [r8] /* call function */
mov sp, fp /* sp = fp */
ld.ab fp, [sp, 4] /* pop fp */
pop_s blink /* pop return addr */
j_s [blink] /* ret */
nop_s
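
Note: the trampoline above takes the function pointer in r0, the packed argument array in r1, and the stack-word count in r2, matching the declaration used by wasm_runtime_invoke_native() later in this commit. A hedged call-site sketch (int64/uint32 are WAMR's basic typedefs; names are illustrative):

/* Sketch only, not part of this commit: argv holds the 8 words loaded into
 * r0-r7 above, followed by n_stacks words that the trampoline copies onto
 * the callee stack before the call. */
typedef void (*GenericFunctionPointer)();
int64 invokeNative(GenericFunctionPointer f, uint32 *argv, uint32 n_stacks);

static int64
invoke_example(GenericFunctionPointer func, uint32 *argv, uint32 n_stacks)
{
    return invokeNative(func, argv, n_stacks);
}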

View File

@ -68,6 +68,8 @@ elseif (WAMR_BUILD_TARGET STREQUAL "XTENSA")
set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_xtensa.s)
elseif (WAMR_BUILD_TARGET MATCHES "RISCV*")
set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_riscv.S)
elseif (WAMR_BUILD_TARGET STREQUAL "ARC")
set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_arc.s)
else ()
message (FATAL_ERROR "Build target isn't set")
endif ()

View File

@ -12,6 +12,10 @@
#include "../aot/aot_runtime.h"
#endif
#if WASM_ENABLE_AOT != 0
#include "aot_runtime.h"
#endif
#if WASM_ENABLE_THREAD_MGR != 0
#include "../libraries/thread-mgr/thread_manager.h"
#endif
@ -50,6 +54,14 @@ wasm_exec_env_create_internal(struct WASMModuleInstanceCommon *module_inst,
exec_env->wasm_stack.s.bottom + stack_size;
exec_env->wasm_stack.s.top = exec_env->wasm_stack.s.bottom;
#if WASM_ENABLE_AOT != 0
if (module_inst->module_type == Wasm_Module_AoT) {
AOTModuleInstance *i = (AOTModuleInstance *)module_inst;
AOTModule *m = (AOTModule *)i->aot_module.ptr;
exec_env->native_symbol = m->native_symbol_list;
}
#endif
#if WASM_ENABLE_MEMORY_TRACING != 0
wasm_runtime_dump_exec_env_mem_consumption(exec_env);
#endif

View File

@ -2480,7 +2480,8 @@ fail:
#if defined(BUILD_TARGET_ARM_VFP) \
|| defined(BUILD_TARGET_THUMB_VFP) \
|| defined(BUILD_TARGET_RISCV32_ILP32D) \
|| defined(BUILD_TARGET_RISCV32_ILP32)
|| defined(BUILD_TARGET_RISCV32_ILP32) \
|| defined(BUILD_TARGET_ARC)
typedef void (*GenericFunctionPointer)();
int64 invokeNative(GenericFunctionPointer f, uint32 *args, uint32 n_stacks);
@ -2496,8 +2497,7 @@ static Int64FuncPtr invokeNative_Int64 = (Int64FuncPtr)(uintptr_t)invokeNative;
static Int32FuncPtr invokeNative_Int32 = (Int32FuncPtr)(uintptr_t)invokeNative;
static VoidFuncPtr invokeNative_Void = (VoidFuncPtr)(uintptr_t)invokeNative;
#if !defined(BUILD_TARGET_RISCV32_ILP32D) \
&& !defined(BUILD_TARGET_RISCV32_ILP32)
#if defined(BUILD_TARGET_ARM_VFP) || defined(BUILD_TARGET_THUMB_VFP)
#define MAX_REG_INTS 4
#define MAX_REG_FLOATS 16
#else
@ -2519,7 +2519,7 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
uint32 result_count = func_type->result_count;
uint32 ext_ret_count = result_count > 1 ? result_count - 1 : 0;
bool ret = false;
#if !defined(BUILD_TARGET_RISCV32_ILP32)
#if !defined(BUILD_TARGET_RISCV32_ILP32) && !defined(BUILD_TARGET_ARC)
uint32 *fps;
int n_fps = 0;
#else
@ -2544,14 +2544,15 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
break;
case VALUE_TYPE_I64:
if (n_ints < MAX_REG_INTS - 1) {
#if !defined(BUILD_TARGET_RISCV32_ILP32) && !defined(BUILD_TARGET_RISCV32_ILP32D)
#if defined(BUILD_TARGET_ARM_VFP) || defined(BUILD_TARGET_THUMB_VFP)
/* 64-bit data must be 8 bytes aligned in arm */
if (n_ints & 1)
n_ints++;
#endif
n_ints += 2;
}
#if defined(BUILD_TARGET_RISCV32_ILP32) || defined(BUILD_TARGET_RISCV32_ILP32D)
#if defined(BUILD_TARGET_RISCV32_ILP32) || defined(BUILD_TARGET_RISCV32_ILP32D) \
|| defined(BUILD_TARGET_ARC)
/* part in register, part in stack */
else if (n_ints == MAX_REG_INTS - 1) {
n_ints++;
@ -2561,8 +2562,10 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
else {
/* 64-bit data in stack must be 8 bytes aligned
in arm and riscv32 */
#if !defined(BUILD_TARGET_ARC)
if (n_stacks & 1)
n_stacks++;
#endif
n_stacks += 2;
}
break;
@ -2575,23 +2578,26 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
break;
case VALUE_TYPE_F64:
if (n_fps < MAX_REG_FLOATS - 1) {
#if !defined(BUILD_TARGET_RISCV32_ILP32)
#if !defined(BUILD_TARGET_RISCV32_ILP32) && !defined(BUILD_TARGET_ARC)
/* 64-bit data must be 8 bytes aligned in arm */
if (n_fps & 1)
n_fps++;
#endif
n_fps += 2;
}
#if defined(BUILD_TARGET_RISCV32_ILP32)
#if defined(BUILD_TARGET_RISCV32_ILP32) || defined(BUILD_TARGET_ARC)
else if (n_fps == MAX_REG_FLOATS - 1) {
n_fps++;
n_stacks++;
}
#endif
else {
/* 64-bit data must be 8 bytes aligned in arm */
/* 64-bit data in stack must be 8 bytes aligned
in arm and riscv32 */
#if !defined(BUILD_TARGET_ARC)
if (n_stacks & 1)
n_stacks++;
#endif
n_stacks += 2;
}
break;
@ -2634,11 +2640,11 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
n_stacks++;
}
#if !defined(BUILD_TARGET_RISCV32_ILP32) && !defined(BUILD_TARGET_RISCV32_ILP32D)
#if defined(BUILD_TARGET_ARM_VFP) || defined(BUILD_TARGET_THUMB_VFP)
argc1 = MAX_REG_INTS + MAX_REG_FLOATS + n_stacks;
#elif defined(BUILD_TARGET_RISCV32_ILP32)
#elif defined(BUILD_TARGET_RISCV32_ILP32) || defined(BUILD_TARGET_ARC)
argc1 = MAX_REG_INTS + n_stacks;
#else
#else /* for BUILD_TARGET_RISCV32_ILP32D */
argc1 = MAX_REG_INTS + MAX_REG_FLOATS * 2 + n_stacks;
#endif
@ -2651,12 +2657,12 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
}
ints = argv1;
#if !defined(BUILD_TARGET_RISCV32_ILP32) && !defined(BUILD_TARGET_RISCV32_ILP32D)
#if defined(BUILD_TARGET_ARM_VFP) || defined(BUILD_TARGET_THUMB_VFP)
fps = ints + MAX_REG_INTS;
stacks = fps + MAX_REG_FLOATS;
#elif defined(BUILD_TARGET_RISCV32_ILP32)
#elif defined(BUILD_TARGET_RISCV32_ILP32) || defined(BUILD_TARGET_ARC)
stacks = ints + MAX_REG_INTS;
#else
#else /* for BUILD_TARGET_RISCV32_ILP32D */
fps = ints + MAX_REG_INTS;
stacks = fps + MAX_REG_FLOATS * 2;
#endif
@ -2719,16 +2725,16 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
case VALUE_TYPE_I64:
{
if (n_ints < MAX_REG_INTS - 1) {
#if !defined(BUILD_TARGET_RISCV32_ILP32) && !defined(BUILD_TARGET_RISCV32_ILP32D)
#if defined(BUILD_TARGET_ARM_VFP) || defined(BUILD_TARGET_THUMB_VFP)
/* 64-bit data must be 8 bytes aligned in arm */
if (n_ints & 1)
n_ints++;
#endif
*(uint64*)&ints[n_ints] = *(uint64*)argv_src;
n_ints += 2;
argv_src += 2;
ints[n_ints++] = *argv_src++;
ints[n_ints++] = *argv_src++;
}
#if defined(BUILD_TARGET_RISCV32_ILP32) || defined(BUILD_TARGET_RISCV32_ILP32D)
#if defined(BUILD_TARGET_RISCV32_ILP32) || defined(BUILD_TARGET_RISCV32_ILP32D) \
|| defined(BUILD_TARGET_ARC)
else if (n_ints == MAX_REG_INTS - 1) {
ints[n_ints++] = *argv_src++;
stacks[n_stacks++] = *argv_src++;
@ -2737,11 +2743,12 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
else {
/* 64-bit data in stack must be 8 bytes aligned
in arm and riscv32 */
#if !defined(BUILD_TARGET_ARC)
if (n_stacks & 1)
n_stacks++;
*(uint64*)&stacks[n_stacks] = *(uint64*)argv_src;
n_stacks += 2;
argv_src += 2;
#endif
stacks[n_stacks++] = *argv_src++;
stacks[n_stacks++] = *argv_src++;
}
break;
}
@ -2757,28 +2764,29 @@ wasm_runtime_invoke_native(WASMExecEnv *exec_env, void *func_ptr,
case VALUE_TYPE_F64:
{
if (n_fps < MAX_REG_FLOATS - 1) {
#if !defined(BUILD_TARGET_RISCV32_ILP32)
#if !defined(BUILD_TARGET_RISCV32_ILP32) && !defined(BUILD_TARGET_ARC)
/* 64-bit data must be 8 bytes aligned in arm */
if (n_fps & 1)
n_fps++;
#endif
*(float64*)&fps[n_fps] = *(float64*)argv_src;
n_fps += 2;
argv_src += 2;
fps[n_fps++] = *argv_src++;
fps[n_fps++] = *argv_src++;
}
#if defined(BUILD_TARGET_RISCV32_ILP32)
#if defined(BUILD_TARGET_RISCV32_ILP32) || defined(BUILD_TARGET_ARC)
else if (n_fps == MAX_REG_FLOATS - 1) {
fps[n_fps++] = *argv_src++;
stacks[n_stacks++] = *argv_src++;
}
#endif
else {
/* 64-bit data must be 8 bytes aligned in arm */
/* 64-bit data in stack must be 8 bytes aligned
in arm and riscv32 */
#if !defined(BUILD_TARGET_ARC)
if (n_stacks & 1)
n_stacks++;
*(float64*)&stacks[n_stacks] = *(float64*)argv_src;
n_stacks += 2;
argv_src += 2;
#endif
stacks[n_stacks++] = *argv_src++;
stacks[n_stacks++] = *argv_src++;
}
break;
}
@ -2885,7 +2893,8 @@ fail:
#endif /* end of defined(BUILD_TARGET_ARM_VFP)
|| defined(BUILD_TARGET_THUMB_VFP) \
|| defined(BUILD_TARGET_RISCV32_ILP32D)
|| defined(BUILD_TARGET_RISCV32_ILP32) */
|| defined(BUILD_TARGET_RISCV32_ILP32)
|| defined(BUILD_TARGET_ARC) */
#if defined(BUILD_TARGET_X86_32) \
|| defined(BUILD_TARGET_ARM) \

View File

@ -229,6 +229,12 @@ typedef struct AOTCompData {
WASMModule *wasm_module;
} AOTCompData;
typedef struct AOTNativeSymbol {
bh_list_link link;
const char *symbol;
int32 index;
} AOTNativeSymbol;
AOTCompData*
aot_create_comp_data(WASMModule *module);

View File

@ -2111,19 +2111,26 @@ bool
aot_emit_object_file(AOTCompContext *comp_ctx, char *file_name)
{
char *err = NULL;
LLVMCodeGenFileType file_type = LLVMObjectFile;
LLVMTargetRef target =
LLVMGetTargetMachineTarget(comp_ctx->target_machine);
bh_print_time("Begin to emit object file");
if (!strncmp(LLVMGetTargetName(target), "arc", 3))
/* Emit to an assembly file instead for the ARC target,
as LLVM cannot emit an object file for it */
file_type = LLVMAssemblyFile;
if (LLVMTargetMachineEmitToFile(comp_ctx->target_machine,
comp_ctx->module,
file_name,
LLVMObjectFile,
file_name, file_type,
&err) != 0) {
if (err) {
LLVMDisposeMessage(err);
err = NULL;
}
aot_set_last_error("emit elf to memory buffer failed.");
aot_set_last_error("emit elf to object file failed.");
return false;
}

View File

@ -790,6 +790,23 @@ get_relocation_section_size(AOTObjectData *obj_data)
is_32bit_binary(obj_data->binary));
}
static uint32
get_native_symbol_list_size(AOTCompContext *comp_ctx)
{
uint32 len = 0;
AOTNativeSymbol *sym = NULL;
sym = bh_list_first_elem(&comp_ctx->native_symbols);
while (sym) {
len = align_uint(len, 2);
len += get_string_size(sym->symbol);
sym = bh_list_elem_next(sym);
}
return len;
}
static uint32
get_aot_file_size(AOTCompContext *comp_ctx, AOTCompData *comp_data,
AOTObjectData *obj_data)
@ -835,6 +852,14 @@ get_aot_file_size(AOTCompContext *comp_ctx, AOTCompData *comp_data,
size += (uint32)sizeof(uint32) * 2;
size += get_relocation_section_size(obj_data);
if (get_native_symbol_list_size(comp_ctx) > 0) {
/* emit only when there are native symbols */
size = align_uint(size, 4);
/* section id + section size + sub section id + symbol count */
size += (uint32)sizeof(uint32) * 4;
size += get_native_symbol_list_size(comp_ctx);
}
return size;
}
@ -1505,6 +1530,38 @@ aot_emit_relocation_section(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
return true;
}
static bool
aot_emit_native_symbol(uint8 *buf, uint8 *buf_end, uint32 *p_offset,
AOTCompContext *comp_ctx)
{
uint32 offset = *p_offset;
AOTNativeSymbol *sym = NULL;
if (bh_list_length(&comp_ctx->native_symbols) == 0)
/* emit only when there are native symbols */
return true;
*p_offset = offset = align_uint(offset, 4);
EMIT_U32(AOT_SECTION_TYPE_CUSTOM);
/* sub section id + symbol count + symbol list */
EMIT_U32(sizeof(uint32) * 2 + get_native_symbol_list_size(comp_ctx));
EMIT_U32(AOT_CUSTOM_SECTION_NATIVE_SYMBOL);
EMIT_U32(bh_list_length(&comp_ctx->native_symbols));
sym = bh_list_first_elem(&comp_ctx->native_symbols);
while (sym) {
offset = align_uint(offset, 2);
EMIT_STR(sym->symbol);
sym = bh_list_elem_next(sym);
}
*p_offset = offset;
return true;
}
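
Note: the emitter above produces the native-symbol custom section that load_custom_section()/load_native_symbol_section() in the loader consume. An assumed on-disk layout sketch (field names are illustrative, and the string encoding is assumed to follow the AOT format's usual uint16-length-prefixed form implied by the 2-byte alignment above):

/* Sketch of the emitted bytes, not an actual struct in the code base:
 *
 *   uint32 section_type;      -- AOT_SECTION_TYPE_CUSTOM (100)
 *   uint32 section_size;      -- 2 * sizeof(uint32) + symbol list size
 *   uint32 sub_section_type;  -- AOT_CUSTOM_SECTION_NATIVE_SYMBOL (1)
 *   uint32 symbol_count;
 *   -- symbol_count names follow, each aligned to 2 bytes
 */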
typedef uint32 U32;
typedef int32 I32;
typedef uint16 U16;
@ -2164,6 +2221,8 @@ aot_obj_data_create(AOTCompContext *comp_ctx)
{
char *err = NULL;
AOTObjectData *obj_data;
LLVMTargetRef target =
LLVMGetTargetMachineTarget(comp_ctx->target_machine);
bh_print_time("Begin to emit object file to buffer");
@ -2173,11 +2232,76 @@ aot_obj_data_create(AOTCompContext *comp_ctx)
}
memset(obj_data, 0, sizeof(AOTObjectData));
if (LLVMTargetMachineEmitToMemoryBuffer(comp_ctx->target_machine,
comp_ctx->module,
LLVMObjectFile,
&err,
&obj_data->mem_buf) != 0) {
bh_print_time("Begin to emit object file");
if (!strncmp(LLVMGetTargetName(target), "arc", 3)) {
/* Emit to an assembly file instead for the ARC target,
as LLVM cannot emit an object file for it */
char file_name[] = "wasm-XXXXXX", buf[128];
int fd, ret;
if ((fd = mkstemp(file_name)) <= 0) {
aot_set_last_error("make temp file failed.");
goto fail;
}
/* close and remove temp file */
close(fd);
unlink(file_name);
snprintf(buf, sizeof(buf), "%s%s", file_name, ".s");
if (LLVMTargetMachineEmitToFile(comp_ctx->target_machine,
comp_ctx->module,
buf, LLVMAssemblyFile,
&err) != 0) {
if (err) {
LLVMDisposeMessage(err);
err = NULL;
}
aot_set_last_error("emit elf to object file failed.");
goto fail;
}
/* call arc gcc to compile assembly file to object file */
/* TODO: get arc gcc from an environment variable first
and check whether the toolchain actually exists */
snprintf(buf, sizeof(buf), "%s%s%s%s%s%s",
"/opt/zephyr-sdk/arc-zephyr-elf/bin/arc-zephyr-elf-gcc ",
"-mcpu=arcem -o ", file_name, ".o -c ", file_name, ".s");
/* TODO: use try..catch to handle possible exceptions */
ret = system(buf);
/* remove temp assembly file */
snprintf(buf, sizeof(buf), "%s%s", file_name, ".s");
unlink(buf);
if (ret != 0) {
aot_set_last_error("failed to compile asm file to obj file "
"with arc gcc toolchain.");
goto fail;
}
/* create memory buffer from object file */
snprintf(buf, sizeof(buf), "%s%s", file_name, ".o");
ret = LLVMCreateMemoryBufferWithContentsOfFile(buf,
&obj_data->mem_buf,
&err);
/* remove temp object file */
snprintf(buf, sizeof(buf), "%s%s",file_name, ".o");
unlink(buf);
if (ret != 0) {
if (err) {
LLVMDisposeMessage(err);
err = NULL;
}
aot_set_last_error("create mem buffer with file failed.");
goto fail;
}
}
else if (LLVMTargetMachineEmitToMemoryBuffer(comp_ctx->target_machine,
comp_ctx->module,
LLVMObjectFile, &err,
&obj_data->mem_buf) != 0) {
if (err) {
LLVMDisposeMessage(err);
err = NULL;
@ -2242,7 +2366,8 @@ aot_emit_aot_file_buf(AOTCompContext *comp_ctx,
|| !aot_emit_text_section(buf, buf_end, &offset, comp_data, obj_data)
|| !aot_emit_func_section(buf, buf_end, &offset, comp_data, obj_data)
|| !aot_emit_export_section(buf, buf_end, &offset, comp_data, obj_data)
|| !aot_emit_relocation_section(buf, buf_end, &offset, comp_data, obj_data))
|| !aot_emit_relocation_section(buf, buf_end, &offset, comp_data, obj_data)
|| !aot_emit_native_symbol(buf, buf_end, &offset, comp_ctx))
goto fail2;
#if 0

View File

@ -68,6 +68,24 @@ aot_emit_exception(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
return false;
}
}
else if (comp_ctx->is_indirect_mode) {
int32 func_index;
if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
aot_set_last_error("create LLVM function type failed.");
return false;
}
func_index = aot_get_native_symbol_index(
comp_ctx, "aot_set_exception_with_id");
if (func_index < 0) {
return false;
}
if (!(func =
aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
func_ptr_type, func_index))) {
return false;
}
}
else {
/* Create LLVM function with external function pointer */
if (!(func = LLVMGetNamedFunction(comp_ctx->module,

View File

@ -168,10 +168,26 @@ call_aot_invoke_native_func(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
return false;
}
}
else if (comp_ctx->is_indirect_mode) {
int32 func_index;
if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
aot_set_last_error("create LLVM function type failed.");
return false;
}
func_index =
aot_get_native_symbol_index(comp_ctx, func_name);
if (func_index < 0) {
return false;
}
if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
func_ptr_type, func_index))) {
return false;
}
}
else {
if (!(func = LLVMGetNamedFunction(comp_ctx->module, func_name))
&& !(func = LLVMAddFunction(comp_ctx->module,
func_name, func_type))) {
&& !(func =
LLVMAddFunction(comp_ctx->module, func_name, func_type))) {
aot_set_last_error("add LLVM function failed.");
return false;
}
@ -547,7 +563,22 @@ aot_compile_op_call(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
goto fail;
}
else {
func = func_ctxes[func_idx - import_func_count]->func;
if (comp_ctx->is_indirect_mode) {
LLVMTypeRef func_ptr_type;
if (!(func_ptr_type = LLVMPointerType(
func_ctxes[func_idx - import_func_count]->func_type, 0))) {
aot_set_last_error("construct func ptr type failed.");
goto fail;
}
if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->func_ptrs,
func_ptr_type, func_idx))) {
goto fail;
}
}
else {
func = func_ctxes[func_idx - import_func_count]->func;
}
aot_func = func_ctxes[func_idx - import_func_count]->aot_func;
callee_cell_num = aot_func->param_cell_num + aot_func->local_cell_num + 1;
@ -650,6 +681,22 @@ call_aot_call_indirect_func(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
return false;
}
}
else if (comp_ctx->is_indirect_mode) {
int32 func_index;
if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
aot_set_last_error("create LLVM function type failed.");
return false;
}
func_index =
aot_get_native_symbol_index(comp_ctx, func_name);
if (func_index < 0) {
return false;
}
if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
func_ptr_type, func_index))) {
return false;
}
}
else {
if (!(func = LLVMGetNamedFunction(comp_ctx->module, func_name))
&& !(func = LLVMAddFunction(comp_ctx->module,

View File

@ -651,6 +651,7 @@ aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
LLVMValueRef delta, param_values[2], ret_value, func, value;
LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
int32 func_index;
if (!mem_size)
return false;
@ -679,6 +680,21 @@ aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
return false;
}
}
else if (comp_ctx->is_indirect_mode) {
if (!(func_ptr_type = LLVMPointerType(func_type, 0))) {
aot_set_last_error("create LLVM function type failed.");
return false;
}
func_index =
aot_get_native_symbol_index(comp_ctx, "aot_enlarge_memory");
if (func_index < 0) {
return false;
}
if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
func_ptr_type, func_index))) {
return false;
}
}
else {
char *func_name = "aot_enlarge_memory";
/* AOT mode, declare the function */
@ -715,7 +731,6 @@ fail:
return false;
}
#if WASM_ENABLE_BULK_MEMORY != 0
static LLVMValueRef
@ -924,6 +939,8 @@ aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
return false;
/* TODO: lookup func ptr of "memmove" to call for XIP mode */
if (!(res = LLVMBuildMemMove(comp_ctx->builder, dst_addr, 1,
src_addr, 1, len))) {
aot_set_last_error("llvm build memmove failed.");
@ -947,7 +964,13 @@ aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
return false;
val = LLVMBuildIntCast2(comp_ctx->builder, val, INT8_TYPE, true, "mem_set_value");
if (!(val = LLVMBuildIntCast2(comp_ctx->builder, val, INT8_TYPE,
true, "mem_set_value"))) {
aot_set_last_error("llvm build int cast2 failed.");
return false;
}
/* TODO: lookup func ptr of "memset" to call for XIP mode */
if (!(res = LLVMBuildMemSet(comp_ctx->builder, dst_addr,
val, len, 1))) {

View File

@ -7,6 +7,7 @@
#include "aot_emit_exception.h"
#include "aot_emit_control.h"
#include "../aot/aot_runtime.h"
#include "../aot/aot_intrinsic.h"
#include <stdarg.h>
@ -138,6 +139,7 @@
/* Call llvm constrained floating-point intrinsic */
static LLVMValueRef
call_llvm_float_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_f32,
const char *intrinsic,
...)
@ -145,14 +147,18 @@ call_llvm_float_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
va_list param_value_list;
LLVMValueRef ret;
LLVMTypeRef param_types[4], ret_type = is_f32 ? F32_TYPE : F64_TYPE;
int param_count = ((comp_ctx->disable_llvm_intrinsics == false)
|| aot_intrinsic_check_capability(comp_ctx, intrinsic))
? 4 : 2;
param_types[0] = param_types[1] = ret_type;
param_types[2] = param_types[3] = MD_TYPE;
va_start(param_value_list, intrinsic);
ret = aot_call_llvm_intrinsic_v(comp_ctx, intrinsic, ret_type, param_types,
4, param_value_list);
ret =
aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
param_types, param_count, param_value_list);
va_end(param_value_list);
@ -162,6 +168,7 @@ call_llvm_float_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
/* Call llvm constrained libm-equivalent intrinsic */
static LLVMValueRef
call_llvm_libm_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_f32,
const char *intrinsic,
...)
@ -175,7 +182,7 @@ call_llvm_libm_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
va_start(param_value_list, intrinsic);
ret = aot_call_llvm_intrinsic_v(comp_ctx, intrinsic, ret_type, param_types,
ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type, param_types,
3, param_value_list);
va_end(param_value_list);
@ -185,6 +192,7 @@ call_llvm_libm_experimental_constrained_intrinsic(AOTCompContext *comp_ctx,
static LLVMValueRef
compile_op_float_min_max(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_f32,
LLVMValueRef left,
LLVMValueRef right,
@ -230,8 +238,9 @@ compile_op_float_min_max(AOTCompContext *comp_ctx,
return NULL;
}
if (!(cmp = aot_call_llvm_intrinsic(comp_ctx, intrinsic, ret_type,
param_types, 2, left, right)))
if (!(cmp =
aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic, ret_type,
param_types, 2, left, right)))
return NULL;
if (!(cmp = LLVMBuildSelect(comp_ctx->builder,
@ -266,13 +275,14 @@ typedef enum BitCountType {
POP_CNT64
} BitCountType;
static char *bit_cnt_llvm_intrinsic[] = { "llvm.ctlz.i32",
"llvm.ctlz.i64",
"llvm.cttz.i32",
"llvm.cttz.i64",
"llvm.ctpop.i32",
"llvm.ctpop.i64",
};
static char *bit_cnt_llvm_intrinsic[] = {
"llvm.ctlz.i32",
"llvm.ctlz.i64",
"llvm.cttz.i32",
"llvm.cttz.i64",
"llvm.ctpop.i32",
"llvm.ctpop.i64",
};
static bool
aot_compile_int_bit_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
@ -290,6 +300,7 @@ aot_compile_int_bit_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
/* Call the LLVM intrinsic function */
if (type < POP_CNT32)
DEF_INT_UNARY_OP(aot_call_llvm_intrinsic(comp_ctx,
func_ctx,
bit_cnt_llvm_intrinsic[type],
ret_type,
param_types,
@ -299,6 +310,7 @@ aot_compile_int_bit_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
NULL);
else
DEF_INT_UNARY_OP(aot_call_llvm_intrinsic(comp_ctx,
func_ctx,
bit_cnt_llvm_intrinsic[type],
ret_type,
param_types,
@ -823,6 +835,7 @@ compile_op_float_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
else
DEF_FP_BINARY_OP(call_llvm_float_experimental_constrained_intrinsic(
comp_ctx,
func_ctx,
is_f32,
(is_f32
? "llvm.experimental.constrained.fadd.f32"
@ -840,6 +853,7 @@ compile_op_float_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
else
DEF_FP_BINARY_OP(call_llvm_float_experimental_constrained_intrinsic(
comp_ctx,
func_ctx,
is_f32,
(is_f32
? "llvm.experimental.constrained.fsub.f32"
@ -857,6 +871,7 @@ compile_op_float_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
else
DEF_FP_BINARY_OP(call_llvm_float_experimental_constrained_intrinsic(
comp_ctx,
func_ctx,
is_f32,
(is_f32
? "llvm.experimental.constrained.fmul.f32"
@ -874,6 +889,7 @@ compile_op_float_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
else
DEF_FP_BINARY_OP(call_llvm_float_experimental_constrained_intrinsic(
comp_ctx,
func_ctx,
is_f32,
(is_f32
? "llvm.experimental.constrained.fdiv.f32"
@ -886,6 +902,7 @@ compile_op_float_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
return true;
case FLOAT_MIN:
DEF_FP_BINARY_OP(compile_op_float_min_max(comp_ctx,
func_ctx,
is_f32,
left,
right,
@ -894,6 +911,7 @@ compile_op_float_arithmetic(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
return true;
case FLOAT_MAX:
DEF_FP_BINARY_OP(compile_op_float_min_max(comp_ctx,
func_ctx,
is_f32,
left,
right,
@ -912,6 +930,7 @@ fail:
static LLVMValueRef
call_llvm_float_math_intrinsic(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx,
bool is_f32,
const char *intrinsic,
...)
@ -924,8 +943,8 @@ call_llvm_float_math_intrinsic(AOTCompContext *comp_ctx,
va_start(param_value_list, intrinsic);
ret = aot_call_llvm_intrinsic_v(comp_ctx, intrinsic, ret_type, &param_type,
1, param_value_list);
ret = aot_call_llvm_intrinsic_v(comp_ctx, func_ctx, intrinsic, ret_type,
&param_type, 1, param_value_list);
va_end(param_value_list);
@ -939,6 +958,7 @@ compile_op_float_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
switch (math_op) {
case FLOAT_ABS:
DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(comp_ctx,
func_ctx,
is_f32,
is_f32 ? "llvm.fabs.f32" :
"llvm.fabs.f64",
@ -952,6 +972,7 @@ compile_op_float_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
case FLOAT_CEIL:
DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(comp_ctx,
func_ctx,
is_f32,
is_f32 ? "llvm.ceil.f32" :
"llvm.ceil.f64",
@ -960,6 +981,7 @@ compile_op_float_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
return true;
case FLOAT_FLOOR:
DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(comp_ctx,
func_ctx,
is_f32,
is_f32 ? "llvm.floor.f32":
"llvm.floor.f64",
@ -968,6 +990,7 @@ compile_op_float_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
return true;
case FLOAT_TRUNC:
DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(comp_ctx,
func_ctx,
is_f32,
is_f32 ? "llvm.trunc.f32" :
"llvm.trunc.f64",
@ -976,6 +999,7 @@ compile_op_float_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
return true;
case FLOAT_NEAREST:
DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(comp_ctx,
func_ctx,
is_f32,
is_f32 ? "llvm.rint.f32" :
"llvm.rint.f64",
@ -983,8 +1007,10 @@ compile_op_float_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
NULL);
return true;
case FLOAT_SQRT:
if (is_targeting_soft_float(comp_ctx, is_f32))
if (is_targeting_soft_float(comp_ctx, is_f32)
|| comp_ctx->disable_llvm_intrinsics)
DEF_FP_UNARY_OP(call_llvm_float_math_intrinsic(comp_ctx,
func_ctx,
is_f32,
is_f32 ? "llvm.sqrt.f32" :
"llvm.sqrt.f64",
@ -993,6 +1019,7 @@ compile_op_float_math(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
else
DEF_FP_UNARY_OP(call_llvm_libm_experimental_constrained_intrinsic(
comp_ctx,
func_ctx,
is_f32,
(is_f32
? "llvm.experimental.constrained.sqrt.f32"
@ -1022,6 +1049,7 @@ compile_float_copysign(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
param_types[0] = param_types[1] = ret_type = is_f32 ? F32_TYPE : F64_TYPE;
DEF_FP_BINARY_OP(aot_call_llvm_intrinsic(comp_ctx,
func_ctx,
is_f32 ? "llvm.copysign.f32" :
"llvm.copysign.f64",
ret_type,
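All of the call-site changes above follow one pattern: each intrinsic helper now threads func_ctx through so that, when LLVM intrinsics are disabled, the call can be redirected to a runtime implementation via the native symbol table. A minimal usage sketch of the updated helper, modeled on the copysign call above (illustrative only, not part of this commit):

    /* Sketch: calling the updated helper with the extra func_ctx argument. */
    LLVMTypeRef param_types[2];
    LLVMValueRef result;

    param_types[0] = param_types[1] = F32_TYPE;
    if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx,
                                           "llvm.copysign.f32", F32_TYPE,
                                           param_types, 2, left, right)))
        return false;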


@ -7,7 +7,7 @@
#include "aot_compiler.h"
#include "aot_emit_exception.h"
#include "../aot/aot_runtime.h"
#include "../aot/aot_intrinsic.h"
LLVMTypeRef
wasm_type_to_llvm_type(AOTLLVMTypes *llvm_types, uint8 wasm_type)
@ -38,7 +38,7 @@ wasm_type_to_llvm_type(AOTLLVMTypes *llvm_types, uint8 wasm_type)
*/
static LLVMValueRef
aot_add_llvm_func(AOTCompContext *comp_ctx, AOTFuncType *aot_func_type,
uint32 func_index)
uint32 func_index, LLVMTypeRef *p_func_type)
{
LLVMValueRef func = NULL;
LLVMTypeRef *param_types, ret_type, func_type;
@ -107,6 +107,9 @@ aot_add_llvm_func(AOTCompContext *comp_ctx, AOTFuncType *aot_func_type,
LLVMSetValueName(local_value, "");
}
if (p_func_type)
*p_func_type = func_type;
fail:
wasm_runtime_free(param_types);
return func;
@ -604,6 +607,7 @@ aot_create_func_context(AOTCompData *comp_data, AOTCompContext *comp_ctx,
LLVMValueRef stack_bound_offset = I32_FOUR, stack_bound_addr;
LLVMValueRef aux_stack_bound_offset = I32_SIX, aux_stack_bound_addr;
LLVMValueRef aux_stack_bottom_offset = I32_SEVEN, aux_stack_bottom_addr;
LLVMValueRef native_symbol_offset = I32_EIGHT, native_symbol_addr;
char local_name[32];
uint64 size;
uint32 i, j = 0;
@ -621,7 +625,8 @@ aot_create_func_context(AOTCompData *comp_data, AOTCompContext *comp_ctx,
func_ctx->aot_func = func;
/* Add LLVM function */
if (!(func_ctx->func = aot_add_llvm_func(comp_ctx, aot_func_type, func_index)))
if (!(func_ctx->func = aot_add_llvm_func(comp_ctx, aot_func_type,
func_index, &func_ctx->func_type)))
goto fail;
/* Create function's first AOTBlock */
@ -741,6 +746,27 @@ aot_create_func_context(AOTCompData *comp_data, AOTCompContext *comp_ctx,
goto fail;
}
if (!(native_symbol_addr =
LLVMBuildInBoundsGEP(comp_ctx->builder, func_ctx->exec_env,
&native_symbol_offset, 1, "native_symbol_addr"))) {
aot_set_last_error("llvm build in bounds gep failed");
goto fail;
}
if (!(func_ctx->native_symbol =
LLVMBuildLoad(comp_ctx->builder, native_symbol_addr,
"native_symbol_tmp"))) {
aot_set_last_error("llvm build bit cast failed");
goto fail;
}
if (!(func_ctx->native_symbol =
LLVMBuildBitCast(comp_ctx->builder, func_ctx->native_symbol,
comp_ctx->exec_env_type, "native_symbol"))) {
aot_set_last_error("llvm build bit cast failed");
goto fail;
}
for (i = 0; i < aot_func_type->param_count; i++, j++) {
snprintf(local_name, sizeof(local_name), "l%d", i);
func_ctx->locals[i] =
@ -814,7 +840,7 @@ aot_create_func_context(AOTCompData *comp_data, AOTCompContext *comp_ctx,
}
else {
if (!(func_ctx->last_alloca = LLVMBuildAlloca(comp_ctx->builder, INT8_TYPE,
"stack_ptr"))) {
"stack_ptr"))) {
aot_set_last_error("llvm build alloca failed.");
goto fail;
}
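The GEP/load/bitcast sequence added above caches the module's native symbol table from the execution environment once per compiled function. In C terms the generated code is roughly equivalent to the sketch below; the slot index mirrors the I32_EIGHT offset, and treating exec_env as an array of pointer-sized slots is an assumption made only for illustration:

/* Illustrative sketch, not part of this commit. */
static void **
load_native_symbol_table(void **exec_env)
{
    /* slot 8 (I32_EIGHT above) is assumed to hold the table of
       native symbol addresses for the module instance */
    return (void **)exec_env[8];
}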
@ -1089,7 +1115,8 @@ static ArchItem valid_archs[] = {
{ "thumbv8m.main", true },
{ "thumbv8.1m.main", true },
{ "riscv32", true},
{ "riscv64", true}
{ "riscv64", true},
{ "arc", true }
};
static const char *valid_abis[] = {
@ -1230,6 +1257,10 @@ aot_create_comp_context(AOTCompData *comp_data,
goto fail;
}
if (BH_LIST_ERROR == bh_list_init(&comp_ctx->native_symbols)) {
goto fail;
}
if (option->enable_bulk_memory)
comp_ctx->enable_bulk_memory = true;
@ -1248,6 +1279,12 @@ aot_create_comp_context(AOTCompData *comp_data,
if (option->enable_aux_stack_check)
comp_ctx->enable_aux_stack_check = true;
if (option->is_indirect_mode)
comp_ctx->is_indirect_mode = true;
if (option->disable_llvm_intrinsics)
comp_ctx->disable_llvm_intrinsics = true;
if (option->is_jit_mode) {
char *triple_jit = NULL;
@ -1496,7 +1533,12 @@ aot_create_comp_context(AOTCompData *comp_data,
goto fail;
}
if (!LLVMTargetHasAsmBackend(target)) {
/* Report an error if the target is not arc and has no asm backend.
   For the arc target, since LLVM currently cannot emit an ELF image to a
   memory buffer, we emit an assembly file instead, invoke arc-gcc to
   compile it into an ELF file, and then read that ELF file back into a
   memory buffer. */
if (strncmp(comp_ctx->target_arch, "arc", 3)
&& !LLVMTargetHasAsmBackend(target)) {
snprintf(buf, sizeof(buf),
"no asm backend for this target (%s).", LLVMGetTargetName(target));
aot_set_last_error(buf);
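The comment above describes the arc fallback pipeline: emit an assembly file, invoke the external arc-gcc toolchain to produce an ELF object, then read that object back into a memory buffer. A hedged sketch of the compile step is given below; the driver name, flags, and helper function are assumptions for illustration, not code from this commit:

/* Hypothetical helper mirroring the arc fallback described above. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool
compile_asm_to_elf(const char *asm_file, const char *elf_file)
{
    char cmd[256];

    /* assumed toolchain driver; the actual prefix depends on the
       installed ARC GCC toolchain */
    snprintf(cmd, sizeof(cmd), "arc-gcc -c %s -o %s", asm_file, elf_file);
    return system(cmd) == 0;
}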
@ -1631,6 +1673,18 @@ aot_create_comp_context(AOTCompData *comp_data,
aot_create_func_contexts(comp_data, comp_ctx)))
goto fail;
if (cpu) {
int len = strlen(cpu) + 1;
if (!(comp_ctx->target_cpu = wasm_runtime_malloc(len))) {
aot_set_last_error("allocate memory failed");
goto fail;
}
bh_memcpy_s(comp_ctx->target_cpu, len, cpu, len);
}
if (comp_ctx->disable_llvm_intrinsics)
aot_intrinsic_fill_capability_flags(comp_ctx);
ret = comp_ctx;
fail:
@ -1676,9 +1730,65 @@ aot_destroy_comp_context(AOTCompContext *comp_ctx)
aot_destroy_func_contexts(comp_ctx->func_ctxes,
comp_ctx->func_ctx_count);
if (bh_list_length(&comp_ctx->native_symbols) > 0) {
AOTNativeSymbol *sym = bh_list_first_elem(&comp_ctx->native_symbols);
while (sym) {
AOTNativeSymbol *t = bh_list_elem_next(sym);
bh_list_remove(&comp_ctx->native_symbols, sym);
wasm_runtime_free(sym);
sym = t;
}
}
if (comp_ctx->target_cpu) {
wasm_runtime_free(comp_ctx->target_cpu);
}
wasm_runtime_free(comp_ctx);
}
int32
aot_get_native_symbol_index(AOTCompContext *comp_ctx, const char *symbol)
{
int32 idx = -1;
AOTNativeSymbol *sym = NULL;
sym = bh_list_first_elem(&comp_ctx->native_symbols);
/* Look up an existing symbol record */
while (sym) {
if (strcmp(sym->symbol, symbol) == 0) {
idx = sym->index;
break;
}
sym = bh_list_elem_next(sym);
}
/* The symbol does not exist in the list yet, so allocate a new index for it */
if (idx < 0) {
sym = wasm_runtime_malloc(sizeof(AOTNativeSymbol));
if (!sym) {
aot_set_last_error("alloc native symbol failed.");
return idx;
}
idx = bh_list_length(&comp_ctx->native_symbols);
sym->symbol = symbol;
sym->index = idx;
if (BH_LIST_ERROR == bh_list_insert(&comp_ctx->native_symbols, sym)) {
wasm_runtime_free(sym);
aot_set_last_error("alloc index for native symbol failed.");
return -1;
}
}
return idx;
}
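Each distinct symbol thus receives a stable, zero-based index in registration order, and repeated lookups return the same index. A minimal usage sketch (illustrative only; the intrinsic name is just an example, and the returned symbol string comes from the intrinsic mapping table):

/* Sketch: map an LLVM intrinsic name to its runtime symbol and index. */
const char *symname = aot_intrinsic_get_symbol("llvm.fabs.f32");
int32 index = symname ? aot_get_native_symbol_index(comp_ctx, symname) : -1;

if (index < 0) {
    /* no runtime counterpart registered, or allocation failed */
}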
void
aot_value_stack_push(AOTValueStack *stack, AOTValue *value)
{
@ -1901,6 +2011,7 @@ aot_build_zero_function_ret(AOTCompContext *comp_ctx,
static LLVMValueRef
__call_llvm_intrinsic(const AOTCompContext *comp_ctx,
const AOTFuncContext *func_ctx,
const char *name,
LLVMTypeRef ret_type,
LLVMTypeRef *param_types,
@ -1909,25 +2020,67 @@ __call_llvm_intrinsic(const AOTCompContext *comp_ctx,
{
LLVMValueRef func, ret;
LLVMTypeRef func_type;
const char *symname;
int32 func_idx;
/* Declare llvm intrinsic function if necessary */
if (!(func = LLVMGetNamedFunction(comp_ctx->module, name))) {
if (!(func_type = LLVMFunctionType(ret_type, param_types,
(uint32)param_count, false))) {
aot_set_last_error("create LLVM function type failed.");
if (comp_ctx->disable_llvm_intrinsics
&& (aot_intrinsic_check_capability(comp_ctx, name) == false)) {
if (func_ctx == NULL) {
aot_set_last_error_v("invalid func_ctx for intrinsic: %s", name);
return NULL;
}
if (!(func = LLVMAddFunction(comp_ctx->module, name, func_type))) {
aot_set_last_error("add LLVM function failed.");
if (!(func_type = LLVMFunctionType(ret_type, param_types,
(uint32)param_count, false))) {
aot_set_last_error("create LLVM intrinsic function type failed.");
return NULL;
}
if (!(func_type = LLVMPointerType(func_type, 0))) {
aot_set_last_error(
"create LLVM intrinsic function pointer type failed.");
return NULL;
}
if (!(symname = aot_intrinsic_get_symbol(name))) {
aot_set_last_error_v("runtime intrinsic not implemented: %s\n",
name);
return NULL;
}
func_idx =
aot_get_native_symbol_index((AOTCompContext *)comp_ctx, symname);
if (func_idx < 0) {
aot_set_last_error_v("get runtime intrinsc index failed: %s\n",
name);
return NULL;
}
if (!(func = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
func_type, func_idx))) {
aot_set_last_error_v("get runtime intrinsc failed: %s\n", name);
return NULL;
}
}
else {
/* Declare llvm intrinsic function if necessary */
if (!(func = LLVMGetNamedFunction(comp_ctx->module, name))) {
if (!(func_type = LLVMFunctionType(ret_type, param_types,
(uint32)param_count, false))) {
aot_set_last_error("create LLVM intrinsic function type failed.");
return NULL;
}
if (!(func = LLVMAddFunction(comp_ctx->module, name, func_type))) {
aot_set_last_error("add LLVM intrinsic function failed.");
return NULL;
}
}
}
/* Call the LLVM intrinsic function */
if (!(ret = LLVMBuildCall(comp_ctx->builder, func, param_values,
(uint32)param_count, "call"))) {
aot_set_last_error("llvm build call failed.");
aot_set_last_error("llvm build intrinsic call failed.");
return NULL;
}
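Because the old and new lines are interleaved above, a condensed outline of the helper's new control flow may help; this is a summary sketch with error handling elided, not additional code in the commit:

/* Outline of __call_llvm_intrinsic after this change (sketch). */
if (comp_ctx->disable_llvm_intrinsics
    && !aot_intrinsic_check_capability(comp_ctx, name)) {
    /* indirect path: resolve the runtime intrinsic through the
       native symbol table carried in exec_env */
    symname  = aot_intrinsic_get_symbol(name);
    func_idx = aot_get_native_symbol_index((AOTCompContext *)comp_ctx, symname);
    func     = aot_get_func_from_table(comp_ctx, func_ctx->native_symbol,
                                       func_type, func_idx);
}
else {
    /* direct path: declare and call the LLVM intrinsic as before */
    if (!(func = LLVMGetNamedFunction(comp_ctx->module, name)))
        func = LLVMAddFunction(comp_ctx->module, name, func_type);
}
ret = LLVMBuildCall(comp_ctx->builder, func, param_values,
                    (uint32)param_count, "call");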
@ -1936,6 +2089,7 @@ __call_llvm_intrinsic(const AOTCompContext *comp_ctx,
LLVMValueRef
aot_call_llvm_intrinsic(const AOTCompContext *comp_ctx,
const AOTFuncContext *func_ctx,
const char *name,
LLVMTypeRef ret_type,
LLVMTypeRef *param_types,
@ -1961,7 +2115,7 @@ aot_call_llvm_intrinsic(const AOTCompContext *comp_ctx,
param_values[i++] = va_arg(argptr, LLVMValueRef);
va_end(argptr);
ret = __call_llvm_intrinsic(comp_ctx, name, ret_type, param_types,
ret = __call_llvm_intrinsic(comp_ctx, func_ctx, name, ret_type, param_types,
param_count, param_values);
wasm_runtime_free(param_values);
@ -1971,6 +2125,7 @@ aot_call_llvm_intrinsic(const AOTCompContext *comp_ctx,
LLVMValueRef
aot_call_llvm_intrinsic_v(const AOTCompContext *comp_ctx,
const AOTFuncContext *func_ctx,
const char *name,
LLVMTypeRef ret_type,
LLVMTypeRef *param_types,
@ -1993,10 +2148,46 @@ aot_call_llvm_intrinsic_v(const AOTCompContext *comp_ctx,
while (i < param_count)
param_values[i++] = va_arg(param_value_list, LLVMValueRef);
ret = __call_llvm_intrinsic(comp_ctx, name, ret_type, param_types,
ret = __call_llvm_intrinsic(comp_ctx, func_ctx, name, ret_type, param_types,
param_count, param_values);
wasm_runtime_free(param_values);
return ret;
}
LLVMValueRef
aot_get_func_from_table(const AOTCompContext *comp_ctx, LLVMValueRef base,
LLVMTypeRef func_type, int32 index)
{
LLVMValueRef func;
LLVMValueRef func_addr;
if (!(func_addr = I32_CONST(index))) {
aot_set_last_error("construct function index failed.");
goto fail;
}
if (!(func_addr = LLVMBuildInBoundsGEP(comp_ctx->builder, base, &func_addr,
1, "func_addr"))) {
aot_set_last_error("get function addr by index failed.");
goto fail;
}
func = LLVMBuildLoad(comp_ctx->builder, func_addr, "func_tmp");
if (func == NULL) {
aot_set_last_error("get function pointer failed.");
goto fail;
}
if (!(func = LLVMBuildBitCast(comp_ctx->builder, func, func_type,
"func"))) {
aot_set_last_error("cast function fialed.");
goto fail;
}
return func;
fail:
return NULL;
}
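At run time, the instruction sequence built here amounts to indexing a table of function pointers and casting the entry to the intrinsic's signature. A rough C equivalent, using a hypothetical unary f32 signature purely for illustration:

/* Rough runtime-semantics sketch, not part of this commit. */
#include <stdint.h>

typedef float (*unary_f32_intrinsic_t)(float);

static float
call_intrinsic_via_table(void **native_symbol, int32_t index, float x)
{
    unary_f32_intrinsic_t fn = (unary_f32_intrinsic_t)native_symbol[index];
    return fn(x);
}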


@ -116,6 +116,7 @@ typedef struct AOTMemInfo {
typedef struct AOTFuncContext {
AOTFunc *aot_func;
LLVMValueRef func;
LLVMTypeRef func_type;
AOTBlockStack block_stack;
LLVMValueRef exec_env;
@ -124,6 +125,7 @@ typedef struct AOTFuncContext {
LLVMValueRef native_stack_bound;
LLVMValueRef aux_stack_bound;
LLVMValueRef aux_stack_bottom;
LLVMValueRef native_symbol;
LLVMValueRef last_alloca;
LLVMValueRef func_ptrs;
@ -221,10 +223,17 @@ typedef struct AOTCompContext {
char target_arch[16];
unsigned pointer_size;
/* Hardware intrinsic compatibility flags */
uint64 flags[8];
/* LLVM execution engine required by JIT */
LLVMExecutionEngineRef exec_engine;
bool is_jit_mode;
/* AOT indirect mode flag & symbol list */
bool is_indirect_mode;
bh_list native_symbols;
/* Bulk memory feature */
bool enable_bulk_memory;
@ -249,6 +258,9 @@ typedef struct AOTCompContext {
/* Reference Types */
bool enable_ref_types;
/* Disable LLVM built-in intrinsics */
bool disable_llvm_intrinsics;
/* Whether optimize the JITed code */
bool optimize;
@ -283,10 +295,12 @@ enum {
typedef struct AOTCompOption{
bool is_jit_mode;
bool is_indirect_mode;
char *target_arch;
char *target_abi;
char *target_cpu;
char *cpu_features;
bool is_sgx_platform;
bool enable_bulk_memory;
bool enable_thread_mgr;
bool enable_tail_call;
@ -294,7 +308,7 @@ typedef struct AOTCompOption{
bool enable_ref_types;
bool enable_aux_stack_check;
bool enable_aux_stack_frame;
bool is_sgx_platform;
bool disable_llvm_intrinsics;
uint32 opt_level;
uint32 size_level;
uint32 output_format;
@ -308,6 +322,9 @@ aot_create_comp_context(AOTCompData *comp_data,
void
aot_destroy_comp_context(AOTCompContext *comp_ctx);
int32
aot_get_native_symbol_index(AOTCompContext *comp_ctx, const char *symbol);
bool
aot_compile_wasm(AOTCompContext *comp_ctx);
@ -361,6 +378,7 @@ aot_build_zero_function_ret(AOTCompContext *comp_ctx,
LLVMValueRef
aot_call_llvm_intrinsic(const AOTCompContext *comp_ctx,
const AOTFuncContext *func_ctx,
const char *name,
LLVMTypeRef ret_type,
LLVMTypeRef *param_types,
@ -369,12 +387,19 @@ aot_call_llvm_intrinsic(const AOTCompContext *comp_ctx,
LLVMValueRef
aot_call_llvm_intrinsic_v(const AOTCompContext *comp_ctx,
const AOTFuncContext *func_ctx,
const char *name,
LLVMTypeRef ret_type,
LLVMTypeRef *param_types,
int param_count,
va_list param_value_list);
LLVMValueRef
aot_get_func_from_table(const AOTCompContext *comp_ctx,
LLVMValueRef base,
LLVMTypeRef func_type,
int32 index);
bool
aot_check_simd_compatibility(const char *arch_c_str, const char *cpu_c_str);


@ -138,7 +138,7 @@ aot_compile_simd_swizzle_x86(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
param_types[0] = V128_i8x16_TYPE;
param_types[1] = V128_i8x16_TYPE;
if (!(result = aot_call_llvm_intrinsic(
comp_ctx, "llvm.x86.ssse3.pshuf.b.128", V128_i8x16_TYPE,
comp_ctx, func_ctx, "llvm.x86.ssse3.pshuf.b.128", V128_i8x16_TYPE,
param_types, 2, vector, mask))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;


@ -69,7 +69,7 @@ simd_build_bitmask(const AOTCompContext *comp_ctx,
}
param_types[0] = vector_ext_type;
if (!(result = aot_call_llvm_intrinsic(comp_ctx, intrinsic, I32_TYPE,
if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic, I32_TYPE,
param_types, 1, result))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;


@ -41,7 +41,7 @@ simd_any_true(AOTCompContext *comp_ctx,
goto fail;
}
if (!(result = aot_call_llvm_intrinsic(comp_ctx, intrinsic, element_type,
if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic, element_type,
&vector_type, 1, non_zero))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;
@ -128,7 +128,7 @@ simd_all_true(AOTCompContext *comp_ctx,
goto fail;
}
if (!(result = aot_call_llvm_intrinsic(comp_ctx, intrinsic, element_type,
if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic, element_type,
&vector_type, 1, is_zero))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;


@ -38,7 +38,7 @@ simd_integer_narrow(AOTCompContext *comp_ctx,
}
if (!(result =
aot_call_llvm_intrinsic(comp_ctx, instrinsic, out_vector_type,
aot_call_llvm_intrinsic(comp_ctx, func_ctx, instrinsic, out_vector_type,
param_types, 2, vector1, vector2))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;


@ -191,7 +191,7 @@ simd_v128_float_intrinsic(AOTCompContext *comp_ctx,
goto fail;
}
if (!(result = aot_call_llvm_intrinsic(comp_ctx, intrinsic, vector_type,
if (!(result = aot_call_llvm_intrinsic(comp_ctx, func_ctx, intrinsic, vector_type,
param_types, 1, number))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;


@ -32,7 +32,7 @@ simd_v128_integer_arith(AOTCompContext *comp_ctx,
param_types[1] = vector_type;
if (!(result = aot_call_llvm_intrinsic(
comp_ctx, is_signed ? intrinsics_s_u[0] : intrinsics_s_u[1],
comp_ctx, func_ctx, is_signed ? intrinsics_s_u[0] : intrinsics_s_u[1],
vector_type, param_types, 2, lhs, rhs))) {
HANDLE_FAILURE("LLVMBuildCall");
goto fail;


@ -35,10 +35,12 @@ enum {
typedef struct AOTCompOption{
bool is_jit_mode;
bool is_indirect_mode;
char *target_arch;
char *target_abi;
char *target_cpu;
char *cpu_features;
bool is_sgx_platform;
bool enable_bulk_memory;
bool enable_thread_mgr;
bool enable_tail_call;
@ -46,7 +48,7 @@ typedef struct AOTCompOption{
bool enable_ref_types;
bool enable_aux_stack_check;
bool enable_aux_stack_frame;
bool is_sgx_platform;
bool disable_llvm_intrinsics;
uint32_t opt_level;
uint32_t size_level;
uint32_t output_format;