Add thumb target, implement xtensa invokeNative asm code and update makefiles (#151)

This commit is contained in:
wenyongh
2019-12-24 11:09:54 +08:00
committed by GitHub
parent 5875a37f34
commit 2b12e2c957
21 changed files with 372 additions and 131 deletions

View File

@ -113,9 +113,9 @@ wasm_runtime_unload(wasm_module_t module);
#if WASM_ENABLE_WASI != 0
void
wasm_runtime_set_wasi_args(wasm_module_t module,
const char *dir_list[], uint32 dir_count,
const char *map_dir_list[], uint32 map_dir_count,
const char *env[], uint32 env_count,
const char *dir_list[], uint32_t dir_count,
const char *map_dir_list[], uint32_t map_dir_count,
const char *env[], uint32_t env_count,
char *argv[], int argc);
#endif

View File

@ -10,54 +10,59 @@
/*
* Arguments passed in:
*
* r0 function pntr
* r0 function ptr
* r1 argv
* r2 argc
*/
invokeNative:
stmfd sp!, {r4, r5, r6, r7, lr}
mov ip, r0 /* get function ptr */
mov r4, r1 /* get argv */
mov r5, r2 /* get argc */
mov ip, r0 /* ip = function ptr */
mov r4, r1 /* r4 = argv */
mov r5, r2 /* r5 = argc */
cmp r5, #2 /* is argc < 2 ? */
cmp r5, #1 /* at least one argument required: module_inst */
blt return
ldr r0, [r4], #4 /* argv[0] */
ldr r1, [r4], #4 /* argv[1] */
mov r6, #0 /* increased stack size */
mov r6, #0
ldr r0, [r4], #4 /* r0 = argv[0] = module_inst */
cmp r5, #1
beq call_func
ldr r1, [r4], #4 /* r1 = argv[1] */
cmp r5, #2
beq call_func
ldr r2, [r4], #4
ldr r2, [r4], #4 /* r2 = argv[2] */
cmp r5, #3
beq call_func
ldr r3, [r4], #4
subs r5, r5, #4 /* now we have r0 ~ r3 */
ldr r3, [r4], #4 /* r3 = argv[3] */
cmp r5, #4
beq call_func
sub r5, r5, #4 /* argc -= 4, now we have r0 ~ r3 */
/* Ensure address is 8 byte aligned */
mov r6, r5, lsl#2
add r6, r6, #7
mov r6, r5, lsl#2 /* r6 = argc * 4 */
add r6, r6, #7 /* r6 = (r6 + 7) & ~7 */
bic r6, r6, #7
add r6, r6, #4 /* +4 because only odd(5) registers are in stack */
subs sp, sp, r6 /* for stacked args */
add r6, r6, #4 /* +4 because odd(5) registers are in stack */
sub sp, sp, r6 /* reserved stack space for left arguments */
mov r7, sp
loop_args:
loop_args: /* copy left arguments to stack */
cmp r5, #0
beq call_func
ldr lr, [r4], #4
str lr, [r7], #4
subs r5, r5, #1
sub r5, r5, #1
b loop_args
call_func:
blx ip
add sp, sp, r6 /* recover sp */
add sp, sp, r6 /* restore sp */
return:
ldmfd sp!, {r4, r5, r6, r7, lr}

View File

@ -0,0 +1,84 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
.text
.align 2
.global invokeNative
.type invokeNative,function
/*
* Arguments passed in:
*
* r0 function ptr
* r1 argv
* r2 argc
*/
invokeNative:
/* Thumb trampoline: call the native function at r0 with the argc (r2)
32-bit words found at argv (r1). argv[0..3] go in r0-r3, the rest on
the stack. The native result is left in r0/r1 for our caller. */
push {r4, r5, r6, r7} /* save callee-saved regs used as scratch below */
push {lr} /* save return address; lr becomes a free scratch reg */
mov ip, r0 /* ip = function ptr */
mov r4, r1 /* r4 = argv */
mov r5, r2 /* r5 = argc */
cmp r5, #1 /* at least one argument required: module_inst */
blt return
mov r6, #0 /* r6 = extra stack bytes reserved (0 unless argc > 4) */
ldr r0, [r4] /* r0 = argv[0] = module_inst */
add r4, r4, #4 /* r4 += 4 (no post-indexed ldr in Thumb-1 encodings) */
cmp r5, #1
beq call_func /* only module_inst? call now */
ldr r1, [r4] /* r1 = argv[1] */
add r4, r4, #4
cmp r5, #2
beq call_func
ldr r2, [r4] /* r2 = argv[2] */
add r4, r4, #4
cmp r5, #3
beq call_func
ldr r3, [r4] /* r3 = argv[3] */
add r4, r4, #4
cmp r5, #4
beq call_func
sub r5, r5, #4 /* argc -= 4, now we have r0 ~ r3 */
/* Ensure address is 8 byte aligned */
lsl r6, r5, #2 /* r6 = argc * 4 */
mov r7, #7 /* r7 = alignment mask constant */
add r6, r6, r7 /* r6 = (r6 + 7) & ~7 */
bic r6, r6, r7
add r6, r6, #4 /* +4 because odd(5) registers are in stack:
the 5 pushes above (20 bytes) left sp 4 mod 8 */
mov r7, sp
sub r7, r7, r6 /* reserved stack space for left arguments */
mov sp, r7
mov lr, r2 /* save r2: keep argv[2] in lr (free, already pushed)
while r2 is used as the copy scratch reg */
loop_args: /* copy left arguments to stack */
cmp r5, #0
beq call_func1
ldr r2, [r4] /* r2 = *argv++ */
add r4, r4, #4
str r2, [r7] /* store into next reserved stack slot */
add r7, r7, #4
sub r5, r5, #1
b loop_args
call_func1:
mov r2, lr /* restore r2 */
call_func:
blx ip /* call native function; result stays in r0/r1 */
add sp, sp, r6 /* restore sp */
return:
pop {r3} /* pop saved lr into r3 (r3 is call-clobbered, free here) */
pop {r4, r5, r6, r7}
mov lr, r3 /* restore return address */
bx lr

View File

@ -0,0 +1,74 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
.text
.align 2
.global invokeNative
.type invokeNative,function
/*
* Arguments passed in:
*
a2 function ptr
* a3 argv
* a4 argc
*/
invokeNative:
/* Xtensa windowed-ABI trampoline: call the native function at a2 with
the argc (a4) 32-bit words found at argv (a3). After callx8 rotates
the register window by 8, our a10..a15 become the callee's a2..a7,
so up to 6 arguments are passed in a10..a15 and the rest on the
stack at sp (copied below). */
entry a1, 256 /* allocate a 256-byte frame; outgoing stack args
are written starting at a1 (see loop_args) */
blti a4, 1, return /* at least one argument required: module_inst */
/* register a10 ~ a15 are used to pass first 6 arguments */
l32i.n a10, a3, 0 /* a10 = argv[0] = module_inst (callee's a2) */
beqi a4, 1, call_func
l32i.n a11, a3, 4
beqi a4, 2, call_func
l32i.n a12, a3, 8
beqi a4, 3, call_func
l32i.n a13, a3, 12
beqi a4, 4, call_func
l32i.n a14, a3, 16
beqi a4, 5, call_func
l32i.n a15, a3, 20
beqi a4, 6, call_func
/* left arguments are passed through stack */
addi a4, a4, -6 /* a4 = number of stack-passed words */
addi a3, a3, 24 /* move argv pointer past the 6 register args */
mov.n a6, a1 /* a6 = store stack pointer (next outgoing slot) */
addi a7, a1, 256 /* stack boundary: do not write past our frame */
loop_args:
beqi a4, 0, call_func
bge a6, a7, call_func /* reach stack boundary: silently drop extras
rather than overrun the 256-byte frame */
l32i.n a5, a3, 0 /* load argument to a5 */
s32i.n a5, a6, 0 /* push data to stack */
addi a4, a4, -1 /* decrease argc */
addi a3, a3, 4 /* move argv pointer */
addi a6, a6, 4 /* move stack pointer */
j loop_args
call_func:
mov.n a8, a2 /* callx8 target; window rotates by 8 so the callee
sees our a10..a15 as its a2..a7 */
callx8 a8
/* with the windowed ABI the callee's return value (its a2/a3)
appears in our a10/a11 after callx8; copy it to a2/a3 so the
caller of this function receives it via retw */
mov.n a2, a10
mov.n a3, a11
return:
retw.n

View File

@ -13,13 +13,21 @@ if (${BUILD_TARGET} STREQUAL "X86_64" OR ${BUILD_TARGET} STREQUAL "AMD_64")
set (source_all ${c_source_all} ${VMCORE_LIB_DIR}/invokeNative_em64.s)
elseif (${BUILD_TARGET} STREQUAL "X86_32")
set (source_all ${c_source_all} ${VMCORE_LIB_DIR}/invokeNative_ia32.s)
elseif (${BUILD_TARGET} STREQUAL "ARM_32")
elseif (${BUILD_TARGET} MATCHES "ARM.*")
set (source_all ${c_source_all} ${VMCORE_LIB_DIR}/invokeNative_arm.s)
elseif (${BUILD_TARGET} STREQUAL "MIPS_32")
elseif (${BUILD_TARGET} MATCHES "THUMB.*")
set (source_all ${c_source_all} ${VMCORE_LIB_DIR}/invokeNative_thumb.s)
elseif (${BUILD_TARGET} STREQUAL "MIPS")
set (source_all ${c_source_all} ${VMCORE_LIB_DIR}/invokeNative_mips.s)
elseif (${BUILD_TARGET} STREQUAL "XTENSA_32")
elseif (${BUILD_TARGET} STREQUAL "XTENSA")
set (source_all ${c_source_all} ${VMCORE_LIB_DIR}/invokeNative_xtensa.s)
elseif (${BUILD_TARGET} STREQUAL "GENERAL")
# Use invokeNative_general.c instead of assembly code,
# but the maximum number of native arguments is limited to 20,
# and there are possible issues when passing arguments to
# native function for some cpus, e.g. int64 and double arguments
# in arm and mips need to be 8-bytes aligned, and some arguments
# of x86_64 are passed by registers but not stack
set (source_all ${c_source_all} ${VMCORE_LIB_DIR}/invokeNative_general.c)
else ()
message (FATAL_ERROR "Build target isn't set")

View File

@ -1680,7 +1680,8 @@ wasm_runtime_invoke_native(void *func_ptr, WASMType *func_type,
uint32 argv_buf[32], *argv1 = argv_buf, argc1, i, j = 0;
uint64 size;
#if !defined(BUILD_TARGET_ARM_32) && !defined(BUILD_TARGET_MIPS_32)
#if !defined(BUILD_TARGET_ARM) && !defined(BUILD_TARGET_MIPS) \
&& !defined(BUILD_TARGET_THUMB) && !defined(BUILD_TARGET_XTENSA)
argc1 = argc + 2;
#else
argc1 = func_type->param_count * 2 + 2;
@ -1698,7 +1699,8 @@ wasm_runtime_invoke_native(void *func_ptr, WASMType *func_type,
for (i = 0; i < sizeof(WASMModuleInstance*) / sizeof(uint32); i++)
argv1[j++] = ((uint32*)&module_inst)[i];
#if !defined(BUILD_TARGET_ARM_32) && !defined(BUILD_TARGET_MIPS_32)
#if !defined(BUILD_TARGET_ARM) && !defined(BUILD_TARGET_MIPS) \
&& !defined(BUILD_TARGET_THUMB) && !defined(BUILD_TARGET_XTENSA)
word_copy(argv1 + j, argv, argc);
j += argc;
#else
@ -1723,7 +1725,7 @@ wasm_runtime_invoke_native(void *func_ptr, WASMType *func_type,
break;
}
}
#endif /* end of !defined(BUILD_TARGET_ARM_32) && !defined(BUILD_TARGET_MIPS_32) */
#endif /* end of !defined(BUILD_TARGET_ARM) && !defined(BUILD_TARGET_MIPS)
&& !defined(BUILD_TARGET_THUMB) && !defined(BUILD_TARGET_XTENSA) */
argc1 = j;
if (func_type->result_count == 0) {

View File

@ -330,9 +330,9 @@ wasm_runtime_addr_native_to_app(WASMModuleInstance *module_inst,
/* See wasm_export.h for description */
bool
wasm_runtime_get_app_addr_range(WASMModuleInstance *module_inst,
int32_t app_offset,
int32_t *p_app_start_offset,
int32_t *p_app_end_offset);
int32 app_offset,
int32 *p_app_start_offset,
int32 *p_app_end_offset);
/* See wasm_export.h for description */
bool