Enable AoT and wamr-sdk, and change the arguments of the call wasm API (#157)

* Implement memory profiler, optimize memory usage, adjust code indentation

* Implement memory.grow and limit heap space base offset to 1G; modify iwasm build type to Release and 64-bit by default
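The memory.grow semantics follow the wasm spec: grow the linear memory by N 64 KB pages, return the previous page count, or -1 on failure. A minimal sketch of that contract, assuming a simple realloc-based linear memory (hypothetical helper, not the actual WAMR implementation):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define WASM_PAGE_SIZE 65536 /* 64 KB per wasm page */

/* Returns the old page count, or -1 if the grow request fails. */
static int32_t
memory_grow(uint8_t **mem, uint32_t *cur_pages, uint32_t max_pages,
            uint32_t delta_pages)
{
    uint32_t old_pages = *cur_pages;
    uint64_t new_pages = (uint64_t)old_pages + delta_pages;
    uint8_t *new_mem;

    if (new_pages > max_pages) /* beyond the declared maximum */
        return -1;
    if (!(new_mem = realloc(*mem, (size_t)new_pages * WASM_PAGE_SIZE)))
        return -1; /* allocation failed; memory is left unchanged */
    /* newly grown pages must read as zero */
    memset(new_mem + (size_t)old_pages * WASM_PAGE_SIZE, 0,
           (size_t)delta_pages * WASM_PAGE_SIZE);
    *mem = new_mem;
    *cur_pages = (uint32_t)new_pages;
    return (int32_t)old_pages;
}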

* Add a new extension library: connection

* Fix bug of reading magic number and version on big-endian platforms
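The endianness bug matters because a wasm module begins with the bytes "\0asm" followed by version 1 in little-endian order; comparing them as host-order 32-bit integers breaks on big-endian hosts. A minimal sketch of an endianness-independent check (hypothetical helper):

#include <stdbool.h>
#include <stdint.h>

static bool
check_magic_and_version(const uint8_t *buf, uint32_t size)
{
    if (size < 8)
        return false;
    /* compare byte by byte so host endianness does not matter */
    return buf[0] == 0x00 && buf[1] == 0x61   /* "\0asm" */
           && buf[2] == 0x73 && buf[3] == 0x6d
           && buf[4] == 0x01 && buf[5] == 0x00 /* version 1 */
           && buf[6] == 0x00 && buf[7] == 0x00;
}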

* Re-org platform APIs: move most platform APIs from iwasm to shared-lib

* Enhance wasm loader to fix some security issues

* Fix illegal load of EXC_RETURN into PC on stm32 board

* Updates that let a restricted version of the interpreter run in SGX

* Enable native/app address validation and conversion for wasm app
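A hedged usage sketch of the validation/conversion APIs exposed in wasm_export.h (exact signatures may differ between WAMR versions; the helper name is hypothetical):

#include "wasm_export.h"

/* Safely obtain a native pointer for an address handed over by the
   wasm app; returns NULL if the range escapes linear memory. */
static void *
app_addr_to_native_checked(wasm_module_inst_t module_inst,
                           uint32_t app_offset, uint32_t size)
{
    if (!wasm_runtime_validate_app_addr(module_inst, app_offset, size))
        return NULL; /* out of bounds: reject instead of crashing */
    return wasm_runtime_addr_app_to_native(module_inst, app_offset);
}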

* Remove the confusing wasm_application_execute_* APIs from wasm_export.h

* Refine binary size and fix several minor issues

Optimize interpreter LOAD/STORE opcodes to decrease the binary size
Fix issues when using iwasm library: _bh_log undefined, bh_memory.h not found
Remove resolution of unused _stdin/_stdout/_stderr global variables in libc wrapper
Add macros for global heap size, stack size, and heap size in Zephyr main.c
Clear compile warnings in wasm_application.c

* Add stricter security checks for libc wrapper APIs

* Use a single libc wrapper copy for SGX and other platforms; remove bh_printf macro from other platforms' header files

* Enhance security of libc strcpy/sprintf wrapper functions
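An illustrative sketch of the hardening idea (hypothetical code, not the actual WAMR wrapper): validate both app-space pointers against the instance's linear memory before touching any bytes.

#include <string.h>
#include "wasm_export.h"

static uint32_t
strcpy_wrapper(wasm_exec_env_t exec_env, uint32_t dst_off, uint32_t src_off)
{
    wasm_module_inst_t inst = wasm_runtime_get_module_inst(exec_env);
    char *src, *dst;
    uint32_t len;

    /* src must be a NUL-terminated string inside linear memory */
    if (!wasm_runtime_validate_app_str_addr(inst, src_off))
        return 0;
    src = wasm_runtime_addr_app_to_native(inst, src_off);
    len = (uint32_t)strlen(src) + 1;
    /* the whole destination range must also stay inside linear memory */
    if (!wasm_runtime_validate_app_addr(inst, dst_off, len))
        return 0;
    dst = wasm_runtime_addr_app_to_native(inst, dst_off);
    memmove(dst, src, len); /* tolerate overlapping ranges */
    return dst_off; /* strcpy returns dst */
}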

* Fix issue of call native for x86_64/arm/mips; add module_inst parameter for native wrapper functions

* Remove get_module_inst() and fix issue of call native
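After this change the per-instance context travels through exec_env instead of a global get_module_inst(); a minimal sketch of the resulting wrapper convention (foo_wrapper itself is hypothetical):

#include "wasm_export.h"

static void
foo_wrapper(wasm_exec_env_t exec_env, int32_t arg)
{
    /* derive the module instance from the execution environment */
    wasm_module_inst_t module_inst = wasm_runtime_get_module_inst(exec_env);

    if (arg < 0)
        wasm_runtime_set_exception(module_inst, "foo: negative argument");
}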

* Refine wgl lib: remove module_inst parameter from widget functions; move function index check to runtime instantiation

* Refine interpreter call native process, refine memory boundary check

* Fix issues in the arm/mips/general versions of the invokeNative function

* Add a switch to build the simple sample without GUI support

* Add BUILD_TARGET setting in makefile to replace CPU compiler flags in source code

* Re-org shared lib header files, remove unused info; fix compile issues on VxWorks

* Add build target general

* Remove unused files

* Update license header

* test push

* Restore file

* Sync up with internal/feature

* Sync up with internal/feature

* Rename build_wamr_app to build_wasm_app

* Fix small issues of README

* Enhance malformed wasm file checking
Fix issue of printing hex integers and implement UTF-8 string check (simplified sketch below)
Fix wasi file read/write rights issue
Fix minor issue in the build wasm app doc
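A simplified sketch of the UTF-8 shape check (illustrative only; a strict validator must additionally reject overlong encodings such as E0 80 80 and surrogate code points):

#include <stdbool.h>
#include <stdint.h>

static bool
is_valid_utf8(const uint8_t *s, uint32_t len)
{
    uint32_t i = 0;
    while (i < len) {
        uint8_t c = s[i++];
        uint32_t follow;
        if (c < 0x80)      follow = 0;     /* ASCII */
        else if (c < 0xC2) return false;   /* stray continuation / overlong */
        else if (c < 0xE0) follow = 1;     /* 2-byte sequence */
        else if (c < 0xF0) follow = 2;     /* 3-byte sequence */
        else if (c < 0xF5) follow = 3;     /* 4-byte sequence */
        else               return false;   /* above U+10FFFF */
        if (i + follow > len)
            return false;                  /* truncated sequence */
        while (follow--)
            if ((s[i++] & 0xC0) != 0x80)
                return false;              /* bad continuation byte */
    }
    return true;
}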

* Sync up with internal/feature

* Sync up with internal/feature: fix interpreter issue on arm, fix LEB reading issue

* Sync up with internal/feature

* Fix bug of config.h and rename wasi config.h to ssp_config.h

* Sync up with internal/feature

* Import wamr AoT

* update document

* update document

* Update document, disable WASI for 32-bit

* update document

* remove files

* update document

* Update document

* update document

* update document

* update samples

* Sync up with internal repo
Author: wenyongh
Date: 2020-01-21 13:26:14 +08:00
Committed by: Wang Xin
Parent: 2a4528c749
Commit: 46b93b9d22
464 changed files with 25137 additions and 7911 deletions


@@ -0,0 +1,69 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
.text
.align 2
.global invokeNative
.type invokeNative,function
/*
* Arguments passed in:
*
* r0 function ptr
* r1 argv
* r2 argc
*/
invokeNative:
stmfd sp!, {r4, r5, r6, r7, lr}
mov ip, r0 /* ip = function ptr */
mov r4, r1 /* r4 = argv */
mov r5, r2 /* r5 = argc */
cmp r5, #1 /* at least one argument required: exec_env */
blt return
mov r6, #0 /* increased stack size */
ldr r0, [r4], #4 /* r0 = argv[0] = exec_env */
cmp r5, #1
beq call_func
ldr r1, [r4], #4 /* r1 = argv[1] */
cmp r5, #2
beq call_func
ldr r2, [r4], #4 /* r2 = argv[2] */
cmp r5, #3
beq call_func
ldr r3, [r4], #4 /* r3 = argv[3] */
cmp r5, #4
beq call_func
sub r5, r5, #4 /* argc -= 4, now we have r0 ~ r3 */
/* Ensure address is 8 byte aligned */
mov r6, r5, lsl#2 /* r6 = argc * 4 */
add r6, r6, #7 /* r6 = (r6 + 7) & ~7 */
bic r6, r6, #7
add r6, r6, #4 /* +4 because an odd number (5) of registers was pushed */
sub sp, sp, r6 /* reserved stack space for left arguments */
mov r7, sp
loop_args: /* copy left arguments to stack */
cmp r5, #0
beq call_func
ldr lr, [r4], #4
str lr, [r7], #4
sub r5, r5, #1
b loop_args
call_func:
blx ip
add sp, sp, r6 /* restore sp */
return:
ldmfd sp!, {r4, r5, r6, r7, lr}
bx lr


@@ -0,0 +1,79 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
.text
.align 2
.global invokeNative
.type invokeNative,function
/*
* Arguments passed in:
*
* r0 function ptr
* r1 argv
* r2 nstacks
*/
invokeNative:
stmfd sp!, {r4, r5, r6, r7, lr}
mov ip, r0 /* ip = function ptr */
mov r4, r1 /* r4 = argv */
mov r5, r2 /* r5 = nstacks */
mov r6, sp
/* Fill all int args */
ldr r0, [r4], #4 /* r0 = *(int*)&argv[0] = exec_env */
ldr r1, [r4], #4 /* r1 = *(int*)&argv[1] */
ldr r2, [r4], #4 /* r2 = *(int*)&argv[2] */
ldr r3, [r4], #4 /* r3 = *(int*)&argv[3] */
/* Fill all float/double args to 16 single-precision registers, s0-s15, */
/* which may also be accessed as 8 double-precision registers, d0-d7 (with */
/* d0 overlapping s0, s1; d1 overlapping s2, s3; etc). */
vldr s0, [r4, #0] /* s0 = *(float*)&argv[4] */
vldr s1, [r4, #4]
vldr s2, [r4, #8]
vldr s3, [r4, #12]
vldr s4, [r4, #16]
vldr s5, [r4, #20]
vldr s6, [r4, #24]
vldr s7, [r4, #28]
vldr s8, [r4, #32]
vldr s9, [r4, #36]
vldr s10, [r4, #40]
vldr s11, [r4, #44]
vldr s12, [r4, #48]
vldr s13, [r4, #52]
vldr s14, [r4, #56]
vldr s15, [r4, #60]
/* Directly call the function if no args on the stack */
cmp r5, #0
beq call_func
/* Fill all stack args: reserve stack space and fill one by one */
add r4, r4, #64 /* r4 points to stack args */
bic sp, sp, #7 /* Ensure stack is 8 byte aligned */
mov r7, r5, lsl#2 /* r7 = nstacks * 4 */
add r7, r7, #7 /* r7 = (r7 + 7) & ~7 */
bic r7, r7, #7
sub sp, sp, r7 /* reserved stack space for stack arguments */
mov r7, sp
loop_stack_args: /* copy stack arguments to stack */
cmp r5, #0
beq call_func
ldr lr, [r4], #4 /* Note: caller should ensure int64 and */
str lr, [r7], #4 /* double are placed at 8-byte aligned addresses */
sub r5, r5, #1
b loop_stack_args
call_func:
blx ip
mov sp, r6 /* restore sp */
return:
ldmfd sp!, {r4, r5, r6, r7, lr}
bx lr
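From the offsets above, the argv buffer consumed by this VFP variant has a fixed shape: words 0-3 feed r0-r3 (argv[0] is exec_env), words 4-19 feed s0-s15/d0-d7, and anything beyond goes onto the native stack. A hypothetical packer illustrating the layout with one int, one double, and one stack argument:

#include <stdint.h>
#include <string.h>

static void
pack_args_arm_vfp(uint32_t argv[22], void *exec_env,
                  int32_t i_arg, double d_arg, uint32_t stack_arg)
{
    memset(argv, 0, 22 * sizeof(uint32_t));
    argv[0] = (uint32_t)(uintptr_t)exec_env;  /* -> r0 */
    argv[1] = (uint32_t)i_arg;                /* -> r1 */
    memcpy(&argv[4], &d_arg, sizeof(double)); /* -> d0 (s0/s1) */
    argv[20] = stack_arg;                     /* -> native stack */
}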


@@ -0,0 +1,64 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
.text
.align 2
#ifndef BH_PLATFORM_DARWIN
.globl invokeNative
.type invokeNative, @function
invokeNative:
#else
.globl _invokeNative
_invokeNative:
#endif /* end of BH_PLATFORM_DARWIN */
/* rdi - function ptr */
/* rsi - argv */
/* rdx - n_stacks */
push %rbp
mov %rsp, %rbp
mov %rdx, %r10
mov %rsp, %r11 /* Check that stack is aligned on */
and $8, %r11 /* 16 bytes. This code may be removed */
je check_stack_succ /* when we are sure that compiler always */
int3 /* calls us with aligned stack */
check_stack_succ:
mov %r10, %r11 /* Align stack on 16 bytes before pushing */
and $1, %r11 /* stack arguments in case we have an odd */
shl $3, %r11 /* number of stack arguments */
sub %r11, %rsp
/* store memory args */
movq %rdi, %r11 /* func ptr */
movq %r10, %rcx /* counter */
lea 64+48-8(%rsi,%rcx,8), %r10
sub %rsp, %r10
cmpq $0, %rcx
je push_args_end
push_args:
push 0(%rsp,%r10)
loop push_args
push_args_end:
/* fill all fp args */
movq 0x00(%rsi), %xmm0
movq 0x08(%rsi), %xmm1
movq 0x10(%rsi), %xmm2
movq 0x18(%rsi), %xmm3
movq 0x20(%rsi), %xmm4
movq 0x28(%rsi), %xmm5
movq 0x30(%rsi), %xmm6
movq 0x38(%rsi), %xmm7
/* fill all int args */
movq 0x40(%rsi), %rdi
movq 0x50(%rsi), %rdx
movq 0x58(%rsi), %rcx
movq 0x60(%rsi), %r8
movq 0x68(%rsi), %r9
movq 0x48(%rsi), %rsi
call *%r11
leave
ret
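The offset arithmetic above (fp args at 0x00-0x38, int args at 0x40-0x68, and the lea 64+48-8 computation placing stack args at 0x70) implies the following buffer layout; a hypothetical C view of it:

#include <stdint.h>

typedef struct {
    uint64_t fp_args[8];   /* 0x00-0x38: loaded into xmm0..xmm7 */
    uint64_t int_args[6];  /* 0x40-0x68: rdi (exec_env), rsi, rdx,
                              rcx, r8, r9 */
    uint64_t stack_args[]; /* 0x70...: n_stacks values pushed onto the
                              native stack, last one first */
} InvokeArgsEm64;          /* hypothetical name for the argv shape */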


@@ -0,0 +1,86 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "../wasm_runtime_common.h"
#include "../wasm_exec_env.h"
void invokeNative(void (*native_code)(), uint32 argv[], uint32 argc)
{
bh_assert(argc >= sizeof(WASMExecEnv*)/sizeof(uint32));
switch(argc) {
case 0:
native_code();
break;
case 1:
native_code(argv[0]);
break;
case 2:
native_code(argv[0], argv[1]);
break;
case 3:
native_code(argv[0], argv[1], argv[2]);
break;
case 4:
native_code(argv[0], argv[1], argv[2], argv[3]);
break;
case 5:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4]);
break;
case 6:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5]);
break;
case 7:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6]);
break;
case 8:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7]);
break;
case 9:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8]);
break;
case 10:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9]);
break;
case 11:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10]);
break;
case 12:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11]);
break;
case 13:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12]);
break;
case 14:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13]);
break;
case 15:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14]);
break;
case 16:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15]);
break;
case 17:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16]);
break;
case 18:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17]);
break;
case 19:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17], argv[18]);
break;
case 20:
native_code(argv[0], argv[1], argv[2], argv[3], argv[4], argv[5], argv[6], argv[7], argv[8], argv[9], argv[10], argv[11], argv[12], argv[13], argv[14], argv[15], argv[16], argv[17], argv[18], argv[19]);
break;
default:
{
/* FIXME: If this happens, add more cases. */
WASMExecEnv *exec_env = *(WASMExecEnv**)argv;
WASMModuleInstanceCommon *module_inst = exec_env->module_inst;
wasm_runtime_set_exception(module_inst, "the argument number of native function exceeds maximum");
return;
}
}
}
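A hedged usage sketch of the general version (assuming a 32-bit platform so the exec_env pointer fits a uint32 slot; call_example is hypothetical):

#include <stdint.h>

static void
call_example(WASMExecEnv *exec_env, void (*native_func)(),
             uint32 wasm_arg0, uint32 wasm_arg1)
{
    uint32 argv[3];
    argv[0] = (uint32)(uintptr_t)exec_env; /* exec_env always comes first */
    argv[1] = wasm_arg0;
    argv[2] = wasm_arg1;
    invokeNative(native_func, argv, 3);
}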


@@ -0,0 +1,32 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
.text
.align 2
#ifndef BH_PLATFORM_DARWIN
.globl invokeNative
.type invokeNative, @function
invokeNative:
#else
.globl _invokeNative
_invokeNative:
#endif /* end of BH_PLATFORM_DARWIN */
push %ebp
movl %esp, %ebp
movl 16(%ebp), %ecx /* ecx = argc */
movl 12(%ebp), %edx /* edx = argv */
test %ecx, %ecx
jz skip_push_args /* if ecx == 0, skip pushing arguments */
leal -4(%edx,%ecx,4), %edx /* edx = edx + ecx * 4 - 4 */
subl %esp, %edx /* edx = edx - esp */
1:
push 0(%esp,%edx)
loop 1b /* loop ecx counts */
skip_push_args:
movl 8(%ebp), %edx /* edx = func_ptr */
call *%edx
leave
ret


@@ -0,0 +1,74 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
.text
.align 2
.globl invokeNative
.ent invokeNative
.type invokeNative, @function
/**
* On function entry parameters:
* $4 = func_ptr
* $5 = args
* $6 = arg_num
*/
invokeNative:
.frame $fp, 8, $0
.mask 0x00000000, 0
.fmask 0x00000000, 0
/* Fixed part of frame */
subu $sp, 8
/* save registers */
sw $31, 4($sp)
sw $fp, 0($sp)
/* set frame pointer to bottom of fixed frame */
move $fp, $sp
/* allocate enough stack space */
sll $11, $6, 2 /* $11 == arg_num * 4 */
subu $sp, $11
/* make 8-byte aligned */
and $sp, ~7
move $9, $sp
move $25, $4 /* $25 = func_ptr */
push_args:
beq $6, 0, done /* arg_num == 0 ? */
lw $8, 0($5) /* $8 = *args */
sw $8, 0($9) /* store $8 to stack */
addu $5, 4 /* args++ */
addu $9, 4 /* sp++ */
subu $6, 1 /* arg_num-- */
j push_args
done:
lw $4, 0($sp) /* Load $4..$7 from stack */
lw $5, 4($sp)
lw $6, 8($sp)
lw $7, 12($sp)
ldc1 $f12, 0($sp) /* Load $f12, $f13, $f14, $f15 */
ldc1 $f14, 8($sp)
jalr $25 /* call function */
nop
/* restore saved registers */
move $sp, $fp
lw $31, 4($sp)
lw $fp, 0($sp)
/* pop frame */
addu $sp, $sp, 8
j $31
.end invokeNative


@@ -0,0 +1,84 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
.text
.align 2
.global invokeNative
.type invokeNative,function
/*
* Arguments passed in:
*
* r0 function ptr
* r1 argv
* r2 argc
*/
invokeNative:
push {r4, r5, r6, r7}
push {lr}
mov ip, r0 /* ip = function ptr */
mov r4, r1 /* r4 = argv */
mov r5, r2 /* r5 = argc */
cmp r5, #1 /* at least one argument required: exec_env */
blt return
mov r6, #0 /* increased stack size */
ldr r0, [r4] /* r0 = argv[0] = exec_env */
add r4, r4, #4 /* r4 += 4 */
cmp r5, #1
beq call_func
ldr r1, [r4] /* r1 = argv[1] */
add r4, r4, #4
cmp r5, #2
beq call_func
ldr r2, [r4] /* r2 = argv[2] */
add r4, r4, #4
cmp r5, #3
beq call_func
ldr r3, [r4] /* r3 = argv[3] */
add r4, r4, #4
cmp r5, #4
beq call_func
sub r5, r5, #4 /* argc -= 4, now we have r0 ~ r3 */
/* Ensure address is 8 byte aligned */
lsl r6, r5, #2 /* r6 = argc * 4 */
mov r7, #7
add r6, r6, r7 /* r6 = (r6 + 7) & ~7 */
bic r6, r6, r7
add r6, r6, #4 /* +4 because an odd number (5) of registers was pushed */
mov r7, sp
sub r7, r7, r6 /* reserved stack space for left arguments */
mov sp, r7
mov lr, r2 /* save r2 */
loop_args: /* copy left arguments to stack */
cmp r5, #0
beq call_func1
ldr r2, [r4]
add r4, r4, #4
str r2, [r7]
add r7, r7, #4
sub r5, r5, #1
b loop_args
call_func1:
mov r2, lr /* restore r2 */
call_func:
blx ip
add sp, sp, r6 /* restore sp */
return:
pop {r3}
pop {r4, r5, r6, r7}
mov lr, r3
bx lr


@@ -0,0 +1,93 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
.text
.align 2
.global invokeNative
.type invokeNative,function
/*
* Arguments passed in:
*
* r0 function ptr
* r1 argv
* r2 nstacks
*/
invokeNative:
push {r4, r5, r6, r7}
push {lr}
mov ip, r0 /* ip = function ptr */
mov r4, r1 /* r4 = argv */
mov r5, r2 /* r5 = nstacks */
mov r7, sp
/* Fill all int args */
ldr r0, [r4, #0] /* r0 = *(int*)&argv[0] = exec_env */
ldr r1, [r4, #4] /* r1 = *(int*)&argv[1] */
ldr r2, [r4, #8] /* r2 = *(int*)&argv[2] */
ldr r3, [r4, #12] /* r3 = *(int*)&argv[3] */
add r4, r4, #16 /* r4 points to float args */
/* Fill all float/double args to 16 single-precision registers, s0-s15, */
/* which may also be accessed as 8 double-precision registers, d0-d7 (with */
/* d0 overlapping s0, s1; d1 overlapping s2, s3; etc). */
vldr s0, [r4, #0] /* s0 = *(float*)&argv[4] */
vldr s1, [r4, #4]
vldr s2, [r4, #8]
vldr s3, [r4, #12]
vldr s4, [r4, #16]
vldr s5, [r4, #20]
vldr s6, [r4, #24]
vldr s7, [r4, #28]
vldr s8, [r4, #32]
vldr s9, [r4, #36]
vldr s10, [r4, #40]
vldr s11, [r4, #44]
vldr s12, [r4, #48]
vldr s13, [r4, #52]
vldr s14, [r4, #56]
vldr s15, [r4, #60]
/* Directly call the function if no args on the stack */
cmp r5, #0
beq call_func
mov lr, r2 /* save r2 */
/* Fill all stack args: reserve stack space and fill one by one */
add r4, r4, #64 /* r4 points to stack args */
mov r6, sp
mov r7, #7
bic r6, r6, r7 /* Ensure stack is 8 byte aligned */
lsl r2, r5, #2 /* r2 = nstacks * 4 */
add r2, r2, #7 /* r2 = (r2 + 7) & ~7 */
bic r2, r2, r7
sub r6, r6, r2 /* reserved stack space for stack arguments */
mov r7, sp
mov sp, r6
loop_stack_args: /* copy stack arguments to stack */
cmp r5, #0
beq call_func1
ldr r2, [r4] /* Note: caller should ensure int64 and */
add r4, r4, #4 /* double are placed at 8-byte aligned addresses */
str r2, [r6]
add r6, r6, #4
sub r5, r5, #1
b loop_stack_args
call_func1:
mov r2, lr /* restore r2 */
call_func:
blx ip
mov sp, r7 /* restore sp */
return:
pop {r3}
pop {r4, r5, r6, r7}
mov lr, r3
bx lr


@@ -0,0 +1,74 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
.text
.align 2
.global invokeNative
.type invokeNative,function
/*
* Arguments passed in:
*
a2 function ptr
* a3 argv
* a4 argc
*/
invokeNative:
entry a1, 256
blti a4, 1, return /* at least one argument required: exec_env */
/* register a10 ~ a15 are used to pass first 6 arguments */
l32i.n a10, a3, 0
beqi a4, 1, call_func
l32i.n a11, a3, 4
beqi a4, 2, call_func
l32i.n a12, a3, 8
beqi a4, 3, call_func
l32i.n a13, a3, 12
beqi a4, 4, call_func
l32i.n a14, a3, 16
beqi a4, 5, call_func
l32i.n a15, a3, 20
beqi a4, 6, call_func
/* left arguments are passed through stack */
addi a4, a4, -6
addi a3, a3, 24 /* move argv pointer */
mov.n a6, a1 /* store stack pointer */
addi a7, a1, 256 /* stack boundary */
loop_args:
beqi a4, 0, call_func
bge a6, a7, call_func /* reach stack boundary */
l32i.n a5, a3, 0 /* load argument to a5 */
s32i.n a5, a6, 0 /* push data to stack */
addi a4, a4, -1 /* decrease argc */
addi a3, a3, 4 /* move argv pointer */
addi a6, a6, 4 /* move stack pointer */
j loop_args
call_func:
mov.n a8, a2
callx8 a8
/* with callx8 the callee's return value (its a2/a3) appears in
   this window as a10/a11; move it to a2/a3 so the caller of this
   function can receive the value */
mov.n a2, a10
mov.n a3, a11
return:
retw.n