Enable SIMD for AARCH64 Platform (#11) (#610)

Signed-off-by: Wu Zhongmin <zhongmin.wzm@antgroup.com>
Signed-off-by: Xiaokang Qin <xiaokang.qxk@antgroup.com>

Co-authored-by: Wu Zhongmin <zhongmin.wzm@antgroup.com>

Authored by Xiaokang Qin on 2021-04-13 14:45:51 +08:00, committed by GitHub
parent 8b96f4fb71
commit 46db353017
7 changed files with 557 additions and 8 deletions

@@ -0,0 +1,79 @@
/*
* Copyright (C) 2020 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
.text
.align 2
#ifndef BH_PLATFORM_DARWIN
.globl invokeNative
.type invokeNative, function
invokeNative:
#else
.globl _invokeNative
_invokeNative:
#endif /* end of BH_PLATFORM_DARWIN */
/*
* Arguments passed in:
*
* x0 function ptr
* x1 argv
* x2 nstacks
*/
sub sp, sp, #0x30
stp x19, x20, [sp, #0x20] /* save the registers */
stp x21, x22, [sp, #0x10]
stp x23, x24, [sp, #0x0]
mov x19, x0 /* x19 = function ptr */
mov x20, x1 /* x20 = argv */
mov x21, x2 /* x21 = nstacks */
mov x22, sp /* save the sp before call function */
/* Fill in floating-point registers */
ld1 {v0.2D, v1.2D, v2.2D, v3.2D}, [x20], #64 /* v0 = argv[0], v1 = argv[1], v2 = argv[2], v3 = argv[3] */
ld1 {v4.2D, v5.2D, v6.2D, v7.2D}, [x20], #64 /* v4 = argv[4], v5 = argv[5], v6 = argv[6], v7 = argv[7] */
/* Fill in integer registers */
ldp x0, x1, [x20], #16 /* x0 = argv[8] = exec_env, x1 = argv[9] */
ldp x2, x3, [x20], #16 /* x2 = argv[10], x3 = argv[11] */
ldp x4, x5, [x20], #16 /* x4 = argv[12], x5 = argv[13] */
ldp x6, x7, [x20], #16 /* x6 = argv[14], x7 = argv[15] */
/* Now x20 points to stack args */
/* Directly call the function if there are no args on the stack */
cmp x21, #0
beq call_func
/* Fill all stack args: reserve stack space and fill one by one */
mov x23, sp
bic sp, x23, #15 /* Ensure sp is 16-byte aligned */
lsl x23, x21, #3 /* x23 = nstacks * 8 */
add x23, x23, #15 /* x23 = (x23 + 15) & ~15 */
bic x23, x23, #15
sub sp, sp, x23 /* reserve stack space for stack arguments */
mov x23, sp
loop_stack_args: /* copy stack arguments to stack */
cmp x21, #0
beq call_func
ldr x24, [x20], #8
str x24, [x23], #8
sub x21, x21, #1
b loop_stack_args
call_func:
mov x20, x30 /* save x30(lr) */
blr x19
mov sp, x22 /* restore sp which was saved before calling the function */
return:
mov x30, x20 /* restore x30(lr) */
ldp x19, x20, [sp, #0x20] /* restore the registers in stack */
ldp x21, x22, [sp, #0x10]
ldp x23, x24, [sp, #0x0]
add sp, sp, #0x30 /* restore sp */
ret
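
For reference, the fill order above implies a fixed argv layout on the caller side: eight 128-bit slots that are loaded into v0-v7 (128 bytes), then eight 64-bit slots loaded into x0-x7, then any remaining stack arguments. Below is a minimal caller-side sketch, assuming that layout and a C prototype matching the register usage in the trampoline; the helper name call_example and the argument choices are illustrative only, not part of the patch.

#include <stdint.h>
#include <string.h>

/* Trampoline above: x0 = function ptr, x1 = argv, x2 = nstacks (assumed prototype) */
void invokeNative(void (*func_ptr)(void), uint64_t *argv, uint64_t n_stacks);

/* Illustrative only: call a native void f(int64_t, double) with no stack args. */
static void call_example(void (*f)(void), int64_t a, double b)
{
    uint64_t argv[8 * 2 + 8] = { 0 };   /* 8 v-regs (16 bytes each) + 8 x-regs */
    uint64_t *fp_slots  = argv;         /* loaded into v0..v7 */
    uint64_t *int_slots = argv + 16;    /* loaded into x0..x7 */

    memcpy(&fp_slots[0], &b, sizeof(b));   /* low half of v0 (d0) = b */
    int_slots[0] = (uint64_t)a;            /* x0 = a */

    invokeNative(f, argv, 0);              /* nstacks = 0: branches straight to call_func */
}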

@@ -43,7 +43,11 @@ elseif (WAMR_BUILD_TARGET MATCHES "THUMB.*")
set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_thumb.s)
endif ()
elseif (WAMR_BUILD_TARGET MATCHES "AARCH64.*")
set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_aarch64.s)
if (NOT WAMR_BUILD_SIMD EQUAL 1)
set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_aarch64.s)
else()
set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_aarch64_simd.s)
endif()
elseif (WAMR_BUILD_TARGET STREQUAL "MIPS")
set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_mips.s)
elseif (WAMR_BUILD_TARGET STREQUAL "XTENSA")

@@ -3414,10 +3414,14 @@ typedef union __declspec(intrin_type) __declspec(align(8)) v128 {
unsigned __int32 m128i_u32[4];
unsigned __int64 m128i_u64[2];
} v128;
#else
#elif defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
typedef long long v128 __attribute__ ((__vector_size__ (16),
__may_alias__, __aligned__ (1)));
#endif /* end of defined(_WIN32) || defined(_WIN32_) */
#elif defined(BUILD_TARGET_AARCH64)
#include <arm_neon.h>
typedef uint32x4_t __m128i;
#define v128 __m128i
#endif
#endif /* end of WASM_ENABLE_SIMD != 0 */
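
With this change, v128 on AARCH64 becomes an alias for NEON's uint32x4_t, so 128-bit values can live in Q registers. A small standalone sketch of what that alias supports, using standard arm_neon.h intrinsics (not part of the patch):

#include <arm_neon.h>
#include <stdint.h>

typedef uint32x4_t v128;   /* same effective alias the patch defines for AARCH64 */

/* Lane-wise add of two 128-bit vectors, then read back lane 0. */
static uint32_t v128_add_demo(void)
{
    v128 a = vdupq_n_u32(1);      /* {1, 1, 1, 1}     */
    v128 b = vdupq_n_u32(41);     /* {41, 41, 41, 41} */
    v128 c = vaddq_u32(a, b);     /* {42, 42, 42, 42} */
    return vgetq_lane_u32(c, 0);  /* 42 */
}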