Refine lock/unlock shared memory (#2682)

Split the memory instance's field `uint32 ref_count` into `bool is_shared_memory`
and `uint16 ref_count`, and lock the memory only when the `is_shared_memory`
flag is true: there is no need to acquire a lock for non-shared memory even
when the shared memory feature is enabled.
Author: Wenyong Huang
Date: 2023-10-31 11:46:03 +08:00 (committed by GitHub)
Parent: 3570a94a08
Commit: 52db362b89
7 changed files with 110 additions and 64 deletions
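In effect, the lock and unlock paths become conditional on the new flag. A minimal sketch of the pattern, assuming WAMR's usual `korp_mutex`/`os_mutex_lock` platform primitives (the struct layout and function names below are illustrative, not the exact code from this commit):

    /* Sketch only: field and function names here are assumptions. */
    typedef struct MemoryInstanceSketch {
        bh_atomic_16_t is_shared_memory; /* split out of the old uint32 ref_count */
        bh_atomic_16_t ref_count;        /* 16 bits suffice for the refcount */
    } MemoryInstanceSketch;

    static void
    memory_lock_sketch(MemoryInstanceSketch *memory, korp_mutex *lock)
    {
        /* Only a shared memory can be accessed from several threads, so a
           non-shared memory skips the mutex even when the shared-memory
           feature is compiled in. */
        if (BH_ATOMIC_16_LOAD(memory->is_shared_memory))
            os_mutex_lock(lock);
    }

This is why the diff below adds a 16-bit atomic family alongside the existing 32-bit one: the flag and the narrowed refcount both need atomic access.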


@@ -45,6 +45,7 @@ extern "C" {
*/
typedef uint32 bh_atomic_32_t;
typedef uint16 bh_atomic_16_t;
#if defined(__GNUC_PREREQ)
#if __GNUC_PREREQ(4, 7)
@@ -59,6 +60,7 @@ typedef uint32 bh_atomic_32_t;
#if defined(CLANG_GCC_HAS_ATOMIC_BUILTIN)
#define BH_ATOMIC_32_IS_ATOMIC 1
#define BH_ATOMIC_32_LOAD(v) __atomic_load_n(&(v), __ATOMIC_SEQ_CST)
#define BH_ATOMIC_32_STORE(v, val) __atomic_store_n(&(v), val, __ATOMIC_SEQ_CST)
#define BH_ATOMIC_32_FETCH_OR(v, val) \
__atomic_fetch_or(&(v), (val), __ATOMIC_SEQ_CST)
#define BH_ATOMIC_32_FETCH_AND(v, val) \
@@ -67,13 +69,33 @@ typedef uint32 bh_atomic_32_t;
__atomic_fetch_add(&(v), (val), __ATOMIC_SEQ_CST)
#define BH_ATOMIC_32_FETCH_SUB(v, val) \
__atomic_fetch_sub(&(v), (val), __ATOMIC_SEQ_CST)
#define BH_ATOMIC_16_IS_ATOMIC 1
#define BH_ATOMIC_16_LOAD(v) __atomic_load_n(&(v), __ATOMIC_SEQ_CST)
#define BH_ATOMIC_16_STORE(v, val) __atomic_store_n(&(v), val, __ATOMIC_SEQ_CST)
#define BH_ATOMIC_16_FETCH_OR(v, val) \
__atomic_fetch_or(&(v), (val), __ATOMIC_SEQ_CST)
#define BH_ATOMIC_16_FETCH_AND(v, val) \
__atomic_fetch_and(&(v), (val), __ATOMIC_SEQ_CST)
#define BH_ATOMIC_16_FETCH_ADD(v, val) \
__atomic_fetch_add(&(v), (val), __ATOMIC_SEQ_CST)
#define BH_ATOMIC_16_FETCH_SUB(v, val) \
__atomic_fetch_sub(&(v), (val), __ATOMIC_SEQ_CST)
#else /* else of defined(CLANG_GCC_HAS_ATOMIC_BUILTIN) */
#define BH_ATOMIC_32_LOAD(v) (v)
#define BH_ATOMIC_32_STORE(v, val) (v) = val
#define BH_ATOMIC_32_FETCH_OR(v, val) nonatomic_32_fetch_or(&(v), val)
#define BH_ATOMIC_32_FETCH_AND(v, val) nonatomic_32_fetch_and(&(v), val)
#define BH_ATOMIC_32_FETCH_ADD(v, val) nonatomic_32_fetch_add(&(v), val)
#define BH_ATOMIC_32_FETCH_SUB(v, val) nonatomic_32_fetch_sub(&(v), val)
#define BH_ATOMIC_16_LOAD(v) (v)
#define BH_ATOMIC_16_STORE(v, val) (v) = val
#define BH_ATOMIC_16_FETCH_OR(v, val) nonatomic_16_fetch_or(&(v), val)
#define BH_ATOMIC_16_FETCH_AND(v, val) nonatomic_16_fetch_and(&(v), val)
#define BH_ATOMIC_16_FETCH_ADD(v, val) nonatomic_16_fetch_add(&(v), val)
#define BH_ATOMIC_16_FETCH_SUB(v, val) nonatomic_16_fetch_sub(&(v), val)
static inline uint32
nonatomic_32_fetch_or(bh_atomic_32_t *p, uint32 val)
{
@@ -106,6 +128,38 @@ nonatomic_32_fetch_sub(bh_atomic_32_t *p, uint32 val)
return old;
}
static inline uint16
nonatomic_16_fetch_or(bh_atomic_16_t *p, uint16 val)
{
uint16 old = *p;
*p |= val;
return old;
}
static inline uint16
nonatomic_16_fetch_and(bh_atomic_16_t *p, uint16 val)
{
uint16 old = *p;
*p &= val;
return old;
}
static inline uint16
nonatomic_16_fetch_add(bh_atomic_16_t *p, uint16 val)
{
uint16 old = *p;
*p += val;
return old;
}
static inline uint16
nonatomic_16_fetch_sub(bh_atomic_16_t *p, uint16 val)
{
uint16 old = *p;
*p -= val;
return old;
}
/* The flag can be defined by the user if the platform
supports atomic access to uint32 aligned memory. */
#ifdef WASM_UINT32_IS_ATOMIC
@@ -114,6 +168,12 @@ nonatomic_32_fetch_sub(bh_atomic_32_t *p, uint32 val)
#define BH_ATOMIC_32_IS_ATOMIC 0
#endif /* WASM_UINT32_IS_ATOMIC */
#ifdef WASM_UINT16_IS_ATOMIC
#define BH_ATOMIC_16_IS_ATOMIC 1
#else /* else of WASM_UINT16_IS_ATOMIC */
#define BH_ATOMIC_16_IS_ATOMIC 0
#endif /* WASM_UINT16_IS_ATOMIC */
#endif
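Both the builtin and the fallback 16-bit variants return the value held before the operation, matching `__atomic_fetch_*` semantics, so a caller can detect the last release of a reference count. A hedged usage sketch (the variable is illustrative, not from this diff):

    bh_atomic_16_t ref_count = 1;
    /* BH_ATOMIC_16_FETCH_SUB returns the old value, so comparing it
       against 1 detects the transition from 1 to 0. */
    if (BH_ATOMIC_16_FETCH_SUB(ref_count, 1) == 1) {
        /* last reference released: safe to tear the resource down */
    }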
#ifdef __cplusplus