Implement GC (Garbage Collection) feature for interpreter, AOT and LLVM-JIT (#3125)

Implement the GC (Garbage Collection) feature for interpreter mode,
AOT mode and LLVM-JIT mode, supporting most features of the latest
spec proposal, and also enable the stringref feature.

Use `cmake -DWAMR_BUILD_GC=1/0` to enable/disable the feature,
and `wamrc --enable-gc` to generate the AOT file with GC supported.

Also update the AOT file version from 2 to 3, since there are many
AOT ABI breaks: changes to the AOT file format, to the AOT
module/memory instance layouts, to the AOT runtime APIs invoked by
AOT code, and so on.
Wenyong Huang
2024-02-06 20:47:11 +08:00
committed by GitHub
parent 5931aaacbe
commit 16a4d71b34
98 changed files with 33469 additions and 3159 deletions

View File

@@ -5,6 +5,27 @@
#include "ems_gc_internal.h"
#if WASM_ENABLE_GC != 0
#define LOCK_HEAP(heap) \
do { \
if (!heap->is_doing_reclaim) \
/* If the heap is doing reclaim, it must have been locked, \
we should not lock the heap again. */ \
os_mutex_lock(&heap->lock); \
} while (0)
#define UNLOCK_HEAP(heap) \
do { \
if (!heap->is_doing_reclaim) \
/* If the heap is doing reclaim, it must have been locked, \
and will be unlocked after reclaim, we should not \
unlock the heap again. */ \
os_mutex_unlock(&heap->lock); \
} while (0)
#else
#define LOCK_HEAP(heap) os_mutex_lock(&heap->lock)
#define UNLOCK_HEAP(heap) os_mutex_unlock(&heap->lock)
#endif
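
Why the conditional locking is needed: gci_gc_heap() (added in the new mark-sweep file below) takes heap->lock and sets is_doing_reclaim before reclaiming, and the sweep phase runs user finalizers that call back into heap APIs such as gc_unset_finalizer(), which themselves use LOCK_HEAP/UNLOCK_HEAP. With a plain non-recursive mutex that re-entry would self-deadlock. A minimal standalone sketch of the pattern, using pthread stand-ins rather than the actual WAMR types:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for gc_heap_t and os_mutex_*. */
typedef struct {
    pthread_mutex_t lock;
    bool is_doing_reclaim;
} demo_heap_t;

#define LOCK_HEAP(h)                        \
    do {                                    \
        if (!(h)->is_doing_reclaim)         \
            pthread_mutex_lock(&(h)->lock); \
    } while (0)
#define UNLOCK_HEAP(h)                        \
    do {                                      \
        if (!(h)->is_doing_reclaim)           \
            pthread_mutex_unlock(&(h)->lock); \
    } while (0)

/* Stands in for a finalizer that re-enters a heap API (e.g.
   gc_unset_finalizer): with an unconditional lock this would
   deadlock, since reclaim already holds heap->lock. */
static void
finalizer_touching_heap(demo_heap_t *heap)
{
    LOCK_HEAP(heap); /* no-op while reclaim is in progress */
    printf("finalizer ran without self-deadlock\n");
    UNLOCK_HEAP(heap);
}

int
main(void)
{
    demo_heap_t heap = { .is_doing_reclaim = false };
    pthread_mutex_init(&heap.lock, NULL);

    /* The gci_gc_heap() sequence: lock, flag reclaim, sweep
       (which may run finalizers), unflag, unlock. */
    pthread_mutex_lock(&heap.lock);
    heap.is_doing_reclaim = true;
    finalizer_touching_heap(&heap);
    heap.is_doing_reclaim = false;
    pthread_mutex_unlock(&heap.lock);

    pthread_mutex_destroy(&heap.lock);
    return 0;
}
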
static inline bool
hmu_is_in_heap(void *hmu, gc_uint8 *heap_base_addr, gc_uint8 *heap_end_addr)
{
@@ -332,6 +353,11 @@ alloc_hmu(gc_heap_t *heap, gc_size_t size)
bh_assert(gci_is_heap_valid(heap));
bh_assert(size > 0 && !(size & 7));
#if WASM_ENABLE_GC != 0
/* While doing reclaim, the GC must not allocate memory again. */
bh_assert(!heap->is_doing_reclaim);
#endif
base_addr = heap->base_addr;
end_addr = base_addr + heap->current_size;
@@ -454,6 +480,34 @@ alloc_hmu(gc_heap_t *heap, gc_size_t size)
return NULL;
}
#if WASM_ENABLE_GC != 0
static int
do_gc_heap(gc_heap_t *heap)
{
int ret = GC_SUCCESS;
#if WASM_ENABLE_GC_PERF_PROFILING != 0
uint64 start = 0, end = 0, time = 0;
start = os_time_get_boot_microsecond();
#endif
if (heap->is_reclaim_enabled) {
UNLOCK_HEAP(heap);
ret = gci_gc_heap(heap);
LOCK_HEAP(heap);
}
#if WASM_ENABLE_GC_PERF_PROFILING != 0
end = os_time_get_boot_microsecond();
time = end - start;
heap->total_gc_time += time;
if (time > heap->max_gc_time) {
heap->max_gc_time = time;
}
heap->total_gc_count += 1;
#endif
return ret;
}
#endif
/**
* Find a proper HMU with given size
*
@@ -475,12 +529,29 @@ alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
bh_assert(gci_is_heap_valid(heap));
bh_assert(size > 0 && !(size & 7));
#if WASM_ENABLE_GC != 0
#if GC_IN_EVERY_ALLOCATION != 0
if (GC_SUCCESS != do_gc_heap(heap))
return NULL;
#else
if (heap->total_free_size < heap->gc_threshold) {
if (GC_SUCCESS != do_gc_heap(heap))
return NULL;
}
else {
hmu_t *ret = NULL;
if ((ret = alloc_hmu(heap, size))) {
return ret;
}
if (GC_SUCCESS != do_gc_heap(heap))
return NULL;
}
#endif
#endif
return alloc_hmu(heap, size);
}
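
In words: with GC_IN_EVERY_ALLOCATION a collection runs before every allocation; otherwise one runs only when free space has already fallen below gc_threshold, and in the remaining case the allocator first tries to serve the request and collects only if that attempt fails. A compact sketch of the same decision, with hypothetical try_alloc()/do_gc() stand-ins for alloc_hmu()/do_gc_heap():

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical stand-ins for alloc_hmu() and do_gc_heap(). */
static void *try_alloc(size_t size) { return malloc(size); }
static bool do_gc(void) { return true; }

/* Mirrors the alloc_hmu_ex() policy above (the branch without
   GC_IN_EVERY_ALLOCATION): collect when free space is low,
   otherwise prefer the fast path and collect only on failure. */
static void *
alloc_with_gc_policy(size_t size, size_t total_free, size_t gc_threshold)
{
    if (total_free < gc_threshold) {
        if (!do_gc())
            return NULL;
    }
    else {
        void *p = try_alloc(size);
        if (p)
            return p; /* fast path, no collection */
        if (!do_gc())
            return NULL;
    }
    return try_alloc(size); /* one retry after collecting */
}

int
main(void)
{
    free(alloc_with_gc_policy(64, 1024, 512));
    return 0;
}
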
static unsigned long g_total_malloc = 0;
static unsigned long g_total_free = 0;
#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_alloc_vo(void *vheap, gc_size_t size)
@@ -509,7 +580,7 @@ gc_alloc_vo_internal(void *vheap, gc_size_t size, const char *file, int line)
}
#endif
os_mutex_lock(&heap->lock);
LOCK_HEAP(heap);
hmu = alloc_hmu_ex(heap, tot_size);
if (!hmu)
@@ -520,7 +591,9 @@ gc_alloc_vo_internal(void *vheap, gc_size_t size, const char *file, int line)
the required size, reset it here */
tot_size = hmu_get_size(hmu);
g_total_malloc += tot_size;
#if GC_STAT_DATA != 0
heap->total_size_allocated += tot_size;
#endif
hmu_set_ut(hmu, HMU_VO);
hmu_unfree_vo(hmu);
@@ -535,7 +608,7 @@ gc_alloc_vo_internal(void *vheap, gc_size_t size, const char *file, int line)
memset((uint8 *)ret + size, 0, tot_size - tot_size_unaligned);
finish:
os_mutex_unlock(&heap->lock);
UNLOCK_HEAP(heap);
return ret;
}
@@ -582,7 +655,7 @@ gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size, const char *file,
base_addr = heap->base_addr;
end_addr = base_addr + heap->current_size;
os_mutex_lock(&heap->lock);
LOCK_HEAP(heap);
if (hmu_old) {
hmu_next = (hmu_t *)((char *)hmu_old + tot_size_old);
@@ -592,7 +665,7 @@ gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size, const char *file,
if (ut == HMU_FC && tot_size <= tot_size_old + tot_size_next) {
/* current node and next node meets requirement */
if (!unlink_hmu(heap, hmu_next)) {
os_mutex_unlock(&heap->lock);
UNLOCK_HEAP(heap);
return NULL;
}
hmu_set_size(hmu_old, tot_size);
@@ -605,12 +678,12 @@ gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size, const char *file,
hmu_next = (hmu_t *)((char *)hmu_old + tot_size);
tot_size_next = tot_size_old + tot_size_next - tot_size;
if (!gci_add_fc(heap, hmu_next, tot_size_next)) {
os_mutex_unlock(&heap->lock);
UNLOCK_HEAP(heap);
return NULL;
}
hmu_mark_pinuse(hmu_next);
}
os_mutex_unlock(&heap->lock);
UNLOCK_HEAP(heap);
return obj_old;
}
}
@@ -624,7 +697,10 @@ gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size, const char *file,
/* the total size allocated may be larger than
the required size, reset it here */
tot_size = hmu_get_size(hmu);
g_total_malloc += tot_size;
#if GC_STAT_DATA != 0
heap->total_size_allocated += tot_size;
#endif
hmu_set_ut(hmu, HMU_VO);
hmu_unfree_vo(hmu);
@@ -647,7 +723,7 @@ finish:
}
}
os_mutex_unlock(&heap->lock);
UNLOCK_HEAP(heap);
if (ret && obj_old)
gc_free_vo(vheap, obj_old);
@@ -655,6 +731,93 @@ finish:
return ret;
}
#if GC_MANUALLY != 0
void
gc_free_wo(void *vheap, void *ptr)
{
gc_heap_t *heap = (gc_heap_t *)vheap;
gc_object_t *obj = (gc_object_t *)ptr;
hmu_t *hmu = obj_to_hmu(obj);
bh_assert(gci_is_heap_valid(heap));
bh_assert(obj);
bh_assert((gc_uint8 *)hmu >= heap->base_addr
&& (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
bh_assert(hmu_get_ut(hmu) == HMU_WO);
hmu_unmark_wo(hmu);
(void)heap;
}
#endif
/* see ems_gc.h for description*/
#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_alloc_wo(void *vheap, gc_size_t size)
#else
gc_object_t
gc_alloc_wo_internal(void *vheap, gc_size_t size, const char *file, int line)
#endif
{
gc_heap_t *heap = (gc_heap_t *)vheap;
hmu_t *hmu = NULL;
gc_object_t ret = (gc_object_t)NULL;
gc_size_t tot_size = 0, tot_size_unaligned;
/* hmu header + prefix + obj + suffix */
tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
/* aligned size*/
tot_size = GC_ALIGN_8(tot_size_unaligned);
if (tot_size < size)
/* integer overflow */
return NULL;
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
if (heap->is_heap_corrupted) {
os_printf("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
return NULL;
}
#endif
LOCK_HEAP(heap);
hmu = alloc_hmu_ex(heap, tot_size);
if (!hmu)
goto finish;
/* Do we need to memset the memory to 0? */
/* memset((char *)hmu + sizeof(*hmu), 0, tot_size - sizeof(*hmu)); */
bh_assert(hmu_get_size(hmu) >= tot_size);
/* the total size allocated may be larger than
the required size, reset it here */
tot_size = hmu_get_size(hmu);
#if GC_STAT_DATA != 0
heap->total_size_allocated += tot_size;
#endif
hmu_set_ut(hmu, HMU_WO);
#if GC_MANUALLY != 0
hmu_mark_wo(hmu);
#else
hmu_unmark_wo(hmu);
#endif
#if BH_ENABLE_GC_VERIFY != 0
hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif
ret = hmu_to_obj(hmu);
if (tot_size > tot_size_unaligned)
/* clear buffer appended by GC_ALIGN_8() */
memset((uint8 *)ret + size, 0, tot_size - tot_size_unaligned);
finish:
UNLOCK_HEAP(heap);
return ret;
}
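
The size computation mirrors gc_alloc_vo(): the payload is wrapped with the HMU header plus the verify-mode prefix/suffix, then rounded up to a multiple of 8, and the padding bytes introduced by the rounding are exactly what the memset() above clears. A worked example with illustrative sizes (the real HMU_SIZE and prefix/suffix sizes depend on build options):

#include <stdint.h>
#include <stdio.h>

/* Round up to a multiple of 8, as GC_ALIGN_8() does. */
#define ALIGN_8(s) (((uint32_t)(s) + 7U) & ~7U)

int
main(void)
{
    uint32_t hmu_size = 4;  /* illustrative, not the real HMU_SIZE */
    uint32_t payload = 13;  /* requested object size */
    uint32_t unaligned = hmu_size + payload; /* 17 */
    uint32_t total = ALIGN_8(unaligned);     /* 24 */
    printf("unaligned=%u total=%u padding=%u\n", unaligned, total,
           total - unaligned); /* 7 padding bytes, zeroed before return */
    return 0;
}
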
/**
* Do some checking to see if given pointer is a possible valid heap
* @return GC_TRUE if all checking passed, GC_FALSE otherwise
@@ -703,7 +866,7 @@ gc_free_vo_internal(void *vheap, gc_object_t obj, const char *file, int line)
base_addr = heap->base_addr;
end_addr = base_addr + heap->current_size;
os_mutex_lock(&heap->lock);
LOCK_HEAP(heap);
if (hmu_is_in_heap(hmu, base_addr, end_addr)) {
#if BH_ENABLE_GC_VERIFY != 0
@@ -719,10 +882,12 @@ gc_free_vo_internal(void *vheap, gc_object_t obj, const char *file, int line)
size = hmu_get_size(hmu);
g_total_free += size;
heap->total_free_size += size;
#if GC_STAT_DATA != 0
heap->total_size_freed += size;
#endif
if (!hmu_get_pinuse(hmu)) {
prev = (hmu_t *)((char *)hmu - *((int *)hmu - 1));
@@ -767,7 +932,7 @@ gc_free_vo_internal(void *vheap, gc_object_t obj, const char *file, int line)
}
out:
os_mutex_unlock(&heap->lock);
UNLOCK_HEAP(heap);
return ret;
}
@@ -778,8 +943,12 @@ gc_dump_heap_stats(gc_heap_t *heap)
os_printf("total free: %" PRIu32 ", current: %" PRIu32
", highmark: %" PRIu32 "\n",
heap->total_free_size, heap->current_size, heap->highmark_size);
os_printf("g_total_malloc=%lu, g_total_free=%lu, occupied=%lu\n",
g_total_malloc, g_total_free, g_total_malloc - g_total_free);
#if GC_STAT_DATA != 0
os_printf("total size allocated: %" PRIu64 ", total size freed: %" PRIu64
", total occupied: %" PRIu64 "\n",
heap->total_size_allocated, heap->total_size_freed,
heap->total_size_allocated - heap->total_size_freed);
#endif
}
uint32
@@ -804,12 +973,12 @@ gci_dump(gc_heap_t *heap)
ut = hmu_get_ut(cur);
size = hmu_get_size(cur);
p = hmu_get_pinuse(cur);
mark = hmu_is_jo_marked(cur);
mark = hmu_is_wo_marked(cur);
if (ut == HMU_VO)
inuse = 'V';
else if (ut == HMU_JO)
inuse = hmu_is_jo_marked(cur) ? 'J' : 'j';
else if (ut == HMU_WO)
inuse = hmu_is_wo_marked(cur) ? 'W' : 'w';
else if (ut == HMU_FC)
inuse = 'F';
@@ -845,3 +1014,156 @@ gci_dump(gc_heap_t *heap)
bh_assert(cur == end);
#endif
}
#if WASM_ENABLE_GC != 0
extra_info_node_t *
gc_search_extra_info_node(gc_handle_t handle, gc_object_t obj,
gc_size_t *p_index)
{
gc_heap_t *vheap = (gc_heap_t *)handle;
int32 low = 0, high = vheap->extra_info_node_cnt - 1;
int32 mid;
extra_info_node_t *node;
if (!vheap->extra_info_nodes)
return NULL;
while (low <= high) {
mid = (low + high) / 2;
node = vheap->extra_info_nodes[mid];
if (obj == node->obj) {
if (p_index) {
*p_index = mid;
}
return node;
}
else if (obj < node->obj) {
high = mid - 1;
}
else {
low = mid + 1;
}
}
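/* Not found: `low` is the lower-bound position, i.e. the index at
   which the object would have to be inserted to keep the array
   sorted by address; insert_extra_info_node() relies on this. */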
if (p_index) {
*p_index = low;
}
return NULL;
}
static bool
insert_extra_info_node(gc_heap_t *vheap, extra_info_node_t *node)
{
gc_size_t index;
extra_info_node_t *orig_node;
if (!vheap->extra_info_nodes) {
vheap->extra_info_nodes = vheap->extra_info_normal_nodes;
vheap->extra_info_node_capacity = sizeof(vheap->extra_info_normal_nodes)
/ sizeof(extra_info_node_t *);
vheap->extra_info_nodes[0] = node;
vheap->extra_info_node_cnt = 1;
return true;
}
/* extend array */
if (vheap->extra_info_node_cnt == vheap->extra_info_node_capacity) {
extra_info_node_t **new_nodes = NULL;
gc_size_t new_capacity = vheap->extra_info_node_capacity * 3 / 2;
gc_size_t total_size = sizeof(extra_info_node_t *) * new_capacity;
new_nodes = (extra_info_node_t **)BH_MALLOC(total_size);
if (!new_nodes) {
LOG_ERROR("alloc extra info nodes failed");
return false;
}
bh_memcpy_s(new_nodes, total_size, vheap->extra_info_nodes,
sizeof(extra_info_node_t *) * vheap->extra_info_node_cnt);
if (vheap->extra_info_nodes != vheap->extra_info_normal_nodes) {
BH_FREE(vheap->extra_info_nodes);
}
vheap->extra_info_nodes = new_nodes;
vheap->extra_info_node_capacity = new_capacity;
}
orig_node = gc_search_extra_info_node(vheap, node->obj, &index);
if (orig_node) {
/* replace the old node */
vheap->extra_info_nodes[index] = node;
BH_FREE(orig_node);
}
else {
bh_memmove_s(vheap->extra_info_nodes + index + 1,
(vheap->extra_info_node_capacity - index - 1)
* sizeof(extra_info_node_t *),
vheap->extra_info_nodes + index,
(vheap->extra_info_node_cnt - index)
* sizeof(extra_info_node_t *));
vheap->extra_info_nodes[index] = node;
vheap->extra_info_node_cnt += 1;
}
return true;
}
bool
gc_set_finalizer(gc_handle_t handle, gc_object_t obj, gc_finalizer_t cb,
void *data)
{
extra_info_node_t *node = NULL;
gc_heap_t *vheap = (gc_heap_t *)handle;
node = (extra_info_node_t *)BH_MALLOC(sizeof(extra_info_node_t));
if (!node) {
LOG_ERROR("alloc a new extra info node failed");
return GC_FALSE;
}
memset(node, 0, sizeof(extra_info_node_t));
node->finalizer = cb;
node->obj = obj;
node->data = data;
LOCK_HEAP(vheap);
if (!insert_extra_info_node(vheap, node)) {
BH_FREE(node);
UNLOCK_HEAP(vheap);
return GC_FALSE;
}
UNLOCK_HEAP(vheap);
gct_vm_set_extra_info_flag(obj, true);
return GC_TRUE;
}
void
gc_unset_finalizer(gc_handle_t handle, gc_object_t obj)
{
gc_size_t index;
gc_heap_t *vheap = (gc_heap_t *)handle;
extra_info_node_t *node;
LOCK_HEAP(vheap);
node = gc_search_extra_info_node(vheap, obj, &index);
if (!node) {
UNLOCK_HEAP(vheap);
return;
}
BH_FREE(node);
bh_memmove_s(
vheap->extra_info_nodes + index,
(vheap->extra_info_node_capacity - index) * sizeof(extra_info_node_t *),
vheap->extra_info_nodes + index + 1,
(vheap->extra_info_node_cnt - index - 1) * sizeof(extra_info_node_t *));
vheap->extra_info_node_cnt -= 1;
UNLOCK_HEAP(vheap);
gct_vm_set_extra_info_flag(obj, false);
}
#endif

View File

@@ -0,0 +1,493 @@
/*
* Copyright (C) 2022 Tencent Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "ems_gc.h"
#include "ems_gc_internal.h"
#define GB (1 << 30UL)
#define MARK_NODE_OBJ_CNT 256
#if WASM_ENABLE_GC != 0
/* mark nodes are used by the gc marker */
typedef struct mark_node_struct {
/* number of to-expand objects can be saved in this node */
gc_size_t cnt;
/* the first unused index */
uint32 idx;
/* next node on the node list */
struct mark_node_struct *next;
/* the actual to-expand objects list */
gc_object_t set[MARK_NODE_OBJ_CNT];
} mark_node_t;
/**
* Alloc a mark node from the native heap
*
* @return a valid mark node if success, NULL otherwise
*/
static mark_node_t *
alloc_mark_node(void)
{
mark_node_t *ret = (mark_node_t *)BH_MALLOC(sizeof(mark_node_t));
if (!ret) {
LOG_ERROR("alloc a new mark node failed");
return NULL;
}
ret->cnt = sizeof(ret->set) / sizeof(ret->set[0]);
ret->idx = 0;
ret->next = NULL;
return ret;
}
/* Free a mark node to the native heap
*
* @param node the mark node to free, should not be NULL
*/
static void
free_mark_node(mark_node_t *node)
{
bh_assert(node);
BH_FREE((gc_object_t)node);
}
/**
* Sweep phase of mark_sweep algorithm
* @param heap the heap to sweep, should be a valid instance heap
* which has already been marked
*/
static void
sweep_instance_heap(gc_heap_t *heap)
{
hmu_t *cur = NULL, *end = NULL, *last = NULL;
hmu_type_t ut;
gc_size_t size;
int i, lsize;
gc_size_t tot_free = 0;
bh_assert(gci_is_heap_valid(heap));
cur = (hmu_t *)heap->base_addr;
last = NULL;
end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
/* reset KFC */
lsize =
(int)(sizeof(heap->kfc_normal_list) / sizeof(heap->kfc_normal_list[0]));
for (i = 0; i < lsize; i++) {
heap->kfc_normal_list[i].next = NULL;
}
heap->kfc_tree_root->right = NULL;
heap->root_set = NULL;
while (cur < end) {
ut = hmu_get_ut(cur);
size = hmu_get_size(cur);
bh_assert(size > 0);
if (ut == HMU_FC || ut == HMU_FM
|| (ut == HMU_VO && hmu_is_vo_freed(cur))
|| (ut == HMU_WO && !hmu_is_wo_marked(cur))) {
/* merge previous free areas with current one */
if (!last)
last = cur;
if (ut == HMU_WO) {
/* Invoke registered finalizer */
gc_object_t cur_obj = hmu_to_obj(cur);
if (gct_vm_get_extra_info_flag(cur_obj)) {
extra_info_node_t *node = gc_search_extra_info_node(
(gc_handle_t)heap, cur_obj, NULL);
bh_assert(node);
node->finalizer(node->obj, node->data);
gc_unset_finalizer((gc_handle_t)heap, cur_obj);
}
}
}
else {
/* current block is still live */
if (last) {
tot_free += (char *)cur - (char *)last;
gci_add_fc(heap, last, (char *)cur - (char *)last);
hmu_mark_pinuse(last);
last = NULL;
}
if (ut == HMU_WO) {
/* unmark it */
hmu_unmark_wo(cur);
}
}
cur = (hmu_t *)((char *)cur + size);
}
bh_assert(cur == end);
if (last) {
tot_free += (char *)cur - (char *)last;
gci_add_fc(heap, last, (char *)cur - (char *)last);
hmu_mark_pinuse(last);
}
heap->total_free_size = tot_free;
#if GC_STAT_DATA != 0
heap->total_gc_count++;
if ((heap->current_size - tot_free) > heap->highmark_size)
heap->highmark_size = heap->current_size - tot_free;
#endif
gc_update_threshold(heap);
}
/**
* Add a to-expand node to the to-expand list
*
* @param heap should be a valid instance heap
* @param obj should be a valid wo inside @heap
*
* @return GC_ERROR if there is no more resource for marking,
* GC_SUCCESS if success
*/
static int
add_wo_to_expand(gc_heap_t *heap, gc_object_t obj)
{
mark_node_t *mark_node = NULL, *new_node = NULL;
hmu_t *hmu = NULL;
bh_assert(obj);
hmu = obj_to_hmu(obj);
bh_assert(gci_is_heap_valid(heap));
bh_assert((gc_uint8 *)hmu >= heap->base_addr
&& (gc_uint8 *)hmu < heap->base_addr + heap->current_size);
bh_assert(hmu_get_ut(hmu) == HMU_WO);
if (hmu_is_wo_marked(hmu))
return GC_SUCCESS; /* already marked*/
mark_node = (mark_node_t *)heap->root_set;
if (!mark_node || mark_node->idx == mark_node->cnt) {
new_node = alloc_mark_node();
if (!new_node) {
LOG_ERROR("can not add obj to mark node because of mark node "
"allocation failed");
return GC_ERROR;
}
new_node->next = mark_node;
heap->root_set = new_node;
mark_node = new_node;
}
mark_node->set[mark_node->idx++] = obj;
hmu_mark_wo(hmu);
return GC_SUCCESS;
}
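
heap->root_set therefore behaves as a stack of fixed-capacity chunks: pushes fill the head node, and a fresh node is prepended once it is full (reclaim_instance_heap() below pops from the head in the same order). A minimal sketch of this chunked-stack idea, with illustrative names:

#include <stdlib.h>

#define CHUNK_CAP 256 /* plays the role of MARK_NODE_OBJ_CNT */

typedef struct chunk {
    int idx;            /* first unused slot */
    struct chunk *next; /* next (older) chunk */
    void *set[CHUNK_CAP];
} chunk_t;

/* Push obj, prepending a new chunk when the head is full;
   mirrors how add_wo_to_expand() grows heap->root_set. */
static int
push(chunk_t **head, void *obj)
{
    chunk_t *c = *head;
    if (!c || c->idx == CHUNK_CAP) {
        chunk_t *n = calloc(1, sizeof(*n));
        if (!n)
            return -1; /* caller rolls the mark phase back */
        n->next = c;
        *head = c = n;
    }
    c->set[c->idx++] = obj;
    return 0;
}

int
main(void)
{
    chunk_t *stack = NULL;
    int dummy;
    push(&stack, &dummy); /* first push allocates the head chunk */
    while (stack) {       /* free in pop order */
        chunk_t *n = stack->next;
        free(stack);
        stack = n;
    }
    return 0;
}
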
/* Check ems_gc.h for description*/
int
gc_add_root(void *heap_p, gc_object_t obj)
{
gc_heap_t *heap = (gc_heap_t *)heap_p;
hmu_t *hmu = NULL;
if (!obj) {
LOG_ERROR("gc_add_root with NULL obj");
return GC_ERROR;
}
hmu = obj_to_hmu(obj);
if (!gci_is_heap_valid(heap)) {
LOG_ERROR("vm_get_gc_handle_for_current_instance returns invalid heap");
return GC_ERROR;
}
if (!((gc_uint8 *)hmu >= heap->base_addr
&& (gc_uint8 *)hmu < heap->base_addr + heap->current_size)) {
LOG_ERROR("Obj is not a object in current instance heap");
return GC_ERROR;
}
if (hmu_get_ut(hmu) != HMU_WO) {
LOG_ERROR("Given object is not wo");
return GC_ERROR;
}
if (add_wo_to_expand(heap, obj) != GC_SUCCESS) {
heap->is_fast_marking_failed = 1;
return GC_ERROR;
}
return GC_SUCCESS;
}
/**
* Unmark all marked objects to do rollback
*
* @param heap the heap to do rollback, should be a valid instance heap
*/
static void
rollback_mark(gc_heap_t *heap)
{
mark_node_t *mark_node = NULL, *next_mark_node = NULL;
hmu_t *cur = NULL, *end = NULL;
hmu_type_t ut;
gc_size_t size;
bh_assert(gci_is_heap_valid(heap));
/* roll back*/
mark_node = (mark_node_t *)heap->root_set;
while (mark_node) {
next_mark_node = mark_node->next;
free_mark_node(mark_node);
mark_node = next_mark_node;
}
heap->root_set = NULL;
/* then traverse the heap to unmark all marked wos*/
cur = (hmu_t *)heap->base_addr;
end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
while (cur < end) {
ut = hmu_get_ut(cur);
size = hmu_get_size(cur);
if (ut == HMU_WO && hmu_is_wo_marked(cur)) {
hmu_unmark_wo(cur);
}
cur = (hmu_t *)((char *)cur + size);
}
bh_assert(cur == end);
}
/**
* Reclaim GC instance heap
*
* @param heap the heap to reclaim, should be a valid instance heap
*
* @return GC_SUCCESS if success, GC_ERROR otherwise
*/
static int
reclaim_instance_heap(gc_heap_t *heap)
{
mark_node_t *mark_node = NULL;
int idx = 0, j = 0;
bool ret, is_compact_mode = false;
gc_object_t obj = NULL, ref = NULL;
hmu_t *hmu = NULL;
gc_uint32 ref_num = 0, ref_start_offset = 0, size = 0, offset = 0;
gc_uint16 *ref_list = NULL;
bh_assert(gci_is_heap_valid(heap));
heap->root_set = NULL;
#if WASM_ENABLE_THREAD_MGR == 0
if (!heap->exec_env)
return GC_SUCCESS;
ret = gct_vm_begin_rootset_enumeration(heap->exec_env, heap);
#else
if (!heap->cluster)
return GC_SUCCESS;
ret = gct_vm_begin_rootset_enumeration(heap->cluster, heap);
#endif
if (!ret)
return GC_ERROR;
#if BH_ENABLE_GC_VERIFY != 0
/* no matter whether the enumeration succeeded or not, the data
collected so far should be checked first */
mark_node = (mark_node_t *)heap->root_set;
while (mark_node) {
/* all nodes except first should be full filled */
bh_assert(mark_node == (mark_node_t *)heap->root_set
|| mark_node->idx == mark_node->cnt);
/* all nodes should be non-empty */
bh_assert(mark_node->idx > 0);
for (idx = 0; idx < (int)mark_node->idx; idx++) {
obj = mark_node->set[idx];
hmu = obj_to_hmu(obj);
bh_assert(hmu_is_wo_marked(hmu));
bh_assert((gc_uint8 *)hmu >= heap->base_addr
&& (gc_uint8 *)hmu
< heap->base_addr + heap->current_size);
}
mark_node = mark_node->next;
}
#endif
/* TODO: when fast marking failed, we can still do slow
marking, currently just simply roll it back. */
if (heap->is_fast_marking_failed) {
LOG_ERROR("enumerate rootset failed");
LOG_ERROR("all marked wos will be unmarked to keep heap consistency");
rollback_mark(heap);
heap->is_fast_marking_failed = 0;
return GC_ERROR;
}
/* the algorithm we use to mark all objects */
/* 1. mark rootset and organize them into a mark_node list (last marked
* roots at list header, i.e. stack top) */
/* 2. in every iteration, we use the top node to expand*/
/* 3. execute step 2 till no expanding */
/* this is a BFS & DFS mixed algorithm, but more like DFS */
mark_node = (mark_node_t *)heap->root_set;
while (mark_node) {
heap->root_set = mark_node->next;
/* note that mark_node->idx may change in each loop */
for (idx = 0; idx < (int)mark_node->idx; idx++) {
obj = mark_node->set[idx];
hmu = obj_to_hmu(obj);
size = hmu_get_size(hmu);
if (!gct_vm_get_wasm_object_ref_list(obj, &is_compact_mode,
&ref_num, &ref_list,
&ref_start_offset)) {
LOG_ERROR("mark process failed because failed "
"vm_get_wasm_object_ref_list");
break;
}
if (ref_num >= 2U * GB) {
LOG_ERROR("Invalid ref_num returned");
break;
}
if (is_compact_mode) {
for (j = 0; j < (int)ref_num; j++) {
offset = ref_start_offset + j * sizeof(void *);
bh_assert(offset + sizeof(void *) < size);
ref = *(gc_object_t *)(((gc_uint8 *)obj) + offset);
if (ref == NULL_REF || ((uintptr_t)ref & 1))
continue; /* null object or i31 object */
if (add_wo_to_expand(heap, ref) == GC_ERROR) {
LOG_ERROR("add_wo_to_expand failed");
break;
}
}
if (j < (int)ref_num)
break;
}
else {
for (j = 0; j < (int)ref_num; j++) {
offset = ref_list[j];
bh_assert(offset + sizeof(void *) < size);
ref = *(gc_object_t *)(((gc_uint8 *)obj) + offset);
if (ref == NULL_REF || ((uintptr_t)ref & 1))
continue; /* null object or i31 object */
if (add_wo_to_expand(heap, ref) == GC_ERROR) {
LOG_ERROR("mark process failed");
break;
}
}
if (j < (int)ref_num)
break;
}
}
if (idx < (int)mark_node->idx)
break; /* not yet done */
/* obj's in mark_node are all expanded */
free_mark_node(mark_node);
mark_node = heap->root_set;
}
if (mark_node) {
LOG_ERROR("mark process is not successfully finished");
free_mark_node(mark_node);
/* roll back is required */
rollback_mark(heap);
return GC_ERROR;
}
/* now sweep */
sweep_instance_heap(heap);
(void)size;
return GC_SUCCESS;
}
/**
* Do GC on given heap
*
* @param h the heap to do GC on, should be a valid heap
*
* @return GC_SUCCESS if success, GC_ERROR otherwise
*/
int
gci_gc_heap(void *h)
{
int ret = GC_ERROR;
gc_heap_t *heap = (gc_heap_t *)h;
bh_assert(gci_is_heap_valid(heap));
LOG_VERBOSE("#reclaim instance heap %p", heap);
gct_vm_gc_prepare();
gct_vm_mutex_lock(&heap->lock);
heap->is_doing_reclaim = 1;
ret = reclaim_instance_heap(heap);
heap->is_doing_reclaim = 0;
gct_vm_mutex_unlock(&heap->lock);
gct_vm_gc_finished();
LOG_VERBOSE("#reclaim instance heap %p done", heap);
#if BH_ENABLE_GC_VERIFY != 0
gci_verify_heap(heap);
#endif
#if GC_STAT_SHOW != 0
gc_show_stat(heap);
gc_show_fragment(heap);
#endif
return ret;
}
int
gc_is_dead_object(void *obj)
{
return !hmu_is_wo_marked(obj_to_hmu(obj));
}
#else
int
gci_gc_heap(void *h)
{
(void)h;
return GC_ERROR;
}
#endif /* end of WASM_ENABLE_GC != 0 */

View File

@@ -19,9 +19,27 @@
extern "C" {
#endif
#ifndef GC_STAT_DATA
#define GC_STAT_DATA 0
#endif
#ifndef GC_STAT_SHOW
#define GC_STAT_SHOW 0
#endif
#ifndef GC_IN_EVERY_ALLOCATION
#define GC_IN_EVERY_ALLOCATION 0
#endif
#ifndef GC_MANUALLY
#define GC_MANUALLY 0
#endif
#define GC_HEAD_PADDING 4
#ifndef NULL_REF
#define NULL_REF ((gc_object_t)NULL)
#endif
#define GC_SUCCESS (0)
#define GC_ERROR (-1)
@@ -33,6 +51,7 @@ extern "C" {
typedef void *gc_handle_t;
typedef void *gc_object_t;
typedef uint64 gc_uint64;
typedef int64 gc_int64;
typedef uint32 gc_uint32;
typedef int32 gc_int32;
@@ -46,8 +65,24 @@ typedef enum {
GC_STAT_TOTAL = 0,
GC_STAT_FREE,
GC_STAT_HIGHMARK,
GC_STAT_COUNT,
GC_STAT_TIME,
GC_STAT_MAX
} GC_STAT_INDEX;
typedef void (*gc_finalizer_t)(void *obj, void *data);
#ifndef EXTRA_INFO_NORMAL_NODE_CNT
#define EXTRA_INFO_NORMAL_NODE_CNT 32
#endif
/* extra information attached to specific object */
typedef struct extra_info_node {
gc_object_t obj;
gc_finalizer_t finalizer;
void *data;
} extra_info_node_t;
/**
* GC initialization from a buffer, which is separated into
* two parts: the beginning of the buffer is used to create
@@ -87,6 +122,28 @@ gc_init_with_struct_and_pool(char *struct_buf, gc_size_t struct_buf_size,
int
gc_destroy_with_pool(gc_handle_t handle);
#if WASM_ENABLE_GC != 0
/**
* Enable or disable GC reclaim for a heap
*
* @param handle handle of the heap
* @param exec_env the exec_env of current module instance
*/
#if WASM_ENABLE_THREAD_MGR == 0
void
gc_enable_gc_reclaim(gc_handle_t handle, void *exec_env);
#else
/**
* Enable or disable GC reclaim for a heap
*
* @param handle handle of the heap
* @param cluster the thread cluster of current module instance
*/
void
gc_enable_gc_reclaim(gc_handle_t handle, void *cluster);
#endif
#endif
/**
* Return heap struct size
*/
@@ -136,6 +193,14 @@ gc_realloc_vo(void *heap, void *ptr, gc_size_t size);
int
gc_free_vo(void *heap, gc_object_t obj);
#if WASM_ENABLE_GC != 0
gc_object_t
gc_alloc_wo(void *heap, gc_size_t size);
void
gc_free_wo(void *vheap, void *ptr);
#endif
#else /* else of BH_ENABLE_GC_VERIFY */
gc_object_t
@@ -148,6 +213,14 @@ gc_realloc_vo_internal(void *heap, void *ptr, gc_size_t size, const char *file,
int
gc_free_vo_internal(void *heap, gc_object_t obj, const char *file, int line);
#if WASM_ENABLE_GC != 0
gc_object_t
gc_alloc_wo_internal(void *heap, gc_size_t size, const char *file, int line);
void
gc_free_wo_internal(void *vheap, void *ptr, const char *file, int line);
#endif
/* clang-format off */
#define gc_alloc_vo(heap, size) \
gc_alloc_vo_internal(heap, size, __FILE__, __LINE__)
@@ -157,10 +230,116 @@ gc_free_vo_internal(void *heap, gc_object_t obj, const char *file, int line);
#define gc_free_vo(heap, obj) \
gc_free_vo_internal(heap, obj, __FILE__, __LINE__)
#if WASM_ENABLE_GC != 0
#define gc_alloc_wo(heap, size) \
gc_alloc_wo_internal(heap, size, __FILE__, __LINE__)
#define gc_free_wo(heap, obj) \
gc_free_wo_internal(heap, obj, __FILE__, __LINE__)
#endif
/* clang-format on */
#endif /* end of BH_ENABLE_GC_VERIFY */
#if WASM_ENABLE_GC != 0
/**
* Add gc object ref to the rootset of a gc heap.
*
* @param heap the heap to add the gc object to its rootset
* @param obj pointer to a valid WASM object managed by the gc heap.
*
* @return GC_SUCCESS if success, GC_ERROR otherwise
*/
int
gc_add_root(void *heap, gc_object_t obj);
int
gci_gc_heap(void *heap);
extra_info_node_t *
gc_search_extra_info_node(gc_handle_t handle, gc_object_t obj,
gc_size_t *p_index);
/**
* Set finalizer to the given object, if another finalizer is set to the same
* object, the previous one will be cancelled
*
* @param handle handle of the heap
* @param obj object to set finalizer
* @param cb finalizer function to be called before this object is freed
* @param data custom data to be passed to finalizer function
*
* @return true if success, false otherwise
*/
bool
gc_set_finalizer(gc_handle_t handle, gc_object_t obj, gc_finalizer_t cb,
void *data);
/**
* Unset finalizer to the given object
*
* @param handle handle of the heap
* @param obj object to unset finalizer
*/
void
gc_unset_finalizer(gc_handle_t handle, gc_object_t obj);
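
A hedged usage sketch (the heap handle and object are assumed to come from the embedder's existing setup): the callback receives the object and the custom data, and per this commit it runs either when the unreachable object is swept or, at the latest, when the heap is destroyed.

#include <stdio.h>
#include "ems_gc.h"

/* Hypothetical embedder callback: release a native resource that
   was tied to a WASM object. */
static void
on_wasm_obj_reclaimed(void *obj, void *data)
{
    printf("object %p reclaimed, releasing native handle %p\n", obj, data);
}

/* Given a heap handle and an object from the embedder: */
static void
attach_native_resource(gc_handle_t heap, gc_object_t obj, void *native)
{
    if (!gc_set_finalizer(heap, obj, on_wasm_obj_reclaimed, native)) {
        /* allocation of the bookkeeping node failed */
    }
    /* gc_unset_finalizer(heap, obj) would cancel it again. */
}
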
#if WASM_ENABLE_THREAD_MGR == 0
bool
wasm_runtime_traverse_gc_rootset(void *exec_env, void *heap);
#else
bool
wasm_runtime_traverse_gc_rootset(void *cluster, void *heap);
#endif
bool
wasm_runtime_get_wasm_object_ref_list(gc_object_t obj, bool *p_is_compact_mode,
gc_uint32 *p_ref_num,
gc_uint16 **p_ref_list,
gc_uint32 *p_ref_start_offset);
bool
wasm_runtime_get_wasm_object_extra_info_flag(gc_object_t obj);
void
wasm_runtime_set_wasm_object_extra_info_flag(gc_object_t obj, bool set);
void
wasm_runtime_gc_prepare();
void
wasm_runtime_gc_finalize();
#endif /* end of WASM_ENABLE_GC != 0 */
#define GC_HEAP_STAT_SIZE (128 / 4)
typedef struct {
int usage;
int usage_block;
int vo_usage;
int wo_usage;
int free;
int free_block;
int vo_free;
int wo_free;
int usage_sizes[GC_HEAP_STAT_SIZE];
int free_sizes[GC_HEAP_STAT_SIZE];
} gc_stat_t;
void
gc_show_stat(gc_handle_t handle);
#if WASM_ENABLE_GC != 0
void
gc_show_fragment(gc_handle_t handle);
#if WASM_ENABLE_GC_PERF_PROFILING != 0
void
gc_dump_perf_profiling(gc_handle_t *handle);
#endif
#endif
#ifdef __cplusplus
}
#endif

View File

@@ -17,8 +17,8 @@ extern "C" {
typedef enum hmu_type_enum {
HMU_TYPE_MIN = 0,
HMU_TYPE_MAX = 3,
HMU_JO = 3,
HMU_VO = 2,
HMU_WO = 3, /* WASM Object */
HMU_VO = 2, /* VM Object */
HMU_FC = 1,
HMU_FM = 0
} hmu_type_t;
@@ -135,13 +135,13 @@ hmu_verify(void *vheap, hmu_t *hmu);
#define hmu_unmark_pinuse(hmu) CLRBIT((hmu)->header, HMU_P_OFFSET)
#define hmu_get_pinuse(hmu) GETBIT((hmu)->header, HMU_P_OFFSET)
#define HMU_JO_VT_SIZE 27
#define HMU_JO_VT_OFFSET 0
#define HMU_JO_MB_OFFSET 28
#define HMU_WO_VT_SIZE 27
#define HMU_WO_VT_OFFSET 0
#define HMU_WO_MB_OFFSET 28
#define hmu_mark_jo(hmu) SETBIT((hmu)->header, HMU_JO_MB_OFFSET)
#define hmu_unmark_jo(hmu) CLRBIT((hmu)->header, HMU_JO_MB_OFFSET)
#define hmu_is_jo_marked(hmu) GETBIT((hmu)->header, HMU_JO_MB_OFFSET)
#define hmu_mark_wo(hmu) SETBIT((hmu)->header, HMU_WO_MB_OFFSET)
#define hmu_unmark_wo(hmu) CLRBIT((hmu)->header, HMU_WO_MB_OFFSET)
#define hmu_is_wo_marked(hmu) GETBIT((hmu)->header, HMU_WO_MB_OFFSET)
/**
* The hmu size is divisible by 8, its lowest 3 bits are 0, so we only
@@ -271,6 +271,33 @@ typedef struct gc_heap_struct {
size[left] <= size[cur] < size[right] */
hmu_tree_node_t *kfc_tree_root;
#if WASM_ENABLE_GC != 0
/* for rootset enumeration of private heap*/
void *root_set;
#if WASM_ENABLE_THREAD_MGR == 0
/* exec_env of current wasm module instance */
void *exec_env;
#else
/* thread cluster of current module instances */
void *cluster;
#endif
/* whether the fast mode of marking process that requires
additional memory fails. When the fast mode fails, the
marking process can still be done in the slow mode, which
doesn't need additional memory (by walking through all
blocks and marking successors of marked nodes until no new
node is marked). TODO: slow mode is not implemented. */
unsigned is_fast_marking_failed : 1;
/* whether the heap is doing reclaim */
unsigned is_doing_reclaim : 1;
/* Whether the heap can do reclaim */
unsigned is_reclaim_enabled : 1;
#endif
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
/* whether heap is corrupted, e.g. the hmu nodes are modified
by user */
@@ -280,8 +307,54 @@ typedef struct gc_heap_struct {
gc_size_t init_size;
gc_size_t highmark_size;
gc_size_t total_free_size;
#if WASM_ENABLE_GC != 0
gc_size_t gc_threshold;
gc_size_t gc_threshold_factor;
gc_size_t total_gc_count;
gc_size_t total_gc_time;
gc_size_t max_gc_time;
/* Usually there won't be too many extra info nodes, so we try to use a fixed
* array to store them; if the fixed array doesn't have enough space to store
* the nodes, a new space will be allocated from the heap */
extra_info_node_t *extra_info_normal_nodes[EXTRA_INFO_NORMAL_NODE_CNT];
/* Used to store extra information such as finalizers for specific nodes; we
* introduce a separate space to store this information so that only nodes which
* really require extra information occupy additional memory space. */
extra_info_node_t **extra_info_nodes;
gc_size_t extra_info_node_cnt;
gc_size_t extra_info_node_capacity;
#endif
#if GC_STAT_DATA != 0
gc_uint64 total_size_allocated;
gc_uint64 total_size_freed;
#endif
} gc_heap_t;
#if WASM_ENABLE_GC != 0
#define GC_DEFAULT_THRESHOLD_FACTOR 300
static inline void
gc_update_threshold(gc_heap_t *heap)
{
heap->gc_threshold =
heap->total_free_size * heap->gc_threshold_factor / 1000;
}
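
A worked example of this formula: with the default factor of 300, the threshold is 30% of the free size measured right after initialization or a collection, so a heap left with, say, 512 KB free gets gc_threshold = 512 KB * 300 / 1000 ≈ 153 KB, and alloc_hmu_ex() starts collecting once free space drops below roughly that amount.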
#define gct_vm_mutex_init os_mutex_init
#define gct_vm_mutex_destroy os_mutex_destroy
#define gct_vm_mutex_lock os_mutex_lock
#define gct_vm_mutex_unlock os_mutex_unlock
#define gct_vm_gc_prepare wasm_runtime_gc_prepare
#define gct_vm_gc_finished wasm_runtime_gc_finalize
#define gct_vm_begin_rootset_enumeration wasm_runtime_traverse_gc_rootset
#define gct_vm_get_wasm_object_ref_list wasm_runtime_get_wasm_object_ref_list
#define gct_vm_get_extra_info_flag wasm_runtime_get_wasm_object_extra_info_flag
#define gct_vm_set_extra_info_flag wasm_runtime_set_wasm_object_extra_info_flag
#endif /* end of WASM_ENABLE_GC != 0 */
/**
* MISC internal used APIs
*/

View File

@@ -24,7 +24,7 @@ hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size,
gc_uint32 i = 0;
bh_assert(hmu);
bh_assert(hmu_get_ut(hmu) == HMU_JO || hmu_get_ut(hmu) == HMU_VO);
bh_assert(hmu_get_ut(hmu) == HMU_WO || hmu_get_ut(hmu) == HMU_VO);
bh_assert(tot_size >= OBJ_EXTRA_SIZE);
bh_assert(!(tot_size & 7));
bh_assert(hmu_get_ut(hmu) != HMU_VO || hmu_get_size(hmu) >= tot_size);
@@ -48,7 +48,9 @@ hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size,
void
hmu_verify(void *vheap, hmu_t *hmu)
{
#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
gc_heap_t *heap = (gc_heap_t *)vheap;
#endif
gc_object_prefix_t *prefix = NULL;
gc_object_suffix_t *suffix = NULL;
gc_uint32 i = 0;
@@ -64,7 +66,7 @@ hmu_verify(void *vheap, hmu_t *hmu)
size = prefix->size;
suffix = (gc_object_suffix_t *)((gc_uint8 *)hmu + size - OBJ_SUFFIX_SIZE);
if (ut == HMU_VO || ut == HMU_JO) {
if (ut == HMU_VO || ut == HMU_WO) {
/* check padding*/
for (i = 0; i < GC_OBJECT_PREFIX_PADDING_CNT; i++) {
if (prefix->padding[i] != GC_OBJECT_PADDING_VALUE) {

View File

@@ -12,6 +12,7 @@ gc_init_internal(gc_heap_t *heap, char *base_addr, gc_size_t heap_max_size)
int ret;
memset(heap, 0, sizeof *heap);
memset(base_addr, 0, heap_max_size);
ret = os_mutex_init(&heap->lock);
if (ret != BHT_OK) {
@@ -26,6 +27,10 @@ gc_init_internal(gc_heap_t *heap, char *base_addr, gc_size_t heap_max_size)
heap->total_free_size = heap->current_size;
heap->highmark_size = 0;
#if WASM_ENABLE_GC != 0
heap->gc_threshold_factor = GC_DEFAULT_THRESHOLD_FACTOR;
gc_update_threshold(heap);
#endif
root = heap->kfc_tree_root = (hmu_tree_node_t *)heap->kfc_tree_root_buf;
memset(root, 0, sizeof *root);
@@ -129,6 +134,28 @@ gc_destroy_with_pool(gc_handle_t handle)
gc_heap_t *heap = (gc_heap_t *)handle;
int ret = GC_SUCCESS;
#if WASM_ENABLE_GC != 0
gc_size_t i = 0;
if (heap->extra_info_node_cnt > 0) {
for (i = 0; i < heap->extra_info_node_cnt; i++) {
extra_info_node_t *node = heap->extra_info_nodes[i];
#if BH_ENABLE_GC_VERIFY != 0
os_printf("Memory leak detected: gc object [%p] not claimed\n",
node->obj);
#endif
bh_assert(heap->is_reclaim_enabled);
node->finalizer(node->obj, node->data);
BH_FREE(heap->extra_info_nodes[i]);
}
if (heap->extra_info_nodes != heap->extra_info_normal_nodes) {
BH_FREE(heap->extra_info_nodes);
}
}
#endif
#if BH_ENABLE_GC_VERIFY != 0
hmu_t *cur = (hmu_t *)heap->base_addr;
hmu_t *end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
@@ -145,10 +172,33 @@ gc_destroy_with_pool(gc_handle_t handle)
#endif
os_mutex_destroy(&heap->lock);
memset(heap->base_addr, 0, heap->current_size);
memset(heap, 0, sizeof(gc_heap_t));
return ret;
}
#if WASM_ENABLE_GC != 0
#if WASM_ENABLE_THREAD_MGR == 0
void
gc_enable_gc_reclaim(gc_handle_t handle, void *exec_env)
{
gc_heap_t *heap = (gc_heap_t *)handle;
heap->is_reclaim_enabled = 1;
heap->exec_env = exec_env;
}
#else
void
gc_enable_gc_reclaim(gc_handle_t handle, void *cluster)
{
gc_heap_t *heap = (gc_heap_t *)handle;
heap->is_reclaim_enabled = 1;
heap->cluster = cluster;
}
#endif
#endif
uint32
gc_get_heap_struct_size()
{
@@ -287,12 +337,103 @@ gci_verify_heap(gc_heap_t *heap)
}
#endif
void
gc_heap_stat(void *heap_ptr, gc_stat_t *stat)
{
hmu_t *cur = NULL, *end = NULL;
hmu_type_t ut;
gc_size_t size;
gc_heap_t *heap = (gc_heap_t *)heap_ptr;
memset(stat, 0, sizeof(gc_stat_t));
cur = (hmu_t *)heap->base_addr;
end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
while (cur < end) {
ut = hmu_get_ut(cur);
size = hmu_get_size(cur);
bh_assert(size > 0);
if (ut == HMU_FC || ut == HMU_FM
|| (ut == HMU_VO && hmu_is_vo_freed(cur))
|| (ut == HMU_WO && !hmu_is_wo_marked(cur))) {
if (ut == HMU_VO)
stat->vo_free += size;
if (ut == HMU_WO)
stat->wo_free += size;
stat->free += size;
stat->free_block++;
if (size / sizeof(int) < GC_HEAP_STAT_SIZE - 1)
stat->free_sizes[size / sizeof(int)] += 1;
else
stat->free_sizes[GC_HEAP_STAT_SIZE - 1] += 1;
}
else {
if (ut == HMU_VO)
stat->vo_usage += size;
if (ut == HMU_WO)
stat->wo_usage += size;
stat->usage += size;
stat->usage_block++;
if (size / sizeof(int) < GC_HEAP_STAT_SIZE - 1)
stat->usage_sizes[size / sizeof(int)] += 1;
else
stat->usage_sizes[GC_HEAP_STAT_SIZE - 1] += 1;
}
cur = (hmu_t *)((char *)cur + size);
}
}
void
gc_print_stat(void *heap_ptr, int verbose)
{
gc_stat_t stat;
int i;
bh_assert(heap_ptr != NULL);
gc_heap_t *heap = (gc_heap_t *)(heap_ptr);
gc_heap_stat(heap, &stat);
os_printf("# stat %s %p use %d free %d \n", "instance", heap, stat.usage,
stat.free);
os_printf("# stat %s %p wo_usage %d vo_usage %d \n", "instance", heap,
stat.wo_usage, stat.vo_usage);
os_printf("# stat %s %p wo_free %d vo_free %d \n", "instance", heap,
stat.wo_free, stat.vo_free);
#if WASM_ENABLE_GC == 0
os_printf("# stat free size %" PRIu32 " high %" PRIu32 "\n",
heap->total_free_size, heap->highmark_size);
#else
os_printf("# stat gc %" PRIu32 " free size %" PRIu32 " high %" PRIu32 "\n",
heap->total_gc_count, heap->total_free_size, heap->highmark_size);
#endif
if (verbose) {
os_printf("usage sizes: \n");
for (i = 0; i < GC_HEAP_STAT_SIZE; i++)
if (stat.usage_sizes[i])
os_printf(" %d: %d; ", i * 4, stat.usage_sizes[i]);
os_printf(" \n");
os_printf("free sizes: \n");
for (i = 0; i < GC_HEAP_STAT_SIZE; i++)
if (stat.free_sizes[i])
os_printf(" %d: %d; ", i * 4, stat.free_sizes[i]);
}
}
void *
gc_heap_stats(void *heap_arg, uint32 *stats, int size)
{
int i;
gc_heap_t *heap = (gc_heap_t *)heap_arg;
if (!gci_is_heap_valid(heap)) {
for (i = 0; i < size; i++)
stats[i] = 0;
return NULL;
}
for (i = 0; i < size; i++) {
switch (i) {
case GC_STAT_TOTAL:
@@ -304,9 +445,83 @@ gc_heap_stats(void *heap_arg, uint32 *stats, int size)
case GC_STAT_HIGHMARK:
stats[i] = heap->highmark_size;
break;
#if WASM_ENABLE_GC != 0
case GC_STAT_COUNT:
stats[i] = heap->total_gc_count;
break;
case GC_STAT_TIME:
stats[i] = heap->total_gc_time;
break;
#endif
default:
break;
}
}
return heap;
}
void
gc_traverse_tree(hmu_tree_node_t *node, gc_size_t *stats, int *n)
{
if (!node)
return;
if (*n > 0)
gc_traverse_tree(node->right, stats, n);
if (*n > 0) {
(*n)--;
stats[*n] = node->size;
}
if (*n > 0)
gc_traverse_tree(node->left, stats, n);
}
void
gc_show_stat(void *heap)
{
uint32 stats[GC_STAT_MAX];
heap = gc_heap_stats(heap, stats, GC_STAT_MAX);
os_printf("\n[GC stats %p] %" PRIu32 " %" PRIu32 " %" PRIu32 " %" PRIu32
" %" PRIu32 "\n",
heap, stats[0], stats[1], stats[2], stats[3], stats[4]);
}
#if WASM_ENABLE_GC != 0
void
gc_show_fragment(void *heap_arg)
{
uint32 stats[3];
int n = 3;
gc_heap_t *heap = (gc_heap_t *)heap_arg;
memset(stats, 0, n * sizeof(int));
gct_vm_mutex_lock(&heap->lock);
gc_traverse_tree(heap->kfc_tree_root, (gc_size_t *)stats, &n);
gct_vm_mutex_unlock(&heap->lock);
os_printf("\n[GC %p top sizes] %" PRIu32 " %" PRIu32 " %" PRIu32 "\n", heap,
stats[0], stats[1], stats[2]);
}
#if WASM_ENABLE_GC_PERF_PROFILING != 0
void
gc_dump_perf_profiling(gc_handle_t *handle)
{
gc_heap_t *gc_heap_handle = (void *)handle;
if (gc_heap_handle) {
os_printf("\nGC performance summary\n");
os_printf(" Total GC time (ms): %u\n",
gc_heap_handle->total_gc_time);
os_printf(" Max GC time (ms): %u\n", gc_heap_handle->max_gc_time);
}
else {
os_printf("Failed to dump GC performance\n");
}
}
#endif
#endif

View File

@@ -4,6 +4,7 @@
*/
#include "mem_alloc.h"
#include <stdbool.h>
#if DEFAULT_MEM_ALLOCATOR == MEM_ALLOCATOR_EMS
@@ -56,6 +57,43 @@ mem_allocator_free(mem_allocator_t allocator, void *ptr)
gc_free_vo((gc_handle_t)allocator, ptr);
}
#if WASM_ENABLE_GC != 0
void *
mem_allocator_malloc_with_gc(mem_allocator_t allocator, uint32_t size)
{
return gc_alloc_wo((gc_handle_t)allocator, size);
}
#if WASM_GC_MANUALLY != 0
void
mem_allocator_free_with_gc(mem_allocator_t allocator, void *ptr)
{
if (ptr)
gc_free_wo((gc_handle_t)allocator, ptr);
}
#endif
#if WASM_ENABLE_THREAD_MGR == 0
void
mem_allocator_enable_gc_reclaim(mem_allocator_t allocator, void *exec_env)
{
return gc_enable_gc_reclaim((gc_handle_t)allocator, exec_env);
}
#else
void
mem_allocator_enable_gc_reclaim(mem_allocator_t allocator, void *cluster)
{
return gc_enable_gc_reclaim((gc_handle_t)allocator, cluster);
}
#endif
int
mem_allocator_add_root(mem_allocator_t allocator, WASMObjectRef obj)
{
return gc_add_root((gc_handle_t)allocator, (gc_object_t)obj);
}
#endif
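
Taken together these wrappers give the runtime an allocator-level view of the GC heap. A hedged sketch of the expected call order in a single-threaded build (WASM_ENABLE_THREAD_MGR == 0), where the exec_env value is assumed to come from the instantiated module:

#include "mem_alloc.h"

/* Sketch only: enable reclaim once the instance's exec_env exists,
   then allocate GC-managed (WO) objects through the allocator. */
static void
setup_gc_heap(mem_allocator_t allocator, void *exec_env)
{
    /* Before this call, is_reclaim_enabled is 0 and do_gc_heap()
       is a no-op, so the heap acts as a plain allocator. */
    mem_allocator_enable_gc_reclaim(allocator, exec_env);

    /* May trigger a collection per the alloc_hmu_ex() policy; the
       object becomes reclaimable once unreachable from the rootset. */
    void *obj = mem_allocator_malloc_with_gc(allocator, 64);
    (void)obj;
}
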
int
mem_allocator_migrate(mem_allocator_t allocator, char *pool_buf_new,
uint32 pool_buf_size)
@@ -76,6 +114,30 @@ mem_allocator_get_alloc_info(mem_allocator_t allocator, void *mem_alloc_info)
return true;
}
#if WASM_ENABLE_GC != 0
bool
mem_allocator_set_gc_finalizer(mem_allocator_t allocator, void *obj,
gc_finalizer_t cb, void *data)
{
return gc_set_finalizer((gc_handle_t)allocator, (gc_object_t)obj, cb, data);
}
void
mem_allocator_unset_gc_finalizer(mem_allocator_t allocator, void *obj)
{
gc_unset_finalizer((gc_handle_t)allocator, (gc_object_t)obj);
}
#if WASM_ENABLE_GC_PERF_PROFILING != 0
void
mem_allocator_dump_perf_profiling(mem_allocator_t allocator)
{
gc_dump_perf_profiling((gc_handle_t)allocator);
}
#endif
#endif
#else /* else of DEFAULT_MEM_ALLOCATOR */
#include "tlsf/tlsf.h"

View File

@@ -11,7 +11,13 @@ if (WAMR_BUILD_GC_VERIFY EQUAL 1)
endif ()
if (NOT DEFINED WAMR_BUILD_GC_CORRUPTION_CHECK)
set (WAMR_BUILD_GC_CORRUPTION_CHECK 1)
# Disable memory allocator heap corruption check
# when GC is enabled
if (WAMR_BUILD_GC EQUAL 1)
set (WAMR_BUILD_GC_CORRUPTION_CHECK 0)
else ()
set (WAMR_BUILD_GC_CORRUPTION_CHECK 1)
endif ()
endif ()
if (WAMR_BUILD_GC_CORRUPTION_CHECK EQUAL 0)

View File

@@ -7,6 +7,9 @@
#define __MEM_ALLOC_H
#include "bh_platform.h"
#if WASM_ENABLE_GC != 0
#include "../../common/gc/gc_object.h"
#endif
#ifdef __cplusplus
extern "C" {
@@ -14,6 +17,8 @@ extern "C" {
typedef void *mem_allocator_t;
typedef void (*gc_finalizer_t)(void *obj, void *data);
mem_allocator_t
mem_allocator_create(void *mem, uint32_t size);
@@ -45,6 +50,39 @@ mem_allocator_migrate(mem_allocator_t allocator, char *pool_buf_new,
bool
mem_allocator_is_heap_corrupted(mem_allocator_t allocator);
#if WASM_ENABLE_GC != 0
void *
mem_allocator_malloc_with_gc(mem_allocator_t allocator, uint32_t size);
#if WASM_GC_MANUALLY != 0
void
mem_allocator_free_with_gc(mem_allocator_t allocator, void *ptr);
#endif
#if WASM_ENABLE_THREAD_MGR == 0
void
mem_allocator_enable_gc_reclaim(mem_allocator_t allocator, void *exec_env);
#else
void
mem_allocator_enable_gc_reclaim(mem_allocator_t allocator, void *cluster);
#endif
int
mem_allocator_add_root(mem_allocator_t allocator, WASMObjectRef obj);
bool
mem_allocator_set_gc_finalizer(mem_allocator_t allocator, void *obj,
gc_finalizer_t cb, void *data);
void
mem_allocator_unset_gc_finalizer(mem_allocator_t allocator, void *obj);
#if WASM_ENABLE_GC_PERF_PROFILING != 0
void
mem_allocator_dump_perf_profiling(mem_allocator_t allocator);
#endif
#endif /* end of WASM_ENABLE_GC != 0 */
bool
mem_allocator_get_alloc_info(mem_allocator_t allocator, void *mem_alloc_info);