Make heap and linear memory contiguous to refine compilation time and footprint (#233)
Use FastISel for JIT mode
Use united aot version in aot file and aot runtime
Disable check signature failed warning for wamrc
Fix fast interpreter x86_32 float issue
Remove unused empty lvgl folder
@@ -404,10 +404,11 @@ gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size,
 #endif
 {
     gc_heap_t* heap = (gc_heap_t*) vheap;
-    hmu_t *hmu = NULL, *hmu_old = NULL;
+    hmu_t *hmu = NULL, *hmu_old = NULL, *hmu_next;
     gc_object_t ret = (gc_object_t) NULL, obj_old = (gc_object_t)ptr;
-    gc_size_t tot_size, tot_size_unaligned, tot_size_old = 0;
+    gc_size_t tot_size, tot_size_unaligned, tot_size_old = 0, tot_size_next;
     gc_size_t obj_size, obj_size_old;
+    hmu_type_t ut;

     /* hmu header + prefix + obj + suffix */
     tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
@@ -427,6 +428,32 @@ gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size,

     os_mutex_lock(&heap->lock);

+    if (hmu_old) {
+        hmu_next = (hmu_t*)((char *)hmu_old + tot_size_old);
+        if (hmu_is_in_heap(heap, hmu_next)) {
+            ut = hmu_get_ut(hmu_next);
+            tot_size_next = hmu_get_size(hmu_next);
+            if (ut == HMU_FC
+                && tot_size <= tot_size_old + tot_size_next) {
+                /* current node and next node meets requirement */
+                unlink_hmu(heap, hmu_next);
+                hmu_set_size(hmu_old, tot_size);
+                memset((char*)hmu_old + tot_size_old, 0, tot_size - tot_size_old);
+#if BH_ENABLE_GC_VERIFY != 0
+                hmu_init_prefix_and_suffix(hmu_old, tot_size, file, line);
+#endif
+                if (tot_size < tot_size_old + tot_size_next) {
+                    hmu_next = (hmu_t*)((char*)hmu_old + tot_size);
+                    tot_size_next = tot_size_old + tot_size_next - tot_size;
+                    gci_add_fc(heap, hmu_next, tot_size_next);
+                }
+                os_mutex_unlock(&heap->lock);
+                return obj_old;
+            }
+        }
+    }
+
     hmu = alloc_hmu_ex(heap, tot_size);
     if (!hmu)
         goto finish;

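Note on the hunk above: the new branch lets gc_realloc_vo_internal grow an object in place when the chunk immediately after it is a free chunk (HMU_FC) large enough to cover the requested size, returning any leftover tail to the free list via gci_add_fc. Below is a minimal standalone sketch of the same idea, using hypothetical simplified types (chunk_t, free_list_*) rather than the WAMR hmu_* API; it is illustrative only, not code from the commit. The real code additionally re-initializes the verification prefix/suffix when BH_ENABLE_GC_VERIFY is enabled.

/* Illustrative sketch only -- chunk_t, free_list_unlink and free_list_add
 * are hypothetical stand-ins for the allocator's real structures. */
#include <stddef.h>
#include <string.h>

typedef struct chunk {
    size_t size;   /* total chunk size, header included */
    int is_free;   /* non-zero if the chunk sits on the free list */
} chunk_t;

void free_list_unlink(chunk_t *c);            /* assumed helper */
void free_list_add(chunk_t *c, size_t size);  /* assumed helper */

/* Try to grow the in-use chunk `cur` to `new_size` bytes by absorbing the
 * neighbouring free chunk. Returns 1 on success, 0 if the caller must fall
 * back to allocate-copy-free. */
static int
grow_in_place(chunk_t *cur, chunk_t *heap_end, size_t new_size)
{
    chunk_t *next = (chunk_t *)((char *)cur + cur->size);
    size_t merged;

    if (new_size <= cur->size)
        return 1;                       /* already large enough */
    if (next >= heap_end || !next->is_free)
        return 0;                       /* no free neighbour to absorb */

    merged = cur->size + next->size;
    if (new_size > merged)
        return 0;                       /* neighbour is too small */

    free_list_unlink(next);             /* consume the free neighbour */
    memset((char *)cur + cur->size, 0, new_size - cur->size);
    cur->size = new_size;
    if (new_size < merged)              /* give the tail back as a free chunk */
        free_list_add((chunk_t *)((char *)cur + new_size), merged - new_size);
    return 1;
}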
@@ -72,6 +72,35 @@ gc_init_with_pool(char *buf, gc_size_t buf_size);
 int
 gc_destroy_with_pool(gc_handle_t handle);

+/**
+ * Migrate heap from one place to another place
+ *
+ * @param handle handle of the new heap
+ * @param handle_old handle of the old heap
+ *
+ * @return GC_SUCCESS if success, GC_ERROR otherwise
+ */
+int
+gc_migrate(gc_handle_t handle, gc_handle_t handle_old);
+
+/**
+ * Re-initialize lock of heap
+ *
+ * @param handle the heap handle
+ *
+ * @return GC_SUCCESS if success, GC_ERROR otherwise
+ */
+int
+gc_reinit_lock(gc_handle_t handle);
+
+/**
+ * Destroy lock of heap
+ *
+ * @param handle the heap handle
+ */
+void
+gc_destroy_lock(gc_handle_t handle);
+
 /**
  * Get Heap Stats
  *
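A hypothetical usage sketch for the declarations above (not part of the commit): once the runtime has moved the buffer that backs a heap, gc_migrate rebases the heap's internal pointers against the new base address. The header name and the move_heap helper are assumptions for illustration; the sketch also assumes the heap handle points at the start of the heap's buffer.

#include <string.h>
#include "ems_gc.h"   /* assumed header name for the declarations above */

/* Move a live heap to a new buffer and fix up its internal pointers.
 * Assumes `size` covers the whole heap region. */
static gc_handle_t
move_heap(gc_handle_t old_handle, char *new_buf, gc_size_t size)
{
    gc_handle_t new_handle = (gc_handle_t)new_buf;

    /* copy the raw heap contents, then let gc_migrate rebase pointers */
    memmove(new_buf, (char *)old_handle, size);
    if (gc_migrate(new_handle, old_handle) != GC_SUCCESS)
        return NULL;
    return new_handle;
}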
@@ -192,7 +192,6 @@ typedef struct gc_heap_struct {

     gc_uint8 *base_addr;
     gc_size_t current_size;
-    gc_size_t max_size;

     korp_mutex lock;

@@ -35,7 +35,6 @@ gc_init_with_pool(char *buf, gc_size_t buf_size)
    }

    /* init all data structures*/
-   heap->max_size = heap_max_size;
    heap->current_size = heap_max_size;
    heap->base_addr = (gc_uint8*)base_addr;
    heap->heap_id = (gc_handle_t)heap;
@@ -82,11 +81,72 @@ gc_destroy_with_pool(gc_handle_t handle)
 {
     gc_heap_t *heap = (gc_heap_t *) handle;
     os_mutex_destroy(&heap->lock);
-    memset(heap->base_addr, 0, heap->max_size);
+    memset(heap->base_addr, 0, heap->current_size);
     memset(heap, 0, sizeof(gc_heap_t));
     return GC_SUCCESS;
 }

+static void
+adjust_ptr(uint8 **p_ptr, intptr_t offset)
+{
+    if (*p_ptr)
+        *p_ptr += offset;
+}
+
+int
+gc_migrate(gc_handle_t handle, gc_handle_t handle_old)
+{
+    gc_heap_t *heap = (gc_heap_t *) handle;
+    intptr_t offset = (uint8*)handle - (uint8*)handle_old;
+    hmu_t *cur = NULL, *end = NULL;
+    hmu_tree_node_t *tree_node;
+    gc_size_t size;
+
+    os_mutex_init(&heap->lock);
+
+    if (offset == 0)
+        return 0;
+
+    heap->heap_id = (gc_handle_t)heap;
+    heap->base_addr += offset;
+    adjust_ptr((uint8**)&heap->kfc_tree_root.left, offset);
+    adjust_ptr((uint8**)&heap->kfc_tree_root.right, offset);
+    adjust_ptr((uint8**)&heap->kfc_tree_root.parent, offset);
+
+    cur = (hmu_t*)heap->base_addr;
+    end = (hmu_t*)((char*)heap->base_addr + heap->current_size);
+
+    while (cur < end) {
+        size = hmu_get_size(cur);
+        bh_assert(size > 0);
+
+        if (!HMU_IS_FC_NORMAL(size)) {
+            tree_node = (hmu_tree_node_t *)cur;
+            adjust_ptr((uint8**)&tree_node->left, offset);
+            adjust_ptr((uint8**)&tree_node->right, offset);
+            adjust_ptr((uint8**)&tree_node->parent, offset);
+        }
+        cur = (hmu_t*)((char *)cur + size);
+    }
+
+    bh_assert(cur == end);
+    return 0;
+}
+
+int
+gc_reinit_lock(gc_handle_t handle)
+{
+    gc_heap_t *heap = (gc_heap_t *) handle;
+    return os_mutex_init(&heap->lock);
+}
+
+void
+gc_destroy_lock(gc_handle_t handle)
+{
+    gc_heap_t *heap = (gc_heap_t *) handle;
+    os_mutex_destroy(&heap->lock);
+}
+
 #if BH_ENABLE_GC_VERIFY != 0
 void
 gci_verify_heap(gc_heap_t *heap)
@@ -37,6 +37,26 @@ void mem_allocator_free(mem_allocator_t allocator, void *ptr)
         gc_free_vo((gc_handle_t) allocator, ptr);
 }

+int
+mem_allocator_migrate(mem_allocator_t allocator,
+                      mem_allocator_t allocator_old)
+{
+    return gc_migrate((gc_handle_t) allocator,
+                      (gc_handle_t) allocator_old);
+}
+
+int
+mem_allocator_reinit_lock(mem_allocator_t allocator)
+{
+    return gc_reinit_lock((gc_handle_t) allocator);
+}
+
+void
+mem_allocator_destroy_lock(mem_allocator_t allocator)
+{
+    gc_destroy_lock((gc_handle_t) allocator);
+}
+
 #else /* else of DEFAULT_MEM_ALLOCATOR */

 #include "tlsf/tlsf.h"
@@ -141,5 +161,27 @@ mem_allocator_free(mem_allocator_t allocator, void *ptr)
     }
 }

+int
+mem_allocator_migrate(mem_allocator_t allocator,
+                      mem_allocator_t allocator_old)
+{
+    return tlsf_migrate((mem_allocator_tlsf *) allocator,
+                        (mem_allocator_tlsf *) allocator_old);
+}
+
+int
+mem_allocator_init_lock(mem_allocator_t allocator)
+{
+    mem_allocator_tlsf *allocator_tlsf = (mem_allocator_tlsf *)allocator;
+    return os_mutex_init(&allocator_tlsf->lock);
+}
+
+void
+mem_allocator_destroy_lock(mem_allocator_t allocator)
+{
+    mem_allocator_tlsf *allocator_tlsf = (mem_allocator_tlsf *)allocator;
+    os_mutex_destroy(&allocator_tlsf->lock);
+}
+
 #endif /* end of DEFAULT_MEM_ALLOCATOR */

@@ -29,6 +29,16 @@ mem_allocator_realloc(mem_allocator_t allocator, void *ptr, uint32_t size);
 void
 mem_allocator_free(mem_allocator_t allocator, void *ptr);

+int
+mem_allocator_migrate(mem_allocator_t allocator,
+                      mem_allocator_t allocator_old);
+
+int
+mem_allocator_reinit_lock(mem_allocator_t allocator);
+
+void
+mem_allocator_destroy_lock(mem_allocator_t allocator);
+
 #ifdef __cplusplus
 }
 #endif
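A hypothetical sketch of how an embedder might drive the mem_allocator_* facade declared above when a memory pool is relocated; the move_pool helper, its parameters, and the lock-cleanup step are illustrative assumptions, not code from the commit. Because both the ems gc back end and the tlsf back end implement the same wrappers, the caller does not need to know which allocator was selected at build time.

#include <stdint.h>
#include <string.h>
#include "mem_alloc.h"   /* assumed header name for the declarations above */

/* Relocate a memory pool and keep the allocator usable at the new address.
 * mem_allocator_migrate() is assumed to return 0 on success, matching the
 * `return 0` paths of gc_migrate above. */
static mem_allocator_t
move_pool(mem_allocator_t old_alloc, char *new_buf, uint32_t pool_size)
{
    mem_allocator_t new_alloc = (mem_allocator_t)new_buf;

    /* copy the raw pool, then rebase internal pointers at the new address */
    memmove(new_buf, (char *)old_alloc, pool_size);
    if (mem_allocator_migrate(new_alloc, old_alloc) != 0)
        return NULL;

    /* assumption: the old pool is about to be released, so destroy the OS
     * mutex that still lives inside it to avoid leaking the lock resource */
    mem_allocator_destroy_lock(old_alloc);
    return new_alloc;
}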