re-org platform APIs, simplify porting process (#201)

Co-authored-by: Xu Jun <jun1.xu@intel.com>
Author: Xu Jun
Date: 2020-03-16 16:43:57 +08:00
Committed by: GitHub
Parent: ef5ceffe71
Commit: f1a0e75ab7
177 changed files with 2954 additions and 7904 deletions


@ -20,7 +20,7 @@ static int hmu_is_in_heap(gc_heap_t* heap, hmu_t* hmu)
/* Node @p will be removed from the tree and left,right,parent pointers of node @p will be*/
/* set to be NULL. Other fields will not be touched.*/
/* The tree will be re-organized so that the order conditions are still satisfied.*/
BH_STATIC void remove_tree_node(hmu_tree_node_t *p)
static void remove_tree_node(hmu_tree_node_t *p)
{
hmu_tree_node_t *q = NULL, **slot = NULL;
@ -92,19 +92,20 @@ static void unlink_hmu(gc_heap_t *heap, hmu_t *hmu)
if (HMU_IS_FC_NORMAL(size)) {
uint32 node_idx = size >> 3;
hmu_normal_node_t* node = heap->kfc_normal_list[node_idx].next;
hmu_normal_node_t** p = &(heap->kfc_normal_list[node_idx].next);
hmu_normal_node_t *node_prev = &heap->kfc_normal_list[node_idx];
hmu_normal_node_t *node =
get_hmu_normal_node_next(&heap->kfc_normal_list[node_idx]);
while (node) {
if ((hmu_t*) node == hmu) {
*p = node->next;
set_hmu_normal_node_next(node_prev, get_hmu_normal_node_next(node));
break;
}
p = &(node->next);
node = node->next;
node_prev = node;
node = get_hmu_normal_node_next(node);
}
if (!node) {
bh_printf("[GC_ERROR]couldn't find the node in the normal list");
os_printf("[GC_ERROR]couldn't find the node in the normal list");
}
} else {
remove_tree_node((hmu_tree_node_t *) hmu);
@ -154,7 +155,7 @@ void gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
node_idx = size >> 3;
np->next = heap->kfc_normal_list[node_idx].next;
heap->kfc_normal_list[node_idx].next = np;
set_hmu_normal_node_next(&heap->kfc_normal_list[node_idx], np);
return;
}
@ -197,7 +198,7 @@ void gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
/* A proper HMU will be returned. This HMU can include the header and given size. The returned HMU will be aligned to 8 bytes.*/
/* NULL will be returned if there are no proper HMU.*/
BH_STATIC hmu_t *alloc_hmu(gc_heap_t *heap, gc_size_t size)
static hmu_t *alloc_hmu(gc_heap_t *heap, gc_size_t size)
{
hmu_normal_node_t *node = NULL, *p = NULL;
uint32 node_idx = 0, init_node_idx = 0;
@ -217,7 +218,7 @@ BH_STATIC hmu_t *alloc_hmu(gc_heap_t *heap, gc_size_t size)
for (node_idx = init_node_idx; node_idx < HMU_NORMAL_NODE_CNT;
node_idx++) {
node = heap->kfc_normal_list + node_idx;
if (node->next)
if (get_hmu_normal_node_next(node))
break;
node = NULL;
}
@ -226,7 +227,7 @@ BH_STATIC hmu_t *alloc_hmu(gc_heap_t *heap, gc_size_t size)
if (node) {
bh_assert(node_idx >= init_node_idx);
p = node->next;
p = get_hmu_normal_node_next(node);
node->next = p->next;
bh_assert(((gc_int32)(uintptr_t)hmu_to_obj(p) & 7) == 0);
@ -316,7 +317,7 @@ BH_STATIC hmu_t *alloc_hmu(gc_heap_t *heap, gc_size_t size)
/* A proper HMU will be returned. This HMU can include the header and given size. The returned HMU will be aligned to 8 bytes.*/
/* NULL will be returned if there are no proper HMU.*/
BH_STATIC hmu_t* alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
static hmu_t* alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
{
hmu_t *ret = NULL;
@ -353,11 +354,14 @@ gc_object_t _gc_alloc_vo_i_heap(void *vheap,
gc_heap_t* heap = (gc_heap_t*) vheap;
hmu_t *hmu = NULL;
gc_object_t ret = (gc_object_t) NULL;
gc_size_t tot_size = 0;
gc_size_t tot_size = 0, tot_size_unaligned;
/* align size*/
tot_size = GC_ALIGN_8(size + HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE); /* hmu header, prefix, suffix*/
/* hmu header + prefix + obj + suffix */
tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
/* aligned size*/
tot_size = GC_ALIGN_8(tot_size_unaligned);
if (tot_size < size)
/* integer overflow */
return NULL;
gct_vm_mutex_lock(&heap->lock);
@ -376,9 +380,12 @@ gc_object_t _gc_alloc_vo_i_heap(void *vheap,
#endif
ret = hmu_to_obj(hmu);
if (tot_size > tot_size_unaligned)
/* clear buffer appended by GC_ALIGN_8() */
memset((uint8*)ret + size, 0, tot_size - tot_size_unaligned);
#if BH_ENABLE_MEMORY_PROFILING != 0
bh_printf("HEAP.ALLOC: heap: %p, size: %u", heap, size);
os_printf("HEAP.ALLOC: heap: %p, size: %u", heap, size);
#endif
FINISH:
@ -393,23 +400,25 @@ gc_object_t _gc_realloc_vo_i_heap(void *vheap, void *ptr,
gc_heap_t* heap = (gc_heap_t*) vheap;
hmu_t *hmu = NULL, *hmu_old = NULL;
gc_object_t ret = (gc_object_t) NULL, obj_old = (gc_object_t)ptr;
gc_size_t tot_size = 0, size_old = 0;
gc_size_t tot_size, tot_size_unaligned, tot_size_old = 0;
gc_size_t obj_size, obj_size_old;
/* hmu header + prefix + obj + suffix */
tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE;
/* aligned size*/
tot_size = GC_ALIGN_8(tot_size_unaligned);
if (tot_size < size)
/* integer overflow */
return NULL;
if (obj_old) {
hmu_old = obj_to_hmu(obj_old);
size_old = hmu_get_size(hmu_old);
size_old -= HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE;
if (size < size_old)
return NULL;
if (size == size_old)
tot_size_old = hmu_get_size(hmu_old);
if (tot_size <= tot_size_old)
/* current node already meets requirement */
return obj_old;
}
/* align size*/
tot_size = GC_ALIGN_8(size + HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE); /* hmu header, prefix, suffix*/
if (tot_size < size)
return NULL;
gct_vm_mutex_lock(&heap->lock);
hmu = alloc_hmu_ex(heap, tot_size);
@ -428,16 +437,19 @@ gc_object_t _gc_realloc_vo_i_heap(void *vheap, void *ptr,
ret = hmu_to_obj(hmu);
#if BH_ENABLE_MEMORY_PROFILING != 0
bh_printf("HEAP.ALLOC: heap: %p, size: %u", heap, size);
os_printf("HEAP.ALLOC: heap: %p, size: %u", heap, size);
#endif
FINISH:
gct_vm_mutex_unlock(&heap->lock);
if (ret) {
memset(ret, 0, size);
obj_size = tot_size - HMU_SIZE - OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
memset(ret, 0, obj_size);
if (obj_old) {
memcpy(ret, obj_old, size_old);
obj_size_old = tot_size_old - HMU_SIZE
- OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
bh_memcpy_s(ret, obj_size, obj_old, obj_size_old);
gc_free_h(vheap, obj_old);
}
}
@ -445,47 +457,6 @@ FINISH:
return ret;
}
/* see ems_gc.h for description*/
gc_object_t _gc_alloc_jo_i_heap(void *vheap,
gc_size_t size ALLOC_EXTRA_PARAMETERS)
{
gc_heap_t* heap = (gc_heap_t*) vheap;
gc_object_t ret = (gc_object_t) NULL;
hmu_t *hmu = NULL;
gc_size_t tot_size = 0;
bh_assert(gci_is_heap_valid(heap));
/* align size*/
tot_size = GC_ALIGN_8(size + HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE); /* hmu header, prefix, suffix*/
if (tot_size < size)
return NULL;
hmu = alloc_hmu_ex(heap, tot_size);
if (!hmu)
goto FINISH;
/* reset all fields*/
memset((char*) hmu + sizeof(*hmu), 0, tot_size - sizeof(*hmu));
/* hmu->header = 0; */
hmu_set_ut(hmu, HMU_JO);
hmu_unmark_jo(hmu);
#if defined(GC_VERIFY)
hmu_init_prefix_and_suffix(hmu, tot_size, file_name, line_number);
#endif
ret = hmu_to_obj(hmu);
#if BH_ENABLE_MEMORY_PROFILING != 0
bh_printf("HEAP.ALLOC: heap: %p, size: %u", heap, size);
#endif
FINISH:
return ret;
}
/* Do some checking to see if given pointer is a possible valid heap*/
/* Return GC_TRUE if all checking passed*/
@ -539,7 +510,7 @@ int gc_free_i_heap(void *vheap, gc_object_t obj ALLOC_EXTRA_PARAMETERS)
heap->total_free_size += size;
#endif
#if BH_ENABLE_MEMORY_PROFILING != 0
bh_printf("HEAP.FREE, heap: %p, size: %u\n",heap, size);
os_printf("HEAP.FREE, heap: %p, size: %u\n",heap, size);
#endif
if (!hmu_get_pinuse(hmu)) {
@ -582,12 +553,12 @@ int gc_free_i_heap(void *vheap, gc_object_t obj ALLOC_EXTRA_PARAMETERS)
void gc_dump_heap_stats(gc_heap_t *heap)
{
bh_printf("heap: %p, heap start: %p\n", heap, heap->base_addr);
bh_printf(
os_printf("heap: %p, heap start: %p\n", heap, heap->base_addr);
os_printf(
"total malloc: totalfree: %u, current: %u, highmark: %u, gc cnt: %u\n",
heap->total_free_size, heap->current_size, heap->highmark_size,
heap->total_gc_count);
bh_printf("g_total_malloc=%lu, g_total_free=%lu, occupied=%lu\n",
os_printf("g_total_malloc=%lu, g_total_free=%lu, occupied=%lu\n",
g_total_malloc, g_total_free, g_total_malloc - g_total_free);
}
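
The alloc/realloc paths above switch to a single pattern: compute the unaligned total first, align it with GC_ALIGN_8(), reject wrap-around, and zero the padding that alignment appends after the object. A minimal standalone sketch of that pattern follows; it is not part of the commit, and the header/prefix/suffix sizes and the GC_ALIGN_8 definition are illustrative assumptions rather than the real WAMR values.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins, not the real WAMR constants */
#define HMU_SIZE        8u
#define OBJ_PREFIX_SIZE 0u
#define OBJ_SUFFIX_SIZE 0u
#define GC_ALIGN_8(s)   (((uint32_t)(s) + 7u) & (uint32_t)~7u)

/* Returns the aligned total size, or 0 on integer overflow.
 * On success, *unaligned_out holds the pre-alignment total. */
static uint32_t calc_tot_size(uint32_t size, uint32_t *unaligned_out)
{
    /* hmu header + prefix + obj + suffix */
    uint32_t tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE
                                  + size + OBJ_SUFFIX_SIZE;
    uint32_t tot_size = GC_ALIGN_8(tot_size_unaligned);

    if (tot_size < size) /* wrapped around: integer overflow */
        return 0;

    *unaligned_out = tot_size_unaligned;
    return tot_size;
}

int main(void)
{
    uint32_t unaligned = 0;
    uint32_t tot = calc_tot_size(13, &unaligned);
    /* prints: unaligned=21 aligned=24 padding=3 */
    printf("unaligned=%u aligned=%u padding=%u\n",
           unaligned, tot, tot - unaligned);
    /* The commit clears exactly these padding bytes after the object;
     * see the memset() added in _gc_alloc_vo_i_heap. */
    return 0;
}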


@ -44,13 +44,7 @@ extern "C" {
# error "Can not define GC_EMBEDDED and GC_STANDALONE at the same time"
#endif
#ifdef BH_TEST
# ifndef GC_TEST
# define GC_TEST
# endif
#endif
#ifdef BH_DEBUG
#if BH_DEBUG != 0
/* instrument mode ignores the GC_DEBUG feature; for instrument testing, gc_alloc_vo_i_heap only has the func_name parameter */
#if !defined INSTRUMENT_TEST_ENABLED && !defined GC_DEBUG
# define GC_DEBUG
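
The switch from "#ifdef BH_DEBUG" to "#if BH_DEBUG != 0" matters when the build always defines the macro and uses 0/1 as the switch; that motivation is assumed here, the commit does not state it. A small self-contained illustration:

#include <stdio.h>

#define BH_DEBUG 0 /* e.g. passed as -DBH_DEBUG=0 by the build system */

int main(void)
{
#ifdef BH_DEBUG
    printf("#ifdef BH_DEBUG: branch taken even though BH_DEBUG is 0\n");
#endif
#if BH_DEBUG != 0
    printf("#if BH_DEBUG != 0: branch taken only for a non-zero value\n");
#endif
    return 0;
}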


@ -11,8 +11,6 @@ extern "C" {
#endif
#include "bh_platform.h"
#include "bh_thread.h"
#include "bh_assert.h"
#include "ems_gc.h"
/* basic block managed by EMS gc is the so-called HMU (heap memory unit)*/
@ -146,8 +144,35 @@ extern void hmu_verify(hmu_t *hmu);
typedef struct _hmu_normal_node
{
hmu_t hmu_header;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
struct {
uint32 parts[2];
} next;
#else
struct _hmu_normal_node *next;
}hmu_normal_node_t;
#endif
} hmu_normal_node_t;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
static inline hmu_normal_node_t *
get_hmu_normal_node_next(hmu_normal_node_t *node)
{
hmu_normal_node_t *next;
bh_memcpy_s(&next, (uint32)sizeof(hmu_normal_node_t *),
&node->next.parts, (uint32)sizeof(uint32) * 2);
return next;
}
static inline void
set_hmu_normal_node_next(hmu_normal_node_t *node, hmu_normal_node_t *next)
{
bh_memcpy_s(&node->next.parts, (uint32)sizeof(uint32) * 2,
&next, (uint32)sizeof(hmu_normal_node_t *));
}
#else
#define get_hmu_normal_node_next(node) (node)->next
#define set_hmu_normal_node_next(node, _next) (node)->next = _next
#endif
typedef struct _hmu_tree_node
{
@ -156,7 +181,7 @@ typedef struct _hmu_tree_node
struct _hmu_tree_node *left;
struct _hmu_tree_node *right;
struct _hmu_tree_node *parent;
}hmu_tree_node_t;
} hmu_tree_node_t;
typedef struct _gc_heap_struct
{
@ -193,7 +218,7 @@ typedef struct _gc_heap_struct
gc_size_t gc_threshold_factor;
gc_int64 total_gc_time;
#endif
}gc_heap_t;
} gc_heap_t;
/*////// MISC internal used APIs*/
@ -254,10 +279,10 @@ extern int (*gct_vm_begin_rootset_enumeration)(void* heap);
extern int (*gct_vm_gc_finished)(void);
#else
#define gct_vm_get_java_object_ref_list bh_get_java_object_ref_list
#define gct_vm_mutex_init vm_mutex_init
#define gct_vm_mutex_destroy vm_mutex_destroy
#define gct_vm_mutex_lock vm_mutex_lock
#define gct_vm_mutex_unlock vm_mutex_unlock
#define gct_vm_mutex_init os_mutex_init
#define gct_vm_mutex_destroy os_mutex_destroy
#define gct_vm_mutex_lock os_mutex_lock
#define gct_vm_mutex_unlock os_mutex_unlock
#define gct_vm_get_gc_handle_for_current_instance app_manager_get_cur_applet_heap
#define gct_vm_begin_rootset_enumeration vm_begin_rootset_enumeration
#define gct_vm_gc_finished jeff_runtime_gc_finished
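
On 64-bit targets the next field of hmu_normal_node_t is now stored as two uint32 halves and accessed only through get/set_hmu_normal_node_next(), apparently so that an 8-byte pointer never has to be read or written through a location that may only be 4-byte aligned inside the heap. The standalone sketch below shows the same packing idea with plain memcpy and simplified types; it is not the commit's code, which uses bh_memcpy_s and the real hmu header.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef struct node {
    uint32_t header;        /* 4-byte header: next_parts is only 4-byte aligned */
    uint32_t next_parts[2]; /* a 64-bit pointer packed as two 32-bit halves */
} node_t;

static node_t *get_next(node_t *n)
{
    node_t *next;
    memcpy(&next, n->next_parts, sizeof(next)); /* alignment-safe read */
    return next;
}

static void set_next(node_t *n, node_t *next)
{
    memcpy(n->next_parts, &next, sizeof(next)); /* alignment-safe write */
}

int main(void)
{
    node_t a = { 0, { 0, 0 } }, b = { 0, { 0, 0 } };
    set_next(&a, &b);
    printf("round-trip ok: %d\n", get_next(&a) == &b);
    return 0;
}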


@ -16,7 +16,7 @@ int gci_check_platform()
{
#define CHECK(x, y) do { \
if((x) != (y)) { \
bh_printf("Platform checking failed on LINE %d at FILE %s.",\
os_printf("Platform checking failed on LINE %d at FILE %s.",\
__LINE__, __FILE__); \
return GC_ERROR; \
} \
@ -48,12 +48,12 @@ gc_handle_t gc_init_with_pool(char *buf, gc_size_t buf_size)
/* check system compatibility*/
if (gci_check_platform() == GC_ERROR) {
bh_printf("Check platform compatibility failed");
os_printf("Check platform compatibility failed");
return NULL;
}
if (buf_size < 1024) {
bh_printf("[GC_ERROR]heap_init_size(%d) < 1024", buf_size);
os_printf("[GC_ERROR]heap_init_size(%d) < 1024", buf_size);
return NULL;
}
@ -65,12 +65,12 @@ gc_handle_t gc_init_with_pool(char *buf, gc_size_t buf_size)
ret = gct_vm_mutex_init(&heap->lock);
if (ret != BHT_OK) {
bh_printf("[GC_ERROR]failed to init lock ");
os_printf("[GC_ERROR]failed to init lock ");
return NULL;
}
#ifdef BH_FOOTPRINT
bh_printf("\nINIT HEAP 0x%08x %d\n", base_addr, heap_max_size);
os_printf("\nINIT HEAP 0x%08x %d\n", base_addr, heap_max_size);
#endif
/* init all data structures*/
@ -117,8 +117,8 @@ gc_handle_t gc_init_with_pool(char *buf, gc_size_t buf_size)
&& HMU_FC_NORMAL_MAX_SIZE < q->size); /*@NOTIFY*/
#if BH_ENABLE_MEMORY_PROFILING != 0
bh_printf("heap is successfully initialized with max_size=%u.",
heap_max_size);
os_printf("heap is successfully initialized with max_size=%u.",
heap_max_size);
#endif
return heap;
}
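
gci_check_platform(), shown above, fails fast when the type sizes the heap layout depends on do not match expectations, which is why gc_init_with_pool() refuses to build a heap on a mismatching platform. A standalone sketch of that self-check pattern, with an illustrative list of checks rather than the real one:

#include <stdio.h>

#define CHECK(x, y) do {                                                \
        if ((x) != (y)) {                                               \
            printf("Platform checking failed on LINE %d at FILE %s.\n", \
                   __LINE__, __FILE__);                                 \
            return -1;                                                  \
        }                                                               \
    } while (0)

/* Returns 0 if the type-size assumptions hold, -1 otherwise */
static int check_platform(void)
{
    CHECK(sizeof(char), 1);
    CHECK(sizeof(short), 2);
    CHECK(sizeof(int), 4);
    CHECK(sizeof(long long), 8);
    return 0;
}

int main(void)
{
    return check_platform() == 0 ? 0 : 1;
}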


@ -4,7 +4,6 @@
*/
#include "mem_alloc.h"
#include "config.h"
#if DEFAULT_MEM_ALLOCATOR == MEM_ALLOCATOR_EMS
@ -41,7 +40,6 @@ void mem_allocator_free(mem_allocator_t allocator, void *ptr)
#else /* else of DEFAULT_MEM_ALLOCATOR */
#include "tlsf/tlsf.h"
#include "bh_thread.h"
typedef struct mem_allocator_tlsf {
tlsf_t tlsf;
@ -79,7 +77,7 @@ mem_allocator_create(void *mem, uint32_t size)
allocator_tlsf->tlsf = tlsf;
if (vm_mutex_init(&allocator_tlsf->lock)) {
if (os_mutex_init(&allocator_tlsf->lock)) {
printf("Create mem allocator failed: tlsf_malloc failed.\n");
tlsf_free(tlsf, allocator_tlsf);
tlsf_destroy(tlsf);
@ -95,7 +93,7 @@ mem_allocator_destroy(mem_allocator_t allocator)
mem_allocator_tlsf *allocator_tlsf = (mem_allocator_tlsf *)allocator;
tlsf_t tlsf = allocator_tlsf->tlsf;
vm_mutex_destroy(&allocator_tlsf->lock);
os_mutex_destroy(&allocator_tlsf->lock);
tlsf_free(tlsf, allocator_tlsf);
tlsf_destroy(tlsf);
}
@ -110,9 +108,9 @@ mem_allocator_malloc(mem_allocator_t allocator, uint32_t size)
/* tlsf doesn't allow to allocate 0 byte */
size = 1;
vm_mutex_lock(&allocator_tlsf->lock);
os_mutex_lock(&allocator_tlsf->lock);
ret = tlsf_malloc(allocator_tlsf->tlsf, size);
vm_mutex_unlock(&allocator_tlsf->lock);
os_mutex_unlock(&allocator_tlsf->lock);
return ret;
}
@ -126,9 +124,9 @@ mem_allocator_realloc(mem_allocator_t allocator, void *ptr, uint32_t size)
/* tlsf doesn't allow to allocate 0 byte */
size = 1;
vm_mutex_lock(&allocator_tlsf->lock);
os_mutex_lock(&allocator_tlsf->lock);
ret = tlsf_realloc(allocator_tlsf->tlsf, ptr, size);
vm_mutex_unlock(&allocator_tlsf->lock);
os_mutex_unlock(&allocator_tlsf->lock);
return ret;
}
@ -137,9 +135,9 @@ mem_allocator_free(mem_allocator_t allocator, void *ptr)
{
if (ptr) {
mem_allocator_tlsf *allocator_tlsf = (mem_allocator_tlsf *)allocator;
vm_mutex_lock(&allocator_tlsf->lock);
os_mutex_lock(&allocator_tlsf->lock);
tlsf_free(allocator_tlsf->tlsf, ptr);
vm_mutex_unlock(&allocator_tlsf->lock);
os_mutex_unlock(&allocator_tlsf->lock);
}
}


@ -0,0 +1,37 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef __MEM_ALLOC_H
#define __MEM_ALLOC_H
#include "bh_platform.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef void *mem_allocator_t;
mem_allocator_t
mem_allocator_create(void *mem, uint32_t size);
void
mem_allocator_destroy(mem_allocator_t allocator);
void *
mem_allocator_malloc(mem_allocator_t allocator, uint32_t size);
void *
mem_allocator_realloc(mem_allocator_t allocator, void *ptr, uint32_t size);
void
mem_allocator_free(mem_allocator_t allocator, void *ptr);
#ifdef __cplusplus
}
#endif
#endif /* #ifndef __MEM_ALLOC_H */
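
The new mem_alloc.h above is the allocator facade the rest of the runtime programs against, with either the ems GC or tlsf behind it depending on DEFAULT_MEM_ALLOCATOR. A hedged usage sketch follows; it assumes the project is built with this header and one of the two backing allocators, and the pool size is arbitrary.

#include <stdio.h>
#include "mem_alloc.h"

static char pool[64 * 1024]; /* all allocations are carved out of this buffer */

int main(void)
{
    mem_allocator_t allocator = mem_allocator_create(pool, sizeof(pool));
    if (!allocator)
        return 1;

    char *p = mem_allocator_malloc(allocator, 32);
    if (p) {
        snprintf(p, 32, "hello");
        /* realloc keeps the old contents, possibly moving the block */
        char *q = mem_allocator_realloc(allocator, p, 128);
        if (q)
            p = q;
        printf("%s from the pooled allocator\n", p);
        mem_allocator_free(allocator, p);
    }

    mem_allocator_destroy(allocator);
    return 0;
}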