WebAssembly Micro Runtime first version

This commit is contained in:
Wang Xin
2019-05-07 10:18:18 +08:00
parent 15aa50914b
commit a75a5f0f41
252 changed files with 33487 additions and 0 deletions

View File

@@ -0,0 +1,105 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "bh_memory.h"
#include "mem_alloc.h"
#include <stdio.h>
#include <stdlib.h>
#ifndef MALLOC_MEMORY_FROM_SYSTEM
typedef enum Memory_Mode {
MEMORY_MODE_UNKNOWN = 0, MEMORY_MODE_POOL, MEMORY_MODE_ALLOCATOR
} Memory_Mode;
static Memory_Mode memory_mode = MEMORY_MODE_UNKNOWN;
static mem_allocator_t pool_allocator = NULL;
static void *(*malloc_func)(unsigned int size) = NULL;
static void (*free_func)(void *ptr) = NULL;
int bh_memory_init_with_pool(void *mem, unsigned int bytes)
{
mem_allocator_t _allocator = mem_allocator_create(mem, bytes);
if (_allocator) {
memory_mode = MEMORY_MODE_POOL;
pool_allocator = _allocator;
return 0;
}
printf("Init memory with pool (%p, %u) failed.\n", mem, bytes);
return -1;
}
int bh_memory_init_with_allocator(void *_malloc_func, void *_free_func)
{
if (_malloc_func && _free_func && _malloc_func != _free_func) {
memory_mode = MEMORY_MODE_ALLOCATOR;
malloc_func = _malloc_func;
free_func = _free_func;
return 0;
}
printf("Init memory with allocator (%p, %p) failed.\n", _malloc_func,
_free_func);
return -1;
}
void bh_memory_destroy()
{
if (memory_mode == MEMORY_MODE_POOL)
mem_allocator_destroy(pool_allocator);
memory_mode = MEMORY_MODE_UNKNOWN;
}
void* bh_malloc(unsigned int size)
{
if (memory_mode == MEMORY_MODE_UNKNOWN) {
printf("bh_malloc failed: memory hasn't been initialize.\n");
return NULL;
} else if (memory_mode == MEMORY_MODE_POOL) {
return mem_allocator_malloc(pool_allocator, size);
} else {
return malloc_func(size);
}
}
void bh_free(void *ptr)
{
if (memory_mode == MEMORY_MODE_UNKNOWN) {
printf("bh_free failed: memory hasn't been initialize.\n");
} else if (memory_mode == MEMORY_MODE_POOL) {
mem_allocator_free(pool_allocator, ptr);
} else {
free_func(ptr);
}
}
#else /* else of MALLOC_MEMORY_FROM_SYSTEM */
void* bh_malloc(unsigned int size)
{
return malloc(size);
}
void bh_free(void *ptr)
{
if (ptr)
free(ptr);
}
#endif /* end of MALLOC_MEMORY_FROM_SYSTEM*/
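
For reference, a minimal caller of the pool-mode API above might look like this sketch (illustrative only; the buffer size and the main harness are made up, not part of this commit):

#include "bh_memory.h"

/* pool mode: the runtime carves all allocations out of one static buffer */
static char global_heap_buf[512 * 1024];

int main(void)
{
    if (bh_memory_init_with_pool(global_heap_buf, sizeof(global_heap_buf)) != 0)
        return -1;

    void *p = bh_malloc(128); /* routed to mem_allocator_malloc */
    if (p)
        bh_free(p);           /* routed to mem_allocator_free */

    bh_memory_destroy();
    return 0;
}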

View File

@@ -0,0 +1,590 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ems_gc_internal.h"
#if !defined(NVALGRIND)
#include <valgrind/memcheck.h>
#endif
static int hmu_is_in_heap(gc_heap_t* heap, hmu_t* hmu)
{
return heap && hmu && (gc_uint8*) hmu >= heap->base_addr
&& (gc_uint8*) hmu < heap->base_addr + heap->current_size;
}
/* Remove a node from the tree it belongs to*/
/* @p can not be NULL*/
/* @p can not be the ROOT node*/
/* Node @p will be removed from the tree and left,right,parent pointers of node @p will be*/
/* set to be NULL. Other fields will not be touched.*/
/* The tree will be re-organized so that the order conditions are still satisfied.*/
BH_STATIC void remove_tree_node(hmu_tree_node_t *p)
{
hmu_tree_node_t *q = NULL, **slot = NULL;
bh_assert(p);
bh_assert(p->parent); /* @p can not be the ROOT node*/
/* get the slot which holds pointer to node p*/
if (p == p->parent->right) {
slot = &p->parent->right;
} else {
bh_assert(p == p->parent->left); /* @p should be a child of its parent*/
slot = &p->parent->left;
}
/* algorithms used to remove node p*/
/* case 1: if p has no left child, replace p with its right child*/
/* case 2: if p has no right child, replace p with its left child*/
/* case 3: otherwise, find p's predecessor, remove it from the tree and replace p with it.*/
/* using the predecessor keeps the left <= root < right condition.*/
if (!p->left) {
/* move right child up*/
*slot = p->right;
if (p->right)
p->right->parent = p->parent;
p->left = p->right = p->parent = NULL;
return;
}
if (!p->right) {
/* move left child up*/
*slot = p->left;
p->left->parent = p->parent; /* p->left can never be NULL.*/
p->left = p->right = p->parent = NULL;
return;
}
/* both left & right exist, find p's predecessor at first*/
q = p->left;
while (q->right)
q = q->right;
remove_tree_node(q); /* remove from the tree*/
*slot = q;
q->parent = p->parent;
q->left = p->left;
q->right = p->right;
if (q->left)
q->left->parent = q;
if (q->right)
q->right->parent = q;
p->left = p->right = p->parent = NULL;
}
static void unlink_hmu(gc_heap_t *heap, hmu_t *hmu)
{
gc_size_t size;
bh_assert(gci_is_heap_valid(heap));
bh_assert(
hmu && (gc_uint8*) hmu >= heap->base_addr
&& (gc_uint8*) hmu < heap->base_addr + heap->current_size);
bh_assert(hmu_get_ut(hmu) == HMU_FC);
size = hmu_get_size(hmu);
if (HMU_IS_FC_NORMAL(size)) {
int node_idx = size >> 3;
hmu_normal_node_t* node = heap->kfc_normal_list[node_idx].next;
hmu_normal_node_t** p = &(heap->kfc_normal_list[node_idx].next);
while (node) {
if ((hmu_t*) node == hmu) {
*p = node->next;
break;
}
p = &(node->next);
node = node->next;
}
if (!node) {
printf("[GC_ERROR]couldn't find the node in the normal list");
}
} else {
remove_tree_node((hmu_tree_node_t *) hmu);
}
}
static void hmu_set_free_size(hmu_t *hmu)
{
gc_size_t size;
bh_assert(hmu && hmu_get_ut(hmu) == HMU_FC);
size = hmu_get_size(hmu);
*((int*) ((char*) hmu + size) - 1) = size;
}
/* Add free chunk back to KFC*/
/* @heap should not be NULL and it should be a valid heap*/
/* @hmu should not be NULL and it should be a HMU of length @size inside @heap*/
/* @hmu should be aligned to 8*/
/* @size should be positive and multiple of 8*/
/* @hmu with size @size will be added into KFC as a new FC.*/
void gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
{
hmu_normal_node_t *np = NULL;
hmu_tree_node_t *root = NULL, *tp = NULL, *node = NULL;
int node_idx;
bh_assert(gci_is_heap_valid(heap));
bh_assert(
hmu && (gc_uint8*) hmu >= heap->base_addr
&& (gc_uint8*) hmu < heap->base_addr + heap->current_size);
bh_assert(((gc_uint32) hmu_to_obj(hmu) & 7) == 0);
bh_assert(
size > 0
&& ((gc_uint8*) hmu) + size
<= heap->base_addr + heap->current_size);
bh_assert(!(size & 7));
hmu_set_ut(hmu, HMU_FC);
hmu_set_size(hmu, size);
hmu_set_free_size(hmu);
if (HMU_IS_FC_NORMAL(size)) {
np = (hmu_normal_node_t*) hmu;
node_idx = size >> 3;
np->next = heap->kfc_normal_list[node_idx].next;
heap->kfc_normal_list[node_idx].next = np;
return;
}
/* big block*/
node = (hmu_tree_node_t*) hmu;
node->size = size;
node->left = node->right = node->parent = NULL;
/* find proper node to link this new node to*/
root = &heap->kfc_tree_root;
tp = root;
bh_assert(tp->size < size);
while (1) {
if (tp->size < size) {
if (!tp->right) {
tp->right = node;
node->parent = tp;
break;
}
tp = tp->right;
} else /* tp->size >= size*/
{
if (!tp->left) {
tp->left = node;
node->parent = tp;
break;
}
tp = tp->left;
}
}
}
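
To make the KFC routing in gci_add_fc concrete, here is a tiny standalone sketch (illustrative only; it re-derives the size-class constants from ems_gc_internal.h rather than including it):

#include <stdio.h>

#define HMU_NORMAL_NODE_CNT 32
#define HMU_FC_NORMAL_MAX_SIZE ((HMU_NORMAL_NODE_CNT - 1) << 3) /* 248 */

int main(void)
{
    unsigned sizes[] = { 16, 64, 240, 248, 4096 }, i;
    for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
        if (sizes[i] < HMU_FC_NORMAL_MAX_SIZE)
            /* normal free chunk: singly-linked list, index = size / 8 */
            printf("size %4u -> kfc_normal_list[%u]\n", sizes[i], sizes[i] >> 3);
        else
            /* big free chunk: size-ordered tree under kfc_tree_root */
            printf("size %4u -> kfc_tree\n", sizes[i]);
    }
    return 0;
}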
/* Find a proper hmu for required memory size*/
/* @heap should not be NULL and it should be a valid heap*/
/* @size should cover the header and it should be 8 bytes aligned*/
/* GC will not be performed here.*/
/* Heap extension will not be performed here.*/
/* A proper HMU will be returned. This HMU can include the header and given size. The returned HMU will be aligned to 8 bytes.*/
/* NULL will be returned if there are no proper HMU.*/
BH_STATIC hmu_t *alloc_hmu(gc_heap_t *heap, gc_size_t size)
{
hmu_normal_node_t *node = NULL, *p = NULL;
int node_idx = 0, init_node_idx = 0;
hmu_tree_node_t *root = NULL, *tp = NULL, *last_tp = NULL;
hmu_t *next, *rest;
bh_assert(gci_is_heap_valid(heap));
bh_assert(size > 0 && !(size & 7));
if (size < GC_SMALLEST_SIZE)
size = GC_SMALLEST_SIZE;
/* check normal list at first*/
if (HMU_IS_FC_NORMAL(size)) {
/* find a non-empty slot in normal_node_list with good size*/
init_node_idx = (int) (size >> 3);
for (node_idx = init_node_idx; node_idx < HMU_NORMAL_NODE_CNT;
node_idx++) {
node = heap->kfc_normal_list + node_idx;
if (node->next)
break;
node = NULL;
}
/* found one in the normal list; otherwise fall through to the tree search*/
if (node) {
bh_assert(node_idx >= init_node_idx);
p = node->next;
node->next = p->next;
bh_assert(((gc_int32) hmu_to_obj(p) & 7) == 0);
if ((gc_size_t) node_idx != init_node_idx
        && ((gc_size_t) node_idx << 3) >= size + GC_SMALLEST_SIZE) { /* with bigger size*/
rest = (hmu_t*) (((char *) p) + size);
gci_add_fc(heap, rest, (node_idx << 3) - size);
hmu_mark_pinuse(rest);
} else {
size = node_idx << 3;
next = (hmu_t*) ((char*) p + size);
if (hmu_is_in_heap(heap, next))
hmu_mark_pinuse(next);
}
#if GC_STAT_DATA != 0
heap->total_free_size -= size;
if ((heap->current_size - heap->total_free_size)
> heap->highmark_size)
heap->highmark_size = heap->current_size
- heap->total_free_size;
#endif
hmu_set_size((hmu_t* ) p, size);
return (hmu_t*) p;
}
}
/* need to find a node in tree*/
root = &heap->kfc_tree_root;
/* find the best node*/
bh_assert(root);
tp = root->right;
while (tp) {
if (tp->size < size) {
tp = tp->right;
continue;
}
/* record the last node with size equal to or bigger than given size*/
last_tp = tp;
tp = tp->left;
}
if (last_tp) {
bh_assert(last_tp->size >= size);
/* alloc in last_tp*/
/* remove node last_tp from tree*/
remove_tree_node(last_tp);
if (last_tp->size >= size + GC_SMALLEST_SIZE) {
rest = (hmu_t*) ((char*) last_tp + size);
gci_add_fc(heap, rest, last_tp->size - size);
hmu_mark_pinuse(rest);
} else {
size = last_tp->size;
next = (hmu_t*) ((char*) last_tp + size);
if (hmu_is_in_heap(heap, next))
hmu_mark_pinuse(next);
}
#if GC_STAT_DATA != 0
heap->total_free_size -= size;
if ((heap->current_size - heap->total_free_size) > heap->highmark_size)
heap->highmark_size = heap->current_size - heap->total_free_size;
#endif
hmu_set_size((hmu_t* ) last_tp, size);
return (hmu_t*) last_tp;
}
return NULL;
}
/* Find a proper HMU for given size*/
/* @heap should not be NULL and it should be a valid heap*/
/* @size should cover the header and it should be 8 bytes aligned*/
/* This function will try several ways to satisfy the allocation request:*/
/* 1. Find a proper HMU among the available HMUs.*/
/* 2. Trigger GC if step 1 failed.*/
/* 3. Retry finding a proper HMU among the available HMUs.*/
/* 4. Return NULL if step 3 failed.*/
/* A proper HMU will be returned. This HMU can include the header and given size. The returned HMU will be aligned to 8 bytes.*/
/* NULL will be returned if there are no proper HMU.*/
BH_STATIC hmu_t* alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
{
hmu_t *ret = NULL;
bh_assert(gci_is_heap_valid(heap));
bh_assert(size > 0 && !(size & 7));
#ifdef GC_IN_EVERY_ALLOCATION
gci_gc_heap(heap);
ret = alloc_hmu(heap, size);
#else
# if GC_STAT_DATA != 0
if (heap->gc_threshold < heap->total_free_size)
ret = alloc_hmu(heap, size);
# else
ret = alloc_hmu(heap, size);
# endif
if (ret)
return ret;
/*gci_gc_heap(heap);*/ /* GC reclaim is disabled currently */
ret = alloc_hmu(heap, size);
#endif
return ret;
}
unsigned long g_total_malloc = 0;
unsigned long g_total_free = 0;
gc_object_t _gc_alloc_vo_i_heap(void *vheap,
gc_size_t size ALLOC_EXTRA_PARAMETERS)
{
gc_heap_t* heap = (gc_heap_t*) vheap;
hmu_t *hmu = NULL;
gc_object_t ret = (gc_object_t) NULL;
gc_size_t tot_size = 0;
/* align size*/
tot_size = GC_ALIGN_8(size + HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE); /* hmu header, prefix, suffix*/
if (tot_size < size)
return NULL;
gct_vm_mutex_lock(&heap->lock);
hmu = alloc_hmu_ex(heap, tot_size);
if (!hmu)
goto FINISH;
g_total_malloc += tot_size;
hmu_set_ut(hmu, HMU_VO);
hmu_unfree_vo(hmu);
#if defined(GC_VERIFY)
hmu_init_prefix_and_suffix(hmu, tot_size, file_name, line_number);
#endif
ret = hmu_to_obj(hmu);
#if BH_ENABLE_MEMORY_PROFILING != 0
printf("HEAP.ALLOC: heap: %p, size: %u", heap, size);
#endif
FINISH:
gct_vm_mutex_unlock(&heap->lock);
return ret;
}
/* see ems_gc.h for description*/
gc_object_t _gc_alloc_jo_i_heap(void *vheap,
gc_size_t size ALLOC_EXTRA_PARAMETERS)
{
gc_heap_t* heap = (gc_heap_t*) vheap;
gc_object_t ret = (gc_object_t) NULL;
hmu_t *hmu = NULL;
gc_size_t tot_size = 0;
bh_assert(gci_is_heap_valid(heap));
/* align size*/
tot_size = GC_ALIGN_8(size + HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE); /* hmu header, prefix, suffix*/
if (tot_size < size)
return NULL;
hmu = alloc_hmu_ex(heap, tot_size);
if (!hmu)
goto FINISH;
/* reset all fields*/
memset((char*) hmu + sizeof(*hmu), 0, tot_size - sizeof(*hmu));
/* hmu->header = 0; */
hmu_set_ut(hmu, HMU_JO);
hmu_unmark_jo(hmu);
#if defined(GC_VERIFY)
hmu_init_prefix_and_suffix(hmu, tot_size, file_name, line_number);
#endif
ret = hmu_to_obj(hmu);
#if BH_ENABLE_MEMORY_PROFILING != 0
printf("HEAP.ALLOC: heap: %p, size: %u", heap, size);
#endif
FINISH:
return ret;
}
/* Do some checking to see if given pointer is a possible valid heap*/
/* Return GC_TRUE if all checking passed*/
/* Return GC_FALSE otherwise*/
int gci_is_heap_valid(gc_heap_t *heap)
{
if (!heap)
return GC_FALSE;
if (heap->heap_id != (gc_handle_t) heap)
return GC_FALSE;
return GC_TRUE;
}
int gc_free_i_heap(void *vheap, gc_object_t obj ALLOC_EXTRA_PARAMETERS)
{
gc_heap_t* heap = (gc_heap_t*) vheap;
hmu_t *hmu = NULL;
hmu_t *prev = NULL;
hmu_t *next = NULL;
gc_size_t size = 0;
hmu_type_t ut;
int ret = GC_SUCCESS;
if (!obj) {
return GC_SUCCESS;
}
hmu = obj_to_hmu(obj);
gct_vm_mutex_lock(&heap->lock);
if ((gc_uint8 *) hmu >= heap->base_addr
&& (gc_uint8 *) hmu < heap->base_addr + heap->current_size) {
#ifdef GC_VERIFY
hmu_verify(hmu);
#endif
ut = hmu_get_ut(hmu);
if (ut == HMU_VO) {
if (hmu_is_vo_freed(hmu)) {
bh_assert(0);
ret = GC_ERROR;
goto out;
}
size = hmu_get_size(hmu);
g_total_free += size;
#if GC_STAT_DATA != 0
heap->total_free_size += size;
#endif
#if BH_ENABLE_MEMORY_PROFILING != 0
printf("HEAP.FREE, heap: %p, size: %u\n",heap, size);
#endif
if (!hmu_get_pinuse(hmu)) {
prev = (hmu_t*) ((char*) hmu - *((int*) hmu - 1));
if (hmu_is_in_heap(heap, prev) && hmu_get_ut(prev) == HMU_FC) {
size += hmu_get_size(prev);
hmu = prev;
unlink_hmu(heap, prev);
}
}
next = (hmu_t*) ((char*) hmu + size);
if (hmu_is_in_heap(heap, next)) {
if (hmu_get_ut(next) == HMU_FC) {
size += hmu_get_size(next);
unlink_hmu(heap, next);
next = (hmu_t*) ((char*) hmu + size);
}
}
gci_add_fc(heap, hmu, size);
if (hmu_is_in_heap(heap, next)) {
hmu_unmark_pinuse(next);
}
} else {
ret = GC_ERROR;
goto out;
}
ret = GC_SUCCESS;
goto out;
}
out:
gct_vm_mutex_unlock(&heap->lock);
return ret;
}
void gc_dump_heap_stats(gc_heap_t *heap)
{
printf("heap: %p, heap start: %p\n", heap, heap->base_addr);
printf(
    "total free: %u, current: %u, highmark: %u, gc cnt: %u\n",
    heap->total_free_size, heap->current_size, heap->highmark_size,
    heap->total_gc_count);
printf("g_total_malloc=%lu, g_total_free=%lu, occupied=%lu\n",
g_total_malloc, g_total_free, g_total_malloc - g_total_free);
}
#ifdef GC_TEST
void gci_dump(char* buf, gc_heap_t *heap)
{
hmu_t *cur = NULL, *end = NULL;
hmu_type_t ut;
gc_size_t size;
int i = 0;
int p;
char inuse = '-'; /* fallback for unexpected chunk types */
int mark;
cur = (hmu_t*)heap->base_addr;
end = (hmu_t*)((char*)heap->base_addr + heap->current_size);
while(cur < end)
{
ut = hmu_get_ut(cur);
size = hmu_get_size(cur);
p = hmu_get_pinuse(cur);
mark = hmu_is_jo_marked (cur);
if(ut == HMU_VO)
inuse = 'V';
else if(ut == HMU_JO)
inuse = hmu_is_jo_marked(cur) ? 'J' : 'j';
else if(ut == HMU_FC)
inuse = 'F';
bh_assert(size > 0);
buf += sprintf(buf, "#%d %08x %x %x %d %c %d\n",
               i, (unsigned)((char*) cur - (char*) heap->base_addr),
               ut, p, mark, inuse, hmu_obj_size(size));
cur = (hmu_t*)((char *)cur + size);
i++;
}
bh_assert(cur == end);
}
#endif
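
Both allocators above size their chunk as GC_ALIGN_8(size + HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE). A worked sketch of that arithmetic (illustrative; the constants assume a build without GC_VERIFY, where prefix and suffix are zero and the header is one gc_uint32):

#include <stdio.h>

#define GC_ALIGN_8(s) (((int)(s) + 7) & ~7)
#define HMU_SIZE 4        /* sizeof(hmu_t): one 32-bit header */
#define OBJ_PREFIX_SIZE 0 /* non-GC_VERIFY build */
#define OBJ_SUFFIX_SIZE 0

int main(void)
{
    int size;
    for (size = 1; size <= 29; size += 7) {
        int tot = GC_ALIGN_8(size + HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE);
        /* e.g. a 1-byte request still occupies an 8-byte chunk */
        printf("user size %2d -> chunk size %2d\n", size, tot);
    }
    return 0;
}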

View File

@@ -0,0 +1,341 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file ems_gc.h
* @date Wed Aug 3 10:46:38 2011
*
* @brief This file defines the GC module's types and interfaces.
*
*
*/
#ifndef _EMS_GC_H
#define _EMS_GC_H
#include "bh_platform.h"
#ifdef __cplusplus
extern "C" {
#endif
/*Pre-compile configuration can be done here or in Makefiles*/
/*#define GC_EMBEDDED or GC_STANDALONE*/
/*#define GC_DEBUG*/
/*#define GC_TEST // TEST mode is a sub-mode of STANDALONE*/
/* #define GC_ALLOC_TRACE */
/* #define GC_STAT */
#ifndef GC_STAT_DATA
#define GC_STAT_DATA 1
#endif
#define GC_HEAD_PADDING 4
/* Standalone GC is used for testing.*/
#ifndef GC_EMBEDDED
# ifndef GC_STANDALONE
# define GC_STANDALONE
# endif
#endif
#if defined(GC_EMBEDDED) && defined(GC_STANDALONE)
# error "Can not define GC_EMBEDDED and GC_STANDALONE at the same time"
#endif
#ifdef BH_TEST
# ifndef GC_TEST
# define GC_TEST
# endif
#endif
#ifdef BH_DEBUG
/* Instrument mode ignores the GC_DEBUG feature; for instrument testing, gc_alloc_vo_i_heap only has a func_name parameter */
#if !defined INSTRUMENT_TEST_ENABLED && !defined GC_DEBUG
# define GC_DEBUG
#endif
#endif
#if defined(GC_EMBEDDED) && defined(GC_TEST)
# error "Can not defined GC_EMBEDDED and GC_TEST at the same time"
#endif
typedef void *gc_handle_t;
typedef void *gc_object_t;
#define NULL_REF ((gc_object_t)NULL)
#define GC_SUCCESS (0)
#define GC_ERROR (-1)
#define GC_TRUE (1)
#define GC_FALSE (0)
#define GC_MAX_HEAP_SIZE (256 * BH_KB)
typedef int64 gc_int64;
typedef unsigned int gc_uint32;
typedef signed int gc_int32;
typedef unsigned short gc_uint16;
typedef signed short gc_int16;
typedef unsigned char gc_uint8;
typedef signed char gc_int8;
typedef gc_uint32 gc_size_t;
typedef enum {
MMT_SHARED = 0,
MMT_INSTANCE = 1,
MMT_APPMANAGER = MMT_SHARED,
MMT_VERIFIER = MMT_SHARED,
MMT_JHI = MMT_SHARED,
MMT_LOADER = MMT_SHARED,
MMT_APPLET = MMT_INSTANCE,
MMT_INTERPRETER = MMT_INSTANCE
} gc_mm_t;
#ifdef GC_STAT
#define GC_HEAP_STAT_SIZE (128 / 4)
typedef struct {
int usage;
int usage_block;
int vo_usage;
int jo_usage;
int free;
int free_block;
int vo_free;
int jo_free;
int usage_sizes[GC_HEAP_STAT_SIZE];
int free_sizes[GC_HEAP_STAT_SIZE];
}gc_stat_t;
extern void gc_heap_stat(void* heap, gc_stat_t* gc_stat);
extern void __gc_print_stat(void *heap, int verbose);
#define gc_print_stat __gc_print_stat
#else
#define gc_print_stat(heap, verbose)
#endif
#if GC_STAT_DATA != 0
typedef enum {
GC_STAT_TOTAL = 0,
GC_STAT_FREE,
GC_STAT_HIGHMARK,
GC_STAT_COUNT,
GC_STAT_TIME,
GC_STAT_MAX_1,
GC_STAT_MAX_2,
GC_STAT_MAX_3,
GC_STAT_MAX
} GC_STAT_INDEX;
#endif
/*////////////// Exported APIs*/
/**
* GC initialization from a buffer
*
* @param buf the buffer to be initialized to a heap
* @param buf_size the size of buffer
*
* @return gc handle if success, NULL otherwise
*/
extern gc_handle_t gc_init_with_pool(char *buf, gc_size_t buf_size);
/**
* Destroy a heap that was initialized from a buffer
*
* @param handle handle of the heap to destroy
*
* @return GC_SUCCESS if success
* GC_ERROR for bad parameters or failed system resource freeing.
*/
extern int gc_destroy_with_pool(gc_handle_t handle);
#if GC_STAT_DATA != 0
/**
* Get Heap Stats
*
* @param stats [out] integer array to save heap stats
* @param size [in] the size of stats
* @param mmt [in] type of heap, MMT_SHARED or MMT_INSTANCE
*/
extern void* gc_heap_stats(void *heap, int* stats, int size, gc_mm_t mmt);
/**
* Set GC threshold factor
*
* @param heap [in] the heap to set
* @param factor [in] the threshold size is free_size * factor / 1000
*
* @return GC_SUCCESS if success.
*/
extern int gc_set_threshold_factor(void *heap, unsigned int factor);
#endif
/*////// Allocate heap object*/
/* There are two versions of the allocation functions. The functions with the _i suffix should only be used*/
/* internally. Functions without the _i suffix are just wrappers around the corresponding functions with*/
/* the _i suffix. The code position of each allocation is recorded under DEBUG mode for debugging.*/
#ifdef GC_DEBUG
# define ALLOC_EXTRA_PARAMETERS ,const char*file_name,int line_number
# define ALLOC_EXTRA_ARGUMENTS , __FILE__, __LINE__
# define ALLOC_PASSDOWN_EXTRA_ARGUMENTS , file_name, line_number
# define gc_alloc_vo_h(heap, size) gc_alloc_vo_i_heap(heap, size, __FILE__, __LINE__)
# define gc_free_h(heap, obj) gc_free_i_heap(heap, obj, __FILE__, __LINE__)
#else
# define ALLOC_EXTRA_PARAMETERS
# define ALLOC_EXTRA_ARGUMENTS
# define ALLOC_PASSDOWN_EXTRA_ARGUMENTS
# define gc_alloc_vo_h gc_alloc_vo_i_heap
# define gc_free_h gc_free_i_heap
#endif
/**
* Invoke a GC
*
* @param heap
*
* @return GC_SUCCESS if success
*/
extern int gci_gc_heap(void *heap);
/**
* Allocate VM Object in specific heap.
*
* @param heap heap to allocate.
* @param size bytes to allocate.
*
* @return pointer to VM object allocated
* NULL if failed.
*/
extern gc_object_t _gc_alloc_vo_i_heap(void *heap,
gc_size_t size ALLOC_EXTRA_PARAMETERS);
extern gc_object_t _gc_alloc_jo_i_heap(void *heap,
gc_size_t size ALLOC_EXTRA_PARAMETERS);
#ifdef INSTRUMENT_TEST_ENABLED
extern gc_object_t gc_alloc_vo_i_heap_instr(void *heap, gc_size_t size, const char* func_name );
extern gc_object_t gc_alloc_jo_i_heap_instr(void *heap, gc_size_t size, const char* func_name);
# define gc_alloc_vo_i_heap(heap, size) gc_alloc_vo_i_heap_instr(heap, size, __FUNCTION__)
# define gc_alloc_jo_i_heap(heap, size) gc_alloc_jo_i_heap_instr(heap, size, __FUNCTION__)
#else
# define gc_alloc_vo_i_heap _gc_alloc_vo_i_heap
# define gc_alloc_jo_i_heap _gc_alloc_jo_i_heap
#endif
/**
* Allocate Java object in specific heap.
*
* @param heap heap to allocate.
* @param size bytes to allocate.
*
* @return pointer to Java object allocated
* NULL if failed.
*/
extern gc_object_t _gc_alloc_jo_i_heap(void *heap,
gc_size_t size ALLOC_EXTRA_PARAMETERS);
/**
* Free VM object
*
* @param heap heap to free.
* @param obj pointer to object need free.
*
* @return GC_SUCCESS if success
*/
extern int gc_free_i_heap(void *heap, gc_object_t obj ALLOC_EXTRA_PARAMETERS);
/**
* Add ref to rootset of gc for current instance.
*
* @param obj pointer to the payload of a valid Java object managed by the gc of the current instance.
*
* @return GC_SUCCESS if success.
* GC_ERROR for invalid parameters.
*/
extern int gc_add_root(void* heap, gc_object_t obj);
/*////////////// Imported APIs which should be implemented in other components*/
/*////// Java object layout related APIs*/
/**
* Get Java object size from corresponding VM module
*
* @param obj pointer to the payload of a Java object.
*
* @return size of java object.
*/
extern gc_size_t vm_get_java_object_size(gc_object_t obj);
/**
* Get reference list of this object
*
* @param obj [in] pointer to java object.
* @param is_compact_mode [in] indicate the java object mode. GC_TRUE or GC_FALSE.
* @param ref_num [out] the size of ref_list.
* @param ref_list [out] if is_compact_mode is GC_FALSE, this parameter will be set to a list of offset.
* @param ref_start_offset [out] If is_compact_mode is GC_TRUE, this parameter will be set to the start offset of the references in this object.
*
* @return GC_SUCCESS if success.
* GC_ERROR when error occurs.
*/
extern int vm_get_java_object_ref_list(gc_object_t obj, int *is_compact_mode,
gc_size_t *ref_num, gc_uint16 **ref_list, gc_uint32 *ref_start_offset);
/**
* Get gc handle for current instance
*
*
* @return instance heap handle.
*/
extern gc_handle_t app_manager_get_cur_applet_heap(void);
/**
* Begin current instance heap rootset enumeration
*
*
* @return GC_SUCCESS if success.
* GC_ERROR when error occurs.
*/
extern int vm_begin_rootset_enumeration(void *heap);
#ifdef _INSTRUMENT_TEST_ENABLED
extern int vm_begin_rootset_enumeration_instr(void *heap, const char*func_name);
#define vm_begin_rootset_enumeration(heap) vm_begin_rootset_enumeration_instr(heap, __FUNCTION__)
#else
#define vm_begin_rootset_enumeration _vm_begin_rootset_enumeration
#endif /* INSTRUMENT_TEST_ENABLED*/
#ifndef offsetof
#define offsetof(Type, field) ((size_t)(&((Type *)0)->field))
#endif
#ifdef __cplusplus
}
#endif
#endif
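
A minimal round trip through the exported API might look like the sketch below (illustrative; the pool size and the demo wrapper are made up). gc_alloc_vo_h and gc_free_h pick up __FILE__ and __LINE__ automatically when GC_DEBUG is defined:

#include "ems_gc.h"

static char gc_pool[128 * 1024];

int demo(void)
{
    gc_handle_t heap = gc_init_with_pool(gc_pool, sizeof(gc_pool));
    if (!heap)
        return GC_ERROR;

    /* VO (VM object) allocation through the wrapper macro */
    gc_object_t obj = gc_alloc_vo_h(heap, 64);
    if (obj != NULL_REF)
        gc_free_h(heap, obj);

    return gc_destroy_with_pool(heap);
}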

View File

@@ -0,0 +1,282 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _EMS_GC_INTERNAL_H
#define _EMS_GC_INTERNAL_H
#ifdef __cplusplus
extern "C" {
#endif
#include "bh_thread.h"
#include "bh_memory.h"
#include "bh_assert.h"
#include "ems_gc.h"
/* basic block managed by EMS gc is the so-called HMU (heap memory unit)*/
typedef enum _hmu_type_enum
{
HMU_TYPE_MIN = 0,
HMU_TYPE_MAX = 3,
HMU_JO = 3,
HMU_VO = 2,
HMU_FC = 1,
HMU_FM = 0
}hmu_type_t;
typedef struct _hmu_struct
{
gc_uint32 header;
}hmu_t;
#if defined(GC_VERIFY)
#define GC_OBJECT_PREFIX_PADDING_CNT 3
#define GC_OBJECT_SUFFIX_PADDING_CNT 4
#define GC_OBJECT_PADDING_VALUE (0x12345678)
typedef struct _gc_object_prefix
{
const char *file_name;
gc_int32 line_no;
gc_int32 size;
gc_uint32 padding[GC_OBJECT_PREFIX_PADDING_CNT];
}gc_object_prefix_t;
#define OBJ_PREFIX_SIZE (sizeof(gc_object_prefix_t))
typedef struct _gc_object_suffix
{
gc_uint32 padding[GC_OBJECT_SUFFIX_PADDING_CNT];
}gc_object_suffix_t;
#define OBJ_SUFFIX_SIZE (sizeof(gc_object_suffix_t))
extern void hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size, const char *file_name, int line_no);
extern void hmu_verify(hmu_t *hmu);
#define SKIP_OBJ_PREFIX(p) ((void*)((gc_uint8*)(p) + OBJ_PREFIX_SIZE))
#define SKIP_OBJ_SUFFIX(p) ((void*)((gc_uint8*)(p) + OBJ_SUFFIX_SIZE))
#define OBJ_EXTRA_SIZE (HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE)
#else
#define OBJ_PREFIX_SIZE 0
#define OBJ_SUFFIX_SIZE 0
#define SKIP_OBJ_PREFIX(p) ((void*)((gc_uint8*)(p) + OBJ_PREFIX_SIZE))
#define SKIP_OBJ_SUFFIX(p) ((void*)((gc_uint8*)(p) + OBJ_SUFFIX_SIZE))
#define OBJ_EXTRA_SIZE (HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE)
#endif /* GC_VERIFY*/
#define hmu_obj_size(s) ((s)-OBJ_EXTRA_SIZE)
#define GC_ALIGN_8(s) (((int)(s) + 7) & ~7)
#define GC_SMALLEST_SIZE GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE + 8)
#define GC_GET_REAL_SIZE(x) GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE + (((x) > 8) ? (x): 8))
/*////// functions for bit operation*/
#define SETBIT(v, offset) (v) |= (1 << (offset))
#define GETBIT(v, offset) ((v) & (1 << (offset)) ? 1 : 0)
#define CLRBIT(v, offset) (v) &= ~(1 << (offset))
#define SETBITS(v, offset, size, value) do { \
(v) &= ~(((1 << (size)) - 1) << (offset)); \
(v) |= (value) << (offset); \
} while(0)
#define CLRBITS(v, offset, size) (v) &= ~(((1 << (size)) - 1) << (offset))
#define GETBITS(v, offset, size) (((v) & (((1 << (size)) - 1) << (offset))) >> (offset))
/*////// gc object layout definition*/
#define HMU_SIZE (sizeof(hmu_t))
#define hmu_to_obj(hmu) (gc_object_t)(SKIP_OBJ_PREFIX((hmu_t*) (hmu) + 1))
#define obj_to_hmu(obj) ((hmu_t *)((gc_uint8*)(obj) - OBJ_PREFIX_SIZE) - 1)
#define HMU_UT_SIZE 2
#define HMU_UT_OFFSET 30
#define hmu_get_ut(hmu) GETBITS ((hmu)->header, HMU_UT_OFFSET, HMU_UT_SIZE)
#define hmu_set_ut(hmu, type) SETBITS ((hmu)->header, HMU_UT_OFFSET, HMU_UT_SIZE, type)
#define hmu_is_ut_valid(tp) (tp >= HMU_TYPE_MIN && tp <= HMU_TYPE_MAX)
/* P in use bit means the previous chunk is in use */
#define HMU_P_OFFSET 29
#define hmu_mark_pinuse(hmu) SETBIT ((hmu)->header, HMU_P_OFFSET)
#define hmu_unmark_pinuse(hmu) CLRBIT ((hmu)->header, HMU_P_OFFSET)
#define hmu_get_pinuse(hmu) GETBIT ((hmu)->header, HMU_P_OFFSET)
#define HMU_JO_VT_SIZE 27
#define HMU_JO_VT_OFFSET 0
#define HMU_JO_MB_OFFSET 28
#define hmu_mark_jo(hmu) SETBIT ((hmu)->header, HMU_JO_MB_OFFSET)
#define hmu_unmark_jo(hmu) CLRBIT ((hmu)->header, HMU_JO_MB_OFFSET)
#define hmu_is_jo_marked(hmu) GETBIT ((hmu)->header, HMU_JO_MB_OFFSET)
#define HMU_SIZE_SIZE 27
#define HMU_SIZE_OFFSET 0
#define HMU_VO_FB_OFFSET 28
#define hmu_is_vo_freed(hmu) GETBIT ((hmu)->header, HMU_VO_FB_OFFSET)
#define hmu_unfree_vo(hmu) CLRBIT ((hmu)->header, HMU_VO_FB_OFFSET)
#define hmu_get_size(hmu) GETBITS ((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE)
#define hmu_set_size(hmu, size) SETBITS ((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE, size)
/*////// HMU free chunk management*/
#define HMU_NORMAL_NODE_CNT 32
#define HMU_FC_NORMAL_MAX_SIZE ((HMU_NORMAL_NODE_CNT - 1) << 3)
#define HMU_IS_FC_NORMAL(size) ((size) < HMU_FC_NORMAL_MAX_SIZE)
#if HMU_FC_NORMAL_MAX_SIZE >= GC_MAX_HEAP_SIZE
# error "Too small GC_MAX_HEAP_SIZE"
#endif
typedef struct _hmu_normal_node
{
hmu_t hmu_header;
struct _hmu_normal_node *next;
}hmu_normal_node_t;
typedef struct _hmu_tree_node
{
hmu_t hmu_header;
gc_size_t size;
struct _hmu_tree_node *left;
struct _hmu_tree_node *right;
struct _hmu_tree_node *parent;
}hmu_tree_node_t;
typedef struct _gc_heap_struct
{
gc_handle_t heap_id; /* for double checking*/
gc_uint8 *base_addr;
gc_size_t current_size;
gc_size_t max_size;
korp_mutex lock;
hmu_normal_node_t kfc_normal_list[HMU_NORMAL_NODE_CNT];
/* order in kfc_tree is: size[left] <= size[cur] < size[right]*/
hmu_tree_node_t kfc_tree_root;
/* for rootset enumeration of private heap*/
void *root_set;
/* whether the fast mode of the marking process, which requires
additional memory, has failed. When the fast mode fails, the
marking process can still be done in the slow mode, which
doesn't need additional memory (by walking through all
blocks and marking successors of marked nodes until no new
node is marked). TODO: slow mode is not implemented. */
unsigned is_fast_marking_failed : 1;
#if GC_STAT_DATA != 0
gc_size_t highmark_size;
gc_size_t init_size;
gc_size_t total_gc_count;
gc_size_t total_free_size;
gc_size_t gc_threshold;
gc_size_t gc_threshold_factor;
gc_int64 total_gc_time;
#endif
}gc_heap_t;
/*////// MISC internal used APIs*/
extern void gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size);
extern int gci_is_heap_valid(gc_heap_t *heap);
#ifdef GC_DEBUG
extern void gci_verify_heap(gc_heap_t *heap);
extern void gci_dump(char* buf, gc_heap_t *heap);
#endif
#if GC_STAT_DATA != 0
/* the default GC threshold size is free_size * GC_DEFAULT_THRESHOLD_FACTOR / 1000 */
#define GC_DEFAULT_THRESHOLD_FACTOR 400
static inline void gc_update_threshold(gc_heap_t *heap)
{
heap->gc_threshold = heap->total_free_size * heap->gc_threshold_factor / 1000;
}
#endif
/*////// MISC data structures*/
#define MARK_NODE_OBJ_CNT 256
/* mark node is used for gc marker*/
typedef struct _mark_node_struct
{
/* number of to-expand objects can be saved in this node*/
gc_size_t cnt;
/* the first unused index*/
int idx;
/* next node on the node list*/
struct _mark_node_struct *next;
/* the actual to-expand objects list*/
gc_object_t set[MARK_NODE_OBJ_CNT];
}mark_node_t;
/*////// Imported APIs wrappers under TEST mode*/
#ifdef GC_TEST
extern int (*gct_vm_get_java_object_ref_list)(
gc_object_t obj,
int *is_compact_mode, /* can be set to GC_TRUE, or GC_FALSE */
gc_size_t *ref_num,
gc_uint16 **ref_list,
gc_uint32 *ref_start_offset);
extern int (*gct_vm_mutex_init)(korp_mutex *mutex);
extern int (*gct_vm_mutex_destroy)(korp_mutex *mutex);
extern int (*gct_vm_mutex_lock)(korp_mutex *mutex);
extern int (*gct_vm_mutex_unlock)(korp_mutex *mutex);
extern gc_handle_t (*gct_vm_get_gc_handle_for_current_instance)(void);
extern int (*gct_vm_begin_rootset_enumeration)(void* heap);
extern int (*gct_vm_gc_finished)(void);
#else
#define gct_vm_get_java_object_ref_list bh_get_java_object_ref_list
#define gct_vm_mutex_init vm_mutex_init
#define gct_vm_mutex_destroy vm_mutex_destroy
#define gct_vm_mutex_lock vm_mutex_lock
#define gct_vm_mutex_unlock vm_mutex_unlock
#define gct_vm_get_gc_handle_for_current_instance app_manager_get_cur_applet_heap
#define gct_vm_begin_rootset_enumeration vm_begin_rootset_enumeration
#define gct_vm_gc_finished jeff_runtime_gc_finished
#endif
#ifdef __cplusplus
}
#endif
#endif
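
The hmu_t header packs everything into a single gc_uint32: the usage type in bits 30..31, the P (previous-in-use) flag in bit 29, the JO mark / VO free flag in bit 28, and the chunk size in bits 0..26. A standalone sketch of that packing (illustrative; it mirrors the bit macros above, with unsigned literals to sidestep signed-shift overflow):

#include <stdio.h>

#define SETBIT(v, offset) (v) |= (1u << (offset))
#define GETBIT(v, offset) ((v) & (1u << (offset)) ? 1 : 0)
#define SETBITS(v, offset, size, value) do { \
    (v) &= ~(((1u << (size)) - 1) << (offset)); \
    (v) |= (value) << (offset); \
} while(0)
#define GETBITS(v, offset, size) (((v) & (((1u << (size)) - 1) << (offset))) >> (offset))

int main(void)
{
    unsigned header = 0;
    SETBITS(header, 30, 2, 1u); /* ut = HMU_FC */
    SETBIT(header, 29);         /* previous chunk is in use */
    SETBITS(header, 0, 27, 48u); /* chunk size = 48 bytes */
    /* prints: header=0x60000030 ut=1 p=1 size=48 */
    printf("header=0x%08x ut=%u p=%d size=%u\n", header,
           GETBITS(header, 30, 2), GETBIT(header, 29),
           GETBITS(header, 0, 27));
    return 0;
}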

View File

@@ -0,0 +1,98 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ems_gc_internal.h"
#if defined(GC_VERIFY)
/* Set default value to prefix and suffix*/
/* @hmu should not be NULL and it should have been correctly initialized (except for the prefix and suffix parts)*/
/* @tot_size is passed in because hmu_get_size can not be used yet. @tot_size should not be smaller than OBJ_EXTRA_SIZE.*/
/* For VO, @tot_size should be equal to the object's total size.*/
void hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size, const char *file_name, int line_no)
{
gc_object_prefix_t *prefix = NULL;
gc_object_suffix_t *suffix = NULL;
gc_uint32 i = 0;
bh_assert(hmu);
bh_assert(hmu_get_ut(hmu) == HMU_JO || hmu_get_ut(hmu) == HMU_VO);
bh_assert(tot_size >= OBJ_EXTRA_SIZE);
bh_assert(!(tot_size & 7));
bh_assert(hmu_get_ut(hmu) != HMU_VO || hmu_get_size(hmu) >= tot_size);
prefix = (gc_object_prefix_t *)(hmu + 1);
suffix = (gc_object_suffix_t *)((gc_uint8*)hmu + tot_size - OBJ_SUFFIX_SIZE);
prefix->file_name = file_name;
prefix->line_no = line_no;
prefix->size = tot_size;
for(i = 0;i < GC_OBJECT_PREFIX_PADDING_CNT;i++)
{
prefix->padding[i] = GC_OBJECT_PADDING_VALUE;
}
for(i = 0;i < GC_OBJECT_SUFFIX_PADDING_CNT;i++)
{
suffix->padding[i] = GC_OBJECT_PADDING_VALUE;
}
}
void hmu_verify(hmu_t *hmu)
{
gc_object_prefix_t *prefix = NULL;
gc_object_suffix_t *suffix = NULL;
gc_uint32 i = 0;
hmu_type_t ut;
gc_size_t size = 0;
int is_padding_ok = 1;
bh_assert(hmu);
ut = hmu_get_ut(hmu);
bh_assert(hmu_is_ut_valid(ut));
prefix = (gc_object_prefix_t *)(hmu + 1);
size = prefix->size;
suffix = (gc_object_suffix_t *)((gc_uint8*)hmu + size - OBJ_SUFFIX_SIZE);
if(ut == HMU_VO || ut == HMU_JO)
{
/* check padding*/
for(i = 0;i < GC_OBJECT_PREFIX_PADDING_CNT;i++)
{
if(prefix->padding[i] != GC_OBJECT_PADDING_VALUE)
{
is_padding_ok = 0;
break;
}
}
for(i = 0;i < GC_OBJECT_SUFFIX_PADDING_CNT;i++)
{
if(suffix->padding[i] != GC_OBJECT_PADDING_VALUE)
{
is_padding_ok = 0;
break;
}
}
if(!is_padding_ok)
{
printf("Invalid padding for object created at %s:%d",
(prefix->file_name ? prefix->file_name : ""), prefix->line_no);
}
bh_assert(is_padding_ok);
}
}
#endif
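
Under GC_VERIFY each payload is fenced by padding words on both sides. A quick way to see the resulting overhead is the sketch below (illustrative; it mirrors the prefix/suffix structures, and the quoted byte counts assume the 32-bit target this code checks for, so a 64-bit host will print a larger prefix):

#include <stdio.h>
#include <stdint.h>

#define GC_OBJECT_PREFIX_PADDING_CNT 3
#define GC_OBJECT_SUFFIX_PADDING_CNT 4

typedef struct {
    const char *file_name;
    int32_t line_no;
    int32_t size;
    uint32_t padding[GC_OBJECT_PREFIX_PADDING_CNT];
} prefix_t;

typedef struct {
    uint32_t padding[GC_OBJECT_SUFFIX_PADDING_CNT];
} suffix_t;

int main(void)
{
    /* layout of one verified chunk:
       [hmu_t header][prefix][user payload][suffix]
       on a 32-bit target: 4 + 24 + payload + 16 bytes */
    printf("prefix=%zu suffix=%zu\n", sizeof(prefix_t), sizeof(suffix_t));
    return 0;
}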

View File

@@ -0,0 +1,196 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ems_gc_internal.h"
#if !defined(NVALGRIND)
#include <valgrind/memcheck.h>
#endif
#define HEAP_INC_FACTOR 1
/* Check if current platform is compatible with current GC design*/
/* Return GC_ERROR if not;*/
/* Return GC_SUCCESS otherwise.*/
int gci_check_platform(void)
{
#define CHECK(x, y) do { \
if((x) != (y)) { \
printf("Platform checking failed on LINE %d at FILE %s.", \
__LINE__, __FILE__); \
return GC_ERROR; \
} \
} while(0)
CHECK(8, sizeof(gc_int64));
CHECK(4, sizeof(gc_uint32));
CHECK(4, sizeof(gc_int32));
CHECK(2, sizeof(gc_uint16));
CHECK(2, sizeof(gc_int16));
CHECK(1, sizeof(gc_int8));
CHECK(1, sizeof(gc_uint8));
CHECK(4, sizeof(gc_size_t));
CHECK(4, sizeof(void *));
return GC_SUCCESS;
}
gc_handle_t gc_init_with_pool(char *buf, gc_size_t buf_size)
{
char *buf_end = buf + buf_size;
char *buf_aligned = (char*) (((uintptr_t) buf + 7) & ~7);
char *base_addr = buf_aligned + sizeof(gc_heap_t);
gc_heap_t *heap = (gc_heap_t*) buf_aligned;
gc_size_t heap_max_size;
hmu_normal_node_t *p = NULL;
hmu_tree_node_t *root = NULL, *q = NULL;
int i = 0, ret;
/* check system compatibility*/
if (gci_check_platform() == GC_ERROR) {
printf("Check platform compatibility failed");
return NULL;
}
if (buf_size < 1024) {
printf("[GC_ERROR]heap_init_size(%d) < 1024", buf_size);
return NULL;
}
base_addr = (char*) (((uintptr_t) base_addr + 7) & ~7) + GC_HEAD_PADDING;
heap_max_size = (buf_end - base_addr) & ~7;
memset(heap, 0, sizeof *heap);
memset(base_addr, 0, heap_max_size);
ret = gct_vm_mutex_init(&heap->lock);
if (ret != BHT_OK) {
printf("[GC_ERROR]failed to init lock ");
return NULL;
}
#ifdef BH_FOOTPRINT
printf("\nINIT HEAP 0x%08x %d\n", base_addr, heap_max_size);
#endif
/* init all data structures*/
heap->max_size = heap_max_size;
heap->current_size = heap_max_size;
heap->base_addr = (gc_uint8*) base_addr;
heap->heap_id = (gc_handle_t) heap;
#if GC_STAT_DATA != 0
heap->total_free_size = heap->current_size;
heap->highmark_size = 0;
heap->total_gc_count = 0;
heap->total_gc_time = 0;
heap->gc_threshold_factor = GC_DEFAULT_THRESHOLD_FACTOR;
gc_update_threshold(heap);
#endif
for (i = 0; i < HMU_NORMAL_NODE_CNT; i++) {
/* make normal node look like a FC*/
p = &heap->kfc_normal_list[i];
memset(p, 0, sizeof *p);
hmu_set_ut(&p->hmu_header, HMU_FC);
hmu_set_size(&p->hmu_header, sizeof *p);
}
root = &heap->kfc_tree_root;
memset(root, 0, sizeof *root);
root->size = sizeof *root;
hmu_set_ut(&root->hmu_header, HMU_FC);
hmu_set_size(&root->hmu_header, sizeof *root);
q = (hmu_tree_node_t *) heap->base_addr;
memset(q, 0, sizeof *q);
hmu_set_ut(&q->hmu_header, HMU_FC);
hmu_set_size(&q->hmu_header, heap->current_size);
hmu_mark_pinuse(&q->hmu_header);
root->right = q;
q->parent = root;
q->size = heap->current_size;
bh_assert(
root->size <= HMU_FC_NORMAL_MAX_SIZE
&& HMU_FC_NORMAL_MAX_SIZE < q->size); /*@NOTIFY*/
#if BH_ENABLE_MEMORY_PROFILING != 0
printf("heap is successfully initialized with max_size=%u.",
heap_max_size);
#endif
return heap;
}
int gc_destroy_with_pool(gc_handle_t handle)
{
gc_heap_t *heap = (gc_heap_t *) handle;
gct_vm_mutex_destroy(&heap->lock);
memset(heap->base_addr, 0, heap->max_size);
memset(heap, 0, sizeof(gc_heap_t));
return GC_SUCCESS;
}
#if defined(GC_VERIFY)
/* Verify heap integrity*/
/* @heap should not be NULL and it should be a valid heap*/
void gci_verify_heap(gc_heap_t *heap)
{
hmu_t *cur = NULL, *end = NULL;
bh_assert(heap && gci_is_heap_valid(heap));
cur = (hmu_t *)heap->base_addr;
end = (hmu_t *)(heap->base_addr + heap->current_size);
while(cur < end)
{
hmu_verify(cur);
cur = (hmu_t *)((gc_uint8*)cur + hmu_get_size(cur));
}
bh_assert(cur == end);
}
#endif
void* gc_heap_stats(void *heap_arg, int* stats, int size, gc_mm_t mmt)
{
int i;
gc_heap_t *heap = (gc_heap_t *) heap_arg;
(void) mmt;
for (i = 0; i < size; i++) {
switch (i) {
case GC_STAT_TOTAL:
stats[i] = heap->current_size;
break;
case GC_STAT_FREE:
stats[i] = heap->total_free_size;
break;
case GC_STAT_HIGHMARK:
stats[i] = heap->highmark_size;
break;
case GC_STAT_COUNT:
stats[i] = heap->total_gc_count;
break;
case GC_STAT_TIME:
stats[i] = (int) heap->total_gc_time;
break;
default:
break;
}
}
return heap;
}
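
The pointer arithmetic at the top of gc_init_with_pool is worth tracing: the gc_heap_t control block sits at the 8-aligned start of the buffer, and GC_HEAD_PADDING deliberately leaves base_addr at 4 mod 8 so that hmu_to_obj() results (header plus prefix past the chunk start) land on 8-byte boundaries. A standalone trace (illustrative; the pool address and sizeof(gc_heap_t) value are hypothetical):

#include <stdio.h>
#include <stdint.h>

#define GC_HEAD_PADDING 4

int main(void)
{
    uintptr_t buf = 0x10000004;  /* hypothetical pool address */
    uintptr_t buf_size = 64 * 1024;
    uintptr_t sizeof_heap = 320; /* hypothetical sizeof(gc_heap_t) */
    uintptr_t buf_end, buf_aligned, base_addr, heap_max_size;

    buf_end = buf + buf_size;
    buf_aligned = (buf + 7) & ~(uintptr_t)7; /* gc_heap_t control block lives here */
    base_addr = buf_aligned + sizeof_heap;
    base_addr = ((base_addr + 7) & ~(uintptr_t)7) + GC_HEAD_PADDING;
    heap_max_size = (buf_end - base_addr) & ~(uintptr_t)7;

    /* base_addr % 8 == 4, so payloads at base_addr + 4 are 8-byte aligned */
    printf("heap struct @ 0x%lx, chunks @ 0x%lx (mod 8 = %lu), usable %lu bytes\n",
           (unsigned long)buf_aligned, (unsigned long)base_addr,
           (unsigned long)(base_addr % 8), (unsigned long)heap_max_size);
    return 0;
}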

View File

@@ -0,0 +1,136 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "mem_alloc.h"
#include "config.h"
#if DEFAULT_MEM_ALLOCATOR == MEM_ALLOCATOR_EMS
#include "ems/ems_gc.h"
mem_allocator_t mem_allocator_create(void *mem, uint32_t size)
{
return gc_init_with_pool((char *) mem, size);
}
void mem_allocator_destroy(mem_allocator_t allocator)
{
gc_destroy_with_pool((gc_handle_t) allocator);
}
void *
mem_allocator_malloc(mem_allocator_t allocator, uint32_t size)
{
return gc_alloc_vo_h((gc_handle_t) allocator, size);
}
void mem_allocator_free(mem_allocator_t allocator, void *ptr)
{
if (ptr)
gc_free_h((gc_handle_t) allocator, ptr);
}
#else /* else of DEFAULT_MEM_ALLOCATOR */
#include "tlsf/tlsf.h"
#include "bh_thread.h"
typedef struct mem_allocator_tlsf {
tlsf_t tlsf;
korp_mutex lock;
}mem_allocator_tlsf;
mem_allocator_t
mem_allocator_create(void *mem, uint32_t size)
{
mem_allocator_tlsf *allocator_tlsf;
tlsf_t tlsf;
char *mem_aligned = (char*)(((uintptr_t)mem + 3) & ~3);
if (size < 1024) {
printf("Create mem allocator failed: pool size must be "
"at least 1024 bytes.\n");
return NULL;
}
size -= mem_aligned - (char*)mem;
mem = (void*)mem_aligned;
tlsf = tlsf_create_with_pool(mem, size);
if (!tlsf) {
printf("Create mem allocator failed: tlsf_create_with_pool failed.\n");
return NULL;
}
allocator_tlsf = tlsf_malloc(tlsf, sizeof(mem_allocator_tlsf));
if (!allocator_tlsf) {
printf("Create mem allocator failed: tlsf_malloc failed.\n");
tlsf_destroy(tlsf);
return NULL;
}
allocator_tlsf->tlsf = tlsf;
if (vm_mutex_init(&allocator_tlsf->lock)) {
printf("Create mem allocator failed: tlsf_malloc failed.\n");
tlsf_free(tlsf, allocator_tlsf);
tlsf_destroy(tlsf);
return NULL;
}
return allocator_tlsf;
}
void
mem_allocator_destroy(mem_allocator_t allocator)
{
mem_allocator_tlsf *allocator_tlsf = (mem_allocator_tlsf *)allocator;
tlsf_t tlsf = allocator_tlsf->tlsf;
vm_mutex_destroy(&allocator_tlsf->lock);
tlsf_free(tlsf, allocator_tlsf);
tlsf_destroy(tlsf);
}
void *
mem_allocator_malloc(mem_allocator_t allocator, uint32_t size)
{
void *ret;
mem_allocator_tlsf *allocator_tlsf = (mem_allocator_tlsf *)allocator;
if (size == 0)
/* tlsf doesn't allow allocating 0 bytes */
size = 1;
vm_mutex_lock(&allocator_tlsf->lock);
ret = tlsf_malloc(allocator_tlsf->tlsf, size);
vm_mutex_unlock(&allocator_tlsf->lock);
return ret;
}
void
mem_allocator_free(mem_allocator_t allocator, void *ptr)
{
if (ptr) {
mem_allocator_tlsf *allocator_tlsf = (mem_allocator_tlsf *)allocator;
vm_mutex_lock(&allocator_tlsf->lock);
tlsf_free(allocator_tlsf->tlsf, ptr);
vm_mutex_unlock(&allocator_tlsf->lock);
}
}
#endif /* end of DEFAULT_MEM_ALLOCATOR */
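
For reference, a minimal consumer of this allocator facade (illustrative; the pool size and main harness are made up). The same four calls work whether the EMS or the TLSF backend was selected at build time:

#include "mem_alloc.h"

static char pool[64 * 1024];

int main(void)
{
    mem_allocator_t alloc = mem_allocator_create(pool, sizeof(pool));
    if (!alloc)
        return -1;

    void *p = mem_allocator_malloc(alloc, 256);
    if (p)
        mem_allocator_free(alloc, p);

    mem_allocator_destroy(alloc);
    return 0;
}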

View File

@@ -0,0 +1,27 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set (MEM_ALLOC_DIR ${CMAKE_CURRENT_LIST_DIR})
include_directories(${MEM_ALLOC_DIR})
file (GLOB_RECURSE source_all
${MEM_ALLOC_DIR}/ems/*.c
${MEM_ALLOC_DIR}/tlsf/*.c
${MEM_ALLOC_DIR}/mem_alloc.c
${MEM_ALLOC_DIR}/bh_memory.c)
set (MEM_ALLOC_SHARED_SOURCE ${source_all})