re-org ems mem allocator source codes, update prot_wamr.md (#217)

This commit is contained in:
Xu Jun 2020-03-30 11:06:39 +08:00 committed by GitHub
parent 31feaa0a88
commit d9890d2ccb
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
11 changed files with 365 additions and 654 deletions

View File

@ -118,7 +118,10 @@ enum {
#endif
/* Heap and stack profiling */
#define BEIHAI_ENABLE_MEMORY_PROFILING 0
#define BH_ENABLE_MEMORY_PROFILING 0
/* Heap verification */
#define BH_ENABLE_GC_VERIFY 0
/* Max app number of all modules */
#define MAX_APP_INSTALLATIONS 3

View File

@ -1602,7 +1602,7 @@ load_from_sections(AOTModule *module, AOTSection *sections,
return true;
}
#if BEIHAI_ENABLE_MEMORY_PROFILING != 0
#if BH_ENABLE_MEMORY_PROFILING != 0
static void aot_free(void *ptr)
{
wasm_runtime_free(ptr);

View File

@ -7,7 +7,7 @@
#include "bh_platform.h"
#include "mem_alloc.h"
#if BEIHAI_ENABLE_MEMORY_PROFILING != 0
#if BH_ENABLE_MEMORY_PROFILING != 0
/* Memory profile data of a function */
typedef struct memory_profile {
@ -30,7 +30,7 @@ static memory_profile_t *memory_profiles_list = NULL;
/* Lock of the memory profile list */
static korp_mutex profile_lock;
#endif /* end of BEIHAI_ENABLE_MEMORY_PROFILING */
#endif /* end of BH_ENABLE_MEMORY_PROFILING */
#ifndef MALLOC_MEMORY_FROM_SYSTEM
@ -58,7 +58,7 @@ wasm_memory_init_with_pool(void *mem, unsigned int bytes)
if (_allocator) {
memory_mode = MEMORY_MODE_POOL;
pool_allocator = _allocator;
#if BEIHAI_ENABLE_MEMORY_PROFILING != 0
#if BH_ENABLE_MEMORY_PROFILING != 0
os_mutex_init(&profile_lock);
#endif
global_pool_size = bytes;
@ -78,7 +78,7 @@ wasm_memory_init_with_allocator(void *_malloc_func,
malloc_func = _malloc_func;
realloc_func = _realloc_func;
free_func = _free_func;
#if BEIHAI_ENABLE_MEMORY_PROFILING != 0
#if BH_ENABLE_MEMORY_PROFILING != 0
os_mutex_init(&profile_lock);
#endif
return true;
@ -108,7 +108,7 @@ wasm_runtime_memory_init(mem_alloc_type_t mem_alloc_type,
void
wasm_runtime_memory_destroy()
{
#if BEIHAI_ENABLE_MEMORY_PROFILING != 0
#if BH_ENABLE_MEMORY_PROFILING != 0
os_mutex_destroy(&profile_lock);
#endif
if (memory_mode == MEMORY_MODE_POOL)
@ -166,12 +166,21 @@ wasm_runtime_free(void *ptr)
}
}
#if BEIHAI_ENABLE_MEMORY_PROFILING != 0
#if BH_ENABLE_MEMORY_PROFILING != 0
void
memory_profile_print(const char *file, int line,
const char *func, int alloc)
{
os_printf("location:%s@%d:used:%d:contribution:%d\n",
func, line, memory_in_use, alloc);
}
void *
wasm_runtime_malloc_profile(const char *file, int line,
const char *func, unsigned int size)
{
void *p = wasm_rutime_malloc(size + 8);
void *p = wasm_runtime_malloc(size + 8);
if (p) {
memory_profile_t *profile;
@ -292,15 +301,7 @@ void memory_usage_summarize()
os_mutex_unlock(&profile_lock);
}
void
memory_profile_print(const char *file, int line,
const char *func, int alloc)
{
os_printf("location:%s@%d:used:%d:contribution:%d\n",
func, line, memory_in_use, alloc);
}
#endif /* end of BEIHAI_ENABLE_MEMORY_PROFILING */
#endif /* end of BH_ENABLE_MEMORY_PROFILING */
#else /* else of MALLOC_MEMORY_FROM_SYSTEM */
@ -324,7 +325,7 @@ wasm_runtime_free(void *ptr)
free(ptr);
}
#if BEIHAI_ENABLE_MEMORY_PROFILING != 0
#if BH_ENABLE_MEMORY_PROFILING != 0
void *
wasm_runtime_malloc_profile(const char *file, int line,
const char *func, unsigned int size)
@ -366,6 +367,6 @@ wasm_runtime_free_profile(const char *file, int line,
if (ptr)
free(ptr);
}
#endif /* end of BEIHAI_ENABLE_MEMORY_PROFILING */
#endif /* end of BH_ENABLE_MEMORY_PROFILING */
#endif /* end of MALLOC_MEMORY_FROM_SYSTEM*/

View File

@ -492,8 +492,12 @@ load_memory_import(const uint8 **p_buf, const uint8 *buf_end,
{
const uint8 *p = *p_buf, *p_end = buf_end;
uint32 pool_size = wasm_runtime_memory_pool_size();
#if WASM_ENABLE_APP_FRAMEWORK != 0
uint32 max_page_count = pool_size * APP_MEMORY_MAX_GLOBAL_HEAP_PERCENT
/ DEFAULT_NUM_BYTES_PER_PAGE;
#else
uint32 max_page_count = pool_size / DEFAULT_NUM_BYTES_PER_PAGE;
#endif
read_leb_uint32(p, p_end, memory->flags);
read_leb_uint32(p, p_end, memory->init_page_count);
@ -539,8 +543,12 @@ load_memory(const uint8 **p_buf, const uint8 *buf_end, WASMMemory *memory,
{
const uint8 *p = *p_buf, *p_end = buf_end;
uint32 pool_size = wasm_runtime_memory_pool_size();
#if WASM_ENABLE_APP_FRAMEWORK != 0
uint32 max_page_count = pool_size * APP_MEMORY_MAX_GLOBAL_HEAP_PERCENT
/ DEFAULT_NUM_BYTES_PER_PAGE;
#else
uint32 max_page_count = pool_size / DEFAULT_NUM_BYTES_PER_PAGE;
#endif
read_leb_uint32(p, p_end, memory->flags);
read_leb_uint32(p, p_end, memory->init_page_count);
@ -1694,7 +1702,7 @@ load_from_sections(WASMModule *module, WASMSection *sections,
return true;
}
#if BEIHAI_ENABLE_MEMORY_PROFILING != 0
#if BH_ENABLE_MEMORY_PROFILING != 0
static void wasm_loader_free(void *ptr)
{
wasm_runtime_free(ptr);

View File

@ -6,21 +6,25 @@
#include "ems_gc_internal.h"
static int hmu_is_in_heap(gc_heap_t* heap, hmu_t* hmu)
static int
hmu_is_in_heap(gc_heap_t* heap, hmu_t* hmu)
{
return heap && hmu && (gc_uint8*) hmu >= heap->base_addr
&& (gc_uint8*) hmu < heap->base_addr + heap->current_size;
return heap && hmu
&& (gc_uint8*) hmu >= heap->base_addr
&& (gc_uint8*) hmu < heap->base_addr + heap->current_size;
}
/* Remove a node from the tree it belongs to*/
/* @p can not be NULL*/
/* @p can not be the ROOT node*/
/* Node @p will be removed from the tree and left,right,parent pointers of node @p will be*/
/* set to be NULL. Other fields will not be touched.*/
/* The tree will be re-organized so that the order conditions are still satisfied.*/
static void remove_tree_node(hmu_tree_node_t *p)
/**
* Remove a node from the tree it belongs to
*
* @param p the node to remove, can not be NULL, can not be the ROOT node
* the node will be removed from the tree, and the left, right and
* parent pointers of the node @p will be set to be NULL. Other fields
* won't be touched. The tree will be re-organized so that the order
conditions are still satisfied.
*/
static void
remove_tree_node(hmu_tree_node_t *p)
{
hmu_tree_node_t *q = NULL, **slot = NULL;
@ -35,11 +39,14 @@ static void remove_tree_node(hmu_tree_node_t *p)
slot = &p->parent->left;
}
/* algorithms used to remove node p*/
/* case 1: if p has no left child, replace p with its right child*/
/* case 2: if p has no right child, replace p with its left child*/
/* case 3: otherwise, find p's predecessor, remove it from the tree and replace p with it.*/
/* use predecessor can keep the left <= root < right condition.*/
/**
* algorithms used to remove node p
* case 1: if p has no left child, replace p with its right child
* case 2: if p has no right child, replace p with its left child
* case 3: otherwise, find p's predecessor, remove it from the tree
* and replace p with it.
* use predecessor can keep the left <= root < right condition.
*/
if (!p->left) {
/* move right child up*/
@ -64,7 +71,8 @@ static void remove_tree_node(hmu_tree_node_t *p)
q = p->left;
while (q->right)
q = q->right;
remove_tree_node(q); /* remove from the tree*/
/* remove from the tree*/
remove_tree_node(q);
*slot = q;
q->parent = p->parent;
@ -78,14 +86,14 @@ static void remove_tree_node(hmu_tree_node_t *p)
p->left = p->right = p->parent = NULL;
}
static void unlink_hmu(gc_heap_t *heap, hmu_t *hmu)
static void
unlink_hmu(gc_heap_t *heap, hmu_t *hmu)
{
gc_size_t size;
bh_assert(gci_is_heap_valid(heap));
bh_assert(
hmu && (gc_uint8*) hmu >= heap->base_addr
&& (gc_uint8*) hmu < heap->base_addr + heap->current_size);
bh_assert(hmu && (gc_uint8*) hmu >= heap->base_addr
&& (gc_uint8*) hmu < heap->base_addr + heap->current_size);
bh_assert(hmu_get_ut(hmu) == HMU_FC);
size = hmu_get_size(hmu);
@ -105,45 +113,46 @@ static void unlink_hmu(gc_heap_t *heap, hmu_t *hmu)
}
if (!node) {
os_printf("[GC_ERROR]couldn't find the node in the normal list");
os_printf("[GC_ERROR]couldn't find the node in the normal list\n");
}
} else {
}
else {
remove_tree_node((hmu_tree_node_t *) hmu);
}
}
static void hmu_set_free_size(hmu_t *hmu)
static void
hmu_set_free_size(hmu_t *hmu)
{
gc_size_t size;
bh_assert(hmu && hmu_get_ut(hmu) == HMU_FC);
size = hmu_get_size(hmu);
*((uint32*) ((char*) hmu + size) - 1) = size;
*((uint32*)((char*) hmu + size) - 1) = size;
}
/* Add free chunk back to KFC*/
/* @heap should not be NULL and it should be a valid heap*/
/* @hmu should not be NULL and it should be a HMU of length @size inside @heap*/
/* @hmu should be aligned to 8*/
/* @size should be positive and multiple of 8*/
/* @hmu with size @size will be added into KFC as a new FC.*/
void gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
/**
* Add free chunk back to KFC
*
* @param heap should not be NULL and it should be a valid heap
* @param hmu should not be NULL and it should be a HMU of length @size inside @heap
* hmu should be 8-bytes aligned
* @param size should be positive and multiple of 8
* hmu with size @size will be added into KFC as a new FC.
*/
void
gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
{
hmu_normal_node_t *np = NULL;
hmu_tree_node_t *root = NULL, *tp = NULL, *node = NULL;
uint32 node_idx;
bh_assert(gci_is_heap_valid(heap));
bh_assert(
hmu && (gc_uint8*) hmu >= heap->base_addr
&& (gc_uint8*) hmu < heap->base_addr + heap->current_size);
bh_assert(hmu && (gc_uint8*) hmu >= heap->base_addr
&& (gc_uint8*) hmu < heap->base_addr + heap->current_size);
bh_assert(((gc_uint32)(uintptr_t)hmu_to_obj(hmu) & 7) == 0);
bh_assert(
size > 0
&& ((gc_uint8*) hmu) + size
<= heap->base_addr + heap->current_size);
bh_assert(size > 0
&& ((gc_uint8*) hmu) + size <= heap->base_addr + heap->current_size);
bh_assert(!(size & 7));
hmu_set_ut(hmu, HMU_FC);
@ -154,7 +163,8 @@ void gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
np = (hmu_normal_node_t*) hmu;
node_idx = size >> 3;
np->next = heap->kfc_normal_list[node_idx].next;
set_hmu_normal_node_next(np, get_hmu_normal_node_next
(&heap->kfc_normal_list[node_idx]));
set_hmu_normal_node_next(&heap->kfc_normal_list[node_idx], np);
return;
}
@ -176,8 +186,8 @@ void gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
break;
}
tp = tp->right;
} else /* tp->size >= size*/
{
}
else { /* tp->size >= size*/
if (!tp->left) {
tp->left = node;
node->parent = tp;
@ -188,17 +198,19 @@ void gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size)
}
}
/* Find a proper hmu for required memory size*/
/* @heap should not be NULL and it should be a valid heap*/
/* @size should cover the header and it should be 8 bytes aligned*/
/* GC will not be performed here.*/
/* Heap extension will not be performed here.*/
/* A proper HMU will be returned. This HMU can include the header and given size. The returned HMU will be aligned to 8 bytes.*/
/* NULL will be returned if there are no proper HMU.*/
static hmu_t *alloc_hmu(gc_heap_t *heap, gc_size_t size)
/**
* Find a proper hmu for required memory size
*
* @param heap should not be NULL and should be a valid heap
* @param size should cover the header and should be 8 bytes aligned
* GC will not be performed here.
* Heap extension will not be performed here.
*
* @return hmu allocated if success, which will be aligned to 8 bytes,
* NULL otherwise
*/
static hmu_t *
alloc_hmu(gc_heap_t *heap, gc_size_t size)
{
hmu_normal_node_t *node = NULL, *p = NULL;
uint32 node_idx = 0, init_node_idx = 0;
@ -228,31 +240,31 @@ static hmu_t *alloc_hmu(gc_heap_t *heap, gc_size_t size)
bh_assert(node_idx >= init_node_idx);
p = get_hmu_normal_node_next(node);
node->next = p->next;
set_hmu_normal_node_next(node, get_hmu_normal_node_next(p));
bh_assert(((gc_int32)(uintptr_t)hmu_to_obj(p) & 7) == 0);
if ((gc_size_t)node_idx != (uint32)init_node_idx
&& ((gc_size_t)node_idx << 3) >= size + GC_SMALLEST_SIZE) { /* with bigger size*/
/* with bigger size*/
&& ((gc_size_t)node_idx << 3) >= size + GC_SMALLEST_SIZE) {
rest = (hmu_t*) (((char *) p) + size);
gci_add_fc(heap, rest, (node_idx << 3) - size);
hmu_mark_pinuse(rest);
} else {
}
else {
size = node_idx << 3;
next = (hmu_t*) ((char*) p + size);
if (hmu_is_in_heap(heap, next))
hmu_mark_pinuse(next);
}
#if GC_STAT_DATA != 0
heap->total_free_size -= size;
if ((heap->current_size - heap->total_free_size)
> heap->highmark_size)
> heap->highmark_size)
heap->highmark_size = heap->current_size
- heap->total_free_size;
#endif
- heap->total_free_size;
hmu_set_size((hmu_t* ) p, size);
return (hmu_t*) p;
hmu_set_size((hmu_t* )p, size);
return (hmu_t*)p;
}
}
@ -285,18 +297,18 @@ static hmu_t *alloc_hmu(gc_heap_t *heap, gc_size_t size)
rest = (hmu_t*) ((char*) last_tp + size);
gci_add_fc(heap, rest, last_tp->size - size);
hmu_mark_pinuse(rest);
} else {
}
else {
size = last_tp->size;
next = (hmu_t*) ((char*) last_tp + size);
if (hmu_is_in_heap(heap, next))
hmu_mark_pinuse(next);
}
#if GC_STAT_DATA != 0
heap->total_free_size -= size;
if ((heap->current_size - heap->total_free_size) > heap->highmark_size)
heap->highmark_size = heap->current_size - heap->total_free_size;
#endif
hmu_set_size((hmu_t* ) last_tp, size);
return (hmu_t*) last_tp;
}
@ -304,52 +316,41 @@ static hmu_t *alloc_hmu(gc_heap_t *heap, gc_size_t size)
return NULL;
}
/* Find a proper HMU for given size*/
/* @heap should not be NULL and it should be a valid heap*/
/* @size should cover the header and it should be 8 bytes aligned*/
/* This function will try several ways to satisfy the allocation request.*/
/* 1. Find a proper one on available HMUs.*/
/* 2. GC will be triggered if 1 failed.*/
/* 3. Find a proper one on available HMUs.*/
/* 4. Return NULL if 3 failed*/
/* A proper HMU will be returned. This HMU can include the header and given size. The returned HMU will be aligned to 8 bytes.*/
/* NULL will be returned if there are no proper HMU.*/
static hmu_t* alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
/**
* Find a proper HMU with given size
*
* @param heap should not be NULL and should be a valid heap
* @param size should cover the header and should be 8 bytes aligned
*
* Note: This function will try several ways to satisfy the allocation request:
* 1. Find a proper one on available HMUs.
* 2. GC will be triggered if 1 failed.
* 3. Find a proper one on available HMUs.
* 4. Return NULL if 3 failed
*
* @return hmu allocated if success, which will be aligned to 8 bytes,
* NULL otherwise
*/
static hmu_t *
alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
{
hmu_t *ret = NULL;
bh_assert(gci_is_heap_valid(heap));
bh_assert(size > 0 && !(size & 7));
#ifdef GC_IN_EVERY_ALLOCATION
gci_gc_heap(heap);
ret = alloc_hmu(heap, size);
#else
# if GC_STAT_DATA != 0
if (heap->gc_threshold < heap->total_free_size)
ret = alloc_hmu(heap, size);
# else
ret = alloc_hmu(heap, size);
# endif
if (ret)
return ret;
/*gci_gc_heap(heap);*//* disable gc claim currently */
ret = alloc_hmu(heap, size);
#endif
return ret;
return alloc_hmu(heap, size);
}
unsigned long g_total_malloc = 0;
unsigned long g_total_free = 0;
static unsigned long g_total_malloc = 0;
static unsigned long g_total_free = 0;
gc_object_t _gc_alloc_vo_i_heap(void *vheap,
gc_size_t size ALLOC_EXTRA_PARAMETERS)
#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_alloc_vo(void *vheap, gc_size_t size)
#else
gc_object_t
gc_alloc_vo_internal(void *vheap, gc_size_t size,
const char *file, int line)
#endif
{
gc_heap_t* heap = (gc_heap_t*) vheap;
hmu_t *hmu = NULL;
@ -364,19 +365,19 @@ gc_object_t _gc_alloc_vo_i_heap(void *vheap,
/* integer overflow */
return NULL;
gct_vm_mutex_lock(&heap->lock);
os_mutex_lock(&heap->lock);
hmu = alloc_hmu_ex(heap, tot_size);
if (!hmu)
goto FINISH;
goto finish;
g_total_malloc += tot_size;
hmu_set_ut(hmu, HMU_VO);
hmu_unfree_vo(hmu);
#if defined(GC_VERIFY)
hmu_init_prefix_and_suffix(hmu, tot_size, file_name, line_number);
#if BH_ENABLE_GC_VERIFY != 0
hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif
ret = hmu_to_obj(hmu);
@ -385,17 +386,22 @@ gc_object_t _gc_alloc_vo_i_heap(void *vheap,
memset((uint8*)ret + size, 0, tot_size - tot_size_unaligned);
#if BH_ENABLE_MEMORY_PROFILING != 0
os_printf("HEAP.ALLOC: heap: %p, size: %u", heap, size);
os_printf("HEAP.ALLOC: heap: %p, size: %u\n", heap, size);
#endif
FINISH:
gct_vm_mutex_unlock(&heap->lock);
finish:
os_mutex_unlock(&heap->lock);
return ret;
}
gc_object_t _gc_realloc_vo_i_heap(void *vheap, void *ptr,
gc_size_t size ALLOC_EXTRA_PARAMETERS)
#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_realloc_vo(void *vheap, void *ptr, gc_size_t size)
#else
gc_object_t
gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size,
const char *file, int line)
#endif
{
gc_heap_t* heap = (gc_heap_t*) vheap;
hmu_t *hmu = NULL, *hmu_old = NULL;
@ -419,29 +425,29 @@ gc_object_t _gc_realloc_vo_i_heap(void *vheap, void *ptr,
return obj_old;
}
gct_vm_mutex_lock(&heap->lock);
os_mutex_lock(&heap->lock);
hmu = alloc_hmu_ex(heap, tot_size);
if (!hmu)
goto FINISH;
goto finish;
g_total_malloc += tot_size;
hmu_set_ut(hmu, HMU_VO);
hmu_unfree_vo(hmu);
#if defined(GC_VERIFY)
hmu_init_prefix_and_suffix(hmu, tot_size, file_name, line_number);
#if BH_ENABLE_GC_VERIFY != 0
hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif
ret = hmu_to_obj(hmu);
#if BH_ENABLE_MEMORY_PROFILING != 0
os_printf("HEAP.ALLOC: heap: %p, size: %u", heap, size);
os_printf("HEAP.ALLOC: heap: %p, size: %u\n", heap, size);
#endif
FINISH:
gct_vm_mutex_unlock(&heap->lock);
finish:
os_mutex_unlock(&heap->lock);
if (ret) {
obj_size = tot_size - HMU_SIZE - OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
@ -450,18 +456,19 @@ FINISH:
obj_size_old = tot_size_old - HMU_SIZE
- OBJ_PREFIX_SIZE - OBJ_SUFFIX_SIZE;
bh_memcpy_s(ret, obj_size, obj_old, obj_size_old);
gc_free_h(vheap, obj_old);
gc_free_vo(vheap, obj_old);
}
}
return ret;
}
/* Do some checking to see if given pointer is a possible valid heap*/
/* Return GC_TRUE if all checking passed*/
/* Return GC_FALSE otherwise*/
int gci_is_heap_valid(gc_heap_t *heap)
/**
* Do some checking to see if given pointer is a possible valid heap
* @return GC_TRUE if all checking passed, GC_FALSE otherwise
*/
int
gci_is_heap_valid(gc_heap_t *heap)
{
if (!heap)
return GC_FALSE;
@ -471,7 +478,14 @@ int gci_is_heap_valid(gc_heap_t *heap)
return GC_TRUE;
}
int gc_free_i_heap(void *vheap, gc_object_t obj ALLOC_EXTRA_PARAMETERS)
#if BH_ENABLE_GC_VERIFY == 0
int
gc_free_vo(void *vheap, gc_object_t obj)
#else
int
gc_free_vo_internal(void *vheap, gc_object_t obj,
const char *file, int line)
#endif
{
gc_heap_t* heap = (gc_heap_t*) vheap;
hmu_t *hmu = NULL;
@ -487,11 +501,11 @@ int gc_free_i_heap(void *vheap, gc_object_t obj ALLOC_EXTRA_PARAMETERS)
hmu = obj_to_hmu(obj);
gct_vm_mutex_lock(&heap->lock);
os_mutex_lock(&heap->lock);
if ((gc_uint8 *) hmu >= heap->base_addr
&& (gc_uint8 *) hmu < heap->base_addr + heap->current_size) {
#ifdef GC_VERIFY
if ((gc_uint8 *)hmu >= heap->base_addr
&& (gc_uint8 *)hmu < heap->base_addr + heap->current_size) {
#if BH_ENABLE_GC_VERIFY != 0
hmu_verify(hmu);
#endif
ut = hmu_get_ut(hmu);
@ -506,11 +520,9 @@ int gc_free_i_heap(void *vheap, gc_object_t obj ALLOC_EXTRA_PARAMETERS)
g_total_free += size;
#if GC_STAT_DATA != 0
heap->total_free_size += size;
#endif
#if BH_ENABLE_MEMORY_PROFILING != 0
os_printf("HEAP.FREE, heap: %p, size: %u\n",heap, size);
os_printf("HEAP.FREE, heap: %p, size: %u\n", heap, size);
#endif
if (!hmu_get_pinuse(hmu)) {
@ -546,54 +558,51 @@ int gc_free_i_heap(void *vheap, gc_object_t obj ALLOC_EXTRA_PARAMETERS)
goto out;
}
out:
gct_vm_mutex_unlock(&heap->lock);
out:
os_mutex_unlock(&heap->lock);
return ret;
}
void gc_dump_heap_stats(gc_heap_t *heap)
void
gc_dump_heap_stats(gc_heap_t *heap)
{
os_printf("heap: %p, heap start: %p\n", heap, heap->base_addr);
os_printf(
"total malloc: totalfree: %u, current: %u, highmark: %u, gc cnt: %u\n",
heap->total_free_size, heap->current_size, heap->highmark_size,
heap->total_gc_count);
os_printf("total free: %u, current: %u, highmark: %u\n",
heap->total_free_size, heap->current_size, heap->highmark_size);
os_printf("g_total_malloc=%lu, g_total_free=%lu, occupied=%lu\n",
g_total_malloc, g_total_free, g_total_malloc - g_total_free);
g_total_malloc, g_total_free, g_total_malloc - g_total_free);
}
#ifdef GC_TEST
void gci_dump(char* buf, gc_heap_t *heap)
void
gci_dump(gc_heap_t *heap)
{
hmu_t *cur = NULL, *end = NULL;
hmu_type_t ut;
gc_size_t size;
int i = 0;
int p;
char inuse;
int mark;
int i = 0, p, mark;
char inuse = 'U';
cur = (hmu_t*)heap->base_addr;
end = (hmu_t*)((char*)heap->base_addr + heap->current_size);
while(cur < end)
{
while(cur < end) {
ut = hmu_get_ut(cur);
size = hmu_get_size(cur);
p = hmu_get_pinuse(cur);
mark = hmu_is_jo_marked (cur);
if(ut == HMU_VO)
inuse = 'V';
else if(ut == HMU_JO)
inuse = hmu_is_jo_marked(cur) ? 'J' : 'j';
else if(ut == HMU_FC)
inuse = 'F';
if (ut == HMU_VO)
inuse = 'V';
else if (ut == HMU_JO)
inuse = hmu_is_jo_marked(cur) ? 'J' : 'j';
else if (ut == HMU_FC)
inuse = 'F';
bh_assert(size > 0);
buf += sprintf(buf, "#%d %08x %x %x %d %c %d\n", i, (char*) cur - (char*) heap->base_addr, ut, p, mark, inuse, hmu_obj_size(size));
os_printf("#%d %08x %x %x %d %c %d\n",
i, (int32)((char*) cur - (char*) heap->base_addr),
ut, p, mark, inuse, (int32)hmu_obj_size(size));
cur = (hmu_t*)((char *)cur + size);
i++;
@ -602,4 +611,3 @@ void gci_dump(char* buf, gc_heap_t *heap)
bh_assert(cur == end);
}
#endif

View File

@ -21,43 +21,8 @@
extern "C" {
#endif
/*Pre-compile configuration can be done here or on Makefiles*/
/*#define GC_EMBEDDED or GC_STANDALONE*/
/*#define GC_DEBUG*/
/*#define GC_TEST // TEST mode is a sub-mode of STANDALONE*/
/* #define GC_ALLOC_TRACE */
/* #define GC_STAT */
#ifndef GC_STAT_DATA
#define GC_STAT_DATA 1
#endif
#define GC_HEAD_PADDING 4
/* Standalone GC is used for testing.*/
#ifndef GC_EMBEDDED
# ifndef GC_STANDALONE
# define GC_STANDALONE
# endif
#endif
#if defined(GC_EMBEDDED) && defined(GC_STANDALONE)
# error "Can not define GC_EMBEDDED and GC_STANDALONE at the same time"
#endif
#if BH_DEBUG != 0
/*instrument mode ignore GC_DEBUG feature, for instrument testing gc_alloc_vo_i_heap only has func_name parameter*/
#if !defined INSTRUMENT_TEST_ENABLED && !defined GC_DEBUG
# define GC_DEBUG
#endif
#endif
#if defined(GC_EMBEDDED) && defined(GC_TEST)
# error "Can not defined GC_EMBEDDED and GC_TEST at the same time"
#endif
typedef void *gc_handle_t;
typedef void *gc_object_t;
#define NULL_REF ((gc_object_t)NULL)
#define GC_SUCCESS (0)
@ -68,72 +33,23 @@ typedef void *gc_object_t;
#define GC_MAX_HEAP_SIZE (256 * BH_KB)
typedef int64 gc_int64;
typedef unsigned int gc_uint32;
typedef signed int gc_int32;
typedef unsigned short gc_uint16;
typedef signed short gc_int16;
typedef unsigned char gc_uint8;
typedef signed char gc_int8;
typedef gc_uint32 gc_size_t;
typedef enum {
MMT_SHARED = 0,
MMT_INSTANCE = 1,
MMT_APPMANAGER = MMT_SHARED,
MMT_VERIFIER = MMT_SHARED,
MMT_JHI = MMT_SHARED,
MMT_LOADER = MMT_SHARED,
MMT_APPLET = MMT_INSTANCE,
MMT_INTERPRETER = MMT_INSTANCE
} gc_mm_t;
#ifdef GC_STAT
#define GC_HEAP_STAT_SIZE (128 / 4)
typedef struct {
int usage;
int usage_block;
int vo_usage;
int jo_usage;
int free;
int free_block;
int vo_free;
int jo_free;
int usage_sizes[GC_HEAP_STAT_SIZE];
int free_sizes[GC_HEAP_STAT_SIZE];
}gc_stat_t;
extern void gc_heap_stat(void* heap, gc_stat_t* gc_stat);
extern void __gc_print_stat(void *heap, int verbose);
#define gc_print_stat __gc_print_stat
#else
#define gc_print_stat(heap, verbose)
#endif
#if GC_STAT_DATA != 0
typedef void * gc_handle_t;
typedef void * gc_object_t;
typedef int64 gc_int64;
typedef uint32 gc_uint32;
typedef int32 gc_int32;
typedef uint16 gc_uint16;
typedef int16 gc_int16;
typedef uint8 gc_uint8;
typedef int8 gc_int8;
typedef uint32 gc_size_t;
typedef enum {
GC_STAT_TOTAL = 0,
GC_STAT_FREE,
GC_STAT_HIGHMARK,
GC_STAT_COUNT,
GC_STAT_TIME,
GC_STAT_MAX_1,
GC_STAT_MAX_2,
GC_STAT_MAX_3,
GC_STAT_MAX
} GC_STAT_INDEX;
#endif
/*////////////// Exported APIs*/
/**
* GC initialization from a buffer
*
@ -142,7 +58,8 @@ typedef enum {
*
* @return gc handle if success, NULL otherwise
*/
extern gc_handle_t gc_init_with_pool(char *buf, gc_size_t buf_size);
gc_handle_t
gc_init_with_pool(char *buf, gc_size_t buf_size);
/**
* Destroy heap which is initialized from a buffer
@ -152,9 +69,8 @@ extern gc_handle_t gc_init_with_pool(char *buf, gc_size_t buf_size);
* @return GC_SUCCESS if success
* GC_ERROR for bad parameters or failed system resource freeing.
*/
extern int gc_destroy_with_pool(gc_handle_t handle);
#if GC_STAT_DATA != 0
int
gc_destroy_with_pool(gc_handle_t handle);
/**
* Get Heap Stats
@ -163,166 +79,44 @@ extern int gc_destroy_with_pool(gc_handle_t handle);
* @param size [in] the size of stats
* @param mmt [in] type of heap, MMT_SHARED or MMT_INSTANCE
*/
extern void* gc_heap_stats(void *heap, uint32* stats, int size, gc_mm_t mmt);
void *
gc_heap_stats(void *heap, uint32* stats, int size);
/**
* Set GC threshold factor
*
* @param heap [in] the heap to set
* @param factor [in] the threshold size is free_size * factor / 1000
*
* @return GC_SUCCESS if success.
*/
extern int gc_set_threshold_factor(void *heap, unsigned int factor);
#if BH_ENABLE_GC_VERIFY == 0
#endif
gc_object_t
gc_alloc_vo(void *heap, gc_size_t size);
/*////// Allocate heap object*/
gc_object_t
gc_realloc_vo(void *heap, void *ptr, gc_size_t size);
/* There are two versions of allocate functions. The functions with _i suffix should be only used*/
/* internally. Functions without _i suffix are just wrappers with the corresponded functions with*/
/* _i suffix. Allocation operation code position are record under DEBUG model for debugging.*/
#ifdef GC_DEBUG
# define ALLOC_EXTRA_PARAMETERS ,const char*file_name,int line_number
# define ALLOC_EXTRA_ARGUMENTS , __FILE__, __LINE__
# define ALLOC_PASSDOWN_EXTRA_ARGUMENTS , file_name, line_number
# define gc_alloc_vo_h(heap, size) gc_alloc_vo_i_heap(heap, size, __FILE__, __LINE__)
# define gc_realloc_vo_h(heap, ptr, size) gc_realloc_vo_i_heap(heap, ptr, size, __FILE__, __LINE__)
# define gc_free_h(heap, obj) gc_free_i_heap(heap, obj, __FILE__, __LINE__)
#else
# define ALLOC_EXTRA_PARAMETERS
# define ALLOC_EXTRA_ARGUMENTS
# define ALLOC_PASSDOWN_EXTRA_ARGUMENTS
# define gc_alloc_vo_h gc_alloc_vo_i_heap
# define gc_realloc_vo_h gc_realloc_vo_i_heap
# define gc_free_h gc_free_i_heap
#endif
int
gc_free_vo(void *heap, gc_object_t obj);
/**
* Invoke a GC
*
* @param heap
*
* @return GC_SUCCESS if success
*/
extern int gci_gc_heap(void *heap);
#else /* else of BH_ENABLE_GC_VERIFY */
/**
* Allocate VM Object in specific heap.
*
* @param heap heap to allocate.
* @param size bytes to allocate.
*
* @return pointer to VM object allocated
* NULL if failed.
*/
extern gc_object_t _gc_alloc_vo_i_heap(void *heap,
gc_size_t size ALLOC_EXTRA_PARAMETERS);
extern gc_object_t _gc_realloc_vo_i_heap(void *heap, void *ptr,
gc_size_t size ALLOC_EXTRA_PARAMETERS);
extern gc_object_t _gc_alloc_jo_i_heap(void *heap,
gc_size_t size ALLOC_EXTRA_PARAMETERS);
#ifdef INSTRUMENT_TEST_ENABLED
extern gc_object_t gc_alloc_vo_i_heap_instr(void *heap, gc_size_t size, const char* func_name );
extern gc_object_t gc_realloc_vo_i_heap_instr(void *heap, void *ptr, gc_size_t size,
const char* func_name );
extern gc_object_t gc_alloc_jo_i_heap_instr(void *heap, gc_size_t size, const char* func_name);
# define gc_alloc_vo_i_heap(heap, size) gc_alloc_vo_i_heap_instr(heap, size, __FUNCTION__)
# define gc_realloc_vo_i_heap(heap, ptr, size) gc_realloc_vo_i_heap_instr(heap, ptr, size, __FUNCTION__)
# define gc_alloc_jo_i_heap(heap, size) gc_alloc_jo_i_heap_instr(heap, size, __FUNCTION__)
#else
# define gc_alloc_vo_i_heap _gc_alloc_vo_i_heap
# define gc_realloc_vo_i_heap _gc_realloc_vo_i_heap
# define gc_alloc_jo_i_heap _gc_alloc_jo_i_heap
#endif
gc_object_t
gc_alloc_vo_internal(void *heap, gc_size_t size,
const char *file, int line);
/**
* Allocate Java object in specific heap.
*
* @param heap heap to allocate.
* @param size bytes to allocate.
*
* @return pointer to Java object allocated
* NULL if failed.
*/
extern gc_object_t _gc_alloc_jo_i_heap(void *heap,
gc_size_t size ALLOC_EXTRA_PARAMETERS);
gc_object_t
gc_realloc_vo_internal(void *heap, void *ptr, gc_size_t size,
const char *file, int line);
/**
* Free VM object
*
* @param heap heap to free.
* @param obj pointer to object need free.
*
* @return GC_SUCCESS if success
*/
extern int gc_free_i_heap(void *heap, gc_object_t obj ALLOC_EXTRA_PARAMETERS);
int
gc_free_vo_internal(void *heap, gc_object_t obj,
const char *file, int line);
/**
* Add ref to rootset of gc for current instance.
*
* @param obj pointer to real load of a valid Java object managed by gc for current instance.
*
* @return GC_SUCCESS if success.
* GC_ERROR for invalid parameters.
*/
extern int gc_add_root(void* heap, gc_object_t obj);
#define gc_alloc_vo(heap, size) \
gc_alloc_vo_internal(heap, size, __FILE__, __LINE__)
/*////////////// Imported APIs which should be implemented in other components*/
#define gc_realloc_vo(heap, ptr, size) \
gc_realloc_vo_internal(heap, ptr, size, __FILE__, __LINE__)
/*////// Java object layout related APIs*/
#define gc_free_vo(heap, obj) \
gc_free_vo_internal(heap, obj, __FILE__, __LINE__)
/**
* Get Java object size from corresponding VM module
*
* @param obj pointer to the real load of a Java object.
*
* @return size of java object.
*/
extern gc_size_t vm_get_java_object_size(gc_object_t obj);
/**
* Get reference list of this object
*
* @param obj [in] pointer to java object.
* @param is_compact_mode [in] indicate the java object mode. GC_TRUE or GC_FALSE.
* @param ref_num [out] the size of ref_list.
* @param ref_list [out] if is_compact_mode is GC_FALSE, this parameter will be set to a list of offset.
* @param ref_start_offset [out] If is_compact_mode is GC_TRUE, this parameter will be set to the start offset of the references in this object.
*
* @return GC_SUCCESS if success.
* GC_ERROR when error occurs.
*/
extern int vm_get_java_object_ref_list(gc_object_t obj, int *is_compact_mode,
gc_size_t *ref_num, gc_uint16 **ref_list, gc_uint32 *ref_start_offset);
/**
* Get gc handle for current instance
*
*
* @return instance heap handle.
*/
extern gc_handle_t app_manager_get_cur_applet_heap(void);
/**
* Begin current instance heap rootset enumeration
*
*
* @return GC_SUCCESS if success.
* GC_ERROR when error occurs.
*/
extern int vm_begin_rootset_enumeration(void *heap);
#ifdef _INSTRUMENT_TEST_ENABLED
extern int vm_begin_rootset_enumeration_instr(void *heap, const char*func_name);
#define vm_begin_rootset_enumeration(heap) vm_begin_rootset_enumeration_instr(heap, __FUNCTION__)
#else
#define vm_begin_rootset_enumeration _vm_begin_rootset_enumeration
#endif /* INSTRUMENT_TEST_ENABLED*/
#ifndef offsetof
#define offsetof(Type, field) ((size_t)(&((Type *)0)->field))
#endif
#endif /* end of BH_ENABLE_GC_VERIFY */
#ifdef __cplusplus
}

View File

@ -13,54 +13,53 @@ extern "C" {
#include "bh_platform.h"
#include "ems_gc.h"
/* basic block managed by EMS gc is the so-called HMU (heap memory unit)*/
typedef enum _hmu_type_enum
{
/* HMU (heap memory unit) basic block type */
typedef enum hmu_type_enum {
HMU_TYPE_MIN = 0,
HMU_TYPE_MAX = 3,
HMU_JO = 3,
HMU_VO = 2,
HMU_FC = 1,
HMU_FM = 0
}hmu_type_t;
} hmu_type_t;
typedef struct _hmu_struct
{
typedef struct hmu_struct {
gc_uint32 header;
}hmu_t;
} hmu_t;
#if defined(GC_VERIFY)
#if BH_ENABLE_GC_VERIFY != 0
#define GC_OBJECT_PREFIX_PADDING_CNT 3
#define GC_OBJECT_SUFFIX_PADDING_CNT 4
#define GC_OBJECT_PADDING_VALUE (0x12345678)
typedef struct _gc_object_prefix
{
typedef struct gc_object_prefix {
const char *file_name;
gc_int32 line_no;
gc_int32 size;
gc_uint32 padding[GC_OBJECT_PREFIX_PADDING_CNT];
}gc_object_prefix_t;
} gc_object_prefix_t;
typedef struct gc_object_suffix {
gc_uint32 padding[GC_OBJECT_SUFFIX_PADDING_CNT];
} gc_object_suffix_t;
#define OBJ_PREFIX_SIZE (sizeof(gc_object_prefix_t))
typedef struct _gc_object_suffix
{
gc_uint32 padding[GC_OBJECT_SUFFIX_PADDING_CNT];
}gc_object_suffix_t;
#define OBJ_SUFFIX_SIZE (sizeof(gc_object_suffix_t))
extern void hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size, const char *file_name, int line_no);
extern void hmu_verify(hmu_t *hmu);
void
hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size,
const char *file_name, int line_no);
void
hmu_verify(hmu_t *hmu);
#define SKIP_OBJ_PREFIX(p) ((void*)((gc_uint8*)(p) + OBJ_PREFIX_SIZE))
#define SKIP_OBJ_SUFFIX(p) ((void*)((gc_uint8*)(p) + OBJ_SUFFIX_SIZE))
#define OBJ_EXTRA_SIZE (HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE)
#else
#else /* else of BH_ENABLE_GC_VERIFY */
#define OBJ_PREFIX_SIZE 0
#define OBJ_SUFFIX_SIZE 0
@ -70,7 +69,7 @@ extern void hmu_verify(hmu_t *hmu);
#define OBJ_EXTRA_SIZE (HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE)
#endif /* GC_DEBUG*/
#endif /* end of BH_ENABLE_GC_VERIFY */
#define hmu_obj_size(s) ((s)-OBJ_EXTRA_SIZE)
@ -79,7 +78,9 @@ extern void hmu_verify(hmu_t *hmu);
#define GC_SMALLEST_SIZE GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE + 8)
#define GC_GET_REAL_SIZE(x) GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE + (((x) > 8) ? (x): 8))
/*////// functions for bit operation*/
/**
* hmu bit operation
*/
#define SETBIT(v, offset) (v) |= (1 << (offset))
#define GETBIT(v, offset) ((v) & (1 << (offset)) ? 1 : 0)
@ -92,7 +93,9 @@ extern void hmu_verify(hmu_t *hmu);
#define CLRBITS(v, offset, size) (v) &= ~(((1 << size) - 1) << offset)
#define GETBITS(v, offset, size) (((v) & ((uint32)(((1 << size) - 1) << offset))) >> offset)
/*////// gc object layout definition*/
/**
* gc object layout definition
*/
#define HMU_SIZE (sizeof(hmu_t))
@ -121,6 +124,12 @@ extern void hmu_verify(hmu_t *hmu);
#define hmu_unmark_jo(hmu) CLRBIT ((hmu)->header, HMU_JO_MB_OFFSET)
#define hmu_is_jo_marked(hmu) GETBIT ((hmu)->header, HMU_JO_MB_OFFSET)
/**
* The hmu size is divisible by 8, its lowest 3 bits are 0, so we only
* store its higher bits of bit [29..3], and bit [2..0] are not stored.
* After that, the maximal heap size can be enlarged from (1<<27) = 128MB
* to (1<<27) * 8 = 1GB.
*/
#define HMU_SIZE_SIZE 27
#define HMU_SIZE_OFFSET 0
@ -129,10 +138,12 @@ extern void hmu_verify(hmu_t *hmu);
#define hmu_is_vo_freed(hmu) GETBIT ((hmu)->header, HMU_VO_FB_OFFSET)
#define hmu_unfree_vo(hmu) CLRBIT ((hmu)->header, HMU_VO_FB_OFFSET)
#define hmu_get_size(hmu) GETBITS ((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE)
#define hmu_set_size(hmu, size) SETBITS ((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE, size)
#define hmu_get_size(hmu) (GETBITS ((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE) << 3)
#define hmu_set_size(hmu, size) SETBITS ((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE, ((size) >> 3))
/*////// HMU free chunk management*/
/**
* HMU free chunk management
*/
#define HMU_NORMAL_NODE_CNT 32
#define HMU_FC_NORMAL_MAX_SIZE ((HMU_NORMAL_NODE_CNT - 1) << 3)
@ -141,51 +152,43 @@ extern void hmu_verify(hmu_t *hmu);
#error "Too small GC_MAX_HEAP_SIZE"
#endif
typedef struct _hmu_normal_node
{
typedef struct hmu_normal_node {
hmu_t hmu_header;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
struct {
uint32 parts[2];
} next;
#else
struct _hmu_normal_node *next;
#endif
gc_int32 next_offset;
} hmu_normal_node_t;
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
static inline hmu_normal_node_t *
get_hmu_normal_node_next(hmu_normal_node_t *node)
{
hmu_normal_node_t *next;
bh_memcpy_s(&next, (uint32)sizeof(hmu_normal_node_t *),
&node->next.parts, (uint32)sizeof(uint32) * 2);
return next;
return node->next_offset
? (hmu_normal_node_t *)((uint8*)node + node->next_offset)
: NULL;
}
static inline void
set_hmu_normal_node_next(hmu_normal_node_t *node, hmu_normal_node_t *next)
{
bh_memcpy_s(&node->next.parts, (uint32)sizeof(uint32) * 2,
&next, (uint32)sizeof(hmu_normal_node_t *));
if (next) {
bh_assert((uint8*)next - (uint8*)node < INT32_MAX);
node->next_offset = (gc_int32)(intptr_t)
((uint8*)next - (uint8*)node);
}
else {
node->next_offset = 0;
}
}
#else
#define get_hmu_normal_node_next(node) (node)->next
#define set_hmu_normal_node_next(node, _next) (node)->next = _next
#endif
typedef struct _hmu_tree_node
{
typedef struct hmu_tree_node {
hmu_t hmu_header;
gc_size_t size;
struct _hmu_tree_node *left;
struct _hmu_tree_node *right;
struct _hmu_tree_node *parent;
struct hmu_tree_node *left;
struct hmu_tree_node *right;
struct hmu_tree_node *parent;
} hmu_tree_node_t;
typedef struct _gc_heap_struct
{
gc_handle_t heap_id; /* for double checking*/
typedef struct gc_heap_struct {
/* for double checking*/
gc_handle_t heap_id;
gc_uint8 *base_addr;
gc_size_t current_size;
@ -198,95 +201,32 @@ typedef struct _gc_heap_struct
/* order in kfc_tree is: size[left] <= size[cur] < size[right]*/
hmu_tree_node_t kfc_tree_root;
/* for rootset enumeration of private heap*/
void *root_set;
/* whether the fast mode of marking process that requires
additional memory fails. When the fast mode fails, the
marking process can still be done in the slow mode, which
doesn't need additional memory (by walking through all
blocks and marking successors of marked nodes until no new
node is marked). TODO: slow mode is not implemented. */
unsigned is_fast_marking_failed : 1;
#if GC_STAT_DATA != 0
gc_size_t highmark_size;
gc_size_t init_size;
gc_size_t total_gc_count;
gc_size_t highmark_size;
gc_size_t total_free_size;
gc_size_t gc_threshold;
gc_size_t gc_threshold_factor;
gc_int64 total_gc_time;
#endif
} gc_heap_t;
/*////// MISC internal used APIs*/
/**
* MISC internal used APIs
*/
extern void gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size);
extern int gci_is_heap_valid(gc_heap_t *heap);
void
gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size);
#ifdef GC_DEBUG
extern void gci_verify_heap(gc_heap_t *heap);
extern void gci_dump(char* buf, gc_heap_t *heap);
#endif
int
gci_is_heap_valid(gc_heap_t *heap);
#if GC_STAT_DATA != 0
/**
* Verify heap integrity
*/
void
gci_verify_heap(gc_heap_t *heap);
/* the default GC threshold size is free_size * GC_DEFAULT_THRESHOLD_FACTOR / 1000 */
#define GC_DEFAULT_THRESHOLD_FACTOR 400
static inline void gc_update_threshold(gc_heap_t *heap)
{
heap->gc_threshold = heap->total_free_size * heap->gc_threshold_factor / 1000;
}
#endif
/*////// MISC data structures*/
#define MARK_NODE_OBJ_CNT 256
/* mark node is used for gc marker*/
typedef struct _mark_node_struct
{
/* number of to-expand objects can be saved in this node*/
gc_size_t cnt;
/* the first unused index*/
int idx;
/* next node on the node list*/
struct _mark_node_struct *next;
/* the actual to-expand objects list*/
gc_object_t set[MARK_NODE_OBJ_CNT];
}mark_node_t;
/*////// Imported APIs wrappers under TEST mode*/
#ifdef GC_TEST
extern int (*gct_vm_get_java_object_ref_list)(
gc_object_t obj,
int *is_compact_mode, /* can be set to GC_TRUE, or GC_FALSE */
gc_size_t *ref_num,
gc_uint16 **ref_list,
gc_uint32 *ref_start_offset);
extern int (*gct_vm_mutex_init)(korp_mutex *mutex);
extern int (*gct_vm_mutex_destroy)(korp_mutex *mutex);
extern int (*gct_vm_mutex_lock)(korp_mutex *mutex);
extern int (*gct_vm_mutex_unlock)(korp_mutex *mutex);
extern gc_handle_t (*gct_vm_get_gc_handle_for_current_instance)(void);
extern int (*gct_vm_begin_rootset_enumeration)(void* heap);
extern int (*gct_vm_gc_finished)(void);
#else
#define gct_vm_get_java_object_ref_list bh_get_java_object_ref_list
#define gct_vm_mutex_init os_mutex_init
#define gct_vm_mutex_destroy os_mutex_destroy
#define gct_vm_mutex_lock os_mutex_lock
#define gct_vm_mutex_unlock os_mutex_unlock
#define gct_vm_get_gc_handle_for_current_instance app_manager_get_cur_applet_heap
#define gct_vm_begin_rootset_enumeration vm_begin_rootset_enumeration
#define gct_vm_gc_finished jeff_runtime_gc_finished
#endif
/**
* Dump heap nodes
*/
void
gci_dump(gc_heap_t *heap);
#ifdef __cplusplus
}

View File

@ -5,13 +5,19 @@
#include "ems_gc_internal.h"
#if defined(GC_VERIFY)
/* Set default value to prefix and suffix*/
#if BH_ENABLE_GC_VERIFY != 0
/* @hmu should not be NULL and it should have been correctly initialized (except for prefix and suffix part)*/
/* @tot_size is offered here because hmu_get_size can not be used till now. @tot_size should not be smaller than OBJ_EXTRA_SIZE.*/
/* For VO, @tot_size should be equal to object total size.*/
void hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size, const char *file_name, int line_no)
/**
* Set default value to prefix and suffix
* @param hmu should not be NULL and should have been correctly initialized
* (except prefix and suffix part)
* @param tot_size is offered here because hmu_get_size can not be used till now.
* tot_size should not be smaller than OBJ_EXTRA_SIZE.
* For VO, tot_size should be equal to object total size.
*/
void
hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size,
const char *file_name, int line_no)
{
gc_object_prefix_t *prefix = NULL;
gc_object_suffix_t *suffix = NULL;
@ -28,17 +34,18 @@ void hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size, const char *file
prefix->file_name = file_name;
prefix->line_no = line_no;
prefix->size = tot_size;
for(i = 0;i < GC_OBJECT_PREFIX_PADDING_CNT;i++)
{
for(i = 0;i < GC_OBJECT_PREFIX_PADDING_CNT;i++) {
prefix->padding[i] = GC_OBJECT_PADDING_VALUE;
}
for(i = 0;i < GC_OBJECT_SUFFIX_PADDING_CNT;i++)
{
for(i = 0;i < GC_OBJECT_SUFFIX_PADDING_CNT;i++) {
suffix->padding[i] = GC_OBJECT_PADDING_VALUE;
}
}
void hmu_verify(hmu_t *hmu)
void
hmu_verify(hmu_t *hmu)
{
gc_object_prefix_t *prefix = NULL;
gc_object_suffix_t *suffix = NULL;
@ -83,5 +90,6 @@ void hmu_verify(hmu_t *hmu)
bh_assert(is_padding_ok);
}
}
#endif
#endif /* end of BH_ENABLE_GC_VERIFY */

View File

@ -5,37 +5,8 @@
#include "ems_gc_internal.h"
#define HEAP_INC_FACTOR 1
/**
 * Check if the current platform is compatible with the current GC design,
 * i.e. that each basic gc_xxx type has exactly the byte size the GC assumes.
 *
 * @return GC_ERROR if any type-size check fails;
 *         GC_SUCCESS otherwise.
 */
int gci_check_platform()
{
/* Compare an expected size (x) with an actual sizeof (y); on mismatch,
   report the failing source location and bail out with GC_ERROR. */
#define CHECK(x, y) do { \
if((x) != (y)) { \
os_printf("Platform checking failed on LINE %d at FILE %s.",\
__LINE__, __FILE__); \
return GC_ERROR; \
} \
} while(0)
CHECK(8, sizeof(gc_int64));
CHECK(4, sizeof(gc_uint32));
CHECK(4, sizeof(gc_int32));
CHECK(2, sizeof(gc_uint16));
CHECK(2, sizeof(gc_int16));
CHECK(1, sizeof(gc_int8));
CHECK(1, sizeof(gc_uint8));
CHECK(4, sizeof(gc_size_t));
/* pointer size deliberately not checked (may be 4 or 8) */
/*CHECK(4, sizeof(void *));*/
return GC_SUCCESS;
}
gc_handle_t gc_init_with_pool(char *buf, gc_size_t buf_size)
gc_handle_t
gc_init_with_pool(char *buf, gc_size_t buf_size)
{
char *buf_end = buf + buf_size;
char *buf_aligned = (char*) (((uintptr_t) buf + 7) & (uintptr_t)~7);
@ -46,14 +17,8 @@ gc_handle_t gc_init_with_pool(char *buf, gc_size_t buf_size)
hmu_tree_node_t *root = NULL, *q = NULL;
int i = 0, ret;
/* check system compatibility*/
if (gci_check_platform() == GC_ERROR) {
os_printf("Check platform compatibility failed");
return NULL;
}
if (buf_size < 1024) {
os_printf("[GC_ERROR]heap_init_size(%d) < 1024", buf_size);
os_printf("[GC_ERROR]heap_init_size(%d) < 1024\n", buf_size);
return NULL;
}
@ -63,30 +28,20 @@ gc_handle_t gc_init_with_pool(char *buf, gc_size_t buf_size)
memset(heap, 0, sizeof *heap);
memset(base_addr, 0, heap_max_size);
ret = gct_vm_mutex_init(&heap->lock);
ret = os_mutex_init(&heap->lock);
if (ret != BHT_OK) {
os_printf("[GC_ERROR]failed to init lock ");
os_printf("[GC_ERROR]failed to init lock\n");
return NULL;
}
#ifdef BH_FOOTPRINT
os_printf("\nINIT HEAP 0x%08x %d\n", base_addr, heap_max_size);
#endif
/* init all data structures*/
heap->max_size = heap_max_size;
heap->current_size = heap_max_size;
heap->base_addr = (gc_uint8*) base_addr;
heap->heap_id = (gc_handle_t) heap;
heap->base_addr = (gc_uint8*)base_addr;
heap->heap_id = (gc_handle_t)heap;
#if GC_STAT_DATA != 0
heap->total_free_size = heap->current_size;
heap->highmark_size = 0;
heap->total_gc_count = 0;
heap->total_gc_time = 0;
heap->gc_threshold_factor = GC_DEFAULT_THRESHOLD_FACTOR;
gc_update_threshold(heap);
#endif
for (i = 0; i < HMU_NORMAL_NODE_CNT; i++) {
/* make normal node look like a FC*/
@ -112,30 +67,29 @@ gc_handle_t gc_init_with_pool(char *buf, gc_size_t buf_size)
q->parent = root;
q->size = heap->current_size;
bh_assert(
root->size <= HMU_FC_NORMAL_MAX_SIZE
&& HMU_FC_NORMAL_MAX_SIZE < q->size); /*@NOTIFY*/
bh_assert(root->size <= HMU_FC_NORMAL_MAX_SIZE
&& HMU_FC_NORMAL_MAX_SIZE < q->size);
#if BH_ENABLE_MEMORY_PROFILING != 0
os_printf("heap is successfully initialized with max_size=%u.",
os_printf("heap is successfully initialized with max_size=%u.\n",
heap_max_size);
#endif
return heap;
}
int gc_destroy_with_pool(gc_handle_t handle)
int
gc_destroy_with_pool(gc_handle_t handle)
{
gc_heap_t *heap = (gc_heap_t *) handle;
gct_vm_mutex_destroy(&heap->lock);
os_mutex_destroy(&heap->lock);
memset(heap->base_addr, 0, heap->max_size);
memset(heap, 0, sizeof(gc_heap_t));
return GC_SUCCESS;
}
#if defined(GC_VERIFY)
/* Verify heap integrity*/
/* @heap should not be NULL and it should be a valid heap*/
void gci_verify_heap(gc_heap_t *heap)
#if BH_ENABLE_GC_VERIFY != 0
void
gci_verify_heap(gc_heap_t *heap)
{
hmu_t *cur = NULL, *end = NULL;
@ -151,9 +105,9 @@ void gci_verify_heap(gc_heap_t *heap)
}
#endif
void* gc_heap_stats(void *heap_arg, uint32* stats, int size, gc_mm_t mmt)
void *
gc_heap_stats(void *heap_arg, uint32* stats, int size)
{
(void) mmt;
int i;
gc_heap_t *heap = (gc_heap_t *) heap_arg;
@ -168,15 +122,10 @@ void* gc_heap_stats(void *heap_arg, uint32* stats, int size, gc_mm_t mmt)
case GC_STAT_HIGHMARK:
stats[i] = heap->highmark_size;
break;
case GC_STAT_COUNT:
stats[i] = heap->total_gc_count;
break;
case GC_STAT_TIME:
stats[i] = (uint32)heap->total_gc_time;
break;
default:
break;
}
}
return heap;
}

View File

@ -22,19 +22,19 @@ void mem_allocator_destroy(mem_allocator_t allocator)
void *
mem_allocator_malloc(mem_allocator_t allocator, uint32_t size)
{
return gc_alloc_vo_h((gc_handle_t) allocator, size);
return gc_alloc_vo((gc_handle_t) allocator, size);
}
void *
mem_allocator_realloc(mem_allocator_t allocator, void *ptr, uint32_t size)
{
return gc_realloc_vo_h((gc_handle_t) allocator, ptr, size);
return gc_realloc_vo((gc_handle_t) allocator, ptr, size);
}
void mem_allocator_free(mem_allocator_t allocator, void *ptr)
{
if (ptr)
gc_free_h((gc_handle_t) allocator, ptr);
gc_free_vo((gc_handle_t) allocator, ptr);
}
#else /* else of DEFAULT_MEM_ALLOCATOR */

View File

@ -3,14 +3,14 @@ WAMR porting guide
=========================
This document describes how to port WAMR to a new platform "**super-os**"
This document describes how to port WAMR to a new platform "**new-os**"
# Step 1: Implement platform API layer
-------------------------
Firstly create the folder **`core/shared/platform/super-os`** for platform API layer implementations. In the folder you just created, you must provide the following files:
Firstly create the folder **`core/shared/platform/new-os`** for platform API layer implementations. In the folder you just created, you must provide the following files:
- `platform_internal.h`: It can be used for any platform specific definitions such as macros, data types and internal APIs.
@ -42,11 +42,11 @@ Some platforms such as ZephyrOS don't provide math functions e.g. sqrt, fabs and
# Step 2: Create the mini product for the platform
-------------------------
You can build a mini WAMR product which is only the vmcore for your platform. Normally you need to implement the main function which loads a WASM file and runs it with the WASM runtime. You don't have to do this step if there is no such need for your platform.
You can build a mini WAMR product which is only the vmcore for your platform. Normally you need to implement the main function which loads a WASM file and runs it with the WASM runtime. You don't have to do this step if there is no mini-product need for your platform porting.
Firstly create folder **product-mini/platforms/super-os** for the platform mini product build, then refer to the linux platform mini-product for creating the CMakeList.txt and the C implementations.
Firstly create folder **product-mini/platforms/new-os** for the platform mini product build, then refer to the linux platform mini-product for creating the CMakeList.txt and the C implementations.