Refactor ems memory allocator (#1077)

Restore some code modified by the last patch and fix some issues
Wenyong Huang 2022-04-11 17:43:13 +08:00 committed by GitHub
parent 68cdf30476
commit e2be9bbf20
15 changed files with 1040 additions and 1070 deletions


@@ -246,12 +246,10 @@ else ()
message (" Reference types disabled")
endif ()
if (WAMR_BUILD_GC EQUAL 1)
add_definitions (-DWASM_ENABLE_GC=1)
if (WAMR_TEST_GC EQUAL 1)
add_definitions (-DWASM_ENABLE_GC=1 -DWASM_GC_MANUALLY=1 -DGC_IN_EVERY_ALLOCATION=1)
message(" GC testing enabled")
endif()
message (" GC enabled")
if (WAMR_TEST_GC EQUAL 1)
message(" GC testing enabled")
endif()
endif ()
if (DEFINED WAMR_BH_VPRINTF)
add_definitions (-DBH_VPRINTF=${WAMR_BH_VPRINTF})


@@ -3,13 +3,10 @@
set (IWASM_GC_DIR ${CMAKE_CURRENT_LIST_DIR})
if (WAMR_BUILD_GC EQUAL 1)
add_definitions (-DWASM_ENABLE_GC=1)
if (WAMR_TEST_GC EQUAL 1)
add_definitions (-DWASM_ENABLE_GC=1 -DWASM_GC_MANUALLY=1 -DGC_IN_EVERY_ALLOCATION=1)
message(" GC testing enabled")
endif()
message (" GC enabled")
add_definitions (-DWASM_ENABLE_GC=1)
if (WAMR_TEST_GC EQUAL 1)
add_definitions (-DGC_MANUALLY=1 -DGC_IN_EVERY_ALLOCATION=1)
endif ()
include_directories (${IWASM_GC_DIR})


@@ -169,39 +169,27 @@ wasm_runtime_free(void *ptr)
}
#if WASM_ENABLE_GC != 0
static inline void *
wasm_gc_malloc_internal(void *heap_handle, unsigned int size)
{
return mem_allocator_malloc_with_gc(heap_handle, size);
}
void *
wasm_gc_malloc(void *heap_handle, unsigned int size)
wasm_runtime_gc_malloc(void *heap_handle, unsigned int size)
{
if (size == 0) {
LOG_WARNING("warning: wasm_runtime_malloc with size zero\n");
LOG_WARNING("warning: wasm_runtime_gc_malloc with size zero\n");
/* At least alloc 1 byte to avoid malloc failure */
size = 1;
}
return wasm_gc_malloc_internal(heap_handle, size);
return mem_allocator_malloc_with_gc((mem_allocator_t)heap_handle, size);
}
#if WASM_GC_MANUALLY != 0
static inline void
wasm_gc_free_internal(void *ptr)
void
wasm_runtime_gc_free(void *heap_handle, void *ptr)
{
if (!ptr) {
LOG_WARNING("warning: wasm_gc_free with NULL pointer\n");
LOG_WARNING("warning: wasm_runtime_gc_free with NULL pointer\n");
return;
}
mem_allocator_free_with_gc(pool_allocator, ptr);
mem_allocator_free_with_gc((mem_allocator_t)heap_handle, ptr);
}
void
wasm_gc_free(void *ptr)
{
wasm_runtime_free_internal(ptr);
}
#endif // WASM_GC_MANUALLY
#endif // WASM_ENABLE_GC
#endif /* end of WASM_GC_MANUALLY != 0 */
#endif /* end of WASM_ENABLE_GC != 0 */
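
For orientation, a minimal sketch of the renamed pair in use. Only wasm_runtime_gc_malloc and wasm_runtime_gc_free come from this commit; how the heap handle is created is left abstract here:

/* Sketch under WASM_GC_MANUALLY != 0; heap_handle is assumed to be a
   valid allocator handle created elsewhere (not shown in this diff). */
static void
gc_malloc_free_demo(void *heap_handle)
{
    void *obj = wasm_runtime_gc_malloc(heap_handle, 32);
    if (obj) {
        /* ... initialize the 32-byte GC object ... */
        wasm_runtime_gc_free(heap_handle, obj); /* manual-GC mode only */
    }
}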


@@ -219,28 +219,12 @@ wasm_runtime_malloc(unsigned int size);
WASM_RUNTIME_API_EXTERN void *
wasm_runtime_realloc(void *ptr, unsigned int size);
/*
/**
* Free memory to runtime memory environment.
*/
WASM_RUNTIME_API_EXTERN void
wasm_runtime_free(void *ptr);
/**
* Allocate memory from object heap.
*
* @param size bytes need to allocate
*
* @return the pointer to memory allocated
*/
WASM_RUNTIME_API_EXTERN void *
wasm_gc_malloc(void *heap_handle, unsigned int size);
/*
* Free memory to gc memory environment. (internal test only)
*/
WASM_RUNTIME_API_EXTERN void
wasm_gc_free(void *ptr);
/**
* Get the package type of a buffer.
*


@@ -2741,6 +2741,15 @@ wasm_interp_dump_call_stack(struct WASMExecEnv *exec_env)
#endif /* end of WASM_ENABLE_DUMP_CALL_STACK */
#if WASM_ENABLE_GC != 0
bool
wasm_runtime_get_wasm_object_ref_list(WASMObjectRef obj,
bool *p_is_compact_mode,
uint32 *p_ref_num, uint16 **p_ref_list,
uint32 *p_ref_start_offset)
{
return false;
}
bool
wasm_runtime_traverse_gc_rootset(WASMExecEnv *exec_env, void *heap)
{
@@ -2759,7 +2768,7 @@ wasm_runtime_gc_prepare(WASMExecEnv *exec_env)
}
void
wasm_runtime_gc_finished(WASMExecEnv *exec_env)
wasm_runtime_gc_finalize(WASMExecEnv *exec_env)
{
#if 0
wasm_thread_resume_all();

File diff suppressed because it is too large


@@ -3,203 +3,37 @@
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "ems_gc_internal.h"
#include "ems_gc.h"
extern int
_vm_begin_rootset_enumeration(void *heap);
#define GB (1 << 30UL)
#ifdef GC_STAT
void
gc_heap_stat(void *heap_ptr, gc_stat_t *stat)
{
hmu_t *cur = NULL, *end = NULL, *last = NULL;
hmu_type_t ut;
gc_size_t size;
gc_heap_t *heap = (gc_heap_t *)heap_ptr;
#define MARK_NODE_OBJ_CNT 256
memset(stat, 0, sizeof(gc_stat_t));
cur = (hmu_t *)heap->base_addr;
end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
#if WASM_ENABLE_GC != 0
while (cur < end) {
ut = hmu_get_ut(cur);
size = hmu_get_size(cur);
bh_assert(size > 0);
/* mark node is used for gc marker*/
typedef struct mark_node_struct {
/* number of to-expand objects can be saved in this node */
gc_size_t cnt;
if (ut == HMU_FC || ut == HMU_FM
|| (ut == HMU_VO && hmu_is_vo_freed(cur))
|| (ut == HMU_WO && !hmu_is_wo_marked(cur))) {
if (ut == HMU_VO)
stat->vo_free += size;
if (ut == HMU_WO)
stat->wo_free += size;
stat->free += size;
stat->free_block++;
if (size / sizeof(int) < GC_HEAP_STAT_SIZE - 1)
stat->free_sizes[size / sizeof(int)] += 1;
else
stat->free_sizes[GC_HEAP_STAT_SIZE - 1] += 1;
}
else {
if (ut == HMU_VO)
stat->vo_usage += size;
if (ut == HMU_WO)
stat->wo_usage += size;
stat->usage += size;
stat->usage_block++;
if (size / sizeof(int) < GC_HEAP_STAT_SIZE - 1)
stat->usage_sizes[size / sizeof(int)] += 1;
else
stat->usage_sizes[GC_HEAP_STAT_SIZE - 1] += 1;
}
/* the first unused index */
uint32 idx;
cur = (hmu_t *)((char *)cur + size);
}
}
/* next node on the node list */
struct mark_node_struct *next;
void
__gc_print_stat(void *heap_ptr, int verbose)
{
gc_stat_t stat;
int i;
/* the actual to-expand objects list */
gc_object_t set[MARK_NODE_OBJ_CNT];
} mark_node_t;
bh_assert(heap_ptr != NULL);
gc_heap_t *heap = (gc_heap_t *)(heap_ptr);
gc_heap_stat(heap, &stat);
printf("# stat %s %x use %d free %d \n", "instance", heap, stat.usage,
stat.free);
printf("# stat %s %x wo_usage %d vo_usage %d \n", "instance", heap,
stat.wo_usage, stat.vo_usage);
printf("# stat %s %x wo_free %d vo_free %d \n", "instance", heap,
stat.wo_free, stat.vo_free);
printf("# stat gc %d size %d high %d\n", heap->total_gc_count,
heap->total_free_size, heap->highmark_size);
if (verbose) {
printf("usage sizes: \n");
for (i = 0; i < GC_HEAP_STAT_SIZE; i++)
if (stat.usage_sizes[i])
printf(" %d: %d; ", i * 4, stat.usage_sizes[i]);
printf(" \n");
printf("free sizes: \n");
for (i = 0; i < GC_HEAP_STAT_SIZE; i++)
if (stat.free_sizes[i])
printf(" %d: %d; ", i * 4, stat.free_sizes[i]);
}
}
#endif
#if GC_STAT_DATA != 0
void *
gc_heap_stats(void *heap_arg, int *stats, int size, gc_mm_t mmt)
{
int i;
gc_heap_t *heap = (gc_heap_t *)heap_arg;
if (!gci_is_heap_valid(heap)) {
for (i = 0; i < size; i++)
stats[i] = 0;
return NULL;
}
for (i = 0; i < size; i++) {
switch (i) {
case GC_STAT_TOTAL:
stats[i] = heap->current_size;
break;
case GC_STAT_FREE:
stats[i] = heap->total_free_size;
break;
case GC_STAT_HIGHMARK:
stats[i] = heap->highmark_size;
break;
case GC_STAT_COUNT:
stats[i] = heap->total_gc_count;
break;
case GC_STAT_TIME:
stats[i] = (int)heap->total_gc_time;
break;
default:
break;
}
}
return heap;
}
#ifdef STAT_SHOW_GC
void
gc_traverse_tree(hmu_tree_node_t *node, gc_size_t *stats, int *n)
{
if (!node)
return;
if (*n > 0)
gc_traverse_tree(node->right, stats, n);
if (*n > 0) {
(*n)--;
stats[*n] = node->size;
}
if (*n > 0)
gc_traverse_tree(node->left, stats, n);
}
extern void
bh_log_emit(const char *fmt, va_list ap);
static void
gc_log_stat(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
bh_log_emit(fmt, ap);
va_end(ap);
}
void
gc_show_stat(void *heap)
{
int stats[GC_STAT_MAX];
heap = gc_heap_stats(heap, stats, GC_STAT_MAX, MMT_INSTANCE);
gc_log_stat("\n[GC stats %x] %d %d %d %d %d\n", heap, stats[0], stats[1],
stats[2], stats[3], stats[4]);
}
void
gc_show_fragment(void *heap_arg)
{
int stats[3];
int n = 3;
gc_heap_t *heap = (gc_heap_t *)heap_arg;
memset(stats, 0, n * sizeof(int));
gct_vm_mutex_lock(&heap->lock);
gc_traverse_tree(&(heap->kfc_tree_root), (gc_size_t *)stats, &n);
gct_vm_mutex_unlock(&heap->lock);
gc_log_stat("\n[GC %x top sizes] %d %d %d\n", heap, stats[0], stats[1],
stats[2]);
}
#endif
#endif
/* Alloc a mark node from native heap*/
/* Return a valid mark node if successfull*/
/* Return NULL otherwise*/
/**
* Alloc a mark node from the native heap
*
* @return a valid mark node if success, NULL otherwise
*/
static mark_node_t *
alloc_mark_node(void)
{
mark_node_t *ret = (mark_node_t *)malloc(sizeof(mark_node_t));
mark_node_t *ret = (mark_node_t *)BH_MALLOC(sizeof(mark_node_t));
if (!ret) {
LOG_ERROR("alloc a new mark node failed");
@@ -211,48 +45,42 @@ alloc_mark_node(void)
return ret;
}
/* Free a mark node*/
/* @node should not be NULL*/
/* @node should be a valid mark node allocated from native heap*/
/* Free a mark node to the native heap
*
* @param node the mark node to free, should not be NULL
*/
static void
free_mark_node(mark_node_t *node)
{
bh_assert(node);
free((gc_object_t)node);
BH_FREE((gc_object_t)node);
}
/* Sweep phase of mark_sweep algorithm*/
static void
wasm_runtime_gc_pre_sweep()
{}
/* @heap should be a valid instance heap which has already been marked*/
/**
* Sweep phase of mark_sweep algorithm
* @param heap the heap to sweep, should be a valid instance heap
* which has already been marked
*/
static void
sweep_instance_heap(gc_heap_t *heap)
{
hmu_t *cur = NULL, *end = NULL, *last = NULL;
hmu_type_t ut;
gc_size_t size;
int i;
int i, lsize;
#if GC_STAT_DATA != 0
gc_size_t tot_free = 0;
#endif
#if WAMR_ENABLE_MEMORY_PROFILING != 0
gc_size_t gc_freed_size = 0;
#endif
bh_assert(gci_is_heap_valid(heap));
cur = (hmu_t *)heap->base_addr;
last = NULL;
end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
/* reset KFC*/
int lsize =
/* reset KFC */
lsize =
(int)(sizeof(heap->kfc_normal_list) / sizeof(heap->kfc_normal_list[0]));
for (i = 0; i < lsize; i++) {
heap->kfc_normal_list[i].next = NULL;
@@ -265,20 +93,15 @@ sweep_instance_heap(gc_heap_t *heap)
size = hmu_get_size(cur);
bh_assert(size > 0);
#if WAMR_ENABLE_MEMORY_PROFILING != 0
if (ut == HMU_WO && !hmu_is_wo_marked(cur))
gc_freed_size += size;
#endif
if (ut == HMU_FC || ut == HMU_FM
|| (ut == HMU_VO && hmu_is_vo_freed(cur))
|| (ut == HMU_WO && !hmu_is_wo_marked(cur))) {
/* merge previous free areas with current one*/
/* merge previous free areas with current one */
if (!last)
last = cur;
}
else {
/* current block is still live*/
/* current block is still live */
if (last) {
#if GC_STAT_DATA != 0
tot_free += (char *)cur - (char *)last;
@@ -289,7 +112,7 @@ sweep_instance_heap(gc_heap_t *heap)
}
if (ut == HMU_WO) {
/* unmark it*/
/* unmark it */
hmu_unmark_wo(cur);
}
}
@@ -315,19 +138,17 @@ sweep_instance_heap(gc_heap_t *heap)
gc_update_threshold(heap);
#endif
#if WAMR_ENABLE_MEMORY_PROFILING != 0
LOG_PROFILE_HEAP_GC((unsigned)heap, gc_freed_size);
#endif
}
/* Add to-expand node to the to-expand list*/
/* @heap should be a valid instance heap*/
/* @obj should be a valid wo inside @heap*/
/* GC_ERROR will be returned if no more resource for marking*/
/* GC_SUCCESS will be returned otherwise*/
/**
* Add a to-expand node to the to-expand list
*
* @param heap should be a valid instance heap
* @param obj should be a valid wo inside @heap
*
* @return GC_ERROR if there is no more resource for marking,
* GC_SUCCESS if success
*/
static int
add_wo_to_expand(gc_heap_t *heap, gc_object_t obj)
{
@@ -402,9 +223,11 @@ gc_add_root(void *heap_p, gc_object_t obj)
return GC_SUCCESS;
}
/* Unmark all marked objects to do rollback*/
/* @heap should be a valid instance heap*/
/**
* Unmark all marked objects to do rollback
*
* @param heap the heap to do rollback, should be a valid instance heap
*/
static void
rollback_mark(gc_heap_t *heap)
{
@@ -444,17 +267,19 @@ rollback_mark(gc_heap_t *heap)
bh_assert(cur == end);
}
/* GC instance heap*/
/* @heap should be a valid instance heap*/
/* GC_SUCCESS will be returned if everything goes well.*/
/* GC_ERROR will be returned otherwise.*/
/**
* Reclaim GC instance heap
*
* @param heap the heap to reclaim, should be a valid instance heap
*
* @return GC_SUCCESS if success, GC_ERROR otherwise
*/
static int
reclaim_instance_heap(gc_heap_t *heap)
{
mark_node_t *mark_node = NULL;
int idx = 0, ret = GC_ERROR, j = 0, is_compact_mode = GC_FALSE;
int idx = 0, ret = GC_ERROR, j = 0;
bool is_compact_mode = false;
gc_object_t obj = NULL, ref = NULL;
hmu_t *hmu = NULL;
gc_uint32 ref_num = 0, ref_start_offset = 0, size = 0, offset = 0;
@@ -467,16 +292,16 @@ reclaim_instance_heap(gc_heap_t *heap)
if (ret != GC_SUCCESS)
return ret;
#if BH_GC_VERIFY != 0
#if BH_ENABLE_GC_VERIFY != 0
/* no matter whether the enumeration is successful or not, the data
* collected should be checked at first*/
collected should be checked at first */
mark_node = (mark_node_t *)heap->root_set;
while (mark_node) {
/* all nodes except first should be fully filled*/
/* all nodes except first should be fully filled */
bh_assert(mark_node == (mark_node_t *)heap->root_set
|| mark_node->idx == mark_node->cnt);
/* all nodes should be non-empty*/
/* all nodes should be non-empty */
bh_assert(mark_node->idx > 0);
for (idx = 0; idx < mark_node->idx; idx++) {
@@ -491,8 +316,9 @@ reclaim_instance_heap(gc_heap_t *heap)
mark_node = mark_node->next;
}
#endif
/* TODO: when fast marking failed, we can still do slow
marking. Currently just simply roll it back. */
marking, currently just simply roll it back. */
if (heap->is_fast_marking_failed) {
LOG_ERROR("enumerate rootset failed");
LOG_ERROR("all marked wos will be unmarked to keep heap consistency");
@@ -502,25 +328,25 @@ reclaim_instance_heap(gc_heap_t *heap)
return GC_ERROR;
}
/* the algorithm we use to mark all objects*/
/* the algorithm we use to mark all objects */
/* 1. mark rootset and organize them into a mark_node list (last marked
* roots at list header, i.e. stack top)*/
* roots at list header, i.e. stack top) */
/* 2. in every iteration, we use the top node to expand*/
/* 3. execute step 2 till no expanding*/
/* this is a BFS & DFS mixed algorithm, but more like DFS*/
/* 3. execute step 2 till no expanding */
/* this is a BFS & DFS mixed algorithm, but more like DFS */
mark_node = (mark_node_t *)heap->root_set;
while (mark_node) {
heap->root_set = mark_node->next;
/* note that mark_node->idx may change in each loop*/
/* note that mark_node->idx may change in each loop */
for (idx = 0; idx < (int)mark_node->idx; idx++) {
obj = mark_node->set[idx];
hmu = obj_to_hmu(obj);
size = hmu_get_size(hmu);
if (gct_vm_get_wasm_object_ref_list(obj, &is_compact_mode, &ref_num,
&ref_list, &ref_start_offset)
== GC_ERROR) {
if (!gct_vm_get_wasm_object_ref_list(obj, &is_compact_mode,
&ref_num, &ref_list,
&ref_start_offset)) {
LOG_ERROR("mark process failed because failed "
"vm_get_wasm_object_ref_list");
break;
@@ -534,10 +360,10 @@ reclaim_instance_heap(gc_heap_t *heap)
if (is_compact_mode) {
for (j = 0; j < (int)ref_num; j++) {
offset = ref_start_offset + j * 4;
bh_assert(offset >= 0 && offset + 4 < size);
bh_assert(offset + 4 < size);
ref = *(gc_object_t *)(((gc_uint8 *)obj) + offset);
if (ref == NULL_REF)
continue; /* NULL REF*/
continue; /* NULL REF */
if (add_wo_to_expand(heap, ref) == GC_ERROR) {
LOG_ERROR("add_wo_to_expand failed");
break;
@@ -553,7 +379,7 @@ reclaim_instance_heap(gc_heap_t *heap)
ref = *(gc_object_t *)(((gc_uint8 *)obj) + offset);
if (ref == NULL_REF)
continue; /* NULL REF*/
continue; /* NULL REF */
if (add_wo_to_expand(heap, ref) == GC_ERROR) {
LOG_ERROR("mark process failed");
break;
@@ -564,9 +390,9 @@ reclaim_instance_heap(gc_heap_t *heap)
}
}
if (idx < (int)mark_node->idx)
break; /* not yet done*/
break; /* not yet done */
/* obj's in mark_node are all expanded*/
/* obj's in mark_node are all expanded */
free_mark_node(mark_node);
mark_node = heap->root_set;
}
@@ -575,16 +401,13 @@ reclaim_instance_heap(gc_heap_t *heap)
LOG_ERROR("mark process is not successfully finished");
free_mark_node(mark_node);
/* roll back is required*/
/* roll back is required */
rollback_mark(heap);
return GC_ERROR;
}
/* mark finished*/
wasm_runtime_gc_pre_sweep();
/* now sweep*/
/* now sweep */
sweep_instance_heap(heap);
(void)size;
@@ -592,12 +415,13 @@ reclaim_instance_heap(gc_heap_t *heap)
return GC_SUCCESS;
}
/* Do GC on given heap*/
/* @heap should not be NULL and it should be a valid heap*/
/* GC_ERROR returned for failure*/
/* GC_SUCCESS otherwise*/
/**
* Do GC on given heap
*
* @param the heap to do GC, should be a valid heap
*
* @return GC_SUCCESS if success, GC_ERROR otherwise
*/
int
gci_gc_heap(void *h)
{
@@ -606,22 +430,31 @@ gci_gc_heap(void *h)
bh_assert(gci_is_heap_valid(heap));
LOG_VERBOSE("#reclaim instance heap %x", heap);
// gc_print_stat(heap, 0);
if (!heap->is_reclaim_enabled)
/* Ignore if GC reclaim isn't enabled */
return GC_SUCCESS;
LOG_VERBOSE("#reclaim instance heap %p", heap);
gct_vm_gc_prepare();
gct_vm_mutex_lock(&heap->lock);
heap->is_doing_reclaim = 1;
ret = reclaim_instance_heap(heap);
heap->is_doing_reclaim = 0;
gct_vm_mutex_unlock(&heap->lock);
gct_vm_gc_finished();
LOG_VERBOSE("#reclaim instance heap %x done", heap);
#if BH_ENABLE_GC_VERIFY
gct_vm_gc_finished();
LOG_VERBOSE("#reclaim instance heap %p done", heap);
#if BH_ENABLE_GC_VERIFY != 0
gci_verify_heap(heap);
#endif
#ifdef STAT_SHOW_GC
#if GC_STAT_SHOW != 0
gc_show_stat(heap);
gc_show_fragment(heap);
#endif
@@ -635,40 +468,19 @@ gc_is_dead_object(void *obj)
return !hmu_is_wo_marked(obj_to_hmu(obj));
}
#ifdef GC_TEST
gc_size_t (*gct_vm_get_wasm_object_size)(gc_object_t obj);
int (*gct_vm_get_wasm_object_ref_list)(
gc_object_t obj,
int *is_compact_mode, /* can be set to GC_TRUE, or GC_FALSE */
gc_size_t *ref_num, gc_uint16 **ref_list, gc_uint32 *ref_start_offset);
int (*gct_vm_mutex_init)(korp_mutex *mutex);
int (*gct_vm_mutex_destroy)(korp_mutex *mutex);
int (*gct_vm_mutex_lock)(korp_mutex *mutex);
int (*gct_vm_mutex_unlock)(korp_mutex *mutex);
int (*gct_vm_begin_rootset_enumeration)();
int (*gct_vm_gc_prepare)();
int (*gct_vm_gc_finished)();
#else
int
vm_begin_rootset_enumeration(void *heap)
{
os_printf("Error: Unimplemented vm_begin_rootset_enumeration function!\n");
return GC_ERROR;
}
#else
int
vm_get_wasm_object_ref_list(
gc_object_t obj,
int *is_compact_mode, /* can be set to GC_TRUE, or GC_FALSE */
gc_size_t *ref_num, gc_uint16 **ref_list, gc_uint32 *ref_start_offset)
gci_gc_heap(void *h)
{
os_printf(
"Error: Unimplemented gct_vm_get_wasm_object_ref_list function!\n");
(void)h;
return GC_ERROR;
}
void
wasm_runtime_gc_prepare(){};
void
wasm_runtime_gc_finished(){};
#endif
#endif /* end of WASM_ENABLE_GC != 0 */


@@ -1,7 +1,15 @@
/*
* Copyright (C) 2022 Intel Corporation. All rights reserved.
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
/**
* @file ems_gc.h
* @date Wed Aug 3 10:46:38 2011
*
* @brief This file defines GC modules types and interfaces.
*/
#ifndef _EMS_GC_H
#define _EMS_GC_H
@@ -11,6 +19,22 @@
extern "C" {
#endif
#ifndef GC_STAT_DATA
#define GC_STAT_DATA 0
#endif
#ifndef GC_STAT_SHOW
#define GC_STAT_SHOW 0
#endif
#ifndef GC_IN_EVERY_ALLOCATION
#define GC_IN_EVERY_ALLOCATION 0
#endif
#ifndef GC_MANUALLY
#define GC_MANUALLY 0
#endif
#define GC_HEAD_PADDING 4
#define NULL_REF ((gc_object_t)NULL)
@@ -25,6 +49,7 @@ extern "C" {
typedef void *gc_handle_t;
typedef void *gc_object_t;
typedef uint64 gc_uint64;
typedef int64 gc_int64;
typedef uint32 gc_uint32;
typedef int32 gc_int32;
@@ -38,36 +63,11 @@ typedef enum {
GC_STAT_TOTAL = 0,
GC_STAT_FREE,
GC_STAT_HIGHMARK,
GC_STAT_COUNT,
GC_STAT_TIME,
GC_STAT_MAX
} GC_STAT_INDEX;
int
gci_gc_heap(void *h);
/**
* Root set enumeration.
* TODO: This need to be implemented in the ems_gc.c when the heap layout and
* wasm reference is determined.
*
*/
int
vm_begin_rootset_enumeration(void *heap);
/**
* Reference iteration
* TODO: This function need to be implemented in the ems_gc.c when wasm object
* layout is determined.
*/
int
vm_get_wasm_object_ref_list(
gc_object_t obj,
int *is_compact_mode, /* can be set to GC_TRUE, or GC_FALSE */
gc_size_t *ref_num, gc_uint16 **ref_list, gc_uint32 *ref_start_offset);
void
wasm_runtime_gc_prepare();
void
wasm_runtime_gc_finished();
/**
* GC initialization from a buffer, which is separated into
* two parts: the beginning of the buffer is used to create
@@ -107,6 +107,17 @@ gc_init_with_struct_and_pool(char *struct_buf, gc_size_t struct_buf_size,
int
gc_destroy_with_pool(gc_handle_t handle);
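
As a reading aid, a sketch of the two-buffer initialization described above. The hunk truncates the prototype, so the trailing pool parameters are assumed from the function name and comment; treat this as illustrative, not authoritative:

/* Sketch: struct_buf holds the heap bookkeeping (gc_get_heap_struct_size()
   bytes); pool_buf becomes the actual allocation pool. Pair the returned
   handle with gc_destroy_with_pool() on shutdown. */
static gc_handle_t
heap_from_two_buffers(char *struct_buf, gc_size_t struct_size,
                      char *pool_buf, gc_size_t pool_size)
{
    return gc_init_with_struct_and_pool(struct_buf, struct_size,
                                        pool_buf, pool_size);
}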
#if WASM_ENABLE_GC != 0
/**
* Enable or disable GC reclaim for a heap
*
* @param handle handle of the heap
* @param enabled enable the GC reclaim or not, true/false to enable/disable
*/
void
gc_enable_heap_reclaim(gc_handle_t handle, bool enabled);
#endif
/**
* Return heap struct size
*/
@@ -156,12 +167,14 @@ gc_realloc_vo(void *heap, void *ptr, gc_size_t size);
int
gc_free_vo(void *heap, gc_object_t obj);
void
gc_free_wo(void *vheap, void *ptr);
#if WASM_ENABLE_GC != 0
gc_object_t
gc_alloc_wo(void *heap, gc_size_t size);
void
gc_free_wo(void *vheap, void *ptr);
#endif
#else /* else of BH_ENABLE_GC_VERIFY */
gc_object_t
@@ -174,6 +187,14 @@ gc_realloc_vo_internal(void *heap, void *ptr, gc_size_t size, const char *file,
int
gc_free_vo_internal(void *heap, gc_object_t obj, const char *file, int line);
#if WASM_ENABLE_GC != 0
gc_object_t
gc_alloc_wo(void *heap, gc_size_t size);
void
gc_free_wo(void *vheap, void *ptr);
#endif
/* clang-format off */
#define gc_alloc_vo(heap, size) \
gc_alloc_vo_internal(heap, size, __FILE__, __LINE__)
@@ -183,16 +204,70 @@ gc_free_vo_internal(void *heap, gc_object_t obj, const char *file, int line);
#define gc_free_vo(heap, obj) \
gc_free_vo_internal(heap, obj, __FILE__, __LINE__)
/* clang-format on */
void
gc_free_wo(void *vheap, void *ptr);
#if WASM_ENABLE_GC != 0
#define gc_alloc_wo(heap, size) \
gc_alloc_wo_internal(heap, size, __FILE__, __LINE__)
#define gc_free_wo(heap, obj) \
gc_free_wo_internal(heap, obj, __FILE__, __LINE__)
#endif
/* clang-format on */
#endif /* end of BH_ENABLE_GC_VERIFY */
#if WASM_ENABLE_GC != 0
int
gci_gc_heap(void *heap);
/**
* Root set enumeration.
* TODO: This need to be implemented in the ems_gc.c when the heap layout and
* wasm reference is determined.
*/
int
vm_begin_rootset_enumeration(void *heap);
/**
* Reference iteration
* TODO: This function need to be implemented in the ems_gc.c when wasm object
* layout is determined.
*/
int
vm_get_wasm_object_ref_list(gc_object_t obj, bool *p_is_compact_mode,
gc_size_t *ref_num, gc_uint16 **ref_list,
gc_uint32 *ref_start_offset);
void
wasm_runtime_gc_prepare();
void
wasm_runtime_gc_finalize();
#endif /* end of WASM_ENABLE_GC != 0 */
#define GC_HEAP_STAT_SIZE (128 / 4)
typedef struct {
int usage;
int usage_block;
int vo_usage;
int wo_usage;
int free;
int free_block;
int vo_free;
int wo_free;
int usage_sizes[GC_HEAP_STAT_SIZE];
int free_sizes[GC_HEAP_STAT_SIZE];
} gc_stat_t;
void
gc_show_stat(gc_handle_t handle);
#if WASM_ENABLE_GC != 0
void
gc_show_fragment(gc_handle_t handle);
#endif
#ifdef __cplusplus
}
#endif


@@ -1,5 +1,5 @@
/*
* Copyright (C) 2022 Intel Corporation. All rights reserved.
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
@@ -10,55 +10,62 @@
extern "C" {
#endif
#include "bh_platform.h"
#include "ems_gc.h"
/* basic block managed by EMS gc is the so-called HMU (heap memory unit)*/
typedef enum _hmu_type_enum {
/* HMU (heap memory unit) basic block type */
typedef enum hmu_type_enum {
HMU_TYPE_MIN = 0,
HMU_TYPE_MAX = 3,
HMU_WO = 3,
HMU_VO = 2,
HMU_WO = 3, /* WASM Object */
HMU_VO = 2, /* VM Object */
HMU_FC = 1,
HMU_FM = 0
} hmu_type_t;
typedef struct _hmu_struct {
typedef struct hmu_struct {
gc_uint32 header;
} hmu_t;
#if BH_ENABLE_GC_VERIFY != 0
#if UINTPTR_MAX > UINT32_MAX
/* 2 prefix paddings for 64-bit pointer */
#define GC_OBJECT_PREFIX_PADDING_CNT 2
#else
/* 3 prefix paddings for 32-bit pointer */
#define GC_OBJECT_PREFIX_PADDING_CNT 3
#endif
#define GC_OBJECT_SUFFIX_PADDING_CNT 4
#define GC_OBJECT_PADDING_VALUE (0x12345678)
typedef struct _gc_object_prefix {
typedef struct gc_object_prefix {
const char *file_name;
gc_int32 line_no;
gc_int32 size;
gc_uint32 padding[GC_OBJECT_PREFIX_PADDING_CNT];
} gc_object_prefix_t;
#define OBJ_PREFIX_SIZE (sizeof(gc_object_prefix_t))
typedef struct _gc_object_suffix {
typedef struct gc_object_suffix {
gc_uint32 padding[GC_OBJECT_SUFFIX_PADDING_CNT];
} gc_object_suffix_t;
#define OBJ_PREFIX_SIZE (sizeof(gc_object_prefix_t))
#define OBJ_SUFFIX_SIZE (sizeof(gc_object_suffix_t))
extern void
void
hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size,
const char *file_name, int line_no);
extern void
hmu_verify(hmu_t *hmu);
void
hmu_verify(void *vheap, hmu_t *hmu);
#define SKIP_OBJ_PREFIX(p) ((void *)((gc_uint8 *)(p) + OBJ_PREFIX_SIZE))
#define SKIP_OBJ_SUFFIX(p) ((void *)((gc_uint8 *)(p) + OBJ_SUFFIX_SIZE))
#define OBJ_EXTRA_SIZE (HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE)
#else
#else /* else of BH_ENABLE_GC_VERIFY */
#define OBJ_PREFIX_SIZE 0
#define OBJ_SUFFIX_SIZE 0
@@ -68,11 +75,11 @@ hmu_verify(hmu_t *hmu);
#define OBJ_EXTRA_SIZE (HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE)
#endif /* GC_DEBUG*/
#endif /* end of BH_ENABLE_GC_VERIFY */
#define hmu_obj_size(s) ((s)-OBJ_EXTRA_SIZE)
#define GC_ALIGN_8(s) (((int)(s) + 7) & ~7)
#define GC_ALIGN_8(s) (((uint32)(s) + 7) & (uint32)~7)
#define GC_SMALLEST_SIZE \
GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE + 8)
@@ -80,22 +87,29 @@ hmu_verify(hmu_t *hmu);
GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE \
+ (((x) > 8) ? (x) : 8))
/*////// functions for bit operation*/
/**
* hmu bit operation
*/
#define SETBIT(v, offset) (v) |= (1 << (offset))
#define GETBIT(v, offset) ((v) & (1 << (offset)) ? 1 : 0)
#define CLRBIT(v, offset) (v) &= ~(1 << (offset))
#define SETBIT(v, offset) (v) |= ((uint32)1 << (offset))
#define GETBIT(v, offset) ((v) & ((uint32)1 << (offset)) ? 1 : 0)
#define CLRBIT(v, offset) (v) &= (~((uint32)1 << (offset)))
#define SETBITS(v, offset, size, value) \
do { \
(v) &= ~(((1 << size) - 1) << offset); \
(v) |= value << offset; \
/* clang-format off */
#define SETBITS(v, offset, size, value) \
do { \
(v) &= ~((((uint32)1 << size) - 1) << offset); \
(v) |= ((uint32)value << offset); \
} while (0)
#define CLRBITS(v, offset, size) (v) &= ~(((1 << size) - 1) << offset)
#define CLRBITS(v, offset, size) \
(v) &= ~((((uint32)1 << size) - 1) << offset)
#define GETBITS(v, offset, size) \
(((v) & (((1 << size) - 1) << offset)) >> offset)
(((v) & (((((uint32)1 << size) - 1) << offset))) >> offset)
/* clang-format on */
/*////// gc object layout definition*/
/**
* gc object layout definition
*/
#define HMU_SIZE (sizeof(hmu_t))
@@ -105,10 +119,14 @@ hmu_verify(hmu_t *hmu);
#define HMU_UT_SIZE 2
#define HMU_UT_OFFSET 30
#define hmu_get_ut(hmu) GETBITS((hmu)->header, HMU_UT_OFFSET, HMU_UT_SIZE)
/* clang-format off */
#define hmu_get_ut(hmu) \
GETBITS((hmu)->header, HMU_UT_OFFSET, HMU_UT_SIZE)
#define hmu_set_ut(hmu, type) \
SETBITS((hmu)->header, HMU_UT_OFFSET, HMU_UT_SIZE, type)
#define hmu_is_ut_valid(tp) (tp >= HMU_TYPE_MIN && tp <= HMU_TYPE_MAX)
#define hmu_is_ut_valid(tp) \
(tp >= HMU_TYPE_MIN && tp <= HMU_TYPE_MAX)
/* clang-format on */
/* P in use bit means the previous chunk is in use */
#define HMU_P_OFFSET 29
@@ -125,6 +143,12 @@ hmu_verify(hmu_t *hmu);
#define hmu_unmark_wo(hmu) CLRBIT((hmu)->header, HMU_WO_MB_OFFSET)
#define hmu_is_wo_marked(hmu) GETBIT((hmu)->header, HMU_WO_MB_OFFSET)
/**
* The hmu size is divisible by 8, its lowest 3 bits are 0, so we only
* store its higher bits of bit [29..3], and bit [2..0] are not stored.
* After that, the maximal heap size can be enlarged from (1<<27) = 128MB
* to (1<<27) * 8 = 1GB.
*/
#define HMU_SIZE_SIZE 27
#define HMU_SIZE_OFFSET 0
@@ -133,46 +157,76 @@ hmu_verify(hmu_t *hmu);
#define hmu_is_vo_freed(hmu) GETBIT((hmu)->header, HMU_VO_FB_OFFSET)
#define hmu_unfree_vo(hmu) CLRBIT((hmu)->header, HMU_VO_FB_OFFSET)
#define hmu_get_size(hmu) GETBITS((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE)
#define hmu_get_size(hmu) \
(GETBITS((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE) << 3)
#define hmu_set_size(hmu, size) \
SETBITS((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE, size)
SETBITS((hmu)->header, HMU_SIZE_OFFSET, HMU_SIZE_SIZE, ((size) >> 3))
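
A small sanity sketch of the shifted encoding, using only the macros defined above:

/* Sketch: 24 is 8-aligned, so hmu_set_size stores 24 >> 3 = 3 in the
   27 size bits and hmu_get_size returns 3 << 3 = 24. The largest
   encodable size becomes ((1 << 27) - 1) << 3, roughly 1 GB. */
static void
hmu_size_encoding_demo(void)
{
    hmu_t h = { 0 };
    hmu_set_size(&h, 24);
    bh_assert(hmu_get_size(&h) == 24);
}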
/*////// HMU free chunk management*/
/**
* HMU free chunk management
*/
#ifndef HMU_NORMAL_NODE_CNT
#define HMU_NORMAL_NODE_CNT 32
#endif
#define HMU_FC_NORMAL_MAX_SIZE ((HMU_NORMAL_NODE_CNT - 1) << 3)
#define HMU_IS_FC_NORMAL(size) ((size) < HMU_FC_NORMAL_MAX_SIZE)
#if HMU_FC_NORMAL_MAX_SIZE >= GC_MAX_HEAP_SIZE
#error "Too small GC_MAX_HEAP_SIZE"
#endif
typedef struct _hmu_normal_node {
typedef struct hmu_normal_node {
hmu_t hmu_header;
struct _hmu_normal_node *next;
gc_int32 next_offset;
} hmu_normal_node_t;
typedef struct _hmu_tree_node {
typedef struct hmu_normal_list {
hmu_normal_node_t *next;
} hmu_normal_list_t;
static inline hmu_normal_node_t *
get_hmu_normal_node_next(hmu_normal_node_t *node)
{
return node->next_offset
? (hmu_normal_node_t *)((uint8 *)node + node->next_offset)
: NULL;
}
static inline void
set_hmu_normal_node_next(hmu_normal_node_t *node, hmu_normal_node_t *next)
{
if (next) {
bh_assert((uint8 *)next - (uint8 *)node < INT32_MAX);
node->next_offset = (gc_int32)(intptr_t)((uint8 *)next - (uint8 *)node);
}
else {
node->next_offset = 0;
}
}
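
The switch from a next pointer to a self-relative next_offset appears intended to keep free-list links valid when the whole pool is moved (cf. gc_migrate later in this commit): both nodes shift by the same delta, so their difference is unchanged. A small sketch using the helpers above:

/* Sketch: a and b are assumed to be two nodes inside the same heap pool */
static void
normal_node_link_demo(hmu_normal_node_t *a, hmu_normal_node_t *b)
{
    set_hmu_normal_node_next(a, b);    /* stores (b - a) as a 32-bit offset */
    bh_assert(get_hmu_normal_node_next(a) == b);
    set_hmu_normal_node_next(a, NULL); /* offset 0 terminates the list */
    bh_assert(get_hmu_normal_node_next(a) == NULL);
}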
typedef struct hmu_tree_node {
hmu_t hmu_header;
gc_size_t size;
struct _hmu_tree_node *left;
struct _hmu_tree_node *right;
struct _hmu_tree_node *parent;
struct hmu_tree_node *left;
struct hmu_tree_node *right;
struct hmu_tree_node *parent;
} hmu_tree_node_t;
typedef struct _gc_heap_struct {
gc_handle_t heap_id; /* for double checking*/
typedef struct gc_heap_struct {
/* for double checking*/
gc_handle_t heap_id;
gc_uint8 *base_addr;
gc_size_t current_size;
gc_size_t max_size;
korp_mutex lock;
hmu_normal_node_t kfc_normal_list[HMU_NORMAL_NODE_CNT];
hmu_normal_list_t kfc_normal_list[HMU_NORMAL_NODE_CNT];
/* order in kfc_tree is: size[left] <= size[cur] < size[right]*/
hmu_tree_node_t kfc_tree_root;
#if WASM_ENABLE_GC != 0
/* for rootset enumeration of private heap*/
void *root_set;
@@ -187,40 +241,33 @@ typedef struct _gc_heap_struct {
/* whether the heap is doing reclaim */
unsigned is_doing_reclaim : 1;
/* Whether the heap can do reclaim */
unsigned is_reclaim_enabled : 1;
#endif
/* whether heap is corrupted, e.g. the hmu nodes are modified
by user */
bool is_heap_corrupted;
#if GC_STAT_DATA != 0
gc_size_t highmark_size;
gc_size_t init_size;
gc_size_t total_gc_count;
gc_size_t highmark_size;
gc_size_t total_free_size;
#if WASM_ENABLE_GC != 0
gc_size_t gc_threshold;
gc_size_t gc_threshold_factor;
gc_int64 total_gc_time;
gc_size_t total_gc_count;
gc_size_t total_gc_time;
#endif
#if GC_STAT_DATA != 0
gc_uint64 total_size_allocated;
gc_uint64 total_size_freed;
#endif
} gc_heap_t;
/*////// MISC internal used APIs*/
#if WASM_ENABLE_GC != 0
extern void
gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size);
extern int
gci_is_heap_valid(gc_heap_t *heap);
#ifdef GC_DEBUG
extern void
gci_verify_heap(gc_heap_t *heap);
extern void
gci_dump(char *buf, gc_heap_t *heap);
#endif
#if GC_STAT_DATA != 0
/* the default GC threshold size is free_size * GC_DEFAULT_THRESHOLD_FACTOR /
* 1000 */
#define GC_DEFAULT_THRESHOLD_FACTOR 400
#define GC_DEFAULT_THRESHOLD_FACTOR 300
static inline void
gc_update_threshold(gc_heap_t *heap)
@@ -228,55 +275,48 @@ gc_update_threshold(gc_heap_t *heap)
heap->gc_threshold =
heap->total_free_size * heap->gc_threshold_factor / 1000;
}
#endif
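
Worked numbers for the new default factor (the old value was 400):

/* Sketch: with GC_DEFAULT_THRESHOLD_FACTOR = 300, a heap whose
   total_free_size is 512 KB gets gc_threshold = 524288 * 300 / 1000
   = 157286 bytes, i.e. 30% of the current free size (factor 400
   gave 40%). */
static void
threshold_demo(gc_heap_t *heap)
{
    heap->total_free_size = 512 * 1024;
    heap->gc_threshold_factor = GC_DEFAULT_THRESHOLD_FACTOR;
    gc_update_threshold(heap);
    bh_assert(heap->gc_threshold == 157286);
}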
/*////// MISC data structures*/
bool
wasm_runtime_get_wasm_object_ref_list(gc_object_t obj, bool *p_is_compact_mode,
gc_uint32 *p_ref_num,
gc_uint16 **p_ref_list,
gc_uint32 *p_ref_start_offset);
#define MARK_NODE_OBJ_CNT 256
/* mark node is used for gc marker*/
typedef struct _mark_node_struct {
/* number of to-expand objects can be saved in this node*/
gc_size_t cnt;
/* the first unused index*/
uint32 idx;
/* next node on the node list*/
struct _mark_node_struct *next;
/* the actual to-expand objects list*/
gc_object_t set[MARK_NODE_OBJ_CNT];
} mark_node_t;
/*////// Imported APIs wrappers under TEST mode*/
#ifdef GC_TEST
extern int (*gct_vm_get_wasm_object_ref_list)(
gc_object_t obj,
int *is_compact_mode, /* can be set to GC_TRUE, or GC_FALSE */
gc_size_t *ref_num, gc_uint16 **ref_list, gc_uint32 *ref_start_offset);
extern int (*gct_vm_mutex_init)(korp_mutex *mutex);
extern int (*gct_vm_mutex_destroy)(korp_mutex *mutex);
extern int (*gct_vm_mutex_lock)(korp_mutex *mutex);
extern int (*gct_vm_mutex_unlock)(korp_mutex *mutex);
extern gc_handle_t (*gct_vm_get_gc_handle_for_current_instance)(void);
extern int (*gct_vm_begin_rootset_enumeration)(void *heap);
extern int (*gct_vm_gc_prepare)(void);
extern int (*gct_vm_gc_finished)(void);
#else
#define gct_vm_get_wasm_object_ref_list vm_get_wasm_object_ref_list
#define gct_vm_get_wasm_object_ref_list wasm_runtime_get_wasm_object_ref_list
#define gct_vm_mutex_init os_mutex_init
#define gct_vm_mutex_destroy os_mutex_destroy
#define gct_vm_mutex_lock os_mutex_lock
#define gct_vm_mutex_unlock os_mutex_unlock
#define gct_vm_begin_rootset_enumeration vm_begin_rootset_enumeration
#define gct_vm_gc_prepare wasm_runtime_gc_prepare
#define gct_vm_gc_finished wasm_runtime_gc_finished
#endif
#define gct_vm_gc_finished wasm_runtime_gc_finalize
#endif /* end of WASM_ENABLE_GC != 0 */
/**
* MISC internal used APIs
*/
bool
gci_add_fc(gc_heap_t *heap, hmu_t *hmu, gc_size_t size);
int
gci_is_heap_valid(gc_heap_t *heap);
/**
* Verify heap integrity
*/
void
gci_verify_heap(gc_heap_t *heap);
/**
* Dump heap nodes
*/
void
gci_dump(gc_heap_t *heap);
#ifdef __cplusplus
}
#endif
#endif
#endif /* end of _EMS_GC_INTERNAL_H */


@@ -1,18 +1,20 @@
/*
* Copyright (C) 2022 Intel Corporation. All rights reserved.
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "ems_gc_internal.h"
#if BH_ENABLE_GC_VERIFY != 0
/* Set default value to prefix and suffix*/
/* @hmu should not be NULL and it should have been correctly initilized (except
* for prefix and suffix part)*/
/* @tot_size is offered here because hmu_get_size can not be used till now.
* @tot_size should not be smaller than OBJ_EXTRA_SIZE.*/
/* For VO, @tot_size should be equal to object total size.*/
/**
* Set default value to prefix and suffix
* @param hmu should not be NULL and should have been correctly initialized
* (except prefix and suffix part)
* @param tot_size is offered here because hmu_get_size can not be used
* till now. tot_size should not be smaller than OBJ_EXTRA_SIZE.
* For VO, tot_size should be equal to object total size.
*/
void
hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size,
const char *file_name, int line_no)
@@ -22,7 +24,7 @@ hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size,
gc_uint32 i = 0;
bh_assert(hmu);
bh_assert(hmu_get_ut(hmu) == HMU_JO || hmu_get_ut(hmu) == HMU_VO);
bh_assert(hmu_get_ut(hmu) == HMU_WO || hmu_get_ut(hmu) == HMU_VO);
bh_assert(tot_size >= OBJ_EXTRA_SIZE);
bh_assert(!(tot_size & 7));
bh_assert(hmu_get_ut(hmu) != HMU_VO || hmu_get_size(hmu) >= tot_size);
@@ -33,17 +35,20 @@ hmu_init_prefix_and_suffix(hmu_t *hmu, gc_size_t tot_size,
prefix->file_name = file_name;
prefix->line_no = line_no;
prefix->size = tot_size;
for (i = 0; i < GC_OBJECT_PREFIX_PADDING_CNT; i++) {
prefix->padding[i] = GC_OBJECT_PADDING_VALUE;
}
for (i = 0; i < GC_OBJECT_SUFFIX_PADDING_CNT; i++) {
suffix->padding[i] = GC_OBJECT_PADDING_VALUE;
}
}
void
hmu_verify(hmu_t *hmu)
hmu_verify(void *vheap, hmu_t *hmu)
{
gc_heap_t *heap = (gc_heap_t *)vheap;
gc_object_prefix_t *prefix = NULL;
gc_object_suffix_t *suffix = NULL;
gc_uint32 i = 0;
@@ -59,7 +64,7 @@ hmu_verify(hmu_t *hmu)
size = prefix->size;
suffix = (gc_object_suffix_t *)((gc_uint8 *)hmu + size - OBJ_SUFFIX_SIZE);
if (ut == HMU_VO || ut == HMU_JO) {
if (ut == HMU_VO || ut == HMU_WO) {
/* check padding*/
for (i = 0; i < GC_OBJECT_PREFIX_PADDING_CNT; i++) {
if (prefix->padding[i] != GC_OBJECT_PADDING_VALUE) {
@@ -75,11 +80,12 @@ hmu_verify(hmu_t *hmu)
}
if (!is_padding_ok) {
LOG_ERROR("Invalid padding for object created at %s:%d",
os_printf("Invalid padding for object created at %s:%d\n",
(prefix->file_name ? prefix->file_name : ""),
prefix->line_no);
heap->is_heap_corrupted = true;
}
bh_assert(is_padding_ok);
}
}
#endif
#endif /* end of BH_ENABLE_GC_VERIFY */


@@ -1,47 +1,10 @@
/*
* Copyright (C) 2022 Intel Corporation. All rights reserved.
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "ems_gc_internal.h"
#define HEAP_INC_FACTOR 1
/* Check if current platform is compatible with current GC design*/
/* Return GC_ERROR if not;*/
/* Return GC_SUCCESS otherwise.*/
int
gci_check_platform()
{
#define CHECK(x, y) \
do { \
if ((x) != (y)) { \
LOG_ERROR("Platform checking failed on LINE %d at FILE %s.", \
__LINE__, __FILE__); \
return GC_ERROR; \
} \
} while (0)
CHECK(8, sizeof(gc_int64));
CHECK(4, sizeof(gc_uint32));
CHECK(4, sizeof(gc_int32));
CHECK(2, sizeof(gc_uint16));
CHECK(2, sizeof(gc_int16));
CHECK(1, sizeof(gc_int8));
CHECK(1, sizeof(gc_uint8));
CHECK(4, sizeof(gc_size_t));
CHECK(4, sizeof(void *));
return GC_SUCCESS;
}
static void
adjust_ptr(uint8 **p_ptr, intptr_t offset)
{
if (*p_ptr)
*p_ptr += offset;
}
static gc_handle_t
gc_init_internal(gc_heap_t *heap, char *base_addr, gc_size_t heap_max_size)
{
@@ -62,6 +25,13 @@ gc_init_internal(gc_heap_t *heap, char *base_addr, gc_size_t heap_max_size)
heap->base_addr = (gc_uint8 *)base_addr;
heap->heap_id = (gc_handle_t)heap;
heap->total_free_size = heap->current_size;
heap->highmark_size = 0;
#if WASM_ENABLE_GC != 0
heap->gc_threshold_factor = GC_DEFAULT_THRESHOLD_FACTOR;
gc_update_threshold(heap);
#endif
root = &heap->kfc_tree_root;
memset(root, 0, sizeof *root);
root->size = sizeof *root;
@@ -178,12 +148,29 @@ gc_destroy_with_pool(gc_handle_t handle)
return GC_SUCCESS;
}
#if WASM_ENABLE_GC != 0
void
gc_enable_heap_reclaim(gc_handle_t handle, bool enabled)
{
gc_heap_t *heap = (gc_heap_t *)handle;
heap->is_reclaim_enabled = enabled ? 1 : 0;
}
#endif
uint32
gc_get_heap_struct_size()
{
return sizeof(gc_heap_t);
}
static void
adjust_ptr(uint8 **p_ptr, intptr_t offset)
{
if (*p_ptr)
*p_ptr += offset;
}
int
gc_migrate(gc_handle_t handle, char *pool_buf_new, gc_size_t pool_buf_size)
{
@@ -278,152 +265,173 @@ gci_verify_heap(gc_heap_t *heap)
}
#endif
#if 0
/* Initialize a heap*/
/* @heap can not be NULL*/
/* @heap_max_size can not exceed GC_MAX_HEAP_SIZE and it should not be equal to or smaller than HMU_FC_NORMAL_MAX_SIZE.*/
/* @heap_max_size will be rounded down to page size at first.*/
/* This function will alloc resource for given heap and initialize all data structures.*/
/* Return GC_ERROR if any errors occur.*/
/* Return GC_SUCCESS otherwise.*/
static int init_heap(gc_heap_t *heap, gc_size_t heap_max_size)
{
void *base_addr = NULL;
hmu_normal_node_t *p = NULL;
hmu_tree_node_t *root = NULL, *q = NULL;
int i = 0;
int ret = 0;
bh_assert(heap);
if(heap_max_size < 1024) {
LOG_ERROR("[GC_ERROR]heap_init_size(%d) < 1024 ", heap_max_size);
return GC_ERROR;
}
memset(heap, 0, sizeof *heap);
ret = gct_vm_mutex_init(&heap->lock);
if (ret != BHT_OK) {
LOG_ERROR("[GC_ERROR]failed to init lock ");
return GC_ERROR;
}
heap_max_size = (heap_max_size + 7) & ~(unsigned int)7;
/* alloc memory for this heap*/
base_addr = os_malloc(heap_max_size + GC_HEAD_PADDING);
if(!base_addr)
{
LOG_ERROR("[GC_ERROR]reserve heap with size(%u) failed", heap_max_size);
(void) gct_vm_mutex_destroy(&heap->lock);
return GC_ERROR;
}
base_addr = (char*) base_addr + GC_HEAD_PADDING;
#ifdef BH_FOOTPRINT
printf("\nINIT HEAP 0x%08x %d\n", base_addr, heap_max_size);
#endif
bh_assert(((int) base_addr & 7) == 4);
/* init all data structures*/
heap->max_size = heap_max_size;
heap->current_size = heap_max_size;
heap->base_addr = (gc_uint8*)base_addr;
heap->heap_id = (gc_handle_t)heap;
#if GC_STAT_DATA != 0
heap->total_free_size = heap->current_size;
heap->highmark_size = 0;
heap->total_gc_count = 0;
heap->total_gc_time = 0;
heap->gc_threshold_factor = GC_DEFAULT_THRESHOLD_FACTOR;
gc_update_threshold(heap);
#endif
for(i = 0; i < HMU_NORMAL_NODE_CNT;i++)
{
/* make normal node look like a FC*/
p = &heap->kfc_normal_list[i];
memset(p, 0, sizeof *p);
hmu_set_ut(&p->hmu_header, HMU_FC);
hmu_set_size(&p->hmu_header, sizeof *p);
}
root = &heap->kfc_tree_root;
memset(root, 0, sizeof *root);
root->size = sizeof *root;
hmu_set_ut(&root->hmu_header, HMU_FC);
hmu_set_size(&root->hmu_header, sizeof *root);
q = (hmu_tree_node_t *)heap->base_addr;
memset(q, 0, sizeof *q);
hmu_set_ut(&q->hmu_header, HMU_FC);
hmu_set_size(&q->hmu_header, heap->current_size);
hmu_mark_pinuse(&q->hmu_header);
root->right = q;
q->parent = root;
q->size = heap->current_size;
/* #if !defined(NVALGRIND) */
/* VALGRIND_MAKE_MEM_NOACCESS (base_addr, heap_max_size); */
/* #endif */
bh_assert(root->size <= HMU_FC_NORMAL_MAX_SIZE && HMU_FC_NORMAL_MAX_SIZE < q->size); /*@NOTIFY*/
return GC_SUCCESS;
}
#endif
#if GC_STAT_DATA != 0
/**
* Set GC threshold factor
*
* @param heap [in] the heap to set
* @param factor [in] the threshold size is free_size * factor / 1000
*
* @return GC_SUCCESS if success.
*/
int
gc_set_threshold_factor(void *instance_heap, unsigned int factor)
{
gc_heap_t *heap = (gc_heap_t *)instance_heap;
if (!gci_is_heap_valid(heap)) {
LOG_ERROR("gc_set_threshold_factor with incorrect private heap");
return GC_ERROR;
}
heap->gc_threshold_factor = factor;
gc_update_threshold(heap);
return GC_SUCCESS;
}
#endif
#if BH_ENABLE_GC_VERIFY != 0
/* Verify heap integrity*/
/* @heap should not be NULL and it should be a valid heap*/
void
gci_verify_heap(gc_heap_t *heap)
gc_heap_stat(void *heap_ptr, gc_stat_t *stat)
{
hmu_t *cur = NULL, *end = NULL;
hmu_type_t ut;
gc_size_t size;
gc_heap_t *heap = (gc_heap_t *)heap_ptr;
bh_assert(heap && gci_is_heap_valid(heap));
memset(stat, 0, sizeof(gc_stat_t));
cur = (hmu_t *)heap->base_addr;
end = (hmu_t *)(heap->base_addr + heap->current_size);
end = (hmu_t *)((char *)heap->base_addr + heap->current_size);
while (cur < end) {
hmu_verify(cur);
cur = (hmu_t *)((gc_uint8 *)cur + hmu_get_size(cur));
ut = hmu_get_ut(cur);
size = hmu_get_size(cur);
bh_assert(size > 0);
if (ut == HMU_FC || ut == HMU_FM
|| (ut == HMU_VO && hmu_is_vo_freed(cur))
|| (ut == HMU_WO && !hmu_is_wo_marked(cur))) {
if (ut == HMU_VO)
stat->vo_free += size;
if (ut == HMU_WO)
stat->wo_free += size;
stat->free += size;
stat->free_block++;
if (size / sizeof(int) < GC_HEAP_STAT_SIZE - 1)
stat->free_sizes[size / sizeof(int)] += 1;
else
stat->free_sizes[GC_HEAP_STAT_SIZE - 1] += 1;
}
else {
if (ut == HMU_VO)
stat->vo_usage += size;
if (ut == HMU_WO)
stat->wo_usage += size;
stat->usage += size;
stat->usage_block++;
if (size / sizeof(int) < GC_HEAP_STAT_SIZE - 1)
stat->usage_sizes[size / sizeof(int)] += 1;
else
stat->usage_sizes[GC_HEAP_STAT_SIZE - 1] += 1;
}
cur = (hmu_t *)((char *)cur + size);
}
bh_assert(cur == end);
}
void
gc_print_stat(void *heap_ptr, int verbose)
{
gc_stat_t stat;
int i;
bh_assert(heap_ptr != NULL);
gc_heap_t *heap = (gc_heap_t *)(heap_ptr);
gc_heap_stat(heap, &stat);
os_printf("# stat %s %x use %d free %d \n", "instance", heap, stat.usage,
stat.free);
os_printf("# stat %s %x wo_usage %d vo_usage %d \n", "instance", heap,
stat.wo_usage, stat.vo_usage);
os_printf("# stat %s %x wo_free %d vo_free %d \n", "instance", heap,
stat.wo_free, stat.vo_free);
#if WASM_ENABLE_GC == 0
os_printf("# stat free size %d high %d\n", heap->total_free_size,
heap->highmark_size);
#else
os_printf("# stat gc %d free size %d high %d\n", heap->total_gc_count,
heap->total_free_size, heap->highmark_size);
#endif
if (verbose) {
os_printf("usage sizes: \n");
for (i = 0; i < GC_HEAP_STAT_SIZE; i++)
if (stat.usage_sizes[i])
os_printf(" %d: %d; ", i * 4, stat.usage_sizes[i]);
os_printf(" \n");
os_printf("free sizes: \n");
for (i = 0; i < GC_HEAP_STAT_SIZE; i++)
if (stat.free_sizes[i])
os_printf(" %d: %d; ", i * 4, stat.free_sizes[i]);
}
}
void *
gc_heap_stats(void *heap_arg, uint32 *stats, int size)
{
int i;
gc_heap_t *heap = (gc_heap_t *)heap_arg;
if (!gci_is_heap_valid(heap)) {
for (i = 0; i < size; i++)
stats[i] = 0;
return NULL;
}
for (i = 0; i < size; i++) {
switch (i) {
case GC_STAT_TOTAL:
stats[i] = heap->current_size;
break;
case GC_STAT_FREE:
stats[i] = heap->total_free_size;
break;
case GC_STAT_HIGHMARK:
stats[i] = heap->highmark_size;
break;
#if WASM_ENABLE_GC != 0
case GC_STAT_COUNT:
stats[i] = heap->total_gc_count;
break;
case GC_STAT_TIME:
stats[i] = heap->total_gc_time;
break;
#endif
default:
break;
}
}
return heap;
}
void
gc_traverse_tree(hmu_tree_node_t *node, gc_size_t *stats, int *n)
{
if (!node)
return;
if (*n > 0)
gc_traverse_tree(node->right, stats, n);
if (*n > 0) {
(*n)--;
stats[*n] = node->size;
}
if (*n > 0)
gc_traverse_tree(node->left, stats, n);
}
void
gc_show_stat(void *heap)
{
uint32 stats[GC_STAT_MAX];
heap = gc_heap_stats(heap, stats, GC_STAT_MAX);
os_printf("\n[GC stats %x] %d %d %d %d %d\n", heap, stats[0], stats[1],
stats[2], stats[3], stats[4]);
}
#if WASM_ENABLE_GC != 0
void
gc_show_fragment(void *heap_arg)
{
int stats[3];
int n = 3;
gc_heap_t *heap = (gc_heap_t *)heap_arg;
memset(stats, 0, n * sizeof(int));
gct_vm_mutex_lock(&heap->lock);
gc_traverse_tree(&(heap->kfc_tree_root), (gc_size_t *)stats, &n);
gct_vm_mutex_unlock(&heap->lock);
os_printf("\n[GC %x top sizes] %d %d %d\n", heap, stats[0], stats[1],
stats[2]);
}
#endif


@@ -1,13 +0,0 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set (GC_DIR ${CMAKE_CURRENT_LIST_DIR})
include_directories(${GC_DIR})
file (GLOB_RECURSE source_all ${GC_DIR}/*.c)
set (GC_SHARED_SOURCE ${source_all})


@@ -60,7 +60,6 @@ mem_allocator_free(mem_allocator_t allocator, void *ptr)
void *
mem_allocator_malloc_with_gc(mem_allocator_t allocator, uint32_t size)
{
// gc_alloc_vo
return gc_alloc_wo((gc_handle_t)allocator, size);
}
@@ -72,6 +71,12 @@ mem_allocator_free_with_gc(mem_allocator_t allocator, void *ptr)
gc_free_wo((gc_handle_t)allocator, ptr);
}
#endif
void
mem_allocator_enable_heap_reclaim(mem_allocator_t allocator, bool enabled)
{
return gc_enable_heap_reclaim((gc_handle_t)allocator, enabled);
}
#endif
int


@@ -4,12 +4,12 @@
set (MEM_ALLOC_DIR ${CMAKE_CURRENT_LIST_DIR})
include_directories(${MEM_ALLOC_DIR} ${MEM_ALLOC_DIR}/../gc)
include_directories(${MEM_ALLOC_DIR})
file (GLOB_RECURSE source_all
${MEM_ALLOC_DIR}/ems/*.c
${MEM_ALLOC_DIR}/tlsf/*.c
${MEM_ALLOC_DIR}/*.c
${MEM_ALLOC_DIR}/mem_alloc.c
)
set (MEM_ALLOC_SHARED_SOURCE ${source_all})


@@ -52,9 +52,11 @@ mem_allocator_malloc_with_gc(mem_allocator_t allocator, uint32_t size);
#if WASM_GC_MANUALLY != 0
void
mem_allocator_free_with_gc(mem_allocator_t allocator, void *ptr);
#endif
#endif
#endif
void
mem_allocator_enable_heap_reclaim(mem_allocator_t allocator, bool enabled);
#endif /* end of WASM_ENABLE_GC != 0 */
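
A hedged sketch of how this toggle is likely used: keep reclaim off while objects are being allocated and linked but are not yet reachable from the root set, then re-enable it. Only mem_allocator_enable_heap_reclaim itself is part of this commit; the surrounding scenario is an assumption:

/* Assumed pattern: a collection during wiring could free half-linked
   objects, so reclaim is disabled until the references are in place. */
static void
build_graph_with_deferred_reclaim(mem_allocator_t allocator)
{
    mem_allocator_enable_heap_reclaim(allocator, false);
    /* ... allocate objects and wire up their references ... */
    mem_allocator_enable_heap_reclaim(allocator, true);
}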
#ifdef __cplusplus
}