This commit is contained in:
liang.he 2026-04-17 09:35:35 +08:00 committed by GitHub
commit 167aa1b93d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
15 changed files with 1420 additions and 79 deletions

2
.gitignore vendored
View File

@ -1,3 +1,5 @@
.*
!.gitignore
.cache .cache
.clangd .clangd

View File

@ -1030,6 +1030,24 @@ wasm_runtime_free_internal(void *ptr)
} }
} }
/* Pool-mode-only backend for wasm_runtime_aligned_alloc(): dispatch on the
 * current global memory mode and forward to the pool allocator's aligned
 * allocation entry point. Returns NULL (with an error log) in any other
 * memory mode. */
static inline void *
wasm_runtime_aligned_alloc_internal(unsigned int size, unsigned int alignment)
{
    switch (memory_mode) {
        case MEMORY_MODE_UNKNOWN:
            /* Runtime memory subsystem not initialized yet */
            LOG_ERROR("wasm_runtime_aligned_alloc failed: memory hasn't been "
                      "initialized.\n");
            return NULL;
        case MEMORY_MODE_POOL:
            return mem_allocator_malloc_aligned(pool_allocator, size,
                                                alignment);
        default:
            /* System-allocator / user-allocator modes are unsupported */
            LOG_ERROR("wasm_runtime_aligned_alloc failed: only supported in "
                      "POOL "
                      "memory mode.\n");
            return NULL;
    }
}
void * void *
wasm_runtime_malloc(unsigned int size) wasm_runtime_malloc(unsigned int size)
{ {
@ -1052,6 +1070,35 @@ wasm_runtime_malloc(unsigned int size)
return wasm_runtime_malloc_internal(size); return wasm_runtime_malloc_internal(size);
} }
/**
 * Public entry point for aligned allocation from the runtime memory
 * environment. Normalizes degenerate arguments, then delegates to
 * wasm_runtime_aligned_alloc_internal() (POOL mode only).
 *
 * @param size bytes to allocate; 0 is promoted to `alignment` bytes
 * @param alignment requested alignment; 0 is rejected with NULL
 * @return aligned pointer, or NULL on failure
 */
void *
wasm_runtime_aligned_alloc(unsigned int size, unsigned int alignment)
{
    /* Alignment of zero is meaningless and would make the size-promotion
       below allocate 0 bytes as well - reject it up front. */
    if (alignment == 0) {
        LOG_WARNING(
            "warning: wasm_runtime_aligned_alloc with zero alignment\n");
        return NULL;
    }

    if (size == 0) {
        LOG_WARNING("warning: wasm_runtime_aligned_alloc with size zero\n");
        /* Allocate at least alignment bytes (smallest multiple of alignment) */
        size = alignment;
        /* Under GC verification builds a zero-size request is treated as a
           hard programming error (mirrors wasm_runtime_malloc's behavior). */
#if BH_ENABLE_GC_VERIFY != 0
        exit(-1);
#endif
    }

#if WASM_ENABLE_FUZZ_TEST != 0
    /* Cap request size while fuzzing so the fuzzer cannot OOM the host */
    if (size >= WASM_MEM_ALLOC_MAX_SIZE) {
        LOG_WARNING(
            "warning: wasm_runtime_aligned_alloc with too large size\n");
        return NULL;
    }
#endif

    return wasm_runtime_aligned_alloc_internal(size, alignment);
}
void * void *
wasm_runtime_realloc(void *ptr, unsigned int size) wasm_runtime_realloc(void *ptr, unsigned int size)
{ {

View File

@ -422,6 +422,22 @@ wasm_runtime_destroy(void);
WASM_RUNTIME_API_EXTERN void * WASM_RUNTIME_API_EXTERN void *
wasm_runtime_malloc(unsigned int size); wasm_runtime_malloc(unsigned int size);
/**
* Allocate memory with specified alignment from runtime memory environment.
* This function mimics aligned_alloc() behavior in WebAssembly context.
*
* Note: Only supported in POOL memory mode. Other modes will return NULL.
* Note: Allocated memory cannot be reallocated with wasm_runtime_realloc().
*
* @param size bytes need to allocate (must be multiple of alignment)
* @param alignment alignment requirement (must be power of 2, >= 8, <= page
* size)
*
* @return the pointer to aligned memory allocated, or NULL on failure
*/
WASM_RUNTIME_API_EXTERN void *
wasm_runtime_aligned_alloc(unsigned int size, unsigned int alignment);
/** /**
* Reallocate memory from runtime memory environment * Reallocate memory from runtime memory environment
* *

View File

@ -552,6 +552,21 @@ alloc_hmu_ex(gc_heap_t *heap, gc_size_t size)
return alloc_hmu(heap, size); return alloc_hmu(heap, size);
} }
/* Convert object pointer to HMU pointer - handles aligned allocations */
/**
 * Map a user-visible gc object pointer back to its heap management unit.
 *
 * Aligned allocations store the distance from the HMU to the user pointer
 * in a 32-bit offset field directly before the (magic, data) pair, so the
 * HMU is recovered by subtracting that offset. Plain allocations use the
 * fixed HMU_SIZE + OBJ_PREFIX_SIZE layout.
 *
 * NOTE(review): gc_is_aligned_allocation() reads the 4 bytes before `obj`;
 * for a normal allocation those bytes belong to the HMU header / debug
 * prefix and could in principle match the 0xA11C pattern - confirm the
 * false-positive risk is acceptable for all heap contents.
 */
hmu_t *
obj_to_hmu(gc_object_t obj)
{
    /* Check for aligned allocation magic signature */
    if (gc_is_aligned_allocation(obj)) {
        /* This is an aligned allocation, read offset */
        uint32_t *offset_ptr = ALIGNED_ALLOC_GET_OFFSET_PTR(obj);
        return (hmu_t *)((char *)obj - *offset_ptr);
    }
    /* Normal allocation: standard offset */
    return (hmu_t *)((gc_uint8 *)(obj)-OBJ_PREFIX_SIZE) - 1;
}
#if BH_ENABLE_GC_VERIFY == 0 #if BH_ENABLE_GC_VERIFY == 0
gc_object_t gc_object_t
gc_alloc_vo(void *vheap, gc_size_t size) gc_alloc_vo(void *vheap, gc_size_t size)
@ -566,7 +581,7 @@ gc_alloc_vo_internal(void *vheap, gc_size_t size, const char *file, int line)
gc_size_t tot_size = 0, tot_size_unaligned; gc_size_t tot_size = 0, tot_size_unaligned;
/* hmu header + prefix + obj + suffix */ /* hmu header + prefix + obj + suffix */
tot_size_unaligned = HMU_SIZE + OBJ_PREFIX_SIZE + size + OBJ_SUFFIX_SIZE; tot_size_unaligned = size + OBJ_EXTRA_SIZE;
/* aligned size*/ /* aligned size*/
tot_size = GC_ALIGN_8(tot_size_unaligned); tot_size = GC_ALIGN_8(tot_size_unaligned);
if (tot_size < size) if (tot_size < size)
@ -612,6 +627,124 @@ finish:
return ret; return ret;
} }
/**
 * Allocate a VO (value object) with a caller-specified alignment.
 *
 * Strategy: validate arguments per the aligned_alloc() contract, then
 * over-allocate enough slack to find an aligned address inside the block,
 * and record (offset, magic) metadata in the 8 bytes before the returned
 * pointer so free()/obj_to_hmu() can recover the HMU later.
 *
 * @param vheap the gc heap handle
 * @param size bytes requested (must be a multiple of alignment)
 * @param alignment power of two; values below GC_MIN_ALIGNMENT are rounded
 *        up, values above the system page size are rejected
 * @return aligned object pointer, or NULL on validation/allocation failure
 */
#if BH_ENABLE_GC_VERIFY == 0
gc_object_t
gc_alloc_vo_aligned(void *vheap, gc_size_t size, gc_size_t alignment)
#else
gc_object_t
gc_alloc_vo_aligned_internal(void *vheap, gc_size_t size, gc_size_t alignment,
                             const char *file, int line)
#endif
{
    gc_heap_t *heap = (gc_heap_t *)vheap;
    hmu_t *hmu = NULL;
    gc_object_t ret = NULL;
    gc_size_t tot_size, tot_size_unaligned;
    gc_uint8 *base_obj;
    uintptr_t aligned_addr;
    uint32_t offset, alignment_log2;
    uint32_t max_alignment;

    /* Get system page size for maximum alignment check */
    max_alignment = (uint32_t)os_getpagesize();

    /* Validation */
    if (alignment == 0 || (alignment & (alignment - 1)) != 0) {
        /* Zero or not power of 2 */
        return NULL;
    }
    if (alignment < GC_MIN_ALIGNMENT) {
        alignment = GC_MIN_ALIGNMENT;
    }
    if (alignment > max_alignment) {
        /* Exceeds page size */
        return NULL;
    }
    if (size % alignment != 0) {
        /* POSIX requirement: size must be multiple of alignment */
        return NULL;
    }
    if (size > SIZE_MAX - GC_ALIGNED_SMALLEST_SIZE(alignment)) {
        /* Would overflow */
        return NULL;
    }

#if BH_ENABLE_GC_CORRUPTION_CHECK != 0
    if (heap->is_heap_corrupted) {
        LOG_ERROR("[GC_ERROR]Heap is corrupted, allocate memory failed.\n");
        return NULL;
    }
#endif

    /* Calculate total size needed: payload + normal object overhead +
       8 bytes of (offset, magic) metadata + worst-case alignment slack */
    tot_size_unaligned = size + OBJ_EXTRA_SIZE + ALIGNED_ALLOC_EXTRA_OVERHEAD
                         + (alignment > 8 ? (alignment - 8) : 8);
    tot_size = GC_ALIGN_8(tot_size_unaligned);
    if (tot_size < size) {
        /* Integer overflow */
        return NULL;
    }

    /* Heap is locked from here until `finish`; every early exit below must
       go through the goto so the lock is released. */
    LOCK_HEAP(heap);

    hmu = alloc_hmu_ex(heap, tot_size);
    if (!hmu)
        goto finish;

    bh_assert(hmu_get_size(hmu) >= tot_size);
    /* the total size allocated may be larger than
       the size requested, reset it here */
    tot_size = hmu_get_size(hmu);

#if GC_STAT_DATA != 0
    heap->total_size_allocated += tot_size;
#endif

    /* Get base object pointer */
    base_obj = (gc_uint8 *)hmu + HMU_SIZE + OBJ_PREFIX_SIZE;

    /* Find next aligned address, reserving space for metadata */
    aligned_addr =
        (((uintptr_t)base_obj + ALIGNED_ALLOC_METADATA_SIZE + alignment - 1)
         & ~(uintptr_t)(alignment - 1));
    ret = (gc_object_t)aligned_addr;

    /* Verify we have enough space */
    bh_assert((gc_uint8 *)ret + size + OBJ_SUFFIX_SIZE
              <= (gc_uint8 *)hmu + tot_size);

    /* Calculate offset from HMU to returned pointer */
    offset = (uint32_t)((char *)ret - (char *)hmu);

    /* Calculate log2 of alignment for magic value */
    alignment_log2 = 0;
    while ((1U << alignment_log2) < alignment) {
        alignment_log2++;
    }

    /* Store offset before returned pointer */
    *ALIGNED_ALLOC_GET_OFFSET_PTR(ret) = offset;
    /* Store magic with encoded alignment (0xA11C0000 | log2(alignment)) */
    *ALIGNED_ALLOC_GET_MAGIC_PTR(ret) =
        ALIGNED_ALLOC_MAGIC_VALUE | alignment_log2;

    /* Initialize HMU */
    hmu_set_ut(hmu, HMU_VO);
    hmu_unfree_vo(hmu);

    /* NOTE(review): in verify builds the prefix/suffix are written at the
       standard (unaligned) positions relative to the HMU - confirm the
       verifier tolerates the shifted user pointer of aligned objects. */
#if BH_ENABLE_GC_VERIFY != 0
    hmu_init_prefix_and_suffix(hmu, tot_size, file, line);
#endif

finish:
    UNLOCK_HEAP(heap);
    return ret;
}
#if BH_ENABLE_GC_VERIFY == 0 #if BH_ENABLE_GC_VERIFY == 0
gc_object_t gc_object_t
gc_realloc_vo(void *vheap, void *ptr, gc_size_t size) gc_realloc_vo(void *vheap, void *ptr, gc_size_t size)
@ -644,6 +777,13 @@ gc_realloc_vo_internal(void *vheap, void *ptr, gc_size_t size, const char *file,
} }
#endif #endif
/* Check if this is an aligned allocation - not supported */
if (gc_is_aligned_allocation(obj_old)) {
LOG_ERROR("[GC_ERROR]gc_realloc_vo does not support aligned "
"allocations\n");
return NULL;
}
if (obj_old) { if (obj_old) {
hmu_old = obj_to_hmu(obj_old); hmu_old = obj_to_hmu(obj_old);
tot_size_old = hmu_get_size(hmu_old); tot_size_old = hmu_get_size(hmu_old);

View File

@ -193,6 +193,9 @@ gc_alloc_vo(void *heap, gc_size_t size);
gc_object_t gc_object_t
gc_realloc_vo(void *heap, void *ptr, gc_size_t size); gc_realloc_vo(void *heap, void *ptr, gc_size_t size);
gc_object_t
gc_alloc_vo_aligned(void *heap, gc_size_t size, gc_size_t alignment);
int int
gc_free_vo(void *heap, gc_object_t obj); gc_free_vo(void *heap, gc_object_t obj);
@ -213,6 +216,10 @@ gc_object_t
gc_realloc_vo_internal(void *heap, void *ptr, gc_size_t size, const char *file, gc_realloc_vo_internal(void *heap, void *ptr, gc_size_t size, const char *file,
int line); int line);
gc_object_t
gc_alloc_vo_aligned_internal(void *heap, gc_size_t size, gc_size_t alignment,
const char *file, int line);
int int
gc_free_vo_internal(void *heap, gc_object_t obj, const char *file, int line); gc_free_vo_internal(void *heap, gc_object_t obj, const char *file, int line);
@ -231,6 +238,9 @@ gc_free_wo_internal(void *vheap, void *ptr, const char *file, int line);
#define gc_realloc_vo(heap, ptr, size) \ #define gc_realloc_vo(heap, ptr, size) \
gc_realloc_vo_internal(heap, ptr, size, __FILE__, __LINE__) gc_realloc_vo_internal(heap, ptr, size, __FILE__, __LINE__)
#define gc_alloc_vo_aligned(heap, size, alignment) \
gc_alloc_vo_aligned_internal(heap, size, alignment, __FILE__, __LINE__)
#define gc_free_vo(heap, obj) \ #define gc_free_vo(heap, obj) \
gc_free_vo_internal(heap, obj, __FILE__, __LINE__) gc_free_vo_internal(heap, obj, __FILE__, __LINE__)

View File

@ -81,11 +81,160 @@ hmu_verify(void *vheap, hmu_t *hmu);
#define GC_ALIGN_8(s) (((uint32)(s) + 7) & (uint32)~7) #define GC_ALIGN_8(s) (((uint32)(s) + 7) & (uint32)~7)
#define GC_SMALLEST_SIZE \ /* Minimum alignment for allocations */
GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE + 8) #ifndef GC_MIN_ALIGNMENT
#define GC_GET_REAL_SIZE(x) \ #define GC_MIN_ALIGNMENT 8
GC_ALIGN_8(HMU_SIZE + OBJ_PREFIX_SIZE + OBJ_SUFFIX_SIZE \ #endif
+ (((x) > 8) ? (x) : 8))
/* Smallest allocation size for normal allocations
* The +8 ensures minimum allocation size for tree node structure */
#define GC_SMALLEST_SIZE GC_ALIGN_8(OBJ_EXTRA_SIZE + 8)
#define GC_GET_REAL_SIZE(x) GC_ALIGN_8(OBJ_EXTRA_SIZE + (((x) > 8) ? (x) : 8))
/*
* ============================================================================
* Aligned Memory Allocation
* ============================================================================
*
* This module implements aligned memory allocation similar to C11
* aligned_alloc() and POSIX posix_memalign() for WAMR's garbage collector.
*
* POSIX aligned_alloc() Specification:
* ------------------------------------
* From C11 §7.22.3.1 and POSIX.1-2017:
* void *aligned_alloc(size_t alignment, size_t size);
*
* Requirements:
* - alignment: Must be a valid alignment supported by the implementation,
* typically a power of 2
* - size: Must be an integral multiple of alignment
* - Returns: Pointer aligned to the specified alignment boundary, or NULL
* - Memory must be freed with free() (not realloc'd)
* - Behavior: If size is 0, may return NULL or unique pointer (impl-defined)
*
* IMPORTANT: POSIX does not require realloc() to preserve alignment.
* Calling realloc() on aligned_alloc() memory has undefined behavior.
*
* WAMR Implementation Strategy:
* -----------------------------
* We implement alignment through over-allocation with metadata tracking:
*
* 1. **Validation Phase**:
* - Check alignment is power-of-2, >= 8 bytes, <= system page size
* - Check size is multiple of alignment
* - Return NULL if validation fails
*
* 2. **Over-Allocation**:
* - Allocate (size + alignment + metadata_overhead) bytes
* - Extra space allows us to find an aligned boundary within the block
* - Calculate log2(alignment) for efficient offset storage
*
* 3. **Alignment Adjustment**:
* - Find next aligned address within allocated block
* - Calculate offset from original allocation to aligned address
* - Store offset in metadata for later free() operation
*
 * 4. **Metadata Storage**:
 *    - Store a 4-byte offset field followed by a 4-byte magic marker in the
 *      8 bytes immediately before the user pointer
 *    - Magic word: 0xA11C0000 | log2(alignment); the upper 16 bits (0xA11C)
 *      identify an aligned allocation, the low bits encode the alignment
 *    - The separate 32-bit offset field records the distance from the HMU
 *      to the user pointer so free() can recover the block start
 *    - The magic marker also prevents unsafe realloc() operations
*
* 5. **Realloc Prevention**:
* - gc_realloc_vo_internal() checks for magic marker
* - Returns NULL if realloc attempted on aligned allocation
* - User must manually allocate new memory and copy data
*
* Memory Layout Diagram:
* ----------------------
*
* Low Address High Address
*
* HMU Header Padding Offset Magic Aligned Data Padding
* (4 bytes) (variable)(4 bytes)(4 bytes) (size) (overhead)
*
* 8 bytes
* hmu user_ptr (returned, aligned)
*
* Padding is variable-length to satisfy alignment constraint:
* align_up(HMU_SIZE + ALIGNED_ALLOC_METADATA_SIZE, alignment)
* For alignment >= 12: HMU_SIZE + padding + 8 = alignment
* For alignment < 12: HMU_SIZE + padding + 8 = round_up(12, alignment)
*
* Constraints and Limitations:
* ----------------------------
* - Minimum alignment: 8 bytes (GC_MIN_ALIGNMENT)
* - Maximum alignment: System page size (os_getpagesize(), typically 4KB)
 * - Offset storage: dedicated 32-bit field before the magic marker
* - Realloc support: None - returns NULL (prevents alignment loss)
* - Free support: Full - use mem_allocator_free() / wasm_runtime_free()
* - Thread safety: Protected by LOCK_HEAP/UNLOCK_HEAP
*
* Usage Example:
* --------------
* // Allocate 256 bytes aligned to 64-byte boundary (e.g., for SIMD)
* void *ptr = wasm_runtime_aligned_alloc(256, 64);
* assert((uintptr_t)ptr % 64 == 0); // Guaranteed aligned
*
* // Use the memory...
*
* // Free normally (alignment metadata handled automatically)
* wasm_runtime_free(ptr);
*
* // INVALID: Cannot realloc aligned memory
* void *new_ptr = wasm_runtime_realloc(ptr, 512); // Returns NULL!
*/
/* Aligned allocation constants */
/* Size of offset field before aligned ptr */
#define ALIGNED_ALLOC_OFFSET_SIZE 4
/* Size of magic marker before aligned ptr */
#define ALIGNED_ALLOC_MAGIC_SIZE 4
/* Total: 8 bytes */
#define ALIGNED_ALLOC_METADATA_SIZE \
(ALIGNED_ALLOC_OFFSET_SIZE + ALIGNED_ALLOC_MAGIC_SIZE)
/* Aligned allocation magic markers */
#define ALIGNED_ALLOC_MAGIC_MASK 0xFFFF0000
#define ALIGNED_ALLOC_MAGIC_VALUE 0xA11C0000
/* Get magic pointer from aligned object pointer */
#define ALIGNED_ALLOC_GET_MAGIC_PTR(obj) \
((uint32_t *)((char *)(obj)-ALIGNED_ALLOC_MAGIC_SIZE))
/* Get offset pointer from aligned object pointer */
#define ALIGNED_ALLOC_GET_OFFSET_PTR(obj) \
((uint32_t *)((char *)(obj)-ALIGNED_ALLOC_METADATA_SIZE))
/* Extra overhead for aligned allocations beyond normal OBJ_EXTRA_SIZE */
#define ALIGNED_ALLOC_EXTRA_OVERHEAD ALIGNED_ALLOC_METADATA_SIZE
/* Smallest allocation size for aligned allocations */
#define GC_ALIGNED_SMALLEST_SIZE(alignment) \
GC_ALIGN_8(OBJ_EXTRA_SIZE + ALIGNED_ALLOC_METADATA_SIZE \
+ ((alignment) > 8 ? (alignment - 8) : 8))
/**
 * Test whether a gc_object carries the aligned-allocation marker.
 *
 * Aligned allocations write the magic word (0xA11C0000 | log2(alignment))
 * into the 4 bytes immediately preceding the user pointer; this probe is
 * how realloc refuses aligned blocks and how obj_to_hmu() picks the right
 * decoding.
 *
 * NOTE(review): for a non-aligned object the 4 bytes before it belong to
 * the HMU header / debug prefix, so this read could in principle collide
 * with the 0xA11C pattern - confirm the false-positive risk is acceptable.
 *
 * @param obj the gc_object to check (user-visible pointer)
 * @return true if obj is an aligned allocation, false otherwise
 */
static inline bool
gc_is_aligned_allocation(gc_object_t obj)
{
    uint32_t magic;

    if (obj == NULL)
        return false;

    magic = *ALIGNED_ALLOC_GET_MAGIC_PTR(obj);
    return (magic & ALIGNED_ALLOC_MAGIC_MASK) == ALIGNED_ALLOC_MAGIC_VALUE;
}
/** /**
* hmu bit operation * hmu bit operation
@ -105,16 +254,57 @@ hmu_verify(void *vheap, hmu_t *hmu);
(v) &= ~((((uint32)1 << size) - 1) << offset) (v) &= ~((((uint32)1 << size) - 1) << offset)
#define GETBITS(v, offset, size) \ #define GETBITS(v, offset, size) \
(((v) & (((((uint32)1 << size) - 1) << offset))) >> offset) (((v) & (((((uint32)1 << size) - 1) << offset))) >> offset)
/* clang-format on */
/** /**
* gc object layout definition * gc object layout definition
*
* #### Header Bit Layout
*
* ```
* 31 30 29 28 27 0
*
* UTUT P * Size or Type-Specific Data
*
* ```
*
* #### Bit Fields Breakdown
*
* | Bits | Field | Description |
* | --------- | ----------------------- | -------------------------------------------- |
* | **31-30** | **UT** (Usage Type) | 2 bits for chunk type |
* | **29** | **P** (Previous In Use) | 1 bit indicating if previous chunk is in use |
* | **28** | **Type-specific** | Meaning depends on UT field |
* | **27-0** | **Type-specific** | Size or other data depending on UT |
*
* #### Memory Layout in Heap
*
* ```
*
* HMU Header (4 bytes)
*
* OBJ_PREFIX (if BH_ENABLE_GC_VERIFY)
* - file_name pointer
* - line_no
* - size
* - padding values (for corruption detection)
*
* User Data (aligned to 8 bytes)
* ...
*
* OBJ_SUFFIX (if BH_ENABLE_GC_VERIFY)
* - padding values (for corruption detection)
*
* ```
*/ */
/* clang-format on */
#define HMU_SIZE (sizeof(hmu_t)) #define HMU_SIZE (sizeof(hmu_t))
#define hmu_to_obj(hmu) (gc_object_t)(SKIP_OBJ_PREFIX((hmu_t *)(hmu) + 1)) #define hmu_to_obj(hmu) (gc_object_t)(SKIP_OBJ_PREFIX((hmu_t *)(hmu) + 1))
#define obj_to_hmu(obj) ((hmu_t *)((gc_uint8 *)(obj)-OBJ_PREFIX_SIZE) - 1)
/* obj_to_hmu function - handles both normal and aligned allocations */
hmu_t *
obj_to_hmu(gc_object_t obj);
#define HMU_UT_SIZE 2 #define HMU_UT_SIZE 2
#define HMU_UT_OFFSET 30 #define HMU_UT_OFFSET 30

View File

@ -57,6 +57,24 @@ mem_allocator_free(mem_allocator_t allocator, void *ptr)
gc_free_vo((gc_handle_t)allocator, ptr); gc_free_vo((gc_handle_t)allocator, ptr);
} }
/* Thin adapters from the mem_allocator facade to the EMS gc heap's aligned
   allocator. The verify build threads file/line through for leak and
   corruption reporting; the normal build forwards directly. */
#if BH_ENABLE_GC_VERIFY == 0
void *
mem_allocator_malloc_aligned(mem_allocator_t allocator, uint32_t size,
                             uint32_t alignment)
{
    return gc_alloc_vo_aligned((gc_handle_t)allocator, size, alignment);
}
#else
void *
mem_allocator_malloc_aligned_internal(mem_allocator_t allocator, uint32_t size,
                                      uint32_t alignment, const char *file,
                                      int line)
{
    return gc_alloc_vo_aligned_internal((gc_handle_t)allocator, size, alignment,
                                        file, line);
}
#endif
#if WASM_ENABLE_GC != 0 #if WASM_ENABLE_GC != 0
void * void *
mem_allocator_malloc_with_gc(mem_allocator_t allocator, uint32_t size) mem_allocator_malloc_with_gc(mem_allocator_t allocator, uint32_t size)

View File

@ -46,6 +46,34 @@ mem_allocator_realloc(mem_allocator_t allocator, void *ptr, uint32_t size);
void void
mem_allocator_free(mem_allocator_t allocator, void *ptr); mem_allocator_free(mem_allocator_t allocator, void *ptr);
/* Aligned allocation support */
#ifndef GC_MIN_ALIGNMENT
#define GC_MIN_ALIGNMENT 8
#endif
#if BH_ENABLE_GC_VERIFY == 0
void *
mem_allocator_malloc_aligned(mem_allocator_t allocator, uint32_t size,
uint32_t alignment);
#define mem_allocator_malloc_aligned_internal(allocator, size, alignment, \
file, line) \
mem_allocator_malloc_aligned(allocator, size, alignment)
#else /* BH_ENABLE_GC_VERIFY != 0 */
void *
mem_allocator_malloc_aligned_internal(mem_allocator_t allocator, uint32_t size,
uint32_t alignment, const char *file,
int line);
#define mem_allocator_malloc_aligned(allocator, size, alignment) \
mem_allocator_malloc_aligned_internal(allocator, size, alignment, \
__FILE__, __LINE__)
#endif /* end of BH_ENABLE_GC_VERIFY */
int int
mem_allocator_migrate(mem_allocator_t allocator, char *pool_buf_new, mem_allocator_migrate(mem_allocator_t allocator, char *pool_buf_new,
uint32 pool_buf_size); uint32 pool_buf_size);

View File

@ -74,6 +74,21 @@ endif()
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
FetchContent_MakeAvailable(googletest) FetchContent_MakeAvailable(googletest)
# Fetch CMocka for C unit tests
# DOWNLOAD_EXTRACT_TIMESTAMP was introduced in CMake 3.24 (CMP0135); only
# pass it when the running CMake understands it.
# NOTE(review): the snapshot tarball is fetched without a URL_HASH, so the
# download is not integrity-checked - consider pinning a SHA256.
if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.24")
  FetchContent_Declare(
    cmocka
    URL https://git.cryptomilk.org/projects/cmocka.git/snapshot/cmocka-2.0.1.tar.gz
    DOWNLOAD_EXTRACT_TIMESTAMP ON
  )
else()
  FetchContent_Declare(
    cmocka
    URL https://git.cryptomilk.org/projects/cmocka.git/snapshot/cmocka-2.0.1.tar.gz
  )
endif()
FetchContent_MakeAvailable(cmocka)
include(GoogleTest) include(GoogleTest)
enable_testing() enable_testing()
@ -86,10 +101,10 @@ add_subdirectory(linear-memory-wasm)
add_subdirectory(linear-memory-aot) add_subdirectory(linear-memory-aot)
add_subdirectory(linux-perf) add_subdirectory(linux-perf)
add_subdirectory(gc) add_subdirectory(gc)
add_subdirectory(tid-allocator)
add_subdirectory(unsupported-features) add_subdirectory(unsupported-features)
add_subdirectory(exception-handling) add_subdirectory(exception-handling)
add_subdirectory(running-modes) add_subdirectory(running-modes)
add_subdirectory(mem-alloc)
if(FULL_TEST) if(FULL_TEST)
message(STATUS "FULL_TEST=ON: include llm-enhanced-test") message(STATUS "FULL_TEST=ON: include llm-enhanced-test")

View File

@ -0,0 +1,59 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

cmake_minimum_required(VERSION 3.14)

project(test-mem-alloc)

# Enable test build flag
add_definitions(-DWAMR_BUILD_TEST=1)

# Test-specific feature configuration
set(WAMR_BUILD_AOT 0)
set(WAMR_BUILD_FAST_INTERP 0)
set(WAMR_BUILD_INTERP 1)
set(WAMR_BUILD_JIT 0)
set(WAMR_BUILD_LIBC_WASI 0)

include(../unit_common.cmake)

# Test source files
set(TEST_SOURCES
  test_runner.c
  ${WAMR_RUNTIME_LIB_SOURCE}
)

#
# Create test executable
#

## Normal test executable
add_executable(mem-alloc-test ${TEST_SOURCES})

# Add include directories for mem-alloc internals
target_include_directories(mem-alloc-test PRIVATE
  ${WAMR_ROOT_DIR}/core/shared/mem-alloc
  ${WAMR_ROOT_DIR}/core/shared/mem-alloc/ems
)

## GC test executable
add_executable(mem-alloc-gc-test ${TEST_SOURCES})
target_include_directories(mem-alloc-gc-test PRIVATE
  ${WAMR_ROOT_DIR}/core/shared/mem-alloc
  ${WAMR_ROOT_DIR}/core/shared/mem-alloc/ems
)
# Use target_compile_definitions for preprocessor macros instead of passing
# raw -D flags through target_compile_options: it is generator-portable and
# lets CMake track the definitions properly.
target_compile_definitions(mem-alloc-gc-test PRIVATE
  WAMR_BUILD_GC=1
  WAMR_BUILD_GC_VERIFY=1
)

# Link dependencies
target_link_libraries(mem-alloc-test cmocka::cmocka m)
target_link_libraries(mem-alloc-gc-test cmocka::cmocka m)

# Add to ctest
add_test(NAME mem-alloc-test COMMAND mem-alloc-test)
set_tests_properties(mem-alloc-test PROPERTIES TIMEOUT 60)
add_test(NAME mem-alloc-gc-test COMMAND mem-alloc-gc-test)
set_tests_properties(mem-alloc-gc-test PROPERTIES TIMEOUT 60)

View File

@ -0,0 +1,789 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stdint.h>
#include <string.h>
#include <cmocka.h>
#if WAMR_BUILD_TEST != 1
#error "WAMR_BUILD_TEST must be defined as 1"
#endif
#include "mem_alloc.h"
#include "ems_gc_internal.h"
#include "wasm_export.h"
/* Test helper: true when `ptr` sits on an `alignment`-byte boundary. */
static inline bool
is_aligned(void *ptr, size_t alignment)
{
    uintptr_t addr = (uintptr_t)ptr;

    return (addr % alignment) == 0;
}
/* Test helper: detect the aligned-allocation magic word that the allocator
 * stores in the 4 bytes immediately before the object pointer. */
static inline bool
is_aligned_allocation(gc_object_t obj)
{
    const uint32_t *magic = (const uint32_t *)((const char *)obj - 4);

    return (*magic & ALIGNED_ALLOC_MAGIC_MASK) == ALIGNED_ALLOC_MAGIC_VALUE;
}
/* Regression: a plain (unaligned) allocation keeps working, lands on the
 * default 8-byte boundary, and is not mistaken for an aligned allocation. */
static void
test_normal_alloc_basic(void **state)
{
    char heap_buf[64 * 1024];
    mem_allocator_t allocator;
    void *p;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    p = mem_allocator_malloc(allocator, 128);
    assert_non_null(p);

    /* All heap objects are 8-byte aligned by construction */
    assert_true(is_aligned(p, 8));
    /* No aligned-allocation magic marker must be present */
    assert_false(is_aligned_allocation(p));

    mem_allocator_free(allocator, p);
    mem_allocator_destroy(allocator);
}
/* Test: Valid alignment powers of 2 */
static void
test_aligned_alloc_valid_alignments(void **state)
{
mem_allocator_t allocator;
char heap_buf[128 * 1024];
void *ptr;
allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
assert_non_null(allocator);
/* Test each valid alignment */
int alignments[] = { 8, 16, 32, 64, 128, 256, 512, 1024 };
int num_alignments = sizeof(alignments) / sizeof(alignments[0]);
for (int i = 0; i < num_alignments; i++) {
int align = alignments[i];
/* Allocate with size = multiple of alignment */
ptr = mem_allocator_malloc_aligned(allocator, align * 2, align);
assert_non_null(ptr);
/* Verify alignment */
assert_true(is_aligned(ptr, align));
/* Verify marked as aligned */
assert_true(is_aligned_allocation(ptr));
/* Free */
mem_allocator_free(allocator, ptr);
}
mem_allocator_destroy(allocator);
}
/* Test: Realloc rejects aligned allocations */
/* realloc() on aligned memory is undefined per POSIX; the allocator makes
 * it an explicit NULL return and must leave the original block intact. */
static void
test_realloc_rejects_aligned(void **state)
{
    mem_allocator_t allocator;
    char heap_buf[64 * 1024];
    void *ptr, *new_ptr;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    /* Allocate aligned */
    ptr = mem_allocator_malloc_aligned(allocator, 128, 64);
    assert_non_null(ptr);
    assert_true(is_aligned_allocation(ptr));

    /* Realloc should reject aligned allocation */
    new_ptr = mem_allocator_realloc(allocator, ptr, 256);
    assert_null(new_ptr);

    /* Original pointer should still be valid - free it */
    mem_allocator_free(allocator, ptr);

    mem_allocator_destroy(allocator);
}
/* Regression: realloc of a normal (unaligned) block still grows the block
 * and preserves the original contents. */
static void
test_normal_realloc_works(void **state)
{
    char heap_buf[64 * 1024];
    mem_allocator_t allocator;
    unsigned char *grown;
    void *p;
    int i;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    p = mem_allocator_malloc(allocator, 128);
    assert_non_null(p);

    /* Fill with a recognizable pattern before growing */
    memset(p, 0xAB, 128);

    grown = mem_allocator_realloc(allocator, p, 256);
    assert_non_null(grown);

    /* The first 128 bytes must survive the realloc */
    for (i = 0; i < 128; i++) {
        assert_int_equal(grown[i], 0xAB);
    }

    mem_allocator_free(allocator, grown);
    mem_allocator_destroy(allocator);
}
/* Test: Invalid alignments (not power of 2 or zero) */
static void
test_aligned_alloc_invalid_not_power_of_2(void **state)
{
mem_allocator_t allocator;
char heap_buf[64 * 1024];
void *ptr;
allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
assert_non_null(allocator);
/* These should all fail (zero or not power of 2) */
int invalid_alignments[] = { 0, 3, 5, 7, 9, 15, 17, 100 };
int num_invalid =
sizeof(invalid_alignments) / sizeof(invalid_alignments[0]);
for (int i = 0; i < num_invalid; i++) {
ptr =
mem_allocator_malloc_aligned(allocator, 128, invalid_alignments[i]);
assert_null(ptr);
}
/* Small powers of 2 should succeed (adjusted to GC_MIN_ALIGNMENT) */
ptr = mem_allocator_malloc_aligned(allocator, 8, 1);
assert_non_null(ptr);
mem_allocator_free(allocator, ptr);
ptr = mem_allocator_malloc_aligned(allocator, 8, 2);
assert_non_null(ptr);
mem_allocator_free(allocator, ptr);
ptr = mem_allocator_malloc_aligned(allocator, 8, 4);
assert_non_null(ptr);
mem_allocator_free(allocator, ptr);
mem_allocator_destroy(allocator);
}
/* POSIX rule: size must be an integral multiple of alignment; anything
 * else is rejected with NULL. */
static void
test_aligned_alloc_size_not_multiple(void **state)
{
    char heap_buf[64 * 1024];
    mem_allocator_t allocator;
    void *ok;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    /* 100 and 65 are not multiples of 64 - both must fail */
    assert_null(mem_allocator_malloc_aligned(allocator, 100, 64));
    assert_null(mem_allocator_malloc_aligned(allocator, 65, 64));

    /* 128 is a multiple of 64 - must succeed */
    ok = mem_allocator_malloc_aligned(allocator, 128, 64);
    assert_non_null(ok);
    mem_allocator_free(allocator, ok);

    mem_allocator_destroy(allocator);
}
/* Test: Mixed normal and aligned allocations */
/* Interleave the two allocation kinds and free them out of order to check
 * that the magic-marker detection never confuses one kind for the other. */
static void
test_mixed_alloc_interleaved(void **state)
{
    mem_allocator_t allocator;
    char heap_buf[128 * 1024];
    void *normal1, *aligned1, *normal2, *aligned2;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    /* Allocate: normal -> aligned -> normal -> aligned */
    normal1 = mem_allocator_malloc(allocator, 64);
    assert_non_null(normal1);
    assert_false(is_aligned_allocation(normal1));

    aligned1 = mem_allocator_malloc_aligned(allocator, 128, 64);
    assert_non_null(aligned1);
    assert_true(is_aligned_allocation(aligned1));
    assert_true(is_aligned(aligned1, 64));

    normal2 = mem_allocator_malloc(allocator, 96);
    assert_non_null(normal2);
    assert_false(is_aligned_allocation(normal2));

    aligned2 = mem_allocator_malloc_aligned(allocator, 256, 128);
    assert_non_null(aligned2);
    assert_true(is_aligned_allocation(aligned2));
    assert_true(is_aligned(aligned2, 128));

    /* Free in mixed order */
    mem_allocator_free(allocator, normal1);
    mem_allocator_free(allocator, aligned2);
    mem_allocator_free(allocator, normal2);
    mem_allocator_free(allocator, aligned1);

    mem_allocator_destroy(allocator);
}
/* Test: obj_to_hmu works correctly for both types */
/* obj_to_hmu() must decode the HMU for normal blocks (fixed offset) and
 * aligned blocks (offset read from metadata) and return sane headers. */
static void
test_mixed_obj_to_hmu(void **state)
{
    mem_allocator_t allocator;
    char heap_buf[64 * 1024];
    void *normal, *aligned;
    hmu_t *hmu_normal, *hmu_aligned;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    /* Allocate both types */
    normal = mem_allocator_malloc(allocator, 128);
    assert_non_null(normal);

    aligned = mem_allocator_malloc_aligned(allocator, 128, 64);
    assert_non_null(aligned);

    /* Get HMU pointers */
    hmu_normal = obj_to_hmu(normal);
    hmu_aligned = obj_to_hmu(aligned);
    assert_non_null(hmu_normal);
    assert_non_null(hmu_aligned);

    /* Both should have HMU_VO type */
    assert_int_equal(hmu_get_ut(hmu_normal), HMU_VO);
    assert_int_equal(hmu_get_ut(hmu_aligned), HMU_VO);

    /* Sizes should be reasonable */
    assert_true(hmu_get_size(hmu_normal) >= 128);
    assert_true(hmu_get_size(hmu_aligned) >= 128);

    /* Free both */
    mem_allocator_free(allocator, normal);
    mem_allocator_free(allocator, aligned);

    mem_allocator_destroy(allocator);
}
/* Test: Many aligned allocations */
/* Stress: keep allocating aligned blocks (alternating 64/32-byte
 * alignment) until the heap runs out, then free everything. */
static void
test_aligned_alloc_many(void **state)
{
    mem_allocator_t allocator;
    char heap_buf[512 * 1024];
    void *ptrs[100];
    int count = 0;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    /* Allocate as many as possible */
    for (int i = 0; i < 100; i++) {
        /* every 4th request asks for the stricter 64-byte alignment */
        int align = (i % 4 == 0) ? 64 : 32;
        ptrs[i] = mem_allocator_malloc_aligned(allocator, align * 2, align);
        if (ptrs[i]) {
            assert_true(is_aligned(ptrs[i], align));
            count++;
        }
        else {
            break;
        }
    }

    assert_true(count > 10); /* At least some should succeed */

    /* Free all */
    for (int i = 0; i < count; i++) {
        mem_allocator_free(allocator, ptrs[i]);
    }

    mem_allocator_destroy(allocator);
}
/* Test: Many mixed allocations */
/* Stress: alternate normal and aligned allocations, then free in reverse
 * order to exercise coalescing across both block kinds. */
static void
test_mixed_alloc_many(void **state)
{
    mem_allocator_t allocator;
    char heap_buf[512 * 1024];
    void *ptrs[200];
    int count = 0;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    /* Alternate normal and aligned */
    for (int i = 0; i < 200; i++) {
        if (i % 2 == 0) {
            /* Normal allocation */
            ptrs[i] = mem_allocator_malloc(allocator, 64);
        }
        else {
            /* Aligned allocation */
            ptrs[i] = mem_allocator_malloc_aligned(allocator, 64, 32);
        }
        if (ptrs[i]) {
            count++;
        }
        else {
            break;
        }
    }

    assert_true(count > 20);

    /* Free in reverse order */
    for (int i = count - 1; i >= 0; i--) {
        mem_allocator_free(allocator, ptrs[i]);
    }

    mem_allocator_destroy(allocator);
}
/* Test: free a .ro data
 *
 * Documents current (lenient) behavior when freeing a pointer the allocator
 * never handed out. The offending call is intentionally disabled (see FIXME);
 * today the test only verifies that create/destroy stay healthy.
 */
static void
test_free_ro_data(void **state)
{
    mem_allocator_t allocator;
    char heap_buf[64 * 1024];

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    /* Freeing a .ro data pointer should not crash */
    const char *ro_str = "This is a read-only string.";
    /* silence -Wunused-variable while the free call below stays disabled */
    (void)ro_str;
    // FIXME: This case should trigger an exception because the pointer is not
    // allocated by the allocator, but currently it just does nothing. We should
    // add a check in mem_allocator_free to detect this case and return an
    // error. mem_allocator_free(allocator, (void *)ro_str);

    mem_allocator_destroy(allocator);
}
/* Test: free a freed pointer */
/* Deliberate double/triple free: the allocator is expected to tolerate
 * repeated frees of the same block without crashing.
 * NOTE(review): this relies on allocator-internal tolerance, not on C
 * semantics - double free of libc memory would be undefined behavior. */
static void
test_free_freed_pointer(void **state)
{
    mem_allocator_t allocator;
    char heap_buf[64 * 1024];
    void *ptr;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    ptr = mem_allocator_malloc(allocator, 64);
    assert_non_null(ptr);
    mem_allocator_free(allocator, ptr);

    /* Freeing the same pointer again should not crash */
    mem_allocator_free(allocator, ptr);
    mem_allocator_free(allocator, ptr);

    mem_allocator_destroy(allocator);
}
/* Test: free a freed pointer from aligned-alloc */
/* Same double-free tolerance check as above, but for a block that came
 * from the aligned allocator (metadata-decoded free path). */
static void
test_free_freed_pointer_aligned(void **state)
{
    mem_allocator_t allocator;
    char heap_buf[64 * 1024];
    void *ptr;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    ptr = mem_allocator_malloc_aligned(allocator, 128, 64);
    assert_non_null(ptr);
    mem_allocator_free(allocator, ptr);

    /* Freeing the same pointer again should not crash */
    mem_allocator_free(allocator, ptr);
    mem_allocator_free(allocator, ptr);
    mem_allocator_free(allocator, ptr);

    mem_allocator_destroy(allocator);
}
/* Test: wasm_runtime_aligned_alloc with valid inputs in POOL mode.
 * Fix: the malloc() result for the pool buffer was passed unchecked into
 * wasm_runtime_full_init; a host OOM now fails the test explicitly. */
static void
test_wasm_runtime_aligned_alloc_valid(void **state)
{
    RuntimeInitArgs init_args;
    void *ptr;

    (void)state;

    memset(&init_args, 0, sizeof(RuntimeInitArgs));
    init_args.mem_alloc_type = Alloc_With_Pool;
    init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024);
    /* Guard against host OOM: never hand a NULL pool buffer to full_init. */
    assert_non_null(init_args.mem_alloc_option.pool.heap_buf);
    init_args.mem_alloc_option.pool.heap_size = 256 * 1024;
    assert_true(wasm_runtime_full_init(&init_args));

    /* A valid request must return a non-NULL, correctly aligned pointer. */
    ptr = wasm_runtime_aligned_alloc(128, 64);
    assert_non_null(ptr);
    assert_true(is_aligned(ptr, 64));

    /* Aligned blocks are released through the ordinary free path. */
    wasm_runtime_free(ptr);

    wasm_runtime_destroy();
    free(init_args.mem_alloc_option.pool.heap_buf);
}
/* Test: wasm_runtime_aligned_alloc with zero size.
 * Fix: the malloc() result for the pool buffer was passed unchecked into
 * wasm_runtime_full_init; a host OOM now fails the test explicitly. */
static void
test_wasm_runtime_aligned_alloc_zero_size(void **state)
{
    RuntimeInitArgs init_args;
    void *ptr;

    (void)state;

    memset(&init_args, 0, sizeof(RuntimeInitArgs));
    init_args.mem_alloc_type = Alloc_With_Pool;
    init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024);
    /* Guard against host OOM: never hand a NULL pool buffer to full_init. */
    assert_non_null(init_args.mem_alloc_option.pool.heap_buf);
    init_args.mem_alloc_option.pool.heap_size = 256 * 1024;
    assert_true(wasm_runtime_full_init(&init_args));

    /* size == 0 is promoted to `alignment` bytes inside the runtime, so
     * the call succeeds and the result is still aligned. */
    ptr = wasm_runtime_aligned_alloc(0, 64);
    assert_non_null(ptr);
    assert_true(is_aligned(ptr, 64));
    wasm_runtime_free(ptr);

    wasm_runtime_destroy();
    free(init_args.mem_alloc_option.pool.heap_buf);
}
/* Test: wasm_runtime_aligned_alloc with zero alignment must fail.
 * Fix: the malloc() result for the pool buffer was passed unchecked into
 * wasm_runtime_full_init; a host OOM now fails the test explicitly. */
static void
test_wasm_runtime_aligned_alloc_zero_alignment(void **state)
{
    RuntimeInitArgs init_args;
    void *ptr;

    (void)state;

    memset(&init_args, 0, sizeof(RuntimeInitArgs));
    init_args.mem_alloc_type = Alloc_With_Pool;
    init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024);
    /* Guard against host OOM: never hand a NULL pool buffer to full_init. */
    assert_non_null(init_args.mem_alloc_option.pool.heap_buf);
    init_args.mem_alloc_option.pool.heap_size = 256 * 1024;
    assert_true(wasm_runtime_full_init(&init_args));

    /* alignment == 0 is rejected and must yield NULL. */
    ptr = wasm_runtime_aligned_alloc(128, 0);
    assert_null(ptr);

    wasm_runtime_destroy();
    free(init_args.mem_alloc_option.pool.heap_buf);
}
/* Test: in SYSTEM_ALLOCATOR mode, aligned allocation is unsupported and
 * must return NULL. */
static void
test_wasm_runtime_aligned_alloc_system_mode(void **state)
{
    RuntimeInitArgs args;

    (void)state;

    memset(&args, 0, sizeof(args));
    args.mem_alloc_type = Alloc_With_System_Allocator;
    assert_true(wasm_runtime_full_init(&args));

    /* Aligned allocation is only implemented for POOL mode. */
    assert_null(wasm_runtime_aligned_alloc(128, 64));

    wasm_runtime_destroy();
}
/* Test: wasm_runtime_realloc must reject blocks that came from
 * wasm_runtime_aligned_alloc.
 * Fix: the malloc() result for the pool buffer was passed unchecked into
 * wasm_runtime_full_init; a host OOM now fails the test explicitly. */
static void
test_wasm_runtime_realloc_rejects_aligned(void **state)
{
    RuntimeInitArgs init_args;
    void *ptr, *new_ptr;

    (void)state;

    memset(&init_args, 0, sizeof(RuntimeInitArgs));
    init_args.mem_alloc_type = Alloc_With_Pool;
    init_args.mem_alloc_option.pool.heap_buf = malloc(256 * 1024);
    /* Guard against host OOM: never hand a NULL pool buffer to full_init. */
    assert_non_null(init_args.mem_alloc_option.pool.heap_buf);
    init_args.mem_alloc_option.pool.heap_size = 256 * 1024;
    assert_true(wasm_runtime_full_init(&init_args));

    /* Allocate with alignment... */
    ptr = wasm_runtime_aligned_alloc(128, 64);
    assert_non_null(ptr);

    /* ...then realloc must refuse and return NULL... */
    new_ptr = wasm_runtime_realloc(ptr, 256);
    assert_null(new_ptr);

    /* ...while the original block stays valid and freeable. */
    wasm_runtime_free(ptr);

    wasm_runtime_destroy();
    free(init_args.mem_alloc_option.pool.heap_buf);
}
/* Test: wasm_runtime_aligned_alloc across a range of power-of-two
 * alignments.
 * Fix: the malloc() result for the pool buffer was passed unchecked into
 * wasm_runtime_full_init; a host OOM now fails the test explicitly. */
static void
test_wasm_runtime_aligned_alloc_multiple_alignments(void **state)
{
    RuntimeInitArgs init_args;
    int alignments[] = { 8, 16, 32, 64, 128, 256 };
    int num_alignments = sizeof(alignments) / sizeof(alignments[0]);

    (void)state;

    memset(&init_args, 0, sizeof(RuntimeInitArgs));
    init_args.mem_alloc_type = Alloc_With_Pool;
    init_args.mem_alloc_option.pool.heap_buf = malloc(512 * 1024);
    /* Guard against host OOM: never hand a NULL pool buffer to full_init. */
    assert_non_null(init_args.mem_alloc_option.pool.heap_buf);
    init_args.mem_alloc_option.pool.heap_size = 512 * 1024;
    assert_true(wasm_runtime_full_init(&init_args));

    /* size is align * 2 so it is always a multiple of the alignment. */
    for (int i = 0; i < num_alignments; i++) {
        int align = alignments[i];
        void *ptr = wasm_runtime_aligned_alloc(align * 2, align);
        assert_non_null(ptr);
        assert_true(is_aligned(ptr, align));
        wasm_runtime_free(ptr);
    }

    wasm_runtime_destroy();
    free(init_args.mem_alloc_option.pool.heap_buf);
}
/* Test: plain allocation with huge sizes (near the upper limit).
 * Fix: the 1 MB heap buffer was an automatic (stack) array, which can
 * overflow small thread stacks; it is now static (CERT MEM05-C). */
static void
test_normal_alloc_huge_size(void **state)
{
    /* static: avoid a 1 MB automatic array on the stack */
    static char heap_buf[1024 * 1024]; /* 1MB heap */
    mem_allocator_t allocator;
    void *ptr;
    size_t huge_size;

    (void)state;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    /* Request most of the heap; success depends on allocator overhead
     * and internal fragmentation, so both outcomes are acceptable. */
    huge_size = sizeof(heap_buf) - 4096; /* leave some overhead */
    ptr = mem_allocator_malloc(allocator, huge_size);
    if (ptr) {
        assert_true(is_aligned(ptr, 8));
        mem_allocator_free(allocator, ptr);
    }

    /* A request near SIZE_MAX must fail gracefully, never crash. */
    huge_size = SIZE_MAX - 1024;
    ptr = mem_allocator_malloc(allocator, huge_size);
    assert_null(ptr);

    mem_allocator_destroy(allocator);
}
/* Test: aligned allocation with huge sizes (near the upper limit).
 * Fix: the 1 MB heap buffer was an automatic (stack) array, which can
 * overflow small thread stacks; it is now static (CERT MEM05-C). */
static void
test_aligned_alloc_huge_size(void **state)
{
    /* static: avoid a 1 MB automatic array on the stack */
    static char heap_buf[1024 * 1024]; /* 1MB heap */
    mem_allocator_t allocator;
    void *ptr;
    size_t huge_size;

    (void)state;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    /* Request roughly half of the heap with a large alignment; success
     * depends on alignment overhead, so both outcomes are acceptable. */
    huge_size = 512 * 1024; /* size must be a multiple of the alignment */
    ptr = mem_allocator_malloc_aligned(allocator, huge_size, 512);
    if (ptr) {
        assert_true(is_aligned(ptr, 512));
        mem_allocator_free(allocator, ptr);
    }

    /* An extreme request must fail gracefully, never crash. */
    huge_size = (SIZE_MAX / 2) & ~(size_t)4095; /* aligned to 4096 */
    ptr = mem_allocator_malloc_aligned(allocator, huge_size, 4096);
    assert_null(ptr);

    mem_allocator_destroy(allocator);
}
/* Test: keep making plain allocations until the heap reports OOM, then
 * verify the allocator fully recovers after everything is released. */
static void
test_normal_alloc_until_oom(void **state)
{
    char heap_buf[256 * 1024];
    void *blocks[1000];
    int n = 0;
    mem_allocator_t allocator;

    (void)state;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    /* Issue 1 KB requests until the allocator runs dry. */
    while (n < 1000) {
        void *p = mem_allocator_malloc(allocator, 1024);
        if (p == NULL) {
            break; /* OOM reached - this is expected */
        }
        blocks[n++] = p;
    }

    /* A 256 KB heap fits some, but never all 1000, 1 KB blocks. */
    assert_true(n > 10);
    assert_true(n < 1000);

    for (int i = 0; i < n; i++) {
        mem_allocator_free(allocator, blocks[i]);
    }

    /* After a full release the allocator must serve requests again. */
    void *p = mem_allocator_malloc(allocator, 1024);
    assert_non_null(p);
    mem_allocator_free(allocator, p);

    mem_allocator_destroy(allocator);
}
/* Test: keep making aligned allocations until the heap reports OOM, then
 * verify the allocator fully recovers after everything is released.
 * Fix: the 512 KB heap buffer was an automatic (stack) array, which can
 * overflow small thread stacks; it is now static (CERT MEM05-C). */
static void
test_aligned_alloc_until_oom(void **state)
{
    /* static: avoid a 512 KB automatic array on the stack */
    static char heap_buf[512 * 1024];
    void *ptrs[500];
    int count = 0;
    mem_allocator_t allocator;

    (void)state;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    /* Alternate 64- and 128-byte alignments until the heap runs dry. */
    for (int i = 0; i < 500; i++) {
        int align = (i % 2 == 0) ? 64 : 128;
        ptrs[i] = mem_allocator_malloc_aligned(allocator, align * 20, align);
        if (!ptrs[i]) {
            break; /* OOM reached - this is expected */
        }
        assert_true(is_aligned(ptrs[i], align));
        count++;
    }

    /* Some, but not all, requests must have succeeded. */
    assert_true(count > 5);
    assert_true(count < 500);

    for (int i = 0; i < count; i++) {
        mem_allocator_free(allocator, ptrs[i]);
    }

    /* After a full release the allocator must serve requests again. */
    void *ptr = mem_allocator_malloc_aligned(allocator, 256, 64);
    assert_non_null(ptr);
    mem_allocator_free(allocator, ptr);

    mem_allocator_destroy(allocator);
}
/* Test: interleave plain and aligned allocations until OOM, free in a
 * staggered order, and verify the allocator still works afterwards. */
static void
test_mixed_alloc_until_oom(void **state)
{
    char heap_buf[128 * 1024];
    void *blocks[1000];
    int n = 0;
    mem_allocator_t allocator;

    (void)state;

    allocator = mem_allocator_create(heap_buf, sizeof(heap_buf));
    assert_non_null(allocator);

    /* Every third request is 64-byte aligned; the rest are plain. */
    while (n < 1000) {
        bool want_aligned = (n % 3 == 0);
        void *p = want_aligned
                      ? mem_allocator_malloc_aligned(allocator, 128, 64)
                      : mem_allocator_malloc(allocator, 512);
        if (p == NULL) {
            break; /* OOM reached */
        }
        if (want_aligned) {
            assert_true(is_aligned(p, 64));
        }
        blocks[n++] = p;
    }

    /* Some, but not all, requests must have succeeded. */
    assert_true(n > 20);
    assert_true(n < 1000);

    /* Release even-indexed blocks first, then the odd-indexed ones. */
    for (int i = 0; i < n; i += 2) {
        mem_allocator_free(allocator, blocks[i]);
    }
    for (int i = 1; i < n; i += 2) {
        mem_allocator_free(allocator, blocks[i]);
    }

    /* Both request kinds must still work after OOM and full release. */
    void *plain = mem_allocator_malloc(allocator, 1024);
    void *aligned = mem_allocator_malloc_aligned(allocator, 128, 64);
    assert_non_null(plain);
    assert_non_null(aligned);
    mem_allocator_free(allocator, plain);
    mem_allocator_free(allocator, aligned);

    mem_allocator_destroy(allocator);
}

View File

@ -0,0 +1,43 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stdint.h>
#include <cmocka.h>
/* Include test implementations */
#include "mem_alloc_test.c"
/* Entry point of the mem_alloc unit-test binary: registers every test
 * defined in mem_alloc_test.c (included above) with cmocka and runs them
 * as a single group. Returns the number of failed tests (0 on success). */
int
main(void)
{
    const struct CMUnitTest tests[] = {
        /* Basic allocator behavior and alignment validation. */
        cmocka_unit_test(test_normal_alloc_basic),
        cmocka_unit_test(test_aligned_alloc_valid_alignments),
        cmocka_unit_test(test_realloc_rejects_aligned),
        cmocka_unit_test(test_normal_realloc_works),
        cmocka_unit_test(test_aligned_alloc_invalid_not_power_of_2),
        cmocka_unit_test(test_aligned_alloc_size_not_multiple),
        /* Mixed / bulk allocation patterns. */
        cmocka_unit_test(test_mixed_alloc_interleaved),
        cmocka_unit_test(test_mixed_obj_to_hmu),
        cmocka_unit_test(test_aligned_alloc_many),
        cmocka_unit_test(test_mixed_alloc_many),
        /* Misuse of free(): double free, foreign pointers. */
        cmocka_unit_test(test_free_freed_pointer),
        cmocka_unit_test(test_free_freed_pointer_aligned),
        cmocka_unit_test(test_free_ro_data),
        /* wasm_runtime_aligned_alloc public API (POOL vs system mode). */
        cmocka_unit_test(test_wasm_runtime_aligned_alloc_valid),
        cmocka_unit_test(test_wasm_runtime_aligned_alloc_zero_size),
        cmocka_unit_test(test_wasm_runtime_aligned_alloc_zero_alignment),
        cmocka_unit_test(test_wasm_runtime_aligned_alloc_system_mode),
        cmocka_unit_test(test_wasm_runtime_realloc_rejects_aligned),
        cmocka_unit_test(test_wasm_runtime_aligned_alloc_multiple_alignments),
        /* Stress: huge sizes and allocate-until-OOM scenarios. */
        cmocka_unit_test(test_normal_alloc_huge_size),
        cmocka_unit_test(test_aligned_alloc_huge_size),
        cmocka_unit_test(test_normal_alloc_until_oom),
        cmocka_unit_test(test_aligned_alloc_until_oom),
        cmocka_unit_test(test_mixed_alloc_until_oom),
    };
    /* No group-level setup/teardown: each test creates and destroys its
     * own allocator or runtime instance. */
    return cmocka_run_group_tests(tests, NULL, NULL);
}

View File

@ -8,6 +8,9 @@
#include "bh_platform.h" #include "bh_platform.h"
// FIXME: Resolve memory leak in bh_queue_test_suite.
// It includes release created queue and messages.
class bh_queue_test_suite : public testing::Test class bh_queue_test_suite : public testing::Test
{ {
protected: protected:
@ -87,6 +90,14 @@ enum {
// If RES_CMP == 1, the function bh_queue_enter_loop_run run error. // If RES_CMP == 1, the function bh_queue_enter_loop_run run error.
int RES_CMP = 0; int RES_CMP = 0;
/* Don't touch .ro data in msg body */
static void
local_ro_msg_body_cleaner(void *body)
{
(void)body;
return;
}
TEST_F(bh_queue_test_suite, bh_queue_create) TEST_F(bh_queue_test_suite, bh_queue_create)
{ {
EXPECT_NE(nullptr, bh_queue_create()); EXPECT_NE(nullptr, bh_queue_create());
@ -111,33 +122,44 @@ TEST_F(bh_queue_test_suite, bh_message_payload)
{ {
bh_message_t msg_ptr; bh_message_t msg_ptr;
msg_ptr = bh_new_msg(RESTFUL_REQUEST, (void *)"test_msg_body", msg_ptr =
sizeof("test_msg_body"), nullptr); bh_new_msg(RESTFUL_REQUEST, (void *)"test_msg_body",
sizeof("test_msg_body"), (void *)local_ro_msg_body_cleaner);
EXPECT_EQ("test_msg_body", bh_message_payload(msg_ptr)); EXPECT_EQ("test_msg_body", bh_message_payload(msg_ptr));
bh_free_msg(msg_ptr);
} }
TEST_F(bh_queue_test_suite, bh_message_payload_len) TEST_F(bh_queue_test_suite, bh_message_payload_len)
{ {
bh_message_t msg_ptr; bh_message_t msg_ptr;
msg_ptr = bh_new_msg(RESTFUL_REQUEST, (void *)"test_msg_body", msg_ptr =
sizeof("test_msg_body"), nullptr); bh_new_msg(RESTFUL_REQUEST, (void *)"test_msg_body",
sizeof("test_msg_body"), (void *)local_ro_msg_body_cleaner);
EXPECT_EQ(sizeof("test_msg_body"), bh_message_payload_len(msg_ptr)); EXPECT_EQ(sizeof("test_msg_body"), bh_message_payload_len(msg_ptr));
bh_free_msg(msg_ptr);
} }
TEST_F(bh_queue_test_suite, bh_message_type) TEST_F(bh_queue_test_suite, bh_message_type)
{ {
bh_message_t msg_ptr; bh_message_t msg_ptr;
msg_ptr = bh_new_msg(RESTFUL_REQUEST, (void *)"test_msg_body", msg_ptr =
sizeof("test_msg_body"), nullptr); bh_new_msg(RESTFUL_REQUEST, (void *)"test_msg_body",
sizeof("test_msg_body"), (void *)local_ro_msg_body_cleaner);
EXPECT_EQ(RESTFUL_REQUEST, bh_message_type(msg_ptr)); EXPECT_EQ(RESTFUL_REQUEST, bh_message_type(msg_ptr));
bh_free_msg(msg_ptr);
} }
TEST_F(bh_queue_test_suite, bh_new_msg) TEST_F(bh_queue_test_suite, bh_new_msg)
{ {
EXPECT_NE(nullptr, bh_new_msg(RESTFUL_REQUEST, (void *)"test_msg_body", bh_message_t msg_ptr =
sizeof("test_msg_body"), nullptr)); bh_new_msg(RESTFUL_REQUEST, (void *)"test_msg_body",
sizeof("test_msg_body"), (void *)local_ro_msg_body_cleaner);
EXPECT_NE(nullptr, msg_ptr);
bh_free_msg(msg_ptr);
} }
void void
@ -215,11 +237,14 @@ TEST_F(bh_queue_test_suite, bh_queue_get_message_count)
bh_message_t msg_ptr; bh_message_t msg_ptr;
bh_queue *queue_ptr = bh_queue_create(); bh_queue *queue_ptr = bh_queue_create();
// Normally.
msg_ptr = bh_new_msg(RESTFUL_REQUEST, (void *)"test_msg_body",
sizeof("test_msg_body"), nullptr);
for (i = 1; i <= 20; i++) { for (i = 1; i <= 20; i++) {
bh_post_msg2(queue_ptr, msg_ptr); msg_ptr = bh_new_msg(RESTFUL_REQUEST, (void *)"test_msg_body",
sizeof("test_msg_body"),
(void *)local_ro_msg_body_cleaner);
EXPECT_NE(nullptr, msg_ptr);
bool post_result = bh_post_msg2(queue_ptr, msg_ptr);
EXPECT_EQ(true, post_result);
} }
i = i - 1; i = i - 1;
// The count of msg is less than queue_ptr->max. // The count of msg is less than queue_ptr->max.
@ -227,7 +252,21 @@ TEST_F(bh_queue_test_suite, bh_queue_get_message_count)
// The count of msg is more than queue_ptr->max. // The count of msg is more than queue_ptr->max.
for (j = 1; j <= 60; j++) { for (j = 1; j <= 60; j++) {
bh_post_msg2(queue_ptr, msg_ptr); msg_ptr = bh_new_msg(RESTFUL_REQUEST, (void *)"test_msg_body",
sizeof("test_msg_body"),
(void *)local_ro_msg_body_cleaner);
EXPECT_NE(nullptr, msg_ptr);
bool post_result = bh_post_msg2(queue_ptr, msg_ptr);
// The first 30 messages should be posted successfully, and the rest
// should be dropped.
if (j <= 30) {
EXPECT_EQ(true, post_result);
}
else {
EXPECT_EQ(false, post_result);
}
} }
j = j - 1; j = j - 1;
EXPECT_EQ(queue_ptr->max, bh_queue_get_message_count(queue_ptr)); EXPECT_EQ(queue_ptr->max, bh_queue_get_message_count(queue_ptr));
@ -235,6 +274,8 @@ TEST_F(bh_queue_test_suite, bh_queue_get_message_count)
// Illegal parameters. // Illegal parameters.
EXPECT_EQ(0, bh_queue_get_message_count(nullptr)); EXPECT_EQ(0, bh_queue_get_message_count(nullptr));
bh_queue_destroy(queue_ptr);
} }
void void

View File

@ -1,35 +0,0 @@
# Copyright (C) 2023 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
cmake_minimum_required (VERSION 3.14)
project (tid_allocator_tests)
if (NOT DEFINED WAMR_BUILD_LIB_WASI_THREADS)
set (WAMR_BUILD_LIB_WASI_THREADS 1)
endif ()
if (NOT DEFINED WAMR_BUILD_INTERP)
set (WAMR_BUILD_INTERP 1)
endif ()
include (../unit_common.cmake)
add_library (tid_allocator_vmlib ${WAMR_RUNTIME_LIB_SOURCE})
add_library (wamr_gtest_main main.cpp)
target_link_libraries (wamr_gtest_main PUBLIC gtest tid_allocator_vmlib)
function (create_wamr_unit_test test_name)
set (sources ${ARGN})
add_executable (${test_name} ${sources})
target_link_libraries (
${test_name}
wamr_gtest_main
tid_allocator_vmlib
${LLVM_AVAILABLE_LIBS}
)
gtest_discover_tests (${test_name})
endfunction ()
include (${IWASM_DIR}/libraries/lib-wasi-threads/unit-test/lib_wasi_threads_unit_tests.cmake)

View File

@ -1,22 +0,0 @@
/*
* Copyright (C) 2023 Amazon.com Inc. or its affiliates. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <gtest/gtest.h>
#include "wasm_runtime_common.h"
int
main(int argc, char **argv)
{
::testing::InitGoogleTest(&argc, argv);
if (!wasm_runtime_init()) {
return -1;
}
int ret = RUN_ALL_TESTS();
wasm_runtime_destroy();
return ret;
}