Marcin Kolny 2024-05-13 04:03:38 +01:00 committed by GitHub
parent c85bada2a9
commit fe5e7a9981
26 changed files with 521 additions and 294 deletions


@ -566,9 +566,7 @@ jobs:
test_option: $GC_TEST_OPTIONS test_option: $GC_TEST_OPTIONS
- running_mode: "multi-tier-jit" - running_mode: "multi-tier-jit"
test_option: $GC_TEST_OPTIONS test_option: $GC_TEST_OPTIONS
# aot, fast-interp, fast-jit, llvm-jit, multi-tier-jit don't support Memory64 # fast-interp, fast-jit, llvm-jit, multi-tier-jit don't support Memory64
- running_mode: "aot"
test_option: $MEMORY64_TEST_OPTIONS
- running_mode: "fast-interp" - running_mode: "fast-interp"
test_option: $MEMORY64_TEST_OPTIONS test_option: $MEMORY64_TEST_OPTIONS
- running_mode: "fast-jit" - running_mode: "fast-jit"
@ -616,6 +614,7 @@ jobs:
if: > if: >
((matrix.test_option == '$DEFAULT_TEST_OPTIONS' || matrix.test_option == '$THREADS_TEST_OPTIONS' ((matrix.test_option == '$DEFAULT_TEST_OPTIONS' || matrix.test_option == '$THREADS_TEST_OPTIONS'
|| matrix.test_option == '$WASI_TEST_OPTIONS' || matrix.test_option == '$GC_TEST_OPTIONS') || matrix.test_option == '$WASI_TEST_OPTIONS' || matrix.test_option == '$GC_TEST_OPTIONS')
&& matrix.test_option != '$MEMORY64_TEST_OPTIONS'
&& matrix.running_mode != 'fast-jit' && matrix.running_mode != 'jit' && matrix.running_mode != 'multi-tier-jit') && matrix.running_mode != 'fast-jit' && matrix.running_mode != 'jit' && matrix.running_mode != 'multi-tier-jit')
run: echo "TEST_ON_X86_32=true" >> $GITHUB_ENV run: echo "TEST_ON_X86_32=true" >> $GITHUB_ENV


@ -9,6 +9,7 @@
#include "aot_reloc.h" #include "aot_reloc.h"
#include "../common/wasm_runtime_common.h" #include "../common/wasm_runtime_common.h"
#include "../common/wasm_native.h" #include "../common/wasm_native.h"
#include "../common/wasm_loader_common.h"
#include "../compilation/aot.h" #include "../compilation/aot.h"
#if WASM_ENABLE_DEBUG_AOT != 0 #if WASM_ENABLE_DEBUG_AOT != 0
@ -1043,6 +1044,12 @@ load_memory_info(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
for (i = 0; i < module->memory_count; i++) { for (i = 0; i < module->memory_count; i++) {
read_uint32(buf, buf_end, module->memories[i].memory_flags); read_uint32(buf, buf_end, module->memories[i].memory_flags);
if (!wasm_memory_check_flags(module->memories[i].memory_flags,
error_buf, error_buf_size, true)) {
return false;
}
read_uint32(buf, buf_end, module->memories[i].num_bytes_per_page); read_uint32(buf, buf_end, module->memories[i].num_bytes_per_page);
read_uint32(buf, buf_end, module->memories[i].mem_init_page_count); read_uint32(buf, buf_end, module->memories[i].mem_init_page_count);
read_uint32(buf, buf_end, module->memories[i].mem_max_page_count); read_uint32(buf, buf_end, module->memories[i].mem_max_page_count);
@ -3634,6 +3641,21 @@ fail:
return ret; return ret;
} }
#if WASM_ENABLE_MEMORY64 != 0
static bool
has_module_memory64(AOTModule *module)
{
/* TODO: multi-memories for now assuming the memory idx type is consistent
* across multi-memories */
if (module->import_memory_count > 0)
return !!(module->import_memories[0].memory_flags & MEMORY64_FLAG);
else if (module->memory_count > 0)
return !!(module->memories[0].memory_flags & MEMORY64_FLAG);
return false;
}
#endif
static bool static bool
load_from_sections(AOTModule *module, AOTSection *sections, load_from_sections(AOTModule *module, AOTSection *sections,
bool is_load_from_file_buf, char *error_buf, bool is_load_from_file_buf, char *error_buf,
@ -3645,6 +3667,7 @@ load_from_sections(AOTModule *module, AOTSection *sections,
uint32 i, func_index, func_type_index; uint32 i, func_index, func_type_index;
AOTFuncType *func_type; AOTFuncType *func_type;
AOTExport *exports; AOTExport *exports;
uint8 malloc_free_io_type = VALUE_TYPE_I32;
while (section) { while (section) {
buf = section->section_body; buf = section->section_body;
@ -3719,7 +3742,10 @@ load_from_sections(AOTModule *module, AOTSection *sections,
module->malloc_func_index = (uint32)-1; module->malloc_func_index = (uint32)-1;
module->free_func_index = (uint32)-1; module->free_func_index = (uint32)-1;
module->retain_func_index = (uint32)-1; module->retain_func_index = (uint32)-1;
#if WASM_ENABLE_MEMORY64 != 0
if (has_module_memory64(module))
malloc_free_io_type = VALUE_TYPE_I64;
#endif
exports = module->exports; exports = module->exports;
for (i = 0; i < module->export_count; i++) { for (i = 0; i < module->export_count; i++) {
if (exports[i].kind == EXPORT_KIND_FUNC if (exports[i].kind == EXPORT_KIND_FUNC
@ -3729,8 +3755,8 @@ load_from_sections(AOTModule *module, AOTSection *sections,
func_type_index = module->func_type_indexes[func_index]; func_type_index = module->func_type_indexes[func_index];
func_type = (AOTFuncType *)module->types[func_type_index]; func_type = (AOTFuncType *)module->types[func_type_index];
if (func_type->param_count == 1 && func_type->result_count == 1 if (func_type->param_count == 1 && func_type->result_count == 1
&& func_type->types[0] == VALUE_TYPE_I32 && func_type->types[0] == malloc_free_io_type
&& func_type->types[1] == VALUE_TYPE_I32) { && func_type->types[1] == malloc_free_io_type) {
bh_assert(module->malloc_func_index == (uint32)-1); bh_assert(module->malloc_func_index == (uint32)-1);
module->malloc_func_index = func_index; module->malloc_func_index = func_index;
LOG_VERBOSE("Found malloc function, name: %s, index: %u", LOG_VERBOSE("Found malloc function, name: %s, index: %u",
@ -3742,9 +3768,9 @@ load_from_sections(AOTModule *module, AOTSection *sections,
func_type_index = module->func_type_indexes[func_index]; func_type_index = module->func_type_indexes[func_index];
func_type = (AOTFuncType *)module->types[func_type_index]; func_type = (AOTFuncType *)module->types[func_type_index];
if (func_type->param_count == 2 && func_type->result_count == 1 if (func_type->param_count == 2 && func_type->result_count == 1
&& func_type->types[0] == VALUE_TYPE_I32 && func_type->types[0] == malloc_free_io_type
&& func_type->types[1] == VALUE_TYPE_I32 && func_type->types[1] == VALUE_TYPE_I32
&& func_type->types[2] == VALUE_TYPE_I32) { && func_type->types[2] == malloc_free_io_type) {
uint32 j; uint32 j;
WASMExport *export_tmp; WASMExport *export_tmp;
@ -3768,8 +3794,8 @@ load_from_sections(AOTModule *module, AOTSection *sections,
(AOTFuncType *)module->types[func_type_index]; (AOTFuncType *)module->types[func_type_index];
if (func_type->param_count == 1 if (func_type->param_count == 1
&& func_type->result_count == 1 && func_type->result_count == 1
&& func_type->types[0] == VALUE_TYPE_I32 && func_type->types[0] == malloc_free_io_type
&& func_type->types[1] == VALUE_TYPE_I32) { && func_type->types[1] == malloc_free_io_type) {
bh_assert(module->retain_func_index bh_assert(module->retain_func_index
== (uint32)-1); == (uint32)-1);
module->retain_func_index = export_tmp->index; module->retain_func_index = export_tmp->index;
@ -3795,7 +3821,7 @@ load_from_sections(AOTModule *module, AOTSection *sections,
func_type_index = module->func_type_indexes[func_index]; func_type_index = module->func_type_indexes[func_index];
func_type = (AOTFuncType *)module->types[func_type_index]; func_type = (AOTFuncType *)module->types[func_type_index];
if (func_type->param_count == 1 && func_type->result_count == 0 if (func_type->param_count == 1 && func_type->result_count == 0
&& func_type->types[0] == VALUE_TYPE_I32) { && func_type->types[0] == malloc_free_io_type) {
bh_assert(module->free_func_index == (uint32)-1); bh_assert(module->free_func_index == (uint32)-1);
module->free_func_index = func_index; module->free_func_index = func_index;
LOG_VERBOSE("Found free function, name: %s, index: %u", LOG_VERBOSE("Found free function, name: %s, index: %u",


@ -792,16 +792,18 @@ memory_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent,
uint32 max_page_count = uint32 max_page_count =
wasm_runtime_get_max_mem(max_memory_pages, memory->mem_init_page_count, wasm_runtime_get_max_mem(max_memory_pages, memory->mem_init_page_count,
memory->mem_max_page_count); memory->mem_max_page_count);
uint32 default_max_pages;
uint32 inc_page_count, global_idx; uint32 inc_page_count, global_idx;
uint32 bytes_of_last_page, bytes_to_page_end; uint32 bytes_of_last_page, bytes_to_page_end;
uint64 aux_heap_base, uint64 aux_heap_base,
heap_offset = (uint64)num_bytes_per_page * init_page_count; heap_offset = (uint64)num_bytes_per_page * init_page_count;
uint64 memory_data_size, max_memory_data_size; uint64 memory_data_size, max_memory_data_size;
uint8 *p = NULL, *global_addr; uint8 *p = NULL, *global_addr;
bool is_memory64 = memory->memory_flags & MEMORY64_FLAG;
bool is_shared_memory = false; bool is_shared_memory = false;
#if WASM_ENABLE_SHARED_MEMORY != 0 #if WASM_ENABLE_SHARED_MEMORY != 0
is_shared_memory = memory->memory_flags & 0x02 ? true : false; is_shared_memory = memory->memory_flags & SHARED_MEMORY_FLAG ? true : false;
/* Shared memory */ /* Shared memory */
if (is_shared_memory && parent != NULL) { if (is_shared_memory && parent != NULL) {
AOTMemoryInstance *shared_memory_instance; AOTMemoryInstance *shared_memory_instance;
@ -813,6 +815,16 @@ memory_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent,
} }
#endif #endif
#if WASM_ENABLE_MEMORY64 != 0
if (is_memory64) {
default_max_pages = DEFAULT_MEM64_MAX_PAGES;
}
else
#endif
{
default_max_pages = DEFAULT_MAX_PAGES;
}
if (heap_size > 0 && module->malloc_func_index != (uint32)-1 if (heap_size > 0 && module->malloc_func_index != (uint32)-1
&& module->free_func_index != (uint32)-1) { && module->free_func_index != (uint32)-1) {
/* Disable app heap, use malloc/free function exported /* Disable app heap, use malloc/free function exported
@ -893,14 +905,14 @@ memory_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent,
} }
init_page_count += inc_page_count; init_page_count += inc_page_count;
max_page_count += inc_page_count; max_page_count += inc_page_count;
if (init_page_count > DEFAULT_MAX_PAGES) { if (init_page_count > default_max_pages) {
set_error_buf(error_buf, error_buf_size, set_error_buf(error_buf, error_buf_size,
"failed to insert app heap into linear memory, " "failed to insert app heap into linear memory, "
"try using `--heap-size=0` option"); "try using `--heap-size=0` option");
return NULL; return NULL;
} }
if (max_page_count > DEFAULT_MAX_PAGES) if (max_page_count > default_max_pages)
max_page_count = DEFAULT_MAX_PAGES; max_page_count = default_max_pages;
} }
LOG_VERBOSE("Memory instantiate:"); LOG_VERBOSE("Memory instantiate:");
@ -912,11 +924,11 @@ memory_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent,
heap_size); heap_size);
max_memory_data_size = (uint64)num_bytes_per_page * max_page_count; max_memory_data_size = (uint64)num_bytes_per_page * max_page_count;
bh_assert(max_memory_data_size <= MAX_LINEAR_MEMORY_SIZE); bh_assert(max_memory_data_size <= GET_MAX_LINEAR_MEMORY_SIZE(is_memory64));
(void)max_memory_data_size; (void)max_memory_data_size;
/* TODO: memory64 uses is_memory64 flag */ /* TODO: memory64 uses is_memory64 flag */
if (wasm_allocate_linear_memory(&p, is_shared_memory, false, if (wasm_allocate_linear_memory(&p, is_shared_memory, is_memory64,
num_bytes_per_page, init_page_count, num_bytes_per_page, init_page_count,
max_page_count, &memory_data_size) max_page_count, &memory_data_size)
!= BHT_OK) { != BHT_OK) {
@ -930,6 +942,11 @@ memory_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent,
memory_inst->cur_page_count = init_page_count; memory_inst->cur_page_count = init_page_count;
memory_inst->max_page_count = max_page_count; memory_inst->max_page_count = max_page_count;
memory_inst->memory_data_size = memory_data_size; memory_inst->memory_data_size = memory_data_size;
#if WASM_ENABLE_MEMORY64 != 0
if (is_memory64) {
memory_inst->is_memory64 = 1;
}
#endif
/* Init memory info */ /* Init memory info */
memory_inst->memory_data = p; memory_inst->memory_data = p;
@ -993,11 +1010,12 @@ memories_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent,
uint32 max_memory_pages, char *error_buf, uint32 max_memory_pages, char *error_buf,
uint32 error_buf_size) uint32 error_buf_size)
{ {
uint32 global_index, global_data_offset, base_offset, length; uint32 global_index, global_data_offset, length;
uint32 i, memory_count = module->memory_count; uint32 i, memory_count = module->memory_count;
AOTMemoryInstance *memories, *memory_inst; AOTMemoryInstance *memories, *memory_inst;
AOTMemInitData *data_seg; AOTMemInitData *data_seg;
uint64 total_size; uint64 total_size;
mem_offset_t base_offset;
module_inst->memory_count = memory_count; module_inst->memory_count = memory_count;
total_size = sizeof(AOTMemoryInstance *) * (uint64)memory_count; total_size = sizeof(AOTMemoryInstance *) * (uint64)memory_count;
@@ -1036,7 +1054,9 @@ memories_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent,
                initialized */
             continue;
 
-        bh_assert(data_seg->offset.init_expr_type == INIT_EXPR_TYPE_I32_CONST
+        bh_assert(data_seg->offset.init_expr_type
+                      == (memory_inst->is_memory64 ? INIT_EXPR_TYPE_I64_CONST
+                                                   : INIT_EXPR_TYPE_I32_CONST)
                   || data_seg->offset.init_expr_type
                          == INIT_EXPR_TYPE_GET_GLOBAL);
@@ -1057,11 +1077,28 @@ memories_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent,
                 module->globals[global_index - module->import_global_count]
                     .data_offset;
 
+#if WASM_ENABLE_MEMORY64 != 0
+            if (memory_inst->is_memory64) {
+                base_offset =
+                    *(uint64 *)(module_inst->global_data + global_data_offset);
+            }
+            else
+#endif
+            {
                 base_offset =
                     *(uint32 *)(module_inst->global_data + global_data_offset);
+            }
         }
         else {
-            base_offset = (uint32)data_seg->offset.u.i32;
+#if WASM_ENABLE_MEMORY64 != 0
+            if (memory_inst->is_memory64) {
+                base_offset = data_seg->offset.u.i64;
+            }
+            else
+#endif
+            {
+                base_offset = data_seg->offset.u.u32;
+            }
         }
/* Copy memory data */ /* Copy memory data */
@@ -1071,7 +1108,8 @@ memories_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent,
         /* Check memory data */
         /* check offset since length might negative */
         if (base_offset > memory_inst->memory_data_size) {
-            LOG_DEBUG("base_offset(%d) > memory_data_size(%" PRIu64 ")",
+            LOG_DEBUG("base_offset(%" PR_MEM_OFFSET
+                      ") > memory_data_size(%" PRIu64 ")",
                       base_offset, memory_inst->memory_data_size);
 #if WASM_ENABLE_REF_TYPES != 0
             set_error_buf(error_buf, error_buf_size,
@@ -1086,8 +1124,8 @@ memories_instantiate(AOTModuleInstance *module_inst, AOTModuleInstance *parent,
         /* check offset + length(could be zero) */
         length = data_seg->byte_count;
         if (base_offset + length > memory_inst->memory_data_size) {
-            LOG_DEBUG("base_offset(%d) + length(%d) > memory_data_size(%" PRIu64
-                      ")",
+            LOG_DEBUG("base_offset(%" PR_MEM_OFFSET
+                      ") + length(%d) > memory_data_size(%" PRIu64 ")",
                       base_offset, length, memory_inst->memory_data_size);
 #if WASM_ENABLE_REF_TYPES != 0
             set_error_buf(error_buf, error_buf_size,
@@ -2334,22 +2372,44 @@ aot_copy_exception(AOTModuleInstance *module_inst, char *exception_buf)
 static bool
 execute_malloc_function(AOTModuleInstance *module_inst, WASMExecEnv *exec_env,
                         AOTFunctionInstance *malloc_func,
-                        AOTFunctionInstance *retain_func, uint32 size,
-                        uint32 *p_result)
+                        AOTFunctionInstance *retain_func, uint64 size,
+                        uint64 *p_result)
 {
 #ifdef OS_ENABLE_HW_BOUND_CHECK
     WASMExecEnv *exec_env_tls = wasm_runtime_get_exec_env_tls();
 #endif
     WASMExecEnv *exec_env_created = NULL;
     WASMModuleInstanceCommon *module_inst_old = NULL;
-    uint32 argv[2], argc;
+    union {
+        uint32 u32[3];
+        uint64 u64;
+    } argv;
+    uint32 argc;
     bool ret;
 
-    argv[0] = size;
-    argc = 1;
-    if (retain_func) {
-        argv[1] = 0;
-        argc = 2;
+#if WASM_ENABLE_MEMORY64 != 0
+    bool is_memory64 = module_inst->memories[0]->is_memory64;
+
+    if (is_memory64) {
+        argc = 2;
+        PUT_I64_TO_ADDR(&argv.u64, size);
+    }
+    else
+#endif
+    {
+        argc = 1;
+        argv.u32[0] = (uint32)size;
+    }
+
+    /* if __retain is exported, then this module is compiled by
+       assemblyscript, the memory should be managed by as's runtime,
+       in this case we need to call the retain function after malloc
+       the memory */
+    if (retain_func) {
+        /* the malloc function from assemblyscript is:
+            function __new(size: usize, id: u32)
+           id = 0 means this is an ArrayBuffer object */
+        argv.u32[argc] = 0;
+        argc++;
     }
if (exec_env) { if (exec_env) {
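For a wasm64 AssemblyScript module the argument cells end up laid out as follows (illustrative size; the layout follows directly from the code above):

/* execute_malloc_function with size = 4096 on a memory64 module exporting __new/__retain:
 *   PUT_I64_TO_ADDR(&argv.u64, 4096);   argv.u32[0..1] hold the i64 size, argc = 2
 *   argv.u32[argc] = 0; argc++;         argv.u32[2] holds the i32 id, argc = 3
 *   aot_call_function(exec_env, malloc_func, 3, argv.u32);
 * On a wasm32 module the same call degenerates to argv.u32[0] = (uint32)size, argc = 1
 * (plus the id cell when __retain is exported). */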
@ -2389,10 +2449,10 @@ execute_malloc_function(AOTModuleInstance *module_inst, WASMExecEnv *exec_env,
} }
} }
ret = aot_call_function(exec_env, malloc_func, argc, argv); ret = aot_call_function(exec_env, malloc_func, argc, argv.u32);
if (retain_func && ret) if (retain_func && ret)
ret = aot_call_function(exec_env, retain_func, 1, argv); ret = aot_call_function(exec_env, retain_func, 1, argv.u32);
if (module_inst_old) if (module_inst_old)
/* Restore the existing exec_env's module inst */ /* Restore the existing exec_env's module inst */
@@ -2401,24 +2461,46 @@ execute_malloc_function(AOTModuleInstance *module_inst, WASMExecEnv *exec_env,
     if (exec_env_created)
         wasm_exec_env_destroy(exec_env_created);
 
-    if (ret)
-        *p_result = argv[0];
+    if (ret) {
+#if WASM_ENABLE_MEMORY64 != 0
+        if (is_memory64)
+            *p_result = GET_I64_FROM_ADDR(&argv.u64);
+        else
+#endif
+        {
+            *p_result = argv.u32[0];
+        }
+    }
     return ret;
 }
 
 static bool
 execute_free_function(AOTModuleInstance *module_inst, WASMExecEnv *exec_env,
-                      AOTFunctionInstance *free_func, uint32 offset)
+                      AOTFunctionInstance *free_func, uint64 offset)
 {
 #ifdef OS_ENABLE_HW_BOUND_CHECK
     WASMExecEnv *exec_env_tls = wasm_runtime_get_exec_env_tls();
 #endif
     WASMExecEnv *exec_env_created = NULL;
     WASMModuleInstanceCommon *module_inst_old = NULL;
-    uint32 argv[2];
+    union {
+        uint32 u32[2];
+        uint64 u64;
+    } argv;
+    uint32 argc;
     bool ret;
 
-    argv[0] = offset;
+#if WASM_ENABLE_MEMORY64 != 0
+    if (module_inst->memories[0]->is_memory64) {
+        PUT_I64_TO_ADDR(&argv.u64, offset);
+        argc = 2;
+    }
+    else
+#endif
+    {
+        argv.u32[0] = (uint32)offset;
+        argc = 1;
+    }
if (exec_env) { if (exec_env) {
#ifdef OS_ENABLE_HW_BOUND_CHECK #ifdef OS_ENABLE_HW_BOUND_CHECK
@ -2457,7 +2539,7 @@ execute_free_function(AOTModuleInstance *module_inst, WASMExecEnv *exec_env,
} }
} }
ret = aot_call_function(exec_env, free_func, 1, argv); ret = aot_call_function(exec_env, free_func, argc, argv.u32);
if (module_inst_old) if (module_inst_old)
/* Restore the existing exec_env's module inst */ /* Restore the existing exec_env's module inst */
@ -2477,7 +2559,7 @@ aot_module_malloc_internal(AOTModuleInstance *module_inst,
AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst); AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
AOTModule *module = (AOTModule *)module_inst->module; AOTModule *module = (AOTModule *)module_inst->module;
uint8 *addr = NULL; uint8 *addr = NULL;
uint32 offset = 0; uint64 offset = 0;
/* TODO: Memory64 size check based on memory idx type */ /* TODO: Memory64 size check based on memory idx type */
bh_assert(size <= UINT32_MAX); bh_assert(size <= UINT32_MAX);
@ -2509,7 +2591,7 @@ aot_module_malloc_internal(AOTModuleInstance *module_inst,
if (!malloc_func if (!malloc_func
|| !execute_malloc_function(module_inst, exec_env, malloc_func, || !execute_malloc_function(module_inst, exec_env, malloc_func,
retain_func, (uint32)size, &offset)) { retain_func, size, &offset)) {
return 0; return 0;
} }
addr = offset ? (uint8 *)memory_inst->memory_data + offset : NULL; addr = offset ? (uint8 *)memory_inst->memory_data + offset : NULL;
@@ -2620,8 +2702,7 @@ aot_module_free_internal(AOTModuleInstance *module_inst, WASMExecEnv *exec_env,
                 free_func = aot_lookup_function(module_inst, "__unpin");
 
             if (free_func)
-                execute_free_function(module_inst, exec_env, free_func,
-                                      (uint32)ptr);
+                execute_free_function(module_inst, exec_env, free_func, ptr);
         }
     }
 }
@ -2983,7 +3064,7 @@ aot_sqrtf(float x)
#if WASM_ENABLE_BULK_MEMORY != 0 #if WASM_ENABLE_BULK_MEMORY != 0
bool bool
aot_memory_init(AOTModuleInstance *module_inst, uint32 seg_index, uint32 offset, aot_memory_init(AOTModuleInstance *module_inst, uint32 seg_index, uint32 offset,
uint32 len, uint32 dst) uint32 len, size_t dst)
{ {
AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst); AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
AOTModule *aot_module; AOTModule *aot_module;
@ -3016,7 +3097,7 @@ aot_memory_init(AOTModuleInstance *module_inst, uint32 seg_index, uint32 offset,
(WASMModuleInstanceCommon *)module_inst, (uint64)dst); (WASMModuleInstanceCommon *)module_inst, (uint64)dst);
SHARED_MEMORY_LOCK(memory_inst); SHARED_MEMORY_LOCK(memory_inst);
bh_memcpy_s(maddr, (uint32)(memory_inst->memory_data_size - dst), bh_memcpy_s(maddr, CLAMP_U64_TO_U32(memory_inst->memory_data_size - dst),
data + offset, len); data + offset, len);
SHARED_MEMORY_UNLOCK(memory_inst); SHARED_MEMORY_UNLOCK(memory_inst);
return true; return true;


@ -627,7 +627,7 @@ aot_sqrtf(float x);
#if WASM_ENABLE_BULK_MEMORY != 0 #if WASM_ENABLE_BULK_MEMORY != 0
bool bool
aot_memory_init(AOTModuleInstance *module_inst, uint32 seg_index, uint32 offset, aot_memory_init(AOTModuleInstance *module_inst, uint32 seg_index, uint32 offset,
uint32 len, uint32 dst); uint32 len, size_t dst);
bool bool
aot_data_drop(AOTModuleInstance *module_inst, uint32 seg_index); aot_data_drop(AOTModuleInstance *module_inst, uint32 seg_index);


@ -0,0 +1,58 @@
/*
* Copyright (C) 2024 Amazon Inc. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "wasm_loader_common.h"
#include "bh_log.h"
#include "../interpreter/wasm.h"
static void
set_error_buf(char *error_buf, uint32 error_buf_size, const char *string,
bool is_aot)
{
if (error_buf != NULL) {
snprintf(error_buf, error_buf_size, "%s module load failed: %s",
is_aot ? "AOT" : "WASM", string);
}
}
bool
wasm_memory_check_flags(const uint8 mem_flag, char *error_buf,
uint32 error_buf_size, bool is_aot)
{
/* Check whether certain features indicated by mem_flag are enabled in
* runtime */
if (mem_flag > MAX_PAGE_COUNT_FLAG) {
#if WASM_ENABLE_SHARED_MEMORY == 0
if (mem_flag & SHARED_MEMORY_FLAG) {
LOG_VERBOSE("shared memory flag was found, please enable shared "
"memory, lib-pthread or lib-wasi-threads");
set_error_buf(error_buf, error_buf_size, "invalid limits flags",
is_aot);
return false;
}
#endif
#if WASM_ENABLE_MEMORY64 == 0
if (mem_flag & MEMORY64_FLAG) {
LOG_VERBOSE("memory64 flag was found, please enable memory64");
set_error_buf(error_buf, error_buf_size, "invalid limits flags",
is_aot);
return false;
}
#endif
}
if (mem_flag > MAX_PAGE_COUNT_FLAG + SHARED_MEMORY_FLAG + MEMORY64_FLAG) {
set_error_buf(error_buf, error_buf_size, "invalid limits flags",
is_aot);
return false;
}
else if ((mem_flag & SHARED_MEMORY_FLAG)
&& !(mem_flag & MAX_PAGE_COUNT_FLAG)) {
set_error_buf(error_buf, error_buf_size,
"shared memory must have maximum", is_aot);
return false;
}
return true;
}
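A minimal caller sketch for the new helper. The concrete flag-bit values are an assumption here (the usual Wasm limits flags: 0x01 = max present, 0x02 = shared, 0x04 = memory64), which the MAX_PAGE_COUNT_FLAG / SHARED_MEMORY_FLAG / MEMORY64_FLAG macros are expected to mirror:

#include <stdio.h>
#include "wasm_loader_common.h"

static void
try_flags(uint8 flags)
{
    char error_buf[128] = { 0 };
    /* is_aot = false: error text is prefixed "WASM module load failed: ..." */
    if (!wasm_memory_check_flags(flags, error_buf, sizeof(error_buf), false))
        printf("flags 0x%02x rejected: %s\n", flags, error_buf);
    else
        printf("flags 0x%02x accepted\n", flags);
}

/* try_flags(0x03): shared + max, accepted only when shared memory support is built in;
   try_flags(0x02): shared without max, rejected either way ("invalid limits flags" or
                    "shared memory must have maximum");
   try_flags(0x05): memory64 + max, rejected unless WASM_ENABLE_MEMORY64 != 0. */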


@ -0,0 +1,23 @@
/*
* Copyright (C) 2024 Amazon Inc. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _WASM_LOADER_COMMON_H
#define _WASM_LOADER_COMMON_H
#include "platform_common.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
wasm_memory_check_flags(const uint8 mem_flag, char *error_buf,
uint32 error_buf_size, bool is_aot);
#ifdef __cplusplus
}
#endif
#endif /* end of _WASM_LOADER_COMMON_H */


@@ -1005,15 +1005,7 @@ wasm_allocate_linear_memory(uint8 **data, bool is_shared_memory,
     page_size = os_getpagesize();
     *memory_data_size = init_page_count * num_bytes_per_page;
 
-#if WASM_ENABLE_MEMORY64 != 0
-    if (is_memory64) {
-        bh_assert(*memory_data_size <= MAX_LINEAR_MEM64_MEMORY_SIZE);
-    }
-    else
-#endif
-    {
-        bh_assert(*memory_data_size <= MAX_LINEAR_MEMORY_SIZE);
-    }
+    bh_assert(*memory_data_size <= GET_MAX_LINEAR_MEMORY_SIZE(is_memory64));
     *memory_data_size = align_as_and_cast(*memory_data_size, page_size);
 
     if (map_size > 0) {


@ -362,6 +362,9 @@ LOAD_I16(void *addr)
#define SHARED_MEMORY_UNLOCK(memory) (void)0 #define SHARED_MEMORY_UNLOCK(memory) (void)0
#endif #endif
#define CLAMP_U64_TO_U32(value) \
((value) > UINT32_MAX ? UINT32_MAX : (uint32)(value))
typedef struct WASMModuleCommon { typedef struct WASMModuleCommon {
/* Module type, for module loaded from WASM bytecode binary, /* Module type, for module loaded from WASM bytecode binary,
this field is Wasm_Module_Bytecode, and this structure should this field is Wasm_Module_Bytecode, and this structure should
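CLAMP_U64_TO_U32 saturates rather than truncates. For example, the aot_memory_init change shown earlier uses it when handing a 64-bit remaining size to bh_memcpy_s, whose destination-size parameter is uint32:

uint64 remaining = memory_inst->memory_data_size - dst; /* may exceed UINT32_MAX */
bh_memcpy_s(maddr, CLAMP_U64_TO_U32(remaining), data + offset, len);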


@@ -84,37 +84,46 @@ read_leb(const uint8 *buf, const uint8 *buf_end, uint32 *p_offset,
 }
 
 /* NOLINTNEXTLINE */
-#define read_leb_uint32(p, p_end, res) \
+#define read_leb_generic(p, p_end, res, res_type, sign) \
     do { \
         uint32 off = 0; \
         uint64 res64; \
-        if (!read_leb(p, p_end, &off, 32, false, &res64)) \
+        if (!read_leb(p, p_end, &off, sizeof(res_type) << 3, sign, &res64)) \
            return false; \
         p += off; \
-        res = (uint32)res64; \
+        res = (res_type)res64; \
     } while (0)
 
 /* NOLINTNEXTLINE */
 #define read_leb_int32(p, p_end, res) \
-    do { \
-        uint32 off = 0; \
-        uint64 res64; \
-        if (!read_leb(p, p_end, &off, 32, true, &res64)) \
-            return false; \
-        p += off; \
-        res = (int32)res64; \
-    } while (0)
+    read_leb_generic(p, p_end, res, int32, true)
 
 /* NOLINTNEXTLINE */
 #define read_leb_int64(p, p_end, res) \
-    do { \
-        uint32 off = 0; \
-        uint64 res64; \
-        if (!read_leb(p, p_end, &off, 64, true, &res64)) \
-            return false; \
-        p += off; \
-        res = (int64)res64; \
-    } while (0)
+    read_leb_generic(p, p_end, res, int64, true)
+
+/* NOLINTNEXTLINE */
+#define read_leb_uint32(p, p_end, res) \
+    read_leb_generic(p, p_end, res, uint32, false)
+/* NOLINTNEXTLINE */
+#define read_leb_uint64(p, p_end, res) \
+    read_leb_generic(p, p_end, res, uint64, false)
+
+/* NOLINTNEXTLINE */
+#if WASM_ENABLE_MEMORY64 != 0
+#define read_leb_mem_offset(p, p_end, res) \
+    do { \
+        if (IS_MEMORY64) { \
+            read_leb_uint64(p, p_end, res); \
+        } \
+        else { \
+            read_leb_uint32(p, p_end, res); \
+        } \
+    } while (0)
+#else
+#define read_leb_mem_offset read_leb_uint32
+#endif
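The per-type readers keep their old behaviour. For example, read_leb_uint32(p, p_end, res) still expands to roughly the body it had before, since sizeof(uint32) << 3 == 32:

do {
    uint32 off = 0;
    uint64 res64;
    if (!read_leb(p, p_end, &off, sizeof(uint32) << 3, false, &res64))
        return false;
    p += off;
    res = (uint32)res64;
} while (0);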
/** /**
* Since wamrc uses a full feature Wasm loader, * Since wamrc uses a full feature Wasm loader,
@ -135,6 +144,13 @@ aot_validate_wasm(AOTCompContext *comp_ctx)
} }
} }
#if WASM_ENABLE_MEMORY64 != 0
if (comp_ctx->pointer_size < sizeof(uint64) && IS_MEMORY64) {
aot_set_last_error("Compiling wasm64 to 32bit platform is not allowed");
return false;
}
#endif
return true; return true;
} }
@ -933,7 +949,8 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
uint16 result_count; uint16 result_count;
uint32 br_depth, *br_depths, br_count; uint32 br_depth, *br_depths, br_count;
uint32 func_idx, type_idx, mem_idx, local_idx, global_idx, i; uint32 func_idx, type_idx, mem_idx, local_idx, global_idx, i;
uint32 bytes = 4, align, offset; uint32 bytes = 4, align;
mem_offset_t offset;
uint32 type_index; uint32 type_index;
bool sign = true; bool sign = true;
int32 i32_const; int32 i32_const;
@ -1892,7 +1909,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
sign = (opcode == WASM_OP_I32_LOAD16_S) ? true : false; sign = (opcode == WASM_OP_I32_LOAD16_S) ? true : false;
op_i32_load: op_i32_load:
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i32_load(comp_ctx, func_ctx, align, offset, if (!aot_compile_op_i32_load(comp_ctx, func_ctx, align, offset,
bytes, sign, false)) bytes, sign, false))
return false; return false;
@ -1918,7 +1935,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
sign = (opcode == WASM_OP_I64_LOAD32_S) ? true : false; sign = (opcode == WASM_OP_I64_LOAD32_S) ? true : false;
op_i64_load: op_i64_load:
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i64_load(comp_ctx, func_ctx, align, offset, if (!aot_compile_op_i64_load(comp_ctx, func_ctx, align, offset,
bytes, sign, false)) bytes, sign, false))
return false; return false;
@ -1926,14 +1943,14 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case WASM_OP_F32_LOAD: case WASM_OP_F32_LOAD:
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_f32_load(comp_ctx, func_ctx, align, offset)) if (!aot_compile_op_f32_load(comp_ctx, func_ctx, align, offset))
return false; return false;
break; break;
case WASM_OP_F64_LOAD: case WASM_OP_F64_LOAD:
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_f64_load(comp_ctx, func_ctx, align, offset)) if (!aot_compile_op_f64_load(comp_ctx, func_ctx, align, offset))
return false; return false;
break; break;
@ -1948,7 +1965,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
bytes = 2; bytes = 2;
op_i32_store: op_i32_store:
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i32_store(comp_ctx, func_ctx, align, offset, if (!aot_compile_op_i32_store(comp_ctx, func_ctx, align, offset,
bytes, false)) bytes, false))
return false; return false;
@ -1967,7 +1984,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
bytes = 4; bytes = 4;
op_i64_store: op_i64_store:
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_i64_store(comp_ctx, func_ctx, align, offset, if (!aot_compile_op_i64_store(comp_ctx, func_ctx, align, offset,
bytes, false)) bytes, false))
return false; return false;
@ -1975,7 +1992,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case WASM_OP_F32_STORE: case WASM_OP_F32_STORE:
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_f32_store(comp_ctx, func_ctx, align, if (!aot_compile_op_f32_store(comp_ctx, func_ctx, align,
offset)) offset))
return false; return false;
@ -1983,7 +2000,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case WASM_OP_F64_STORE: case WASM_OP_F64_STORE:
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_op_f64_store(comp_ctx, func_ctx, align, if (!aot_compile_op_f64_store(comp_ctx, func_ctx, align,
offset)) offset))
return false; return false;
@ -2540,7 +2557,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
if (opcode != WASM_OP_ATOMIC_FENCE) { if (opcode != WASM_OP_ATOMIC_FENCE) {
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
} }
switch (opcode) { switch (opcode) {
case WASM_OP_ATOMIC_WAIT32: case WASM_OP_ATOMIC_WAIT32:
@ -2705,7 +2722,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_load: case SIMD_v128_load:
{ {
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_v128_load(comp_ctx, func_ctx, if (!aot_compile_simd_v128_load(comp_ctx, func_ctx,
align, offset)) align, offset))
return false; return false;
@ -2720,7 +2737,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_load32x2_u: case SIMD_v128_load32x2_u:
{ {
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_load_extend( if (!aot_compile_simd_load_extend(
comp_ctx, func_ctx, opcode, align, offset)) comp_ctx, func_ctx, opcode, align, offset))
return false; return false;
@ -2733,7 +2750,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_load64_splat: case SIMD_v128_load64_splat:
{ {
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_load_splat(comp_ctx, func_ctx, if (!aot_compile_simd_load_splat(comp_ctx, func_ctx,
opcode, align, offset)) opcode, align, offset))
return false; return false;
@ -2743,7 +2760,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_store: case SIMD_v128_store:
{ {
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_v128_store(comp_ctx, func_ctx, if (!aot_compile_simd_v128_store(comp_ctx, func_ctx,
align, offset)) align, offset))
return false; return false;
@ -3006,7 +3023,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_load64_lane: case SIMD_v128_load64_lane:
{ {
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_load_lane(comp_ctx, func_ctx, if (!aot_compile_simd_load_lane(comp_ctx, func_ctx,
opcode, align, offset, opcode, align, offset,
*frame_ip++)) *frame_ip++))
@ -3020,7 +3037,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_store64_lane: case SIMD_v128_store64_lane:
{ {
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_store_lane(comp_ctx, func_ctx, if (!aot_compile_simd_store_lane(comp_ctx, func_ctx,
opcode, align, offset, opcode, align, offset,
*frame_ip++)) *frame_ip++))
@ -3032,7 +3049,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
case SIMD_v128_load64_zero: case SIMD_v128_load64_zero:
{ {
read_leb_uint32(frame_ip, frame_ip_end, align); read_leb_uint32(frame_ip, frame_ip_end, align);
read_leb_uint32(frame_ip, frame_ip_end, offset); read_leb_mem_offset(frame_ip, frame_ip_end, offset);
if (!aot_compile_simd_load_zero(comp_ctx, func_ctx, if (!aot_compile_simd_load_zero(comp_ctx, func_ctx,
opcode, align, offset)) opcode, align, offset))
return false; return false;

View File

@ -519,6 +519,15 @@ set_local_gc_ref(AOTCompFrame *frame, int n, LLVMValueRef value, uint8 ref_type)
wasm_runtime_free(aot_value); \ wasm_runtime_free(aot_value); \
} while (0) } while (0)
#if WASM_ENABLE_MEMORY64 != 0
#define IS_MEMORY64 \
(comp_ctx->comp_data->memories[0].memory_flags & MEMORY64_FLAG)
#define MEMORY64_COND_VALUE(VAL_IF_ENABLED, VAL_IF_DISABLED) \
(IS_MEMORY64 ? VAL_IF_ENABLED : VAL_IF_DISABLED)
#else
#define MEMORY64_COND_VALUE(VAL_IF_ENABLED, VAL_IF_DISABLED) (VAL_IF_DISABLED)
#endif
#define POP_I32(v) POP(v, VALUE_TYPE_I32) #define POP_I32(v) POP(v, VALUE_TYPE_I32)
#define POP_I64(v) POP(v, VALUE_TYPE_I64) #define POP_I64(v) POP(v, VALUE_TYPE_I64)
#define POP_F32(v) POP(v, VALUE_TYPE_F32) #define POP_F32(v) POP(v, VALUE_TYPE_F32)
@ -527,6 +536,10 @@ set_local_gc_ref(AOTCompFrame *frame, int n, LLVMValueRef value, uint8 ref_type)
#define POP_FUNCREF(v) POP(v, VALUE_TYPE_FUNCREF) #define POP_FUNCREF(v) POP(v, VALUE_TYPE_FUNCREF)
#define POP_EXTERNREF(v) POP(v, VALUE_TYPE_EXTERNREF) #define POP_EXTERNREF(v) POP(v, VALUE_TYPE_EXTERNREF)
#define POP_GC_REF(v) POP(v, VALUE_TYPE_GC_REF) #define POP_GC_REF(v) POP(v, VALUE_TYPE_GC_REF)
#define POP_MEM_OFFSET(v) \
POP(v, MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32))
#define POP_PAGE_COUNT(v) \
POP(v, MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32))
#define POP_COND(llvm_value) \ #define POP_COND(llvm_value) \
do { \ do { \
@ -590,6 +603,8 @@ set_local_gc_ref(AOTCompFrame *frame, int n, LLVMValueRef value, uint8 ref_type)
#define PUSH_FUNCREF(v) PUSH(v, VALUE_TYPE_FUNCREF) #define PUSH_FUNCREF(v) PUSH(v, VALUE_TYPE_FUNCREF)
#define PUSH_EXTERNREF(v) PUSH(v, VALUE_TYPE_EXTERNREF) #define PUSH_EXTERNREF(v) PUSH(v, VALUE_TYPE_EXTERNREF)
#define PUSH_GC_REF(v) PUSH(v, VALUE_TYPE_GC_REF) #define PUSH_GC_REF(v) PUSH(v, VALUE_TYPE_GC_REF)
#define PUSH_PAGE_COUNT(v) \
PUSH(v, MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32))
#define TO_LLVM_TYPE(wasm_type) \ #define TO_LLVM_TYPE(wasm_type) \
wasm_type_to_llvm_type(comp_ctx, &comp_ctx->basic_types, wasm_type) wasm_type_to_llvm_type(comp_ctx, &comp_ctx->basic_types, wasm_type)
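A short note on how the new stack macros resolve (sketch):

/* With WASM_ENABLE_MEMORY64 != 0 and memories[0] carrying MEMORY64_FLAG:
 *   POP_MEM_OFFSET(addr)  -> POP(addr, VALUE_TYPE_I64)
 *   PUSH_PAGE_COUNT(ret)  -> PUSH(ret, VALUE_TYPE_I64)
 * When the feature is compiled out, MEMORY64_COND_VALUE(a, b) is simply (b);
 * when it is compiled in, IS_MEMORY64 is evaluated against memories[0].memory_flags,
 * so wasm32 modules still get the existing i32 variants. */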


@ -38,6 +38,20 @@
#define SET_BUILD_POS(block) LLVMPositionBuilderAtEnd(comp_ctx->builder, block) #define SET_BUILD_POS(block) LLVMPositionBuilderAtEnd(comp_ctx->builder, block)
static bool
zero_extend_u64(AOTCompContext *comp_ctx, LLVMValueRef *value, const char *name)
{
if (comp_ctx->pointer_size == sizeof(uint64)) {
/* zero extend to uint64 if the target is 64-bit */
*value = LLVMBuildZExt(comp_ctx->builder, *value, I64_TYPE, name);
if (!*value) {
aot_set_last_error("llvm build zero extend failed.");
return false;
}
}
return true;
}
static LLVMValueRef static LLVMValueRef
get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, get_memory_check_bound(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 bytes) uint32 bytes)
@@ -82,9 +96,10 @@ get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
 LLVMValueRef
 aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
-                          uint32 offset, uint32 bytes, bool enable_segue)
+                          mem_offset_t offset, uint32 bytes, bool enable_segue)
 {
-    LLVMValueRef offset_const = I32_CONST(offset);
+    LLVMValueRef offset_const =
+        MEMORY64_COND_VALUE(I64_CONST(offset), I32_CONST(offset));
     LLVMValueRef addr, maddr, offset1, cmp1, cmp2, cmp;
     LLVMValueRef mem_base_addr, mem_check_bound;
     LLVMBasicBlockRef block_curr = LLVMGetInsertBlock(comp_ctx->builder);
@@ -94,17 +109,27 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
     bool is_target_64bit, is_local_of_aot_value = false;
 #if WASM_ENABLE_SHARED_MEMORY != 0
     bool is_shared_memory =
-        comp_ctx->comp_data->memories[0].memory_flags & 0x02;
+        comp_ctx->comp_data->memories[0].memory_flags & SHARED_MEMORY_FLAG;
 #endif
 
     is_target_64bit = (comp_ctx->pointer_size == sizeof(uint64)) ? true : false;
 
     if (comp_ctx->is_indirect_mode
-        && aot_intrinsic_check_capability(comp_ctx, "i32.const")) {
+        && aot_intrinsic_check_capability(
+            comp_ctx, MEMORY64_COND_VALUE("i64.const", "i32.const"))) {
         WASMValue wasm_value;
-        wasm_value.i32 = offset;
+#if WASM_ENABLE_MEMORY64 != 0
+        if (IS_MEMORY64) {
+            wasm_value.i64 = offset;
+        }
+        else
+#endif
+        {
+            wasm_value.i32 = (int32)offset;
+        }
         offset_const = aot_load_const_from_table(
-            comp_ctx, func_ctx->native_symbol, &wasm_value, VALUE_TYPE_I32);
+            comp_ctx, func_ctx->native_symbol, &wasm_value,
+            MEMORY64_COND_VALUE(VALUE_TYPE_I64, VALUE_TYPE_I32));
         if (!offset_const) {
             return NULL;
         }
@ -139,7 +164,7 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
local_idx_of_aot_value = aot_value_top->local_idx; local_idx_of_aot_value = aot_value_top->local_idx;
} }
POP_I32(addr); POP_MEM_OFFSET(addr);
/* /*
* Note: not throw the integer-overflow-exception here since it must * Note: not throw the integer-overflow-exception here since it must
@ -158,7 +183,7 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
if (mem_offset + bytes <= mem_data_size) { if (mem_offset + bytes <= mem_data_size) {
/* inside memory space */ /* inside memory space */
if (comp_ctx->pointer_size == sizeof(uint64)) if (comp_ctx->pointer_size == sizeof(uint64))
offset1 = I64_CONST((uint32)mem_offset); offset1 = I64_CONST(mem_offset);
else else
offset1 = I32_CONST((uint32)mem_offset); offset1 = I32_CONST((uint32)mem_offset);
CHECK_LLVM_CONST(offset1); CHECK_LLVM_CONST(offset1);
@@ -206,7 +231,8 @@ aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
         if (!(mem_size = get_memory_curr_page_count(comp_ctx, func_ctx))) {
             goto fail;
         }
-        BUILD_ICMP(LLVMIntEQ, mem_size, I32_ZERO, cmp, "is_zero");
+        BUILD_ICMP(LLVMIntEQ, mem_size,
+                   MEMORY64_COND_VALUE(I64_ZERO, I32_ZERO), cmp, "is_zero");
         ADD_BASIC_BLOCK(check_succ, "check_mem_size_succ");
         LLVMMoveBasicBlockAfter(check_succ, block_curr);
         if (!aot_emit_exception(comp_ctx, func_ctx,
@ -412,8 +438,8 @@ fail:
bool bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign, uint32 align, mem_offset_t offset, uint32 bytes,
bool atomic) bool sign, bool atomic)
{ {
LLVMValueRef maddr, value = NULL; LLVMValueRef maddr, value = NULL;
LLVMTypeRef data_type; LLVMTypeRef data_type;
@ -482,8 +508,8 @@ fail:
bool bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign, uint32 align, mem_offset_t offset, uint32 bytes,
bool atomic) bool sign, bool atomic)
{ {
LLVMValueRef maddr, value = NULL; LLVMValueRef maddr, value = NULL;
LLVMTypeRef data_type; LLVMTypeRef data_type;
@ -560,7 +586,7 @@ fail:
bool bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset) uint32 align, mem_offset_t offset)
{ {
LLVMValueRef maddr, value; LLVMValueRef maddr, value;
bool enable_segue = comp_ctx->enable_segue_f32_load; bool enable_segue = comp_ctx->enable_segue_f32_load;
@ -583,7 +609,7 @@ fail:
bool bool
aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset) uint32 align, mem_offset_t offset)
{ {
LLVMValueRef maddr, value; LLVMValueRef maddr, value;
bool enable_segue = comp_ctx->enable_segue_f64_load; bool enable_segue = comp_ctx->enable_segue_f64_load;
@ -606,7 +632,8 @@ fail:
bool bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool atomic) uint32 align, mem_offset_t offset, uint32 bytes,
bool atomic)
{ {
LLVMValueRef maddr, value; LLVMValueRef maddr, value;
bool enable_segue = comp_ctx->enable_segue_i32_store; bool enable_segue = comp_ctx->enable_segue_i32_store;
@ -656,7 +683,8 @@ fail:
bool bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool atomic) uint32 align, mem_offset_t offset, uint32 bytes,
bool atomic)
{ {
LLVMValueRef maddr, value; LLVMValueRef maddr, value;
bool enable_segue = comp_ctx->enable_segue_i64_store; bool enable_segue = comp_ctx->enable_segue_i64_store;
@ -713,7 +741,7 @@ fail:
bool bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset) uint32 align, mem_offset_t offset)
{ {
LLVMValueRef maddr, value; LLVMValueRef maddr, value;
bool enable_segue = comp_ctx->enable_segue_f32_store; bool enable_segue = comp_ctx->enable_segue_f32_store;
@ -736,7 +764,7 @@ fail:
bool bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset) uint32 align, mem_offset_t offset)
{ {
LLVMValueRef maddr, value; LLVMValueRef maddr, value;
bool enable_segue = comp_ctx->enable_segue_f64_store; bool enable_segue = comp_ctx->enable_segue_f64_store;
@@ -774,7 +802,8 @@ get_memory_curr_page_count(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
         }
     }
 
-    return mem_size;
+    return LLVMBuildIntCast(comp_ctx->builder, mem_size,
+                            MEMORY64_COND_VALUE(I64_TYPE, I32_TYPE), "");
 fail:
     return NULL;
 }
@ -785,7 +814,7 @@ aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx); LLVMValueRef mem_size = get_memory_curr_page_count(comp_ctx, func_ctx);
if (mem_size) if (mem_size)
PUSH_I32(mem_size); PUSH_PAGE_COUNT(mem_size);
return mem_size ? true : false; return mem_size ? true : false;
fail: fail:
return false; return false;
@ -798,11 +827,14 @@ aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
LLVMValueRef delta, param_values[2], ret_value, func, value; LLVMValueRef delta, param_values[2], ret_value, func, value;
LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type; LLVMTypeRef param_types[2], ret_type, func_type, func_ptr_type;
int32 func_index; int32 func_index;
#if WASM_ENABLE_MEMORY64 != 0
LLVMValueRef u32_max, u32_cmp_result;
#endif
if (!mem_size) if (!mem_size)
return false; return false;
POP_I32(delta); POP_PAGE_COUNT(delta);
/* Function type of aot_enlarge_memory() */ /* Function type of aot_enlarge_memory() */
param_types[0] = INT8_PTR_TYPE; param_types[0] = INT8_PTR_TYPE;
@ -854,7 +886,7 @@ aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
/* Call function aot_enlarge_memory() */ /* Call function aot_enlarge_memory() */
param_values[0] = func_ctx->aot_inst; param_values[0] = func_ctx->aot_inst;
param_values[1] = delta; param_values[1] = LLVMBuildTrunc(comp_ctx->builder, delta, I32_TYPE, "");
if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func, if (!(ret_value = LLVMBuildCall2(comp_ctx->builder, func_type, func,
param_values, 2, "call"))) { param_values, 2, "call"))) {
aot_set_last_error("llvm build call failed."); aot_set_last_error("llvm build call failed.");
@@ -862,15 +894,26 @@ aot_compile_op_memory_grow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
     }
     BUILD_ICMP(LLVMIntUGT, ret_value, I8_ZERO, ret_value, "mem_grow_ret");
 
+#if WASM_ENABLE_MEMORY64 != 0
+    if (IS_MEMORY64) {
+        if (!(u32_max = I64_CONST(UINT32_MAX))) {
+            aot_set_last_error("llvm build const failed");
+            return false;
+        }
+        BUILD_ICMP(LLVMIntULE, delta, u32_max, u32_cmp_result, "page_size_cmp");
+        BUILD_OP(And, ret_value, u32_cmp_result, ret_value, "and");
+    }
+#endif
+
-    /* ret_value = ret_value == true ? delta : pre_page_count */
-    if (!(ret_value = LLVMBuildSelect(comp_ctx->builder, ret_value, mem_size,
-                                      I32_NEG_ONE, "mem_grow_ret"))) {
+    /* ret_value = ret_value == true ? pre_page_count : -1 */
+    if (!(ret_value = LLVMBuildSelect(
+              comp_ctx->builder, ret_value, mem_size,
+              MEMORY64_COND_VALUE(I64_NEG_ONE, I32_NEG_ONE), "mem_grow_ret"))) {
         aot_set_last_error("llvm build select failed.");
         return false;
     }
 
-    PUSH_I32(ret_value);
+    PUSH_PAGE_COUNT(ret_value);
     return true;
 fail:
     return false;
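The extra UINT32_MAX comparison above matters because aot_enlarge_memory() still takes the page delta as uint32 (hence the LLVMBuildTrunc on the call parameter): a wasm64 memory.grow with a delta above 2^32-1 would otherwise be silently truncated. A sketch of the resulting semantics:

/* memory64 module, delta = 0x1_0000_0001 pages:
 *   trunc(delta) to i32              -> 1 (what aot_enlarge_memory would see)
 *   delta <= UINT32_MAX              -> false
 *   ret_value &= false               -> grow treated as failed
 *   select(ret_value, mem_size, -1)  -> i64 -1 pushed, the spec's failure value
 */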
@ -987,13 +1030,17 @@ aot_compile_op_memory_init(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
POP_I32(len); POP_I32(len);
POP_I32(offset); POP_I32(offset);
POP_I32(dst); POP_MEM_OFFSET(dst);
if (!zero_extend_u64(comp_ctx, &dst, "dst64")) {
return false;
}
param_types[0] = INT8_PTR_TYPE; param_types[0] = INT8_PTR_TYPE;
param_types[1] = I32_TYPE; param_types[1] = I32_TYPE;
param_types[2] = I32_TYPE; param_types[2] = I32_TYPE;
param_types[3] = I32_TYPE; param_types[3] = I32_TYPE;
param_types[4] = I32_TYPE; param_types[4] = SIZE_T_TYPE;
ret_type = INT8_TYPE; ret_type = INT8_TYPE;
if (comp_ctx->is_jit_mode) if (comp_ctx->is_jit_mode)
@@ -1080,9 +1127,9 @@ aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
     LLVMValueRef src, dst, src_addr, dst_addr, len, res;
     bool call_aot_memmove = false;
 
-    POP_I32(len);
-    POP_I32(src);
-    POP_I32(dst);
+    POP_MEM_OFFSET(len);
+    POP_MEM_OFFSET(src);
+    POP_MEM_OFFSET(dst);
 
     if (!(src_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, src, len)))
         return false;
@@ -1090,14 +1137,9 @@ aot_compile_op_memory_copy(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
     if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
         return false;
 
-    if (comp_ctx->pointer_size == sizeof(uint64)) {
-        /* zero extend to uint64 if the target is 64-bit */
-        len = LLVMBuildZExt(comp_ctx->builder, len, I64_TYPE, "len64");
-        if (!len) {
-            aot_set_last_error("llvm build zero extend failed.");
-            return false;
-        }
-    }
+    if (!zero_extend_u64(comp_ctx, &len, "len64")) {
+        return false;
+    }
 
     call_aot_memmove = comp_ctx->is_indirect_mode || comp_ctx->is_jit_mode;
     if (call_aot_memmove) {
@@ -1174,21 +1216,16 @@ aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
     LLVMTypeRef param_types[3], ret_type, func_type, func_ptr_type;
     LLVMValueRef func, params[3];
 
-    POP_I32(len);
+    POP_MEM_OFFSET(len);
     POP_I32(val);
-    POP_I32(dst);
+    POP_MEM_OFFSET(dst);
 
     if (!(dst_addr = check_bulk_memory_overflow(comp_ctx, func_ctx, dst, len)))
         return false;
 
-    if (comp_ctx->pointer_size == sizeof(uint64)) {
-        /* zero extend to uint64 if the target is 64-bit */
-        len = LLVMBuildZExt(comp_ctx->builder, len, I64_TYPE, "len64");
-        if (!len) {
-            aot_set_last_error("llvm build zero extend failed.");
-            return false;
-        }
-    }
+    if (!zero_extend_u64(comp_ctx, &len, "len64")) {
+        return false;
+    }
 
     param_types[0] = INT8_PTR_TYPE;
     param_types[1] = I32_TYPE;
@ -1251,7 +1288,7 @@ fail:
bool bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 atomic_op, uint8 op_type, uint32 align, uint8 atomic_op, uint8 op_type, uint32 align,
uint32 offset, uint32 bytes) mem_offset_t offset, uint32 bytes)
{ {
LLVMValueRef maddr, value, result; LLVMValueRef maddr, value, result;
bool enable_segue = (op_type == VALUE_TYPE_I32) bool enable_segue = (op_type == VALUE_TYPE_I32)
@ -1337,7 +1374,7 @@ fail:
bool bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx, aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx, uint8 op_type, AOTFuncContext *func_ctx, uint8 op_type,
uint32 align, uint32 offset, uint32 bytes) uint32 align, mem_offset_t offset, uint32 bytes)
{ {
LLVMValueRef maddr, value, expect, result; LLVMValueRef maddr, value, expect, result;
bool enable_segue = (op_type == VALUE_TYPE_I32) bool enable_segue = (op_type == VALUE_TYPE_I32)
@ -1442,7 +1479,7 @@ fail:
bool bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 op_type, uint32 align, uint32 offset, uint8 op_type, uint32 align, mem_offset_t offset,
uint32 bytes) uint32 bytes)
{ {
LLVMValueRef maddr, value, timeout, expect, cmp; LLVMValueRef maddr, value, timeout, expect, cmp;
@ -1534,7 +1571,7 @@ fail:
bool bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx, aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx, uint32 align, AOTFuncContext *func_ctx, uint32 align,
uint32 offset, uint32 bytes) mem_offset_t offset, uint32 bytes)
{ {
LLVMValueRef maddr, value, count; LLVMValueRef maddr, value, count;
LLVMValueRef param_values[3], ret_value, func; LLVMValueRef param_values[3], ret_value, func;


@ -17,43 +17,43 @@ extern "C" {
bool bool
aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign, uint32 align, mem_offset_t offset, uint32 bytes,
bool atomic); bool sign, bool atomic);
bool bool
aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, bool sign, uint32 align, mem_offset_t offset, uint32 bytes,
bool atomic); bool sign, bool atomic);
bool bool
aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_f32_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset); uint32 align, mem_offset_t offset);
bool bool
aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_f64_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset); uint32 align, mem_offset_t offset);
bool bool
aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, uint32 align, mem_offset_t offset, uint32 bytes,
bool atomic); bool atomic);
bool bool
aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_i64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset, uint32 bytes, uint32 align, mem_offset_t offset, uint32 bytes,
bool atomic); bool atomic);
bool bool
aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_f32_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset); uint32 align, mem_offset_t offset);
bool bool
aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_f64_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset); uint32 align, mem_offset_t offset);
LLVMValueRef LLVMValueRef
aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_check_memory_overflow(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 offset, uint32 bytes, bool enable_segue); mem_offset_t offset, uint32 bytes, bool enable_segue);
bool bool
aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx); aot_compile_op_memory_size(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
@ -89,22 +89,22 @@ aot_compile_op_memory_fill(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx);
bool bool
aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_atomic_rmw(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 atomic_op, uint8 op_type, uint32 align, uint8 atomic_op, uint8 op_type, uint32 align,
uint32 offset, uint32 bytes); mem_offset_t offset, uint32 bytes);
bool bool
aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx, aot_compile_op_atomic_cmpxchg(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx, uint8 op_type, AOTFuncContext *func_ctx, uint8 op_type,
uint32 align, uint32 offset, uint32 bytes); uint32 align, mem_offset_t offset, uint32 bytes);
bool bool
aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_op_atomic_wait(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 op_type, uint32 align, uint32 offset, uint8 op_type, uint32 align, mem_offset_t offset,
uint32 bytes); uint32 bytes);
bool bool
aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx, aot_compiler_op_atomic_notify(AOTCompContext *comp_ctx,
AOTFuncContext *func_ctx, uint32 align, AOTFuncContext *func_ctx, uint32 align,
uint32 offset, uint32 bytes); mem_offset_t offset, uint32 bytes);
bool bool
aot_compiler_op_atomic_fence(AOTCompContext *comp_ctx, aot_compiler_op_atomic_fence(AOTCompContext *comp_ctx,

View File

@ -13,7 +13,7 @@
/* data_length in bytes */ /* data_length in bytes */
static LLVMValueRef static LLVMValueRef
simd_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align, simd_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align,
uint32 offset, uint32 data_length, LLVMTypeRef ptr_type, mem_offset_t offset, uint32 data_length, LLVMTypeRef ptr_type,
LLVMTypeRef data_type, bool enable_segue) LLVMTypeRef data_type, bool enable_segue)
{ {
LLVMValueRef maddr, data; LLVMValueRef maddr, data;
@ -42,7 +42,7 @@ simd_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align,
bool bool
aot_compile_simd_v128_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_v128_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset) uint32 align, mem_offset_t offset)
{ {
bool enable_segue = comp_ctx->enable_segue_v128_load; bool enable_segue = comp_ctx->enable_segue_v128_load;
LLVMTypeRef v128_ptr_type = enable_segue ? V128_PTR_TYPE_GS : V128_PTR_TYPE; LLVMTypeRef v128_ptr_type = enable_segue ? V128_PTR_TYPE_GS : V128_PTR_TYPE;
@ -62,7 +62,7 @@ fail:
bool bool
aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset) uint8 opcode, uint32 align, mem_offset_t offset)
{ {
LLVMValueRef sub_vector, result; LLVMValueRef sub_vector, result;
uint32 opcode_index = opcode - SIMD_v128_load8x8_s; uint32 opcode_index = opcode - SIMD_v128_load8x8_s;
@ -117,7 +117,7 @@ aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
bool bool
aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset) uint8 opcode, uint32 align, mem_offset_t offset)
{ {
uint32 opcode_index = opcode - SIMD_v128_load8_splat; uint32 opcode_index = opcode - SIMD_v128_load8_splat;
LLVMValueRef element, result; LLVMValueRef element, result;
@ -173,7 +173,7 @@ aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
bool bool
aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset, uint8 opcode, uint32 align, mem_offset_t offset,
uint8 lane_id) uint8 lane_id)
{ {
LLVMValueRef element, vector; LLVMValueRef element, vector;
@ -218,7 +218,7 @@ aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
bool bool
aot_compile_simd_load_zero(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_load_zero(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset) uint8 opcode, uint32 align, mem_offset_t offset)
{ {
LLVMValueRef element, result, mask; LLVMValueRef element, result, mask;
uint32 opcode_index = opcode - SIMD_v128_load32_zero; uint32 opcode_index = opcode - SIMD_v128_load32_zero;
@ -308,7 +308,7 @@ simd_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, uint32 align,
bool bool
aot_compile_simd_v128_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_v128_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset) uint32 align, mem_offset_t offset)
{ {
bool enable_segue = comp_ctx->enable_segue_v128_store; bool enable_segue = comp_ctx->enable_segue_v128_store;
LLVMTypeRef v128_ptr_type = enable_segue ? V128_PTR_TYPE_GS : V128_PTR_TYPE; LLVMTypeRef v128_ptr_type = enable_segue ? V128_PTR_TYPE_GS : V128_PTR_TYPE;
@ -324,7 +324,7 @@ fail:
bool bool
aot_compile_simd_store_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_store_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset, uint8 opcode, uint32 align, mem_offset_t offset,
uint8 lane_id) uint8 lane_id)
{ {
LLVMValueRef element, vector; LLVMValueRef element, vector;

View File

@ -14,32 +14,32 @@ extern "C" {
bool bool
aot_compile_simd_v128_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_v128_load(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset); uint32 align, mem_offset_t offset);
bool bool
aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_load_extend(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset); uint8 opcode, uint32 align, mem_offset_t offset);
bool bool
aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_load_splat(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset); uint8 opcode, uint32 align, mem_offset_t offset);
bool bool
aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_load_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset, uint8 opcode, uint32 align, mem_offset_t offset,
uint8 lane_id); uint8 lane_id);
bool bool
aot_compile_simd_load_zero(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_load_zero(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset); uint8 opcode, uint32 align, mem_offset_t offset);
bool bool
aot_compile_simd_v128_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_v128_store(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint32 align, uint32 offset); uint32 align, mem_offset_t offset);
bool bool
aot_compile_simd_store_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx, aot_compile_simd_store_lane(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
uint8 opcode, uint32 align, uint32 offset, uint8 opcode, uint32 align, mem_offset_t offset,
uint8 lane_id); uint8 lane_id);
#ifdef __cplusplus #ifdef __cplusplus

View File

@ -500,8 +500,10 @@ typedef struct WASMTable {
#if WASM_ENABLE_MEMORY64 != 0 #if WASM_ENABLE_MEMORY64 != 0
typedef uint64 mem_offset_t; typedef uint64 mem_offset_t;
#define PR_MEM_OFFSET PRIu64
#else #else
typedef uint32 mem_offset_t; typedef uint32 mem_offset_t;
#define PR_MEM_OFFSET PRIu32
#endif #endif
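The PR_MEM_OFFSET companion macro exists so that format strings stay correct whichever width mem_offset_t ends up with. A minimal usage sketch, not taken from the patch: the helper name and log text are illustrative, and <inttypes.h> is assumed to be available alongside the typedef above.

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical helper: print a linear-memory offset that may be 32- or 64-bit. */
static void
log_mem_offset(mem_offset_t offset)
{
    /* "%" PR_MEM_OFFSET expands to "%" PRIu32 or "%" PRIu64, matching mem_offset_t */
    printf("accessing linear memory at offset %" PR_MEM_OFFSET "\n", offset);
}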
typedef struct WASMMemory { typedef struct WASMMemory {

View File

@ -9,6 +9,7 @@
#include "wasm.h" #include "wasm.h"
#include "wasm_opcode.h" #include "wasm_opcode.h"
#include "wasm_runtime.h" #include "wasm_runtime.h"
#include "wasm_loader_common.h"
#include "../common/wasm_native.h" #include "../common/wasm_native.h"
#include "../common/wasm_memory.h" #include "../common/wasm_memory.h"
#if WASM_ENABLE_GC != 0 #if WASM_ENABLE_GC != 0
@ -2755,43 +2756,6 @@ check_memory_max_size(bool is_memory64, uint32 init_size, uint32 max_size,
return true; return true;
} }
static bool
check_memory_flag(const uint8 mem_flag, char *error_buf, uint32 error_buf_size)
{
/* Check whether certain features indicated by mem_flag are enabled in
* runtime */
if (mem_flag > MAX_PAGE_COUNT_FLAG) {
#if WASM_ENABLE_SHARED_MEMORY == 0
if (mem_flag & SHARED_MEMORY_FLAG) {
LOG_VERBOSE("shared memory flag was found, please enable shared "
"memory, lib-pthread or lib-wasi-threads");
set_error_buf(error_buf, error_buf_size, "invalid limits flags");
return false;
}
#endif
#if WASM_ENABLE_MEMORY64 == 0
if (mem_flag & MEMORY64_FLAG) {
LOG_VERBOSE("memory64 flag was found, please enable memory64");
set_error_buf(error_buf, error_buf_size, "invalid limits flags");
return false;
}
#endif
}
if (mem_flag > MAX_PAGE_COUNT_FLAG + SHARED_MEMORY_FLAG + MEMORY64_FLAG) {
set_error_buf(error_buf, error_buf_size, "invalid limits flags");
return false;
}
else if ((mem_flag & SHARED_MEMORY_FLAG)
&& !(mem_flag & MAX_PAGE_COUNT_FLAG)) {
set_error_buf(error_buf, error_buf_size,
"shared memory must have maximum");
return false;
}
return true;
}
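The duplicated per-loader check removed above is folded into a single helper, wasm_memory_check_flags, in wasm_loader_common.c. The sketch below reconstructs what that consolidated function plausibly looks like from the removed copy; the fourth parameter name (is_aot here) and whatever AOT-specific handling sits behind it are assumptions, since only the callers that pass false are visible in this section, and the body reuses the same helpers (set_error_buf, LOG_VERBOSE, the *_FLAG constants) as the code it replaces.

bool
wasm_memory_check_flags(const uint8 mem_flag, char *error_buf,
                        uint32 error_buf_size, bool is_aot)
{
    /* Check whether certain features indicated by mem_flag are enabled at
     * runtime */
    if (mem_flag > MAX_PAGE_COUNT_FLAG) {
#if WASM_ENABLE_SHARED_MEMORY == 0
        if (mem_flag & SHARED_MEMORY_FLAG) {
            LOG_VERBOSE("shared memory flag was found, please enable shared "
                        "memory, lib-pthread or lib-wasi-threads");
            set_error_buf(error_buf, error_buf_size, "invalid limits flags");
            return false;
        }
#endif
#if WASM_ENABLE_MEMORY64 == 0
        if (mem_flag & MEMORY64_FLAG) {
            LOG_VERBOSE("memory64 flag was found, please enable memory64");
            set_error_buf(error_buf, error_buf_size, "invalid limits flags");
            return false;
        }
#endif
    }

    if (mem_flag > MAX_PAGE_COUNT_FLAG + SHARED_MEMORY_FLAG + MEMORY64_FLAG) {
        set_error_buf(error_buf, error_buf_size, "invalid limits flags");
        return false;
    }
    else if ((mem_flag & SHARED_MEMORY_FLAG)
             && !(mem_flag & MAX_PAGE_COUNT_FLAG)) {
        set_error_buf(error_buf, error_buf_size,
                      "shared memory must have maximum");
        return false;
    }

    /* is_aot is accepted so AOT-serialized flags can be validated through the
       same entry point; that branch is omitted in this sketch. */
    (void)is_aot;
    return true;
}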
static bool static bool
load_memory_import(const uint8 **p_buf, const uint8 *buf_end, load_memory_import(const uint8 **p_buf, const uint8 *buf_end,
WASMModule *parent_module, const char *sub_module_name, WASMModule *parent_module, const char *sub_module_name,
@ -2824,7 +2788,7 @@ load_memory_import(const uint8 **p_buf, const uint8 *buf_end,
return false; return false;
} }
if (!check_memory_flag(mem_flag, error_buf, error_buf_size)) { if (!wasm_memory_check_flags(mem_flag, error_buf, error_buf_size, false)) {
return false; return false;
} }
@ -3226,7 +3190,8 @@ load_memory(const uint8 **p_buf, const uint8 *buf_end, WASMMemory *memory,
return false; return false;
} }
if (!check_memory_flag(memory->flags, error_buf, error_buf_size)) { if (!wasm_memory_check_flags(memory->flags, error_buf, error_buf_size,
false)) {
return false; return false;
} }
@ -14762,9 +14727,9 @@ re_scan:
goto fail; goto fail;
} }
read_leb_uint32(p, p_end, mem_offset); /* offset */ read_leb_mem_offset(p, p_end, mem_offset); /* offset */
POP_AND_PUSH(VALUE_TYPE_I32, VALUE_TYPE_V128); POP_AND_PUSH(mem_offset_type, VALUE_TYPE_V128);
#if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0 #if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0
func->has_memory_operations = true; func->has_memory_operations = true;
#endif #endif
@ -14781,10 +14746,10 @@ re_scan:
goto fail; goto fail;
} }
read_leb_uint32(p, p_end, mem_offset); /* offset */ read_leb_mem_offset(p, p_end, mem_offset); /* offset */
POP_V128(); POP_V128();
POP_I32(); POP_MEM_OFFSET();
#if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0 #if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0
func->has_memory_operations = true; func->has_memory_operations = true;
#endif #endif
@ -14999,7 +14964,7 @@ re_scan:
goto fail; goto fail;
} }
read_leb_uint32(p, p_end, mem_offset); /* offset */ read_leb_mem_offset(p, p_end, mem_offset); /* offset */
CHECK_BUF(p, p_end, 1); CHECK_BUF(p, p_end, 1);
lane = read_uint8(p); lane = read_uint8(p);
@ -15009,7 +14974,7 @@ re_scan:
} }
POP_V128(); POP_V128();
POP_I32(); POP_MEM_OFFSET();
if (opcode1 < SIMD_v128_store8_lane) { if (opcode1 < SIMD_v128_store8_lane) {
PUSH_V128(); PUSH_V128();
} }
@ -15030,9 +14995,9 @@ re_scan:
goto fail; goto fail;
} }
read_leb_uint32(p, p_end, mem_offset); /* offset */ read_leb_mem_offset(p, p_end, mem_offset); /* offset */
POP_AND_PUSH(VALUE_TYPE_I32, VALUE_TYPE_V128); POP_AND_PUSH(mem_offset_type, VALUE_TYPE_V128);
#if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0 #if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0
func->has_memory_operations = true; func->has_memory_operations = true;
#endif #endif
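The read_leb_mem_offset and POP_MEM_OFFSET helpers used in the hunks above are defined elsewhere in the patch and are not shown here. The following is only a plausible sketch of how they could be built, assuming a read_leb_uint64 macro exists next to read_leb_uint32 and that a POP_TYPE-style macro pops a given value type; mem_offset_type is the per-module VALUE_TYPE_I32/VALUE_TYPE_I64 selector already referenced in the validator changes above.

#if WASM_ENABLE_MEMORY64 != 0
/* Offsets may not fit in 32 bits: read the full 64-bit LEB and pop the
   address operand with the module's memory index type. */
#define read_leb_mem_offset(p, p_end, res) read_leb_uint64(p, p_end, res)
#define POP_MEM_OFFSET() POP_TYPE(mem_offset_type)
#else
#define read_leb_mem_offset(p, p_end, res) read_leb_uint32(p, p_end, res)
#define POP_MEM_OFFSET() POP_I32()
#endif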

View File

@ -11,6 +11,7 @@
#include "wasm_runtime.h" #include "wasm_runtime.h"
#include "../common/wasm_native.h" #include "../common/wasm_native.h"
#include "../common/wasm_memory.h" #include "../common/wasm_memory.h"
#include "wasm_loader_common.h"
#if WASM_ENABLE_FAST_JIT != 0 #if WASM_ENABLE_FAST_JIT != 0
#include "../fast-jit/jit_compiler.h" #include "../fast-jit/jit_compiler.h"
#include "../fast-jit/jit_codecache.h" #include "../fast-jit/jit_codecache.h"
@ -714,38 +715,6 @@ load_table_import(const uint8 **p_buf, const uint8 *buf_end,
return true; return true;
} }
static bool
check_memory_flag(const uint8 mem_flag)
{
/* Check whether certain features indicated by mem_flag are enabled in
* runtime */
if (mem_flag > MAX_PAGE_COUNT_FLAG) {
#if WASM_ENABLE_SHARED_MEMORY == 0
if (mem_flag & SHARED_MEMORY_FLAG) {
LOG_VERBOSE("shared memory flag was found, please enable shared "
"memory, lib-pthread or lib-wasi-threads");
return false;
}
#endif
#if WASM_ENABLE_MEMORY64 == 0
if (mem_flag & MEMORY64_FLAG) {
LOG_VERBOSE("memory64 flag was found, please enable memory64");
return false;
}
#endif
}
if (mem_flag > MAX_PAGE_COUNT_FLAG + SHARED_MEMORY_FLAG + MEMORY64_FLAG) {
return false;
}
else if ((mem_flag & SHARED_MEMORY_FLAG)
&& !(mem_flag & MAX_PAGE_COUNT_FLAG)) {
return false;
}
return true;
}
static bool static bool
load_memory_import(const uint8 **p_buf, const uint8 *buf_end, load_memory_import(const uint8 **p_buf, const uint8 *buf_end,
WASMModule *parent_module, const char *sub_module_name, WASMModule *parent_module, const char *sub_module_name,
@ -766,7 +735,9 @@ load_memory_import(const uint8 **p_buf, const uint8 *buf_end,
uint32 declare_max_page_count = 0; uint32 declare_max_page_count = 0;
read_leb_uint32(p, p_end, mem_flag); read_leb_uint32(p, p_end, mem_flag);
bh_assert(check_memory_flag(mem_flag)); if (!wasm_memory_check_flags(mem_flag, error_buf, error_buf_size, false)) {
return false;
}
#if WASM_ENABLE_APP_FRAMEWORK == 0 #if WASM_ENABLE_APP_FRAMEWORK == 0
is_memory64 = mem_flag & MEMORY64_FLAG; is_memory64 = mem_flag & MEMORY64_FLAG;
@ -796,7 +767,6 @@ load_memory_import(const uint8 **p_buf, const uint8 *buf_end,
memory->num_bytes_per_page = DEFAULT_NUM_BYTES_PER_PAGE; memory->num_bytes_per_page = DEFAULT_NUM_BYTES_PER_PAGE;
*p_buf = p; *p_buf = p;
(void)check_memory_flag;
return true; return true;
} }
@ -891,7 +861,10 @@ load_memory(const uint8 **p_buf, const uint8 *buf_end, WASMMemory *memory,
read_leb_uint32(p, p_end, memory->flags); read_leb_uint32(p, p_end, memory->flags);
bh_assert(p - p_org <= 1); bh_assert(p - p_org <= 1);
(void)p_org; (void)p_org;
bh_assert(check_memory_flag(memory->flags)); if (!wasm_memory_check_flags(memory->flags, error_buf, error_buf_size,
false)) {
return false;
}
#if WASM_ENABLE_APP_FRAMEWORK == 0 #if WASM_ENABLE_APP_FRAMEWORK == 0
is_memory64 = memory->flags & MEMORY64_FLAG; is_memory64 = memory->flags & MEMORY64_FLAG;
@ -916,7 +889,6 @@ load_memory(const uint8 **p_buf, const uint8 *buf_end, WASMMemory *memory,
memory->num_bytes_per_page = DEFAULT_NUM_BYTES_PER_PAGE; memory->num_bytes_per_page = DEFAULT_NUM_BYTES_PER_PAGE;
*p_buf = p; *p_buf = p;
(void)check_memory_flag;
return true; return true;
} }

View File

@ -1408,19 +1408,23 @@ execute_malloc_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
#endif #endif
WASMExecEnv *exec_env_created = NULL; WASMExecEnv *exec_env_created = NULL;
WASMModuleInstanceCommon *module_inst_old = NULL; WASMModuleInstanceCommon *module_inst_old = NULL;
uint32 argv[3], argc; union {
uint32 u32[3];
uint64 u64;
} argv;
uint32 argc;
bool ret; bool ret;
#if WASM_ENABLE_MEMORY64 != 0 #if WASM_ENABLE_MEMORY64 != 0
bool is_memory64 = module_inst->memories[0]->is_memory64; bool is_memory64 = module_inst->memories[0]->is_memory64;
if (is_memory64) { if (is_memory64) {
argc = 2; argc = 2;
PUT_I64_TO_ADDR(&argv[0], size); PUT_I64_TO_ADDR(&argv.u64, size);
} }
else else
#endif #endif
{ {
argc = 1; argc = 1;
argv[0] = (uint32)size; argv.u32[0] = (uint32)size;
} }
/* if __retain is exported, then this module is compiled by /* if __retain is exported, then this module is compiled by
@ -1431,7 +1435,7 @@ execute_malloc_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
/* the malloc function from assemblyscript is: /* the malloc function from assemblyscript is:
function __new(size: usize, id: u32) function __new(size: usize, id: u32)
id = 0 means this is an ArrayBuffer object */ id = 0 means this is an ArrayBuffer object */
argv[argc] = 0; argv.u32[argc] = 0;
argc++; argc++;
} }
@ -1472,10 +1476,10 @@ execute_malloc_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
} }
} }
ret = wasm_call_function(exec_env, malloc_func, argc, argv); ret = wasm_call_function(exec_env, malloc_func, argc, argv.u32);
if (retain_func && ret) if (retain_func && ret)
ret = wasm_call_function(exec_env, retain_func, 1, argv); ret = wasm_call_function(exec_env, retain_func, 1, argv.u32);
if (module_inst_old) if (module_inst_old)
/* Restore the existing exec_env's module inst */ /* Restore the existing exec_env's module inst */
@ -1487,11 +1491,11 @@ execute_malloc_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
if (ret) { if (ret) {
#if WASM_ENABLE_MEMORY64 != 0 #if WASM_ENABLE_MEMORY64 != 0
if (is_memory64) if (is_memory64)
*p_result = GET_I64_FROM_ADDR(&argv[0]); *p_result = GET_I64_FROM_ADDR(&argv.u64);
else else
#endif #endif
{ {
*p_result = argv[0]; *p_result = argv.u32[0];
} }
} }
return ret; return ret;
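Switching from a plain uint32 array to a union is what lets PUT_I64_TO_ADDR/GET_I64_FROM_ADDR write and read a 64-bit size or address without alignment or aliasing concerns, while the call interface still receives an array of uint32 argument cells. A standalone illustration of the same pattern; the names below are invented for the example and are not WAMR APIs.

#include <stdint.h>
#include <string.h>

union wasm_arg_cells {
    uint32_t u32[3]; /* what the call interface consumes */
    uint64_t u64;    /* a single i64 argument occupies two cells */
};

static void
pass_size_argument(union wasm_arg_cells *argv, uint64_t size, int is_memory64)
{
    if (is_memory64)
        memcpy(&argv->u64, &size, sizeof(size)); /* i64 argument, 2 cells */
    else
        argv->u32[0] = (uint32_t)size;           /* i32 argument, 1 cell */
}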
@ -1506,18 +1510,22 @@ execute_free_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
#endif #endif
WASMExecEnv *exec_env_created = NULL; WASMExecEnv *exec_env_created = NULL;
WASMModuleInstanceCommon *module_inst_old = NULL; WASMModuleInstanceCommon *module_inst_old = NULL;
uint32 argv[2], argc; union {
uint32 u32[2];
uint64 u64;
} argv;
uint32 argc;
bool ret; bool ret;
#if WASM_ENABLE_MEMORY64 != 0 #if WASM_ENABLE_MEMORY64 != 0
if (module_inst->memories[0]->is_memory64) { if (module_inst->memories[0]->is_memory64) {
PUT_I64_TO_ADDR(&argv[0], offset); PUT_I64_TO_ADDR(&argv.u64, offset);
argc = 2; argc = 2;
} }
else else
#endif #endif
{ {
argv[0] = (uint32)offset; argv.u32[0] = (uint32)offset;
argc = 1; argc = 1;
} }
@ -1558,7 +1566,7 @@ execute_free_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
} }
} }
ret = wasm_call_function(exec_env, free_func, argc, argv); ret = wasm_call_function(exec_env, free_func, argc, argv.u32);
if (module_inst_old) if (module_inst_old)
/* Restore the existing exec_env's module inst */ /* Restore the existing exec_env's module inst */
@ -4176,7 +4184,7 @@ fail:
#if WASM_ENABLE_BULK_MEMORY != 0 #if WASM_ENABLE_BULK_MEMORY != 0
bool bool
llvm_jit_memory_init(WASMModuleInstance *module_inst, uint32 seg_index, llvm_jit_memory_init(WASMModuleInstance *module_inst, uint32 seg_index,
uint32 offset, uint32 len, uint32 dst) uint32 offset, uint32 len, size_t dst)
{ {
WASMMemoryInstance *memory_inst; WASMMemoryInstance *memory_inst;
WASMModule *module; WASMModule *module;
@ -4211,7 +4219,7 @@ llvm_jit_memory_init(WASMModuleInstance *module_inst, uint32 seg_index,
(WASMModuleInstanceCommon *)module_inst, (uint64)dst); (WASMModuleInstanceCommon *)module_inst, (uint64)dst);
SHARED_MEMORY_LOCK(memory_inst); SHARED_MEMORY_LOCK(memory_inst);
bh_memcpy_s(maddr, (uint32)(memory_inst->memory_data_size - dst), bh_memcpy_s(maddr, CLAMP_U64_TO_U32(memory_inst->memory_data_size - dst),
data + offset, len); data + offset, len);
SHARED_MEMORY_UNLOCK(memory_inst); SHARED_MEMORY_UNLOCK(memory_inst);
return true; return true;
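CLAMP_U64_TO_U32 is defined elsewhere in the patch; its purpose here is to saturate the 64-bit remaining-size value so it fits bh_memcpy_s's 32-bit length parameter instead of wrapping when cast. A sketch of what such a clamp typically looks like, offered as an assumption rather than the verbatim macro (the codebase uses its own uint32 typedef where uint32_t appears below).

#include <stdint.h>

/* Saturating narrow: any value above UINT32_MAX collapses to UINT32_MAX. */
#define CLAMP_U64_TO_U32(value) \
    ((value) > UINT32_MAX ? UINT32_MAX : (uint32_t)(value))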

View File

@ -760,7 +760,7 @@ llvm_jit_invoke_native(WASMExecEnv *exec_env, uint32 func_idx, uint32 argc,
#if WASM_ENABLE_BULK_MEMORY != 0 #if WASM_ENABLE_BULK_MEMORY != 0
bool bool
llvm_jit_memory_init(WASMModuleInstance *module_inst, uint32 seg_index, llvm_jit_memory_init(WASMModuleInstance *module_inst, uint32 seg_index,
uint32 offset, uint32 len, uint32 dst); uint32 offset, uint32 len, size_t dst);
bool bool
llvm_jit_data_drop(WASMModuleInstance *module_inst, uint32 seg_index); llvm_jit_data_drop(WASMModuleInstance *module_inst, uint32 seg_index);

View File

@ -79,7 +79,7 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM
#### **Enable memory64 feature** #### **Enable memory64 feature**
- **WAMR_BUILD_MEMORY64**=1/0, default to disable if not set - **WAMR_BUILD_MEMORY64**=1/0, default to disable if not set
> Note: Currently, the memory64 feature is only supported in classic interpreter running mode. > Note: Currently, the memory64 feature is only supported in classic interpreter running mode and AOT mode.
#### **Enable thread manager** #### **Enable thread manager**
- **WAMR_BUILD_THREAD_MGR**=1/0, default to disable if not set - **WAMR_BUILD_THREAD_MGR**=1/0, default to disable if not set

View File

@ -111,6 +111,7 @@ $(NAME)_SOURCES := ${SHARED_ROOT}/platform/alios/alios_platform.c \
${IWASM_ROOT}/common/wasm_runtime_common.c \ ${IWASM_ROOT}/common/wasm_runtime_common.c \
${IWASM_ROOT}/common/wasm_native.c \ ${IWASM_ROOT}/common/wasm_native.c \
${IWASM_ROOT}/common/wasm_exec_env.c \ ${IWASM_ROOT}/common/wasm_exec_env.c \
${IWASM_ROOT}/common/wasm_loader_common.c \
${IWASM_ROOT}/common/wasm_memory.c \ ${IWASM_ROOT}/common/wasm_memory.c \
${IWASM_ROOT}/common/wasm_c_api.c \ ${IWASM_ROOT}/common/wasm_c_api.c \
${IWASM_ROOT}/common/arch/${INVOKE_NATIVE} \ ${IWASM_ROOT}/common/arch/${INVOKE_NATIVE} \

View File

@ -451,6 +451,7 @@ CSRCS += nuttx_platform.c \
wasm_runtime_common.c \ wasm_runtime_common.c \
wasm_native.c \ wasm_native.c \
wasm_exec_env.c \ wasm_exec_env.c \
wasm_loader_common.c \
wasm_memory.c \ wasm_memory.c \
wasm_c_api.c wasm_c_api.c

View File

@ -1119,9 +1119,6 @@ def compile_wasm_to_aot(wasm_tempfile, aot_tempfile, runner, opts, r, output = '
cmd.append("--enable-gc") cmd.append("--enable-gc")
cmd.append("--enable-tail-call") cmd.append("--enable-tail-call")
if opts.memory64:
cmd.append("--enable-memory64")
if output == 'object': if output == 'object':
cmd.append("--format=object") cmd.append("--format=object")
elif output == 'ir': elif output == 'ir':
@ -1134,9 +1131,10 @@ def compile_wasm_to_aot(wasm_tempfile, aot_tempfile, runner, opts, r, output = '
# Bounds checks are disabled by default for 64-bit targets, to # Bounds checks are disabled by default for 64-bit targets, to
# use the hardware based bounds checks. But it is not supported # use the hardware based bounds checks. But it is not supported
# in QEMU with NuttX. # in QEMU with NuttX and in memory64 mode.
# Enable bounds checks explicitly for all targets if running in QEMU. # Enable bounds checks explicitly for all targets when running in QEMU
if opts.qemu: # or when running in memory64 mode.
if opts.qemu or opts.memory64:
cmd.append("--bounds-checks=1") cmd.append("--bounds-checks=1")
# RISCV64 requires -mcmodel=medany, which can be set by --size-level=1 # RISCV64 requires -mcmodel=medany, which can be set by --size-level=1

View File

@ -0,0 +1,20 @@
diff --git a/test/core/binary.wast b/test/core/binary.wast
index b9fa438c..08ecee27 100644
--- a/test/core/binary.wast
+++ b/test/core/binary.wast
@@ -621,15 +621,6 @@
)
;; Malformed memory limits flag
-(assert_malformed
- (module binary
- "\00asm" "\01\00\00\00"
- "\05\03\01" ;; memory section with one entry
- "\04" ;; malformed memory limits flag
- "\00" ;; min 0
- )
- "malformed limits flags"
-)
(assert_malformed
(module binary
"\00asm" "\01\00\00\00"

View File

@ -409,6 +409,19 @@ function setup_wabt()
fi fi
} }
function compile_reference_interpreter()
{
echo "compile the reference intepreter"
pushd interpreter
make
if [ $? -ne 0 ]
then
echo "Failed to compile the reference interpreter"
exit 1
fi
popd
}
# TODO: with iwasm only # TODO: with iwasm only
function spec_test() function spec_test()
{ {
@ -457,6 +470,7 @@ function spec_test()
git apply ../../spec-test-script/thread_proposal_ignore_cases.patch git apply ../../spec-test-script/thread_proposal_ignore_cases.patch
git apply ../../spec-test-script/thread_proposal_fix_atomic_case.patch git apply ../../spec-test-script/thread_proposal_fix_atomic_case.patch
git apply ../../spec-test-script/thread_proposal_remove_memory64_flag_case.patch
fi fi
if [ ${ENABLE_EH} == 1 ]; then if [ ${ENABLE_EH} == 1 ]; then
@ -500,10 +514,7 @@ function spec_test()
git apply ../../spec-test-script/gc_nuttx_tail_call.patch git apply ../../spec-test-script/gc_nuttx_tail_call.patch
fi fi
echo "compile the reference intepreter" compile_reference_interpreter
pushd interpreter
make
popd
fi fi
# update memory64 cases # update memory64 cases
@ -519,14 +530,11 @@ function spec_test()
git restore . && git clean -ffd . git restore . && git clean -ffd .
# Reset to commit: "Merge remote-tracking branch 'upstream/main' into merge2" # Reset to commit: "Merge remote-tracking branch 'upstream/main' into merge2"
git reset --hard 48e69f394869c55b7bbe14ac963c09f4605490b6 git reset --hard 48e69f394869c55b7bbe14ac963c09f4605490b6
git checkout 044d0d2e77bdcbe891f7e0b9dd2ac01d56435f0b -- test/core/elem.wast git checkout 044d0d2e77bdcbe891f7e0b9dd2ac01d56435f0b -- test/core/elem.wast test/core/data.wast
git apply ../../spec-test-script/ignore_cases.patch git apply ../../spec-test-script/ignore_cases.patch
git apply ../../spec-test-script/memory64.patch git apply ../../spec-test-script/memory64.patch
echo "compile the reference intepreter" compile_reference_interpreter
pushd interpreter
make
popd
fi fi
popd popd

View File

@ -46,6 +46,7 @@ add_definitions(-DWASM_ENABLE_DUMP_CALL_STACK=1)
add_definitions(-DWASM_ENABLE_PERF_PROFILING=1) add_definitions(-DWASM_ENABLE_PERF_PROFILING=1)
add_definitions(-DWASM_ENABLE_LOAD_CUSTOM_SECTION=1) add_definitions(-DWASM_ENABLE_LOAD_CUSTOM_SECTION=1)
add_definitions(-DWASM_ENABLE_MODULE_INST_CONTEXT=1) add_definitions(-DWASM_ENABLE_MODULE_INST_CONTEXT=1)
add_definitions(-DWASM_ENABLE_MEMORY64=1)
add_definitions(-DWASM_ENABLE_GC=1) add_definitions(-DWASM_ENABLE_GC=1)