Compare commits

..

1 Commits

Author SHA1 Message Date
Giovanni Mazzeo
45a2a48da9
Merge 6aa94f0edf into 0343aaf8c3 2025-06-13 13:34:08 +09:00
67 changed files with 548 additions and 1595 deletions

View File

@ -30,23 +30,14 @@ runs:
if: ${{ startsWith(inputs.os, 'ubuntu') }}
shell: bash
run: |
echo "Downloading wasi-sdk for Ubuntu..."
sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-linux.tar.gz
echo "Extracting wasi-sdk..."
sudo tar -xf wasi-sdk.tar.gz
sudo ln -sf wasi-sdk-25.0-x86_64-linux/ wasi-sdk
echo "Downloading wabt for Ubuntu..."
sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-ubuntu-20.04.tar.gz
echo "Extracting wabt..."
sudo tar -xf wabt.tar.gz
sudo ln -sf wabt-1.0.37 wabt
/opt/wasi-sdk/bin/clang --version
/opt/wabt/bin/wasm-interp --version
echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on ubuntu"
working-directory: /opt
@ -54,23 +45,14 @@ runs:
if: ${{ inputs.os == 'macos-13' }}
shell: bash
run: |
echo "Downloading wasi-sdk for macOS-13..."
sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-macos.tar.gz
echo "Extracting wasi-sdk..."
sudo tar -xf wasi-sdk.tar.gz
sudo ln -sf wasi-sdk-25.0-x86_64-macos wasi-sdk
echo "Downloading wabt for macOS-13..."
sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.36/wabt-1.0.36-macos-12.tar.gz
echo "Extracting wabt..."
sudo tar -xf wabt.tar.gz
sudo ln -sf wabt-1.0.36 wabt
/opt/wasi-sdk/bin/clang --version
/opt/wabt/bin/wasm-interp --version
echo "::notice::wasi-sdk-25 and wabt-1.0.36 installed on macos-13"
working-directory: /opt
@ -78,48 +60,21 @@ runs:
if: ${{ inputs.os == 'macos-14' }}
shell: bash
run: |
echo "Downloading wasi-sdk for macOS-14..."
sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-arm64-macos.tar.gz
echo "Extracting wasi-sdk..."
sudo tar -xf wasi-sdk.tar.gz
sudo ln -sf wasi-sdk-25.0-arm64-macos wasi-sdk
echo "Downloading wabt for macOS-14..."
sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-macos-14.tar.gz
echo "Extracting wabt..."
sudo tar -xf wabt.tar.gz
sudo ln -sf wabt-1.0.37 wabt
/opt/wasi-sdk/bin/clang --version
/opt/wabt/bin/wasm-interp --version
echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on macos-14"
working-directory: /opt
#TODO: Add support for Windows
- name: Set up wasi-sdk and wabt on Windows
if: ${{ startsWith(inputs.os, 'windows') }}
shell: bash
shell: powershell
run: |
choco install -y wget
mkdir -p /opt/wasi-sdk
mkdir -p /opt/wabt
echo "Downloading wasi-sdk for Windows..."
wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-windows.tar.gz
echo "Extracting wasi-sdk..."
tar --strip-components=1 -xf wasi-sdk.tar.gz -C /opt/wasi-sdk
echo "Downloading wabt for Windows..."
wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-windows.tar.gz
echo "Extracting wabt..."
tar --strip-components=1 -xf wabt.tar.gz -C /opt/wabt
/opt/wasi-sdk/bin/clang --version
/opt/wabt/bin/wasm-interp --version
echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on Windows"
echo "::notice::Support for Windows is not implemented yet"
exit 1

View File

@ -23,7 +23,7 @@ on:
type: string
required: true
upload_url:
description: upload binary assets to the URL of release
description: a semantic version number. it is required when `release` is true.
type: string
required: false
ver_num:

View File

@ -23,7 +23,7 @@ on:
type: string
required: true
upload_url:
description: upload binary assets to the URL of release
description: a semantic version number. it is required when `release` is true.
type: string
required: false
ver_num:

View File

@ -53,7 +53,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3.29.0
uses: github/codeql-action/init@v3.28.19
with:
languages: ${{ matrix.language }}
@ -70,7 +70,7 @@ jobs:
- run: |
./.github/scripts/codeql_buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3.29.0
uses: github/codeql-action/analyze@v3.28.19
with:
category: "/language:${{matrix.language}}"
upload: false
@ -99,7 +99,7 @@ jobs:
output: ${{ steps.step1.outputs.sarif-output }}/cpp.sarif
- name: Upload CodeQL results to code scanning
uses: github/codeql-action/upload-sarif@v3.29.0
uses: github/codeql-action/upload-sarif@v3.28.19
with:
sarif_file: ${{ steps.step1.outputs.sarif-output }}
category: "/language:${{matrix.language}}"

View File

@ -288,24 +288,30 @@ jobs:
sudo swapon /swapfile
sudo swapon --show
- name: run spec tests with retry
id: run_spec_tests
uses: nick-fields/retry@v3
with:
command: |
cd ./tests/wamr-test-suites
source /opt/intel/sgxsdk/environment
./test_wamr.sh ${{ matrix.test_option }} -t ${{ matrix.running_mode }}
max_attempts: 3
retry_on: error
shell: bash
timeout_minutes: 10
- name: print test results
- name: run spec tests
run: |
echo "Test results:"
echo "${{ steps.run_spec_tests.outputs.stdout }}"
echo "${{ steps.run_spec_tests.outputs.stderr }}"
echo "Exit code: ${{ steps.run_spec_tests.outputs.exit_code }}"
echo "Exit error: ${{ steps.run_spec_tests.outputs.exit_error }}"
shell: bash
set +e
source /opt/intel/sgxsdk/environment
attempts=0
max_attempts=3
while [ $attempts -lt $max_attempts ]; do
./test_wamr.sh ${{ matrix.test_option }} -t ${{ matrix.running_mode }}
exitcode="$?"
if [ $exitcode -eq 0 ]; then
echo "Spec test passed"
exit 0
elif [ $exitcode -ne 143 ]; then
echo "Spec test failed with error code $exitcode"
exit 1
fi
echo "$exitcode is a known GitHub-hosted runner issue"
echo "::notice::Re-running the spec test due to error code 143"
attempts=$((attempts + 1))
done
echo "::notice::Report an error with code 143 in SGX CI after $max_attempts attempts"
exit 143
working-directory: ./tests/wamr-test-suites

View File

@ -36,11 +36,12 @@ env:
LLVM_EAGER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=0 -DWAMR_BUILD_FAST_JIT=0 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=0"
MULTI_TIER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=1 -DWAMR_BUILD_FAST_JIT=1 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=1"
# For Spec Test
DEFAULT_TEST_OPTIONS: "-s spec -b -P"
MULTI_MODULES_TEST_OPTIONS: "-s spec -b -P -M"
SIMD_TEST_OPTIONS: "-s spec -b -P -S"
THREADS_TEST_OPTIONS: "-s spec -b -P -p"
X86_32_TARGET_TEST_OPTIONS: "-m x86_32"
# FIXME: use binary release(adding -b) instead of building from source after upgrading to 22.04
DEFAULT_TEST_OPTIONS: "-s spec -P"
MULTI_MODULES_TEST_OPTIONS: "-s spec -M -P"
SIMD_TEST_OPTIONS: "-s spec -S -P"
THREADS_TEST_OPTIONS: "-s spec -p -P"
X86_32_TARGET_TEST_OPTIONS: "-m x86_32 -P"
WASI_TEST_OPTIONS: "-s wasi_certification -w"
permissions:

View File

@ -60,6 +60,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@2847b7f7ab9f48fc49eca90a53fff6007285f399
uses: github/codeql-action/upload-sarif@b1e4dc3db58c9601794e22a9f6d28d45461b9dbf
with:
sarif_file: results.sarif

View File

@ -99,9 +99,9 @@ if (NOT DEFINED WAMR_BUILD_LIB_WASI_THREADS)
set (WAMR_BUILD_LIB_WASI_THREADS 0)
endif ()
if (NOT DEFINED WAMR_BUILD_COPY_CALL_STACK)
if (NOT DEFINED WAMR_ENABLE_COPY_CALLSTACK)
# Disable copy callstack by default
set (WAMR_BUILD_COPY_CALL_STACK 0)
set (WAMR_ENABLE_COPY_CALLSTACK 0)
endif()
if (NOT DEFINED WAMR_BUILD_MINI_LOADER)

View File

@ -497,7 +497,7 @@
- wasm loader: Fix handling if block without op else (#3404)
- ref-types: Correct default value for function local variables (#3397)
- aot compiler: Fix the length type passed to aot_memmove/aot_memset (#3378)
- Fix loader and mini-loader select potential error (#3374)
- Fix loader and mini-loader select potiential error (#3374)
- Fix aot debugger compilation error on windows (#3370)
- A few native stack detection fixes for macOS/arm64 (#3368)
- Fix ESP32-S3 compiling error (#3359)

View File

@ -334,10 +334,15 @@ if (WAMR_BUILD_SHARED_HEAP EQUAL 1)
add_definitions (-DWASM_ENABLE_SHARED_HEAP=1)
message (" Shared heap enabled")
endif()
if (WAMR_BUILD_COPY_CALL_STACK EQUAL 1)
add_definitions (-DWASM_ENABLE_COPY_CALL_STACK=1)
if (WAMR_ENABLE_COPY_CALLSTACK EQUAL 1)
add_definitions (-DWAMR_ENABLE_COPY_CALLSTACK=1)
message(" Copy callstack enabled")
else ()
add_definitions (-DWAMR_ENABLE_COPY_CALLSTACK=0)
message(" Copy callstack disabled")
endif()
if (WAMR_BUILD_MEMORY64 EQUAL 1)
# if native is 32-bit or cross-compiled to 32-bit
if (NOT WAMR_BUILD_TARGET MATCHES ".*64.*")
@ -534,9 +539,6 @@ if (WAMR_BUILD_WASI_NN EQUAL 1)
if (DEFINED WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH)
add_definitions (-DWASM_WASI_NN_EXTERNAL_DELEGATE_PATH="${WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH}")
endif ()
if (NOT DEFINED WAMR_BUILD_WASI_EPHEMERAL_NN)
set(WAMR_BUILD_WASI_EPHEMERAL_NN 1)
endif()
if (WAMR_BUILD_WASI_EPHEMERAL_NN EQUAL 1)
message (" WASI-NN: use 'wasi_ephemeral_nn' instead of 'wasi-nn'")
add_definitions (-DWASM_ENABLE_WASI_EPHEMERAL_NN=1)

View File

@ -106,7 +106,6 @@ endif ()
if (WAMR_BUILD_WASI_NN EQUAL 1)
include (${IWASM_DIR}/libraries/wasi-nn/cmake/wasi_nn.cmake)
set (WAMR_BUILD_MODULE_INST_CONTEXT 1)
endif ()
if (WAMR_BUILD_LIB_PTHREAD EQUAL 1)

View File

@ -4,6 +4,7 @@
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
import argparse
import re
from pathlib import Path
import re
import shlex
@ -38,7 +39,7 @@ INVALID_FILE_NAME_SEGMENT = r"([a-zA-Z0-9]+\-[a-zA-Z0-9]+)"
def locate_command(command: str) -> bool:
if not shutil.which(command):
print(f"Command '{command}' not found")
print(f"Command '{command}'' not found")
return False
return True

View File

@ -193,8 +193,8 @@
#error "Heap aux stack allocation must be enabled for WASI threads"
#endif
#ifndef WASM_ENABLE_COPY_CALL_STACK
#define WASM_ENABLE_COPY_CALL_STACK 0
#ifndef WAMR_ENABLE_COPY_CALLSTACK
#define WAMR_ENABLE_COPY_CALLSTACK 0
#endif
#ifndef WASM_ENABLE_BASE_LIB

View File

@ -1309,13 +1309,6 @@ load_init_expr(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
read_uint32(buf, buf_end, type_idx);
read_uint32(buf, buf_end, length);
if (type_idx >= module->type_count
|| !wasm_type_is_array_type(module->types[type_idx])) {
set_error_buf(error_buf, error_buf_size,
"invalid or non-array type index.");
goto fail;
}
if (init_expr_type == INIT_EXPR_TYPE_ARRAY_NEW_DEFAULT) {
expr->u.array_new_default.type_index = type_idx;
expr->u.array_new_default.length = length;
@ -1730,12 +1723,6 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
(void)u8;
read_uint32(buf, buf_end, j);
#if WASM_ENABLE_AOT_VALIDATOR != 0
if (j >= module->type_count) {
set_error_buf(error_buf, error_buf_size, "invalid type index");
goto fail;
}
#endif
if (module->types[j]->ref_count == UINT16_MAX) {
set_error_buf(error_buf, error_buf_size,
"wasm type's ref count too large");
@ -1999,13 +1986,6 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
AOTType *cur_type = module->types[j];
parent_type_idx = cur_type->parent_type_idx;
if (parent_type_idx != (uint32)-1) { /* has parent */
#if WASM_ENABLE_AOT_VALIDATOR != 0
if (parent_type_idx >= module->type_count) {
set_error_buf(error_buf, error_buf_size,
"invalid parent type index");
goto fail;
}
#endif
AOTType *parent_type = module->types[parent_type_idx];
module->types[j]->parent_type = parent_type;
@ -2029,13 +2009,6 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
AOTType *cur_type = module->types[j];
parent_type_idx = cur_type->parent_type_idx;
if (parent_type_idx != (uint32)-1) { /* has parent */
#if WASM_ENABLE_AOT_VALIDATOR != 0
if (parent_type_idx >= module->type_count) {
set_error_buf(error_buf, error_buf_size,
"invalid parent type index");
goto fail;
}
#endif
AOTType *parent_type = module->types[parent_type_idx];
/* subtyping has been checked during compilation */
bh_assert(wasm_type_is_subtype_of(

View File

@ -4137,9 +4137,9 @@ aot_frame_update_profile_info(WASMExecEnv *exec_env, bool alloc_frame)
}
#endif /* end of WASM_ENABLE_AOT_STACK_FRAME != 0 */
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n,
char *error_buf, uint32 error_buf_size)
{
@ -4193,7 +4193,7 @@ aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
}
uint32
aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n,
char *error_buf, uint32_t error_buf_size)
{
@ -4243,7 +4243,7 @@ aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
}
uint32
aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
aot_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf,
uint32_t error_buf_size)
{
@ -4265,7 +4265,7 @@ aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
error_buf, error_buf_size);
}
}
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
#if WASM_ENABLE_DUMP_CALL_STACK != 0
bool

View File

@ -787,12 +787,12 @@ aot_frame_update_profile_info(WASMExecEnv *exec_env, bool alloc_frame);
bool
aot_create_call_stack(struct WASMExecEnv *exec_env);
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
aot_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf,
uint32_t error_buf_size);
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
/**
* @brief Dump wasm call stack or get the size

View File

@ -1145,7 +1145,7 @@ wasm_reftype_is_subtype_of(uint8 type1, const WASMRefType *ref_type1,
return true;
else {
int32 heap_type = ref_type1->ref_ht_common.heap_type;
// We don't care whether type2 is nullable or not. So
// We dont care whether type2 is nullable or not. So
// we normalize it into its related one-byte type.
if (type2 == REF_TYPE_HT_NULLABLE
|| type2 == REF_TYPE_HT_NON_NULLABLE) {

View File

@ -1743,9 +1743,9 @@ wasm_runtime_destroy_exec_env(WASMExecEnv *exec_env)
wasm_exec_env_destroy(exec_env);
}
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer,
wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf,
uint32_t error_buf_size)
{
@ -1780,7 +1780,7 @@ wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer,
strncpy(error_buf, err_msg, error_buf_size);
return 0;
}
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
bool
wasm_runtime_init_thread_env(void)

View File

@ -758,12 +758,12 @@ wasm_runtime_create_exec_env(WASMModuleInstanceCommon *module_inst,
WASM_RUNTIME_API_EXTERN void
wasm_runtime_destroy_exec_env(WASMExecEnv *exec_env);
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
WASM_RUNTIME_API_EXTERN uint32_t
wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer,
wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf,
uint32 error_buf_size);
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN WASMModuleInstanceCommon *

View File

@ -3999,7 +3999,7 @@ aot_get_func_from_table(const AOTCompContext *comp_ctx, LLVMValueRef base,
if (!(func =
LLVMBuildBitCast(comp_ctx->builder, func, func_type, "func"))) {
aot_set_last_error("cast function failed.");
aot_set_last_error("cast function fialed.");
goto fail;
}
@ -4068,7 +4068,7 @@ aot_load_const_from_table(AOTCompContext *comp_ctx, LLVMValueRef base,
if (!(const_addr = LLVMBuildBitCast(comp_ctx->builder, const_addr,
const_ptr_type, "const_addr"))) {
aot_set_last_error("cast const failed.");
aot_set_last_error("cast const fialed.");
return NULL;
}

View File

@ -139,6 +139,8 @@ typedef struct wasm_frame_t {
uint32_t *lp;
} WASMCApiFrame;
typedef WASMCApiFrame wasm_frame_t;
/* WASM section */
typedef struct wasm_section_t {
struct wasm_section_t *next;
@ -902,7 +904,7 @@ wasm_runtime_destroy_exec_env(wasm_exec_env_t exec_env);
* @return number of copied frames
*/
WASM_RUNTIME_API_EXTERN uint32_t
wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer,
wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer,
const uint32_t length, const uint32_t skip_n,
char *error_buf, uint32_t error_buf_size);

View File

@ -3351,8 +3351,7 @@ load_import_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module,
/* valtype */
CHECK_BUF(p, p_end, 1);
global_type = read_uint8(p);
if (wasm_is_reftype_htref_nullable(global_type)
|| wasm_is_reftype_htref_non_nullable(global_type)) {
if (wasm_is_reftype_htref_nullable(global_type)) {
int32 heap_type;
read_leb_int32(p, p_end, heap_type);
(void)heap_type;
@ -3713,7 +3712,7 @@ load_function_section(const uint8 *buf, const uint8 *buf_end,
* we shall make a copy of code body [p_code, p_code + code_size]
* when we are worrying about inappropriate releasing behaviour.
* all code bodies are actually in a buffer which user allocates in
* their embedding environment and we don't have power over them.
* his embedding environment and we don't have power on them.
* it will be like:
* code_body_cp = malloc(code_size);
* memcpy(code_body_cp, p_code, code_size);

View File

@ -1226,7 +1226,7 @@ load_function_section(const uint8 *buf, const uint8 *buf_end,
* we shall make a copy of code body [p_code, p_code + code_size]
* when we are worrying about inappropriate releasing behaviour.
* all code bodies are actually in a buffer which user allocates in
* their embedding environment and we don't have power over them.
* his embedding environment and we don't have power on them.
* it will be like:
* code_body_cp = malloc(code_size);
* memcpy(code_body_cp, p_code, code_size);

View File

@ -2668,7 +2668,7 @@ wasm_instantiate(WASMModule *module, WASMModuleInstance *parent,
}
STORE_PTR((void **)global_data, func_obj);
global_data += sizeof(void *);
/* Also update the initial_value since other globals may
/* Also update the inital_value since other globals may
* refer to this */
global->initial_value.gc_obj = (wasm_obj_t)func_obj;
break;
@ -4195,9 +4195,9 @@ wasm_get_module_inst_mem_consumption(const WASMModuleInstance *module_inst,
#endif /* end of (WASM_ENABLE_MEMORY_PROFILING != 0) \
|| (WASM_ENABLE_MEMORY_TRACING != 0) */
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
wasm_interp_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
uint32 length, uint32 skip_n, char *error_buf,
uint32_t error_buf_size)
{
@ -4242,7 +4242,7 @@ wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
}
return count >= skip_n ? count - skip_n : 0;
}
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
#if WASM_ENABLE_DUMP_CALL_STACK != 0
bool

View File

@ -731,12 +731,12 @@ wasm_get_table_inst(const WASMModuleInstance *module_inst, uint32 tbl_idx)
#if WASM_ENABLE_DUMP_CALL_STACK != 0
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
wasm_interp_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
uint32 length, uint32 skip_n, char *error_buf,
uint32_t error_buf_size);
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
bool
wasm_interp_create_call_stack(struct WASMExecEnv *exec_env);

View File

@ -743,7 +743,7 @@ wasm_debug_instance_get_obj_mem(WASMDebugInstance *instance, uint64 offset,
module_inst = (WASMModuleInstance *)exec_env->module_inst;
if (offset + *size > module_inst->module->load_size) {
LOG_VERBOSE("wasm_debug_instance_get_data_mem size overflow!\n");
LOG_VERBOSE("wasm_debug_instance_get_data_mem size over flow!\n");
*size = module_inst->module->load_size >= offset
? module_inst->module->load_size - offset
: 0;
@ -797,7 +797,7 @@ wasm_debug_instance_get_linear_mem(WASMDebugInstance *instance, uint64 offset,
num_bytes_per_page = memory->num_bytes_per_page;
linear_mem_size = num_bytes_per_page * memory->cur_page_count;
if (offset + *size > linear_mem_size) {
LOG_VERBOSE("wasm_debug_instance_get_linear_mem size overflow!\n");
LOG_VERBOSE("wasm_debug_instance_get_linear_mem size over flow!\n");
*size = linear_mem_size >= offset ? linear_mem_size - offset : 0;
}
bh_memcpy_s(buf, (uint32)*size, memory->memory_data + offset,
@ -830,7 +830,7 @@ wasm_debug_instance_set_linear_mem(WASMDebugInstance *instance, uint64 offset,
num_bytes_per_page = memory->num_bytes_per_page;
linear_mem_size = num_bytes_per_page * memory->cur_page_count;
if (offset + *size > linear_mem_size) {
LOG_VERBOSE("wasm_debug_instance_get_linear_mem size overflow!\n");
LOG_VERBOSE("wasm_debug_instance_get_linear_mem size over flow!\n");
*size = linear_mem_size >= offset ? linear_mem_size - offset : 0;
}
bh_memcpy_s(memory->memory_data + offset, (uint32)*size, buf,

View File

@ -175,19 +175,6 @@ process_wasm_global(WASMGDBServer *server, char *args)
os_mutex_unlock(&tmpbuf_lock);
}
/* TODO: let server send an empty/error reply.
Original issue: 4265
Not tested yet, but it should work.
*/
static void
send_reply(WASMGDBServer *server, const char *err)
{
if (!err || !*err)
write_packet(server, "");
else
write_packet(server, err);
}
void
handle_general_query(WASMGDBServer *server, char *payload)
{
@ -227,7 +214,6 @@ handle_general_query(WASMGDBServer *server, char *payload)
if (!args) {
LOG_ERROR("payload parse error during handle_general_query");
send_reply(server, "");
return;
}
@ -398,7 +384,7 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
if (status == 0) {
os_mutex_lock(&tmpbuf_lock);
(void)snprintf(tmpbuf, MAX_PACKET_SIZE, "W%02" PRIx32, status);
send_reply(server, tmpbuf);
write_packet(server, tmpbuf);
os_mutex_unlock(&tmpbuf_lock);
return;
}
@ -417,7 +403,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
"T%02" PRIx32 "thread:%" PRIx64 ";name:%s;", gdb_status,
(uint64)(uintptr_t)tid, "nobody");
if (len < 0 || len >= MAX_PACKET_SIZE) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock);
return;
}
@ -425,7 +410,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
if (tids_count > 0) {
int n = snprintf(tmpbuf + len, MAX_PACKET_SIZE - len, "threads:");
if (n < 0 || n >= MAX_PACKET_SIZE - len) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock);
return;
}
@ -442,7 +426,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
}
if (n < 0 || n >= MAX_PACKET_SIZE - len) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock);
return;
}
@ -469,7 +452,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
"thread-pcs:%" PRIx64 ";00:%s;reason:%s;description:", pc,
pc_string, "exception");
if (n < 0 || n >= MAX_PACKET_SIZE - len) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock);
return;
}
@ -480,7 +462,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
n = snprintf(tmpbuf + len, MAX_PACKET_SIZE - len, "%02x",
exception[i]);
if (n < 0 || n >= MAX_PACKET_SIZE - len) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock);
return;
}
@ -611,7 +592,7 @@ handle_get_register(WASMGDBServer *server, char *payload)
int32 i = strtol(payload, NULL, 16);
if (i != 0) {
send_reply(server, "E01");
write_packet(server, "E01");
return;
}
regdata = wasm_debug_instance_get_pc(
@ -767,7 +748,7 @@ handle_add_break(WASMGDBServer *server, char *payload)
if ((arg_c = sscanf(payload, "%zx,%" SCNx64 ",%zx", &type, &addr, &length))
!= 3) {
LOG_ERROR("Unsupported number of add break arguments %d", arg_c);
send_reply(server, "");
write_packet(server, "");
return;
}
@ -802,7 +783,7 @@ handle_remove_break(WASMGDBServer *server, char *payload)
if ((arg_c = sscanf(payload, "%zx,%" SCNx64 ",%zx", &type, &addr, &length))
!= 3) {
LOG_ERROR("Unsupported number of remove break arguments %d", arg_c);
send_reply(server, "");
write_packet(server, "");
return;
}
@ -854,7 +835,6 @@ handle_malloc(WASMGDBServer *server, char *payload)
}
else {
LOG_ERROR("Payload parse error during handle malloc");
send_reply(server, "");
return;
}

View File

@ -4,9 +4,4 @@
*/
#define WASM_ENABLE_WASI_EPHEMERAL_NN 1
#define WASI_NN_NAME(name) wasi_ephemeral_nn_##name
#include "wasi_nn.h"
#undef WASM_ENABLE_WASI_EPHEMERAL_NN
#undef WASI_NN_NAME

View File

@ -21,7 +21,6 @@
#else
#define WASI_NN_IMPORT(name) \
__attribute__((import_module("wasi_nn"), import_name(name)))
#warning You are using "wasi_nn", which is a legacy WAMR-specific ABI. It's deperecated and will likely be removed in future versions of WAMR. Please use "wasi_ephemeral_nn" instead. (For a WASM module, use the wasi_ephemeral_nn.h header instead. For the runtime configurations, enable WASM_ENABLE_WASI_EPHEMERAL_NN/WAMR_BUILD_WASI_EPHEMERAL_NN.)
#endif
/**
@ -35,22 +34,17 @@
* @return wasi_nn_error Execution status.
*/
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
WASI_NN_ERROR_TYPE
WASI_NN_NAME(load)
(WASI_NN_NAME(graph_builder) * builder, uint32_t builder_len,
WASI_NN_NAME(graph_encoding) encoding, WASI_NN_NAME(execution_target) target,
WASI_NN_NAME(graph) * g) WASI_NN_IMPORT("load");
wasi_nn_error
load(graph_builder *builder, uint32_t builder_len, graph_encoding encoding,
execution_target target, graph *g) WASI_NN_IMPORT("load");
#else
WASI_NN_ERROR_TYPE
WASI_NN_NAME(load)
(WASI_NN_NAME(graph_builder_array) * builder,
WASI_NN_NAME(graph_encoding) encoding, WASI_NN_NAME(execution_target) target,
WASI_NN_NAME(graph) * g) WASI_NN_IMPORT("load");
wasi_nn_error
load(graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *g) WASI_NN_IMPORT("load");
#endif
WASI_NN_ERROR_TYPE
WASI_NN_NAME(load_by_name)
(const char *name, uint32_t name_len, WASI_NN_NAME(graph) * g)
wasi_nn_error
load_by_name(const char *name, uint32_t name_len, graph *g)
WASI_NN_IMPORT("load_by_name");
/**
@ -65,9 +59,8 @@ WASI_NN_NAME(load_by_name)
* @param ctx Execution context.
* @return wasi_nn_error Execution status.
*/
WASI_NN_ERROR_TYPE
WASI_NN_NAME(init_execution_context)
(WASI_NN_NAME(graph) g, WASI_NN_NAME(graph_execution_context) * ctx)
wasi_nn_error
init_execution_context(graph g, graph_execution_context *ctx)
WASI_NN_IMPORT("init_execution_context");
/**
@ -78,10 +71,9 @@ WASI_NN_NAME(init_execution_context)
* @param tensor Input tensor.
* @return wasi_nn_error Execution status.
*/
WASI_NN_ERROR_TYPE
WASI_NN_NAME(set_input)
(WASI_NN_NAME(graph_execution_context) ctx, uint32_t index,
WASI_NN_NAME(tensor) * tensor) WASI_NN_IMPORT("set_input");
wasi_nn_error
set_input(graph_execution_context ctx, uint32_t index, tensor *tensor)
WASI_NN_IMPORT("set_input");
/**
* @brief Compute the inference on the given inputs.
@ -89,9 +81,8 @@ WASI_NN_NAME(set_input)
* @param ctx Execution context.
* @return wasi_nn_error Execution status.
*/
WASI_NN_ERROR_TYPE
WASI_NN_NAME(compute)
(WASI_NN_NAME(graph_execution_context) ctx) WASI_NN_IMPORT("compute");
wasi_nn_error
compute(graph_execution_context ctx) WASI_NN_IMPORT("compute");
/**
* @brief Extract the outputs after inference.
@ -106,16 +97,15 @@ WASI_NN_NAME(compute)
* @return wasi_nn_error Execution status.
*/
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
WASI_NN_ERROR_TYPE
WASI_NN_NAME(get_output)
(WASI_NN_NAME(graph_execution_context) ctx, uint32_t index,
uint8_t *output_tensor, uint32_t output_tensor_max_size,
wasi_nn_error
get_output(graph_execution_context ctx, uint32_t index,
tensor_data output_tensor, uint32_t output_tensor_max_size,
uint32_t *output_tensor_size) WASI_NN_IMPORT("get_output");
#else
WASI_NN_ERROR_TYPE
WASI_NN_NAME(get_output)
(graph_execution_context ctx, uint32_t index, uint8_t *output_tensor,
uint32_t *output_tensor_size) WASI_NN_IMPORT("get_output");
wasi_nn_error
get_output(graph_execution_context ctx, uint32_t index,
tensor_data output_tensor, uint32_t *output_tensor_size)
WASI_NN_IMPORT("get_output");
#endif
#endif

View File

@ -13,23 +13,6 @@
extern "C" {
#endif
/* our host logic doesn't use any prefix. neither legacy wasi_nn.h does. */
#if !defined(__wasm__) || !defined(WASI_NN_NAME)
#define WASI_NN_NAME(name) name
#define WASI_NN_ERROR_NAME(name) name
#define WASI_NN_TYPE_NAME(name) name
#define WASI_NN_ENCODING_NAME(name) name
#define WASI_NN_TARGET_NAME(name) name
#define WASI_NN_ERROR_TYPE wasi_nn_error
#else
#define WASI_NN_ERROR_NAME(name) WASI_NN_NAME(error_##name)
#define WASI_NN_TYPE_NAME(name) WASI_NN_NAME(type_##name)
#define WASI_NN_ENCODING_NAME(name) WASI_NN_NAME(encoding_##name)
#define WASI_NN_TARGET_NAME(name) WASI_NN_NAME(target_##name)
#define WASI_NN_ERROR_TYPE WASI_NN_NAME(error);
#endif
/**
* ERRORS
*
@ -39,22 +22,22 @@ extern "C" {
// https://github.com/WebAssembly/wasi-nn/blob/71320d95b8c6d43f9af7f44e18b1839db85d89b4/wasi-nn.witx#L5-L17
// Error codes returned by functions in this API.
typedef enum {
WASI_NN_ERROR_NAME(success) = 0,
WASI_NN_ERROR_NAME(invalid_argument),
WASI_NN_ERROR_NAME(invalid_encoding),
WASI_NN_ERROR_NAME(missing_memory),
WASI_NN_ERROR_NAME(busy),
WASI_NN_ERROR_NAME(runtime_error),
WASI_NN_ERROR_NAME(unsupported_operation),
WASI_NN_ERROR_NAME(too_large),
WASI_NN_ERROR_NAME(not_found),
success = 0,
invalid_argument,
invalid_encoding,
missing_memory,
busy,
runtime_error,
unsupported_operation,
too_large,
not_found,
// for WasmEdge-wasi-nn
WASI_NN_ERROR_NAME(end_of_sequence) = 100, // End of Sequence Found.
WASI_NN_ERROR_NAME(context_full) = 101, // Context Full.
WASI_NN_ERROR_NAME(prompt_tool_long) = 102, // Prompt Too Long.
WASI_NN_ERROR_NAME(model_not_found) = 103, // Model Not Found.
} WASI_NN_ERROR_TYPE;
end_of_sequence = 100, // End of Sequence Found.
context_full = 101, // Context Full.
prompt_tool_long = 102, // Prompt Too Long.
model_not_found = 103, // Model Not Found.
} wasi_nn_error;
/**
* TENSOR
@ -68,27 +51,15 @@ typedef enum {
typedef struct {
uint32_t *buf;
uint32_t size;
} WASI_NN_NAME(tensor_dimensions);
} tensor_dimensions;
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
// sync up with
// https://github.com/WebAssembly/wasi-nn/blob/71320d95b8c6d43f9af7f44e18b1839db85d89b4/wasi-nn.witx#L19-L28
// The type of the elements in a tensor.
typedef enum {
WASI_NN_TYPE_NAME(fp16) = 0,
WASI_NN_TYPE_NAME(fp32),
WASI_NN_TYPE_NAME(fp64),
WASI_NN_TYPE_NAME(u8),
WASI_NN_TYPE_NAME(i32),
WASI_NN_TYPE_NAME(i64),
} WASI_NN_NAME(tensor_type);
typedef enum { fp16 = 0, fp32, fp64, u8, i32, i64 } tensor_type;
#else
typedef enum {
WASI_NN_TYPE_NAME(fp16) = 0,
WASI_NN_TYPE_NAME(fp32),
WASI_NN_TYPE_NAME(up8),
WASI_NN_TYPE_NAME(ip32),
} WASI_NN_NAME(tensor_type);
typedef enum { fp16 = 0, fp32, up8, ip32 } tensor_type;
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
// The tensor data.
@ -99,14 +70,7 @@ typedef enum {
// 4-byte f32 elements would have a data array of length 16). Naturally, this
// representation requires some knowledge of how to lay out data in
// memory--e.g., using row-major ordering--and could perhaps be improved.
#if !defined(__wasm__) || WASM_ENABLE_WASI_EPHEMERAL_NN != 0
typedef struct {
uint8_t *buf;
uint32_t size;
} WASI_NN_NAME(tensor_data);
#else
typedef uint8_t *WASI_NN_NAME(tensor_data);
#endif
typedef uint8_t *tensor_data;
// A tensor.
typedef struct {
@ -114,16 +78,16 @@ typedef struct {
// represent a tensor containing a single value, use `[1]` for the tensor
// dimensions.
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 && defined(__wasm__)
WASI_NN_NAME(tensor_dimensions) dimensions;
tensor_dimensions dimensions;
#else
WASI_NN_NAME(tensor_dimensions) * dimensions;
tensor_dimensions *dimensions;
#endif
// Describe the type of element in the tensor (e.g., f32).
uint8_t type;
uint8_t _pad[3];
// Contains the tensor data.
WASI_NN_NAME(tensor_data) data;
} WASI_NN_NAME(tensor);
tensor_data data;
} tensor;
/**
* GRAPH
@ -138,15 +102,15 @@ typedef struct {
typedef struct {
uint8_t *buf;
uint32_t size;
} WASI_NN_NAME(graph_builder);
} graph_builder;
typedef struct {
WASI_NN_NAME(graph_builder) * buf;
graph_builder *buf;
uint32_t size;
} WASI_NN_NAME(graph_builder_array);
} graph_builder_array;
// An execution graph for performing inference (i.e., a model).
typedef uint32_t WASI_NN_NAME(graph);
typedef uint32_t graph;
// sync up with
// https://github.com/WebAssembly/wasi-nn/blob/main/wit/wasi-nn.wit#L75
@ -154,25 +118,21 @@ typedef uint32_t WASI_NN_NAME(graph);
// various backends that encode (i.e., serialize) their graph IR with different
// formats.
typedef enum {
WASI_NN_ENCODING_NAME(openvino) = 0,
WASI_NN_ENCODING_NAME(onnx),
WASI_NN_ENCODING_NAME(tensorflow),
WASI_NN_ENCODING_NAME(pytorch),
WASI_NN_ENCODING_NAME(tensorflowlite),
WASI_NN_ENCODING_NAME(ggml),
WASI_NN_ENCODING_NAME(autodetect),
WASI_NN_ENCODING_NAME(unknown_backend),
} WASI_NN_NAME(graph_encoding);
openvino = 0,
onnx,
tensorflow,
pytorch,
tensorflowlite,
ggml,
autodetect,
unknown_backend,
} graph_encoding;
// Define where the graph should be executed.
typedef enum WASI_NN_NAME(execution_target) {
WASI_NN_TARGET_NAME(cpu) = 0,
WASI_NN_TARGET_NAME(gpu),
WASI_NN_TARGET_NAME(tpu),
} WASI_NN_NAME(execution_target);
typedef enum execution_target { cpu = 0, gpu, tpu } execution_target;
// Bind a `graph` to the input and output tensors for an inference.
typedef uint32_t WASI_NN_NAME(graph_execution_context);
typedef uint32_t graph_execution_context;
#ifdef __cplusplus
}

View File

@ -99,8 +99,7 @@ graph_builder_array_app_native(wasm_module_inst_t instance,
static wasi_nn_error
tensor_data_app_native(wasm_module_inst_t instance, uint32_t total_elements,
tensor_wasm *input_tensor_wasm, void **data,
uint32_t *size)
tensor_wasm *input_tensor_wasm, tensor_data *data)
{
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
#define data_size input_tensor_wasm->data_size
@ -114,9 +113,8 @@ tensor_data_app_native(wasm_module_inst_t instance, uint32_t total_elements,
NN_ERR_PRINTF("input_tensor_wasm->data_offset is invalid");
return invalid_argument;
}
*data = wasm_runtime_addr_app_to_native(
*data = (tensor_data)wasm_runtime_addr_app_to_native(
instance, (uint64)input_tensor_wasm->data_offset);
*size = data_size;
return success;
#undef data_size
}
@ -190,19 +188,16 @@ tensor_app_native(wasm_module_inst_t instance, tensor_wasm *input_tensor_wasm,
NN_DBG_PRINTF("Tensor type: %d", input_tensor_wasm->type);
NN_DBG_PRINTF("Total number of elements: %d", total_elements);
void *data = NULL;
uint32_t datasize;
tensor_data data = NULL;
if (success
!= (res =
tensor_data_app_native(instance, total_elements,
input_tensor_wasm, &data, &datasize))) {
!= (res = tensor_data_app_native(instance, total_elements,
input_tensor_wasm, &data))) {
wasm_runtime_free(dimensions);
return res;
}
input_tensor->type = input_tensor_wasm->type;
input_tensor->dimensions = dimensions;
input_tensor->data.buf = data;
input_tensor->data.size = datasize;
input_tensor->data = data;
return success;
}

View File

@ -20,10 +20,6 @@
#include "wasi_nn_types.h"
#include "wasm_export.h"
#if WASM_ENABLE_WASI_EPHEMERAL_NN == 0
#warning You are using "wasi_nn", which is a legacy WAMR-specific ABI. It's deperecated and will likely be removed in future versions of WAMR. Please use "wasi_ephemeral_nn" instead. (For a WASM module, use the wasi_ephemeral_nn.h header instead. For the runtime configurations, enable WASM_ENABLE_WASI_EPHEMERAL_NN/WAMR_BUILD_WASI_EPHEMERAL_NN.)
#endif
#define HASHMAP_INITIAL_SIZE 20
#if defined(__APPLE__)
#define LIB_EXTENTION ".dylib"
@ -55,36 +51,65 @@ struct backends_api_functions {
NN_ERR_PRINTF("Error %s() -> %d", #func, wasi_error); \
} while (0)
static void *wasi_nn_key;
/* HashMap utils */
static HashMap *hashmap;
static uint32
hash_func(const void *key)
{
// fnv1a_hash
const uint32 FNV_PRIME = 16777619;
const uint32 FNV_OFFSET_BASIS = 2166136261U;
uint32 hash = FNV_OFFSET_BASIS;
const unsigned char *bytes = (const unsigned char *)key;
for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
hash ^= bytes[i];
hash *= FNV_PRIME;
}
return hash;
}
static bool
key_equal_func(void *key1, void *key2)
{
return key1 == key2;
}
static void
key_destroy_func(void *key1)
{
/* key type is wasm_module_inst_t*. do nothing */
}
static void
wasi_nn_ctx_destroy(WASINNContext *wasi_nn_ctx)
{
NN_DBG_PRINTF("[WASI NN] DEINIT...");
if (wasi_nn_ctx == NULL) {
NN_ERR_PRINTF(
"Error when deallocating memory. WASI-NN context is NULL");
return;
}
NN_DBG_PRINTF("[WASI NN] DEINIT...");
NN_DBG_PRINTF("Freeing wasi-nn");
NN_DBG_PRINTF("-> is_model_loaded: %d", wasi_nn_ctx->is_model_loaded);
NN_DBG_PRINTF("-> current_encoding: %d", wasi_nn_ctx->backend);
bh_assert(!wasi_nn_ctx->busy);
/* deinit() the backend */
if (wasi_nn_ctx->is_backend_ctx_initialized) {
wasi_nn_error res;
call_wasi_nn_func(wasi_nn_ctx->backend, deinit, res,
wasi_nn_ctx->backend_ctx);
}
os_mutex_destroy(&wasi_nn_ctx->lock);
wasm_runtime_free(wasi_nn_ctx);
}
static void
dtor(wasm_module_inst_t inst, void *ctx)
value_destroy_func(void *value)
{
wasi_nn_ctx_destroy(ctx);
wasi_nn_ctx_destroy((WASINNContext *)value);
}
bool
@ -97,9 +122,12 @@ wasi_nn_initialize()
return false;
}
wasi_nn_key = wasm_runtime_create_context_key(dtor);
if (wasi_nn_key == NULL) {
NN_ERR_PRINTF("Failed to create context key");
// hashmap { instance: wasi_nn_ctx }
hashmap = bh_hash_map_create(HASHMAP_INITIAL_SIZE, true, hash_func,
key_equal_func, key_destroy_func,
value_destroy_func);
if (hashmap == NULL) {
NN_ERR_PRINTF("Error while initializing hashmap");
os_mutex_destroy(&wasi_nn_lock);
return false;
}
@ -120,11 +148,6 @@ wasi_nn_initialize_context()
}
memset(wasi_nn_ctx, 0, sizeof(WASINNContext));
if (os_mutex_init(&wasi_nn_ctx->lock)) {
NN_ERR_PRINTF("Error when initializing a lock for WASI-NN context");
wasm_runtime_free(wasi_nn_ctx);
return NULL;
}
return wasi_nn_ctx;
}
@ -133,59 +156,29 @@ static WASINNContext *
wasm_runtime_get_wasi_nn_ctx(wasm_module_inst_t instance)
{
WASINNContext *wasi_nn_ctx =
wasm_runtime_get_context(instance, wasi_nn_key);
(WASINNContext *)bh_hash_map_find(hashmap, (void *)instance);
if (wasi_nn_ctx == NULL) {
WASINNContext *newctx = wasi_nn_initialize_context();
if (newctx == NULL)
wasi_nn_ctx = wasi_nn_initialize_context();
if (wasi_nn_ctx == NULL)
return NULL;
os_mutex_lock(&wasi_nn_lock);
wasi_nn_ctx = wasm_runtime_get_context(instance, wasi_nn_key);
if (wasi_nn_ctx == NULL) {
wasm_runtime_set_context_spread(instance, wasi_nn_key, newctx);
wasi_nn_ctx = newctx;
newctx = NULL;
}
os_mutex_unlock(&wasi_nn_lock);
if (newctx != NULL) {
wasi_nn_ctx_destroy(newctx);
}
}
return wasi_nn_ctx;
}
static WASINNContext *
lock_ctx(wasm_module_inst_t instance)
{
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
if (wasi_nn_ctx == NULL) {
bool ok =
bh_hash_map_insert(hashmap, (void *)instance, (void *)wasi_nn_ctx);
if (!ok) {
NN_ERR_PRINTF("Error while storing context");
wasi_nn_ctx_destroy(wasi_nn_ctx);
return NULL;
}
os_mutex_lock(&wasi_nn_ctx->lock);
if (wasi_nn_ctx->busy) {
os_mutex_unlock(&wasi_nn_ctx->lock);
return NULL;
}
wasi_nn_ctx->busy = true;
os_mutex_unlock(&wasi_nn_ctx->lock);
return wasi_nn_ctx;
}
static void
unlock_ctx(WASINNContext *wasi_nn_ctx)
{
if (wasi_nn_ctx == NULL) {
return;
}
os_mutex_lock(&wasi_nn_ctx->lock);
bh_assert(wasi_nn_ctx->busy);
wasi_nn_ctx->busy = false;
os_mutex_unlock(&wasi_nn_ctx->lock);
return wasi_nn_ctx;
}
void
wasi_nn_destroy()
{
wasm_runtime_destroy_context_key(wasi_nn_key);
// destroy hashmap will destroy keys and values
bh_hash_map_destroy(hashmap);
// close backends' libraries and registered functions
for (unsigned i = 0; i < sizeof(lookup) / sizeof(lookup[0]); i++) {
@ -404,41 +397,6 @@ detect_and_load_backend(graph_encoding backend_hint,
return ret;
}
static wasi_nn_error
ensure_backend(wasm_module_inst_t instance, graph_encoding encoding,
WASINNContext *wasi_nn_ctx)
{
wasi_nn_error res;
graph_encoding loaded_backend = autodetect;
if (!detect_and_load_backend(encoding, &loaded_backend)) {
res = invalid_encoding;
NN_ERR_PRINTF("load backend failed");
goto fail;
}
if (wasi_nn_ctx->is_backend_ctx_initialized) {
if (wasi_nn_ctx->backend != loaded_backend) {
res = unsupported_operation;
goto fail;
}
}
else {
wasi_nn_ctx->backend = loaded_backend;
/* init() the backend */
call_wasi_nn_func(wasi_nn_ctx->backend, init, res,
&wasi_nn_ctx->backend_ctx);
if (res != success)
goto fail;
wasi_nn_ctx->is_backend_ctx_initialized = true;
}
return success;
fail:
return res;
}
/* WASI-NN implementation */
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
@ -452,8 +410,6 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
graph_encoding encoding, execution_target target, graph *g)
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
{
wasi_nn_error res;
NN_DBG_PRINTF("[WASI NN] LOAD [encoding=%d, target=%d]...", encoding,
target);
@ -461,23 +417,18 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
if (!instance)
return runtime_error;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
wasi_nn_error res;
graph_builder_array builder_native = { 0 };
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
if (success
!= (res = graph_builder_array_app_native(
instance, builder, builder_wasm_size, &builder_native)))
goto fail;
return res;
#else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */
if (success
!= (res = graph_builder_array_app_native(instance, builder,
&builder_native)))
goto fail;
return res;
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
if (!wasm_runtime_validate_native_addr(instance, g,
@ -487,7 +438,19 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
goto fail;
}
res = ensure_backend(instance, encoding, wasi_nn_ctx);
graph_encoding loaded_backend = autodetect;
if (!detect_and_load_backend(encoding, &loaded_backend)) {
res = invalid_encoding;
NN_ERR_PRINTF("load backend failed");
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_ctx->backend = loaded_backend;
/* init() the backend */
call_wasi_nn_func(wasi_nn_ctx->backend, init, res,
&wasi_nn_ctx->backend_ctx);
if (res != success)
goto fail;
@ -502,7 +465,6 @@ fail:
// XXX: Free intermediate structure pointers
if (builder_native.buf)
wasm_runtime_free(builder_native.buf);
unlock_ctx(wasi_nn_ctx);
return res;
}
@ -511,8 +473,6 @@ wasi_nn_error
wasi_nn_load_by_name(wasm_exec_env_t exec_env, char *name, uint32_t name_len,
graph *g)
{
wasi_nn_error res;
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
if (!instance) {
return runtime_error;
@ -536,26 +496,29 @@ wasi_nn_load_by_name(wasm_exec_env_t exec_env, char *name, uint32_t name_len,
NN_DBG_PRINTF("[WASI NN] LOAD_BY_NAME %s...", name);
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
graph_encoding loaded_backend = autodetect;
if (!detect_and_load_backend(autodetect, &loaded_backend)) {
NN_ERR_PRINTF("load backend failed");
return invalid_encoding;
}
res = ensure_backend(instance, autodetect, wasi_nn_ctx);
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_ctx->backend = loaded_backend;
wasi_nn_error res;
/* init() the backend */
call_wasi_nn_func(wasi_nn_ctx->backend, init, res,
&wasi_nn_ctx->backend_ctx);
if (res != success)
goto fail;
return res;
call_wasi_nn_func(wasi_nn_ctx->backend, load_by_name, res,
wasi_nn_ctx->backend_ctx, name, name_len, g);
if (res != success)
goto fail;
return res;
wasi_nn_ctx->is_model_loaded = true;
res = success;
fail:
unlock_ctx(wasi_nn_ctx);
return res;
return success;
}
wasi_nn_error
@ -563,8 +526,6 @@ wasi_nn_load_by_name_with_config(wasm_exec_env_t exec_env, char *name,
int32_t name_len, char *config,
int32_t config_len, graph *g)
{
wasi_nn_error res;
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
if (!instance) {
return runtime_error;
@ -593,28 +554,30 @@ wasi_nn_load_by_name_with_config(wasm_exec_env_t exec_env, char *name,
NN_DBG_PRINTF("[WASI NN] LOAD_BY_NAME_WITH_CONFIG %s %s...", name, config);
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
graph_encoding loaded_backend = autodetect;
if (!detect_and_load_backend(autodetect, &loaded_backend)) {
NN_ERR_PRINTF("load backend failed");
return invalid_encoding;
}
res = ensure_backend(instance, autodetect, wasi_nn_ctx);
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_ctx->backend = loaded_backend;
wasi_nn_error res;
/* init() the backend */
call_wasi_nn_func(wasi_nn_ctx->backend, init, res,
&wasi_nn_ctx->backend_ctx);
if (res != success)
goto fail;
;
return res;
call_wasi_nn_func(wasi_nn_ctx->backend, load_by_name_with_config, res,
wasi_nn_ctx->backend_ctx, name, name_len, config,
config_len, g);
if (res != success)
goto fail;
return res;
wasi_nn_ctx->is_model_loaded = true;
res = success;
fail:
unlock_ctx(wasi_nn_ctx);
return res;
return success;
}
wasi_nn_error
@ -628,27 +591,20 @@ wasi_nn_init_execution_context(wasm_exec_env_t exec_env, graph g,
return runtime_error;
}
wasi_nn_error res;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail;
return res;
if (!wasm_runtime_validate_native_addr(
instance, ctx, (uint64)sizeof(graph_execution_context))) {
NN_ERR_PRINTF("ctx is invalid");
res = invalid_argument;
goto fail;
return invalid_argument;
}
call_wasi_nn_func(wasi_nn_ctx->backend, init_execution_context, res,
wasi_nn_ctx->backend_ctx, g, ctx);
fail:
unlock_ctx(wasi_nn_ctx);
return res;
}
@ -663,21 +619,17 @@ wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx,
return runtime_error;
}
wasi_nn_error res;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail;
return res;
tensor input_tensor_native = { 0 };
if (success
!= (res = tensor_app_native(instance, input_tensor,
&input_tensor_native)))
goto fail;
return res;
call_wasi_nn_func(wasi_nn_ctx->backend, set_input, res,
wasi_nn_ctx->backend_ctx, ctx, index,
@ -685,8 +637,7 @@ wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx,
// XXX: Free intermediate structure pointers
if (input_tensor_native.dimensions)
wasm_runtime_free(input_tensor_native.dimensions);
fail:
unlock_ctx(wasi_nn_ctx);
return res;
}
@ -700,32 +651,26 @@ wasi_nn_compute(wasm_exec_env_t exec_env, graph_execution_context ctx)
return runtime_error;
}
wasi_nn_error res;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail;
return res;
call_wasi_nn_func(wasi_nn_ctx->backend, compute, res,
wasi_nn_ctx->backend_ctx, ctx);
fail:
unlock_ctx(wasi_nn_ctx);
return res;
}
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
wasi_nn_error
wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
uint32_t index, void *output_tensor,
uint32_t index, tensor_data output_tensor,
uint32_t output_tensor_len, uint32_t *output_tensor_size)
#else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */
wasi_nn_error
wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
uint32_t index, void *output_tensor,
uint32_t index, tensor_data output_tensor,
uint32_t *output_tensor_size)
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
{
@ -736,36 +681,28 @@ wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
return runtime_error;
}
wasi_nn_error res;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail;
return res;
if (!wasm_runtime_validate_native_addr(instance, output_tensor_size,
(uint64)sizeof(uint32_t))) {
NN_ERR_PRINTF("output_tensor_size is invalid");
res = invalid_argument;
goto fail;
return invalid_argument;
}
tensor_data tensor = {
.buf = output_tensor,
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
.size = output_tensor_len,
#else
.size = *output_tensor_size,
#endif
};
call_wasi_nn_func(wasi_nn_ctx->backend, get_output, res,
wasi_nn_ctx->backend_ctx, ctx, index, &tensor,
wasi_nn_ctx->backend_ctx, ctx, index, output_tensor,
&output_tensor_len);
*output_tensor_size = output_tensor_len;
#else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */
call_wasi_nn_func(wasi_nn_ctx->backend, get_output, res,
wasi_nn_ctx->backend_ctx, ctx, index, output_tensor,
output_tensor_size);
fail:
unlock_ctx(wasi_nn_ctx);
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
return res;
}

View File

@ -2,9 +2,6 @@
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stdlib.h>
#include "wasi_nn_types.h"
#include "utils/logger.h"
#include "llama.h"
@ -289,7 +286,7 @@ deinit_backend(void *ctx)
llama_backend_free();
free(backend_ctx);
os_free(backend_ctx);
return success;
}
@ -385,7 +382,7 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
{
struct LlamaContext *backend_ctx = (struct LlamaContext *)ctx;
// tensor->data is the prompt string. ends with \0
char *prompt_text = (char *)wasi_nn_tensor->data.buf;
char *prompt_text = (char *)wasi_nn_tensor->data;
#ifndef NDEBUG
NN_DBG_PRINTF("--------------------------------------------------");
@ -552,7 +549,7 @@ fail:
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor_data *output_tensor, uint32_t *output_tensor_size)
tensor_data output_tensor, uint32_t *output_tensor_size)
{
struct LlamaContext *backend_ctx = (struct LlamaContext *)ctx;
@ -571,7 +568,7 @@ get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
printf("%s\n", output_metadata);
}
memcpy(output_tensor->buf, output_metadata, strlen(output_metadata));
memcpy(output_tensor, output_metadata, strlen(output_metadata));
*output_tensor_size = strlen(output_metadata);
return success;
}
@ -591,7 +588,7 @@ get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
printf("%s", buf);
}
memcpy(output_tensor->buf + end_pos, buf, strlen(buf));
memcpy(output_tensor + end_pos, buf, strlen(buf));
end_pos += strlen(buf);
}

View File

@ -26,25 +26,17 @@
* from 4. to 6. is the Inference Loop
*/
/* these limits are arbitrary. */
#define MAX_GRAPHS 4
#define MAX_EXECUTION_CONTEXTS 4
typedef struct {
ov_core_t *core;
/* keep input model files */
struct OpenVINOGraph {
void *weight_data;
ov_tensor_t *weights_tensor;
ov_model_t *model;
/* add prepostprocess */
ov_model_t *new_model;
ov_compiled_model_t *compiled_model;
} graphs[MAX_GRAPHS];
struct OpenVINOExecutionContext {
struct OpenVINOGraph *graph;
ov_infer_request_t *infer_request;
} execution_contexts[MAX_EXECUTION_CONTEXTS];
unsigned int n_graphs;
unsigned int n_execution_contexts;
ov_tensor_t *input_tensor;
} OpenVINOContext;
/*
@ -189,29 +181,6 @@ wasi_nn_tensor_type_to_openvino_element_type(tensor_type wasi_nn_type)
return UNDEFINED;
}
static void
free_graph(struct OpenVINOGraph *graph)
{
if (graph->weight_data)
os_free(graph->weight_data);
if (graph->weights_tensor)
ov_tensor_free(graph->weights_tensor);
if (graph->model)
ov_model_free(graph->model);
if (graph->compiled_model)
ov_compiled_model_free(graph->compiled_model);
}
static void
free_execution_context(struct OpenVINOExecutionContext *c)
{
if (c->infer_request)
ov_infer_request_free(c->infer_request);
}
static wasi_nn_error
uint32_array_to_int64_array(uint32_t array_size, uint32_t *src, int64_t **dst)
{
@ -231,8 +200,6 @@ load(void *ctx, graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *g)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOGraph *graph;
unsigned int graph_idx;
wasi_nn_error ret = unsupported_operation;
if (encoding != openvino) {
@ -258,47 +225,39 @@ load(void *ctx, graph_builder_array *builder, graph_encoding encoding,
graph_builder xml = builder->buf[0];
graph_builder weight = builder->buf[1];
graph_idx = ov_ctx->n_graphs;
if (graph_idx >= MAX_GRAPHS) {
return runtime_error;
/* if xml is a String with a model in IR */
if (!(xml.buf[xml.size] == '\0' && xml.buf[xml.size - 1] != '\0')) {
NN_ERR_PRINTF("Invalid xml string.");
return invalid_argument;
}
graph = &ov_ctx->graphs[graph_idx];
memset(graph, 0, sizeof(*graph));
/* transfer weight to an ov tensor */
{
graph->weight_data = os_malloc(weight.size);
if (!graph->weight_data)
ov_ctx->weight_data = os_malloc(weight.size);
if (!ov_ctx->weight_data)
goto fail;
memcpy(graph->weight_data, weight.buf, weight.size);
memcpy(ov_ctx->weight_data, weight.buf, weight.size);
ov_element_type_e type = U8;
int64_t dims[1] = { weight.size };
ov_shape_t shape = { 1, dims };
CHECK_OV_STATUS(ov_tensor_create_from_host_ptr(type, shape,
graph->weight_data,
&graph->weights_tensor),
ov_ctx->weight_data,
&ov_ctx->weights_tensor),
ret);
}
/* load model from buffer */
CHECK_OV_STATUS(ov_core_read_model_from_memory_buffer(
ov_ctx->core, (char *)xml.buf, xml.size,
graph->weights_tensor, &graph->model),
ov_ctx->weights_tensor, &ov_ctx->model),
ret);
#ifndef NDEBUG
print_model_input_output_info(ov_ctx->model);
#endif
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, graph->model, "CPU", 0,
&graph->compiled_model),
ret);
*g = graph_idx;
ov_ctx->n_graphs++;
return success;
ret = success;
fail:
free_graph(graph);
return ret;
}
@ -306,62 +265,20 @@ __attribute__((visibility("default"))) wasi_nn_error
load_by_name(void *ctx, const char *filename, uint32_t filename_len, graph *g)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOGraph *graph;
unsigned int graph_idx;
wasi_nn_error ret = unsupported_operation;
graph_idx = ov_ctx->n_graphs;
if (graph_idx >= MAX_GRAPHS) {
return runtime_error;
}
graph = &ov_ctx->graphs[graph_idx];
memset(graph, 0, sizeof(*graph));
CHECK_OV_STATUS(
ov_core_read_model(ov_ctx->core, filename, NULL, &graph->model), ret);
ov_core_read_model(ov_ctx->core, filename, NULL, &ov_ctx->model), ret);
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, graph->model, "CPU", 0,
&graph->compiled_model),
ret);
*g = graph_idx;
ov_ctx->n_graphs++;
return success;
ret = success;
fail:
free_graph(graph);
return ret;
}
__attribute__((visibility("default"))) wasi_nn_error
init_execution_context(void *ctx, graph g, graph_execution_context *exec_ctx)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOGraph *graph;
struct OpenVINOExecutionContext *exec;
unsigned int exec_idx;
wasi_nn_error ret;
if (g >= ov_ctx->n_graphs)
return runtime_error;
graph = &ov_ctx->graphs[g];
exec_idx = ov_ctx->n_execution_contexts;
if (exec_idx >= MAX_EXECUTION_CONTEXTS)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_idx];
memset(exec, 0, sizeof(*exec));
exec->graph = graph;
CHECK_OV_STATUS(ov_compiled_model_create_infer_request(
graph->compiled_model, &exec->infer_request),
ret);
*exec_ctx = exec_idx;
ov_ctx->n_execution_contexts++;
return success;
fail:
return ret;
}
__attribute__((visibility("default"))) wasi_nn_error
@ -369,15 +286,19 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor *wasi_nn_tensor)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOExecutionContext *exec;
wasi_nn_error ret = unsupported_operation;
ov_shape_t input_shape = { 0 };
ov_tensor_t *input_tensor = NULL;
int64_t *ov_dims = NULL;
if (exec_ctx >= ov_ctx->n_execution_contexts)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_ctx];
ov_preprocess_prepostprocessor_t *ppp = NULL;
ov_preprocess_input_info_t *input_info = NULL;
ov_preprocess_input_tensor_info_t *input_tensor_info = NULL;
ov_layout_t *input_layout = NULL;
ov_preprocess_preprocess_steps_t *input_process = NULL;
ov_preprocess_input_model_info_t *p_input_model = NULL;
ov_layout_t *model_layout = NULL;
ov_preprocess_output_info_t *output_info = NULL;
ov_preprocess_output_tensor_info_t *output_tensor_info = NULL;
/* wasi_nn_tensor -> ov_tensor */
{
@ -402,22 +323,96 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
shape_info);
CHECK_OV_STATUS(ov_tensor_create_from_host_ptr(input_type, input_shape,
wasi_nn_tensor->data.buf,
&input_tensor),
wasi_nn_tensor->data,
&ov_ctx->input_tensor),
ret);
}
/* set preprocess based on wasi_nn_tensor */
{
CHECK_OV_STATUS(
ov_preprocess_prepostprocessor_create(ov_ctx->model, &ppp), ret);
/* reuse user' created tensor's info */
CHECK_OV_STATUS(ov_preprocess_prepostprocessor_get_input_info_by_index(
ppp, index, &input_info),
ret);
CHECK_OV_STATUS(ov_preprocess_input_info_get_tensor_info(
input_info, &input_tensor_info),
ret);
CHECK_OV_STATUS(ov_preprocess_input_tensor_info_set_from(
input_tensor_info, ov_ctx->input_tensor),
ret);
/* add RESIZE */
CHECK_OV_STATUS(ov_preprocess_input_info_get_preprocess_steps(
input_info, &input_process),
ret);
CHECK_OV_STATUS(
ov_preprocess_preprocess_steps_resize(input_process, RESIZE_LINEAR),
ret);
/* input model */
CHECK_OV_STATUS(
ov_preprocess_input_info_get_model_info(input_info, &p_input_model),
ret);
// TODO: what if not?
CHECK_OV_STATUS(ov_layout_create("NCHW", &model_layout), ret);
CHECK_OV_STATUS(ov_preprocess_input_model_info_set_layout(p_input_model,
model_layout),
ret);
/* output -> F32(possibility) */
CHECK_OV_STATUS(ov_preprocess_prepostprocessor_get_output_info_by_index(
ppp, index, &output_info),
ret);
CHECK_OV_STATUS(ov_preprocess_output_info_get_tensor_info(
output_info, &output_tensor_info),
ret);
CHECK_OV_STATUS(
ov_preprocess_output_set_element_type(output_tensor_info, F32),
ret);
CHECK_OV_STATUS(
ov_preprocess_prepostprocessor_build(ppp, &ov_ctx->new_model), ret);
}
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, ov_ctx->new_model,
"CPU", 0, &ov_ctx->compiled_model),
ret);
CHECK_OV_STATUS(ov_compiled_model_create_infer_request(
ov_ctx->compiled_model, &ov_ctx->infer_request),
ret);
/* install ov_tensor -> infer_request */
CHECK_OV_STATUS(ov_infer_request_set_input_tensor_by_index(
exec->infer_request, index, input_tensor),
ov_ctx->infer_request, index, ov_ctx->input_tensor),
ret);
ret = success;
fail:
if (ov_dims)
os_free(ov_dims);
if (input_tensor)
ov_tensor_free(input_tensor);
ov_shape_free(&input_shape);
if (ppp)
ov_preprocess_prepostprocessor_free(ppp);
if (input_info)
ov_preprocess_input_info_free(input_info);
if (input_tensor_info)
ov_preprocess_input_tensor_info_free(input_tensor_info);
if (input_layout)
ov_layout_free(input_layout);
if (input_process)
ov_preprocess_preprocess_steps_free(input_process);
if (p_input_model)
ov_preprocess_input_model_info_free(p_input_model);
if (model_layout)
ov_layout_free(model_layout);
if (output_info)
ov_preprocess_output_info_free(output_info);
if (output_tensor_info)
ov_preprocess_output_tensor_info_free(output_tensor_info);
return ret;
}
@ -426,14 +421,9 @@ __attribute__((visibility("default"))) wasi_nn_error
compute(void *ctx, graph_execution_context exec_ctx)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOExecutionContext *exec;
wasi_nn_error ret = unsupported_operation;
if (exec_ctx >= ov_ctx->n_execution_contexts)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_ctx];
CHECK_OV_STATUS(ov_infer_request_infer(exec->infer_request), ret);
CHECK_OV_STATUS(ov_infer_request_infer(ov_ctx->infer_request), ret);
ret = success;
fail:
return ret;
@ -441,33 +431,23 @@ fail:
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor_data *output_tensor, uint32_t *output_tensor_size)
tensor_data output_tensor, uint32_t *output_tensor_size)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOExecutionContext *exec;
wasi_nn_error ret = unsupported_operation;
ov_tensor_t *ov_tensor = NULL;
void *data = NULL;
size_t byte_size = 0;
if (exec_ctx >= ov_ctx->n_execution_contexts)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_ctx];
CHECK_OV_STATUS(ov_infer_request_get_output_tensor_by_index(
exec->infer_request, index, &ov_tensor),
ov_ctx->infer_request, index, &ov_tensor),
ret);
CHECK_OV_STATUS(ov_tensor_get_byte_size(ov_tensor, &byte_size), ret);
if (byte_size > output_tensor->size) {
ret = too_large;
goto fail;
}
CHECK_OV_STATUS(ov_tensor_data(ov_tensor, &data), ret);
memcpy(output_tensor->buf, data, byte_size);
memcpy(output_tensor, data, byte_size);
*output_tensor_size = (uint32_t)byte_size;
@ -521,16 +501,27 @@ __attribute__((visibility("default"))) wasi_nn_error
deinit_backend(void *ctx)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
unsigned int i;
if (!ov_ctx)
return invalid_argument;
for (i = 0; i < ov_ctx->n_execution_contexts; i++)
free_execution_context(&ov_ctx->execution_contexts[i]);
if (ov_ctx->weight_data)
os_free(ov_ctx->weight_data);
for (i = 0; i < ov_ctx->n_graphs; i++)
free_graph(&ov_ctx->graphs[i]);
if (ov_ctx->weights_tensor)
ov_tensor_free(ov_ctx->weights_tensor);
if (ov_ctx->input_tensor)
ov_tensor_free(ov_ctx->input_tensor);
if (ov_ctx->infer_request)
ov_infer_request_free(ov_ctx->infer_request);
if (ov_ctx->compiled_model)
ov_compiled_model_free(ov_ctx->compiled_model);
if (ov_ctx->model)
ov_model_free(ov_ctx->model);
if (ov_ctx->core)
ov_core_free(ov_ctx->core);

View File

@ -24,7 +24,7 @@ compute(void *ctx, graph_execution_context exec_ctx);
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor_data *output_tensor, uint32_t *output_tensor_size);
tensor_data output_tensor, uint32_t *output_tensor_size);
__attribute__((visibility("default"))) wasi_nn_error
init_backend(void **ctx);

View File

@ -9,12 +9,7 @@
#include "wasi_nn_types.h"
#include "wasm_export.h"
#include "bh_platform.h"
typedef struct {
korp_mutex lock;
bool busy;
bool is_backend_ctx_initialized;
bool is_model_loaded;
graph_encoding backend;
void *backend_ctx;
@ -32,7 +27,7 @@ typedef wasi_nn_error (*SET_INPUT)(void *, graph_execution_context, uint32_t,
tensor *);
typedef wasi_nn_error (*COMPUTE)(void *, graph_execution_context);
typedef wasi_nn_error (*GET_OUTPUT)(void *, graph_execution_context, uint32_t,
tensor_data *, uint32_t *);
tensor_data, uint32_t *);
/* wasi-nn general APIs */
typedef wasi_nn_error (*BACKEND_INITIALIZE)(void **);
typedef wasi_nn_error (*BACKEND_DEINITIALIZE)(void *);

View File

@ -281,11 +281,6 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
{
TFLiteContext *tfl_ctx = (TFLiteContext *)tflite_ctx;
if (input_tensor->type != fp32) {
NN_ERR_PRINTF("unsupported input tensor type %u", input_tensor->type);
return runtime_error;
}
wasi_nn_error res;
if (success != (res = is_valid_graph_execution_context(tfl_ctx, ctx)))
return res;
@ -324,7 +319,7 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
index);
int size = model_tensor_size * sizeof(float);
bh_memcpy_s(it, size, input_tensor->data.buf, size);
bh_memcpy_s(it, size, input_tensor->data, size);
}
else { // TODO: Assuming uint8 quantized networks.
TfLiteAffineQuantization *quant_info =
@ -342,7 +337,7 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
NN_DBG_PRINTF("input tensor: (scale, offset) = (%f, %f)", scale,
zero_point);
float *input_tensor_f = (float *)input_tensor->data.buf;
float *input_tensor_f = (float *)input_tensor->data;
for (uint32_t i = 0; i < model_tensor_size; ++i) {
it[i] = (uint8_t)(input_tensor_f[i] / scale + zero_point);
}
@ -366,7 +361,7 @@ compute(void *tflite_ctx, graph_execution_context ctx)
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
tensor_data *output_tensor, uint32_t *output_tensor_size)
tensor_data output_tensor, uint32_t *output_tensor_size)
{
TFLiteContext *tfl_ctx = (TFLiteContext *)tflite_ctx;
@ -389,34 +384,23 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
return too_large;
}
uint32_t model_tensor_size = 1;
for (int i = 0; i < (int)tensor->dims->size; ++i)
model_tensor_size *= (uint32_t)tensor->dims->data[i];
if (*output_tensor_size < model_tensor_size) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
if (tensor->quantization.type == kTfLiteNoQuantization) {
NN_DBG_PRINTF("No quantization information");
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
if (output_tensor->size < tensor->bytes) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
if (output_tensor->size < tensor->bytes / sizeof(float)) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#endif
bh_memcpy_s(output_tensor->buf, output_tensor->size, tensor->data.data,
tensor->bytes);
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
*output_tensor_size = tensor->bytes;
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
*output_tensor_size = tensor->bytes / sizeof(float);
#endif
float *ot =
tfl_ctx->interpreters[ctx].interpreter->typed_output_tensor<float>(
index);
int size = model_tensor_size * sizeof(float);
bh_memcpy_s(output_tensor, size, ot, size);
}
else { // TODO: Assuming uint8 quantized networks.
TfLiteAffineQuantization *quant_info =
@ -425,27 +409,6 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
NN_ERR_PRINTF("Quantization per channel is not supported");
return runtime_error;
}
uint32_t model_tensor_size = 1;
for (int i = 0; i < (int)tensor->dims->size; ++i)
model_tensor_size *= (uint32_t)tensor->dims->data[i];
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
if (output_tensor->size / sizeof(float) < model_tensor_size) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
if (output_tensor->size < model_tensor_size) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#endif
uint8_t *ot = tfl_ctx->interpreters[ctx]
.interpreter->typed_output_tensor<uint8_t>(index);
@ -454,22 +417,13 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
NN_DBG_PRINTF("output tensor: (scale, offset) = (%f, %f)", scale,
zero_point);
float *output_tensor_f = (float *)output_tensor->buf;
float *output_tensor_f = (float *)output_tensor;
for (uint32_t i = 0; i < model_tensor_size; ++i) {
output_tensor_f[i] = (ot[i] - zero_point) * scale;
}
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
*output_tensor_size = model_tensor_size * sizeof(float);
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
*output_tensor_size = model_tensor_size;
#endif
}
*output_tensor_size = model_tensor_size;
return success;
}

View File

@ -32,7 +32,7 @@ compute(void *tflite_ctx, graph_execution_context ctx);
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
tensor_data *output_tensor, uint32_t *output_tensor_size);
tensor_data output_tensor, uint32_t *output_tensor_size);
__attribute__((visibility("default"))) wasi_nn_error
init_backend(void **tflite_ctx);

View File

@ -3,17 +3,6 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# on intel mac, this ends up with a lot of the following error.
#
# AttributeError: 'Sequential' object has no attribute '_get_save_spec'.
#
# * "pip install tensorflow" installs tensorflow 2.16.2 on intel mac.
# (because it's the last version before tf deprecated the target.)
# * keras 3 support in the version seems incomplete (thus the error)
# * a workaround: use keras 2 as mentioned in:
# https://github.com/tensorflow/tensorflow/releases/tag/v2.16.1
# https://blog.tensorflow.org/2024/03/whats-new-in-tensorflow-216.html
CURR_PATH=$(cd $(dirname $0) && pwd -P)
# WASM application that uses WASI-NN

View File

@ -3,7 +3,7 @@
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import AveragePooling2D, Conv2D
from keras.layers import AveragePooling2D, Conv2D
from tensorflow.keras import Input, Model

View File

@ -201,20 +201,10 @@ openat(int fd, const char *pathname, int flags, ...)
int ret;
char dir_path[DIR_PATH_LEN];
char *full_path;
mode_t mode = 0;
bool has_mode = false;
if (flags & O_CREAT) {
va_list ap;
va_start(ap, flags);
mode = (mode_t)va_arg(ap, int);
va_end(ap);
has_mode = true;
}
ret = fcntl(fd, F_GETPATH, dir_path);
if (ret != 0) {
errno = EINVAL;
errno = -EINVAL;
return -1;
}
@ -224,7 +214,7 @@ openat(int fd, const char *pathname, int flags, ...)
return -1;
}
new_fd = has_mode ? open(full_path, flags, mode) : open(full_path, flags);
new_fd = open(full_path, flags);
free(full_path);
return new_fd;

View File

@ -35,8 +35,8 @@ extend_vector(Vector *vector, size_t length)
if (length <= vector->max_elems)
return true;
if (length < vector->max_elems * 3 / 2)
length = vector->max_elems * 3 / 2;
if (length < vector->size_elem * 3 / 2)
length = vector->size_elem * 3 / 2;
if (!(data = alloc_vector_data(length, vector->size_elem))) {
return false;
@ -194,12 +194,12 @@ bh_vector_append(Vector *vector, const void *elem_buf)
goto just_return;
}
/* make sure one more slot is used by the thread who allocates it */
/* make sure one more slot is used by the thread who allocas it */
if (vector->lock)
os_mutex_lock(vector->lock);
if (!extend_vector(vector, vector->num_elems + 1)) {
LOG_ERROR("Append vector elem failed: extend vector failed.\n");
LOG_ERROR("Append ector elem failed: extend vector failed.\n");
goto unlock_return;
}

View File

@ -102,7 +102,6 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM
### **Enable lib wasi-nn**
- **WAMR_BUILD_WASI_NN**=1/0, default to disable if not set
> Note: WAMR_BUILD_WASI_NN without WAMR_BUILD_WASI_EPHEMERAL_NN is deprecated and will likely be removed in future versions of WAMR. Please consider to enable WAMR_BUILD_WASI_EPHEMERAL_NN as well.
> Note: See [WASI-NN](../core/iwasm/libraries/wasi-nn) for more details.
### **Enable lib wasi-nn GPU mode**
@ -114,7 +113,7 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM
- **WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH**=Path to the external delegate shared library (e.g. `libedgetpu.so.1.0` for Coral USB)
### **Enable lib wasi-nn with `wasi_ephemeral_nn` module support**
- **WAMR_BUILD_WASI_EPHEMERAL_NN**=1/0, default to enable if not set
- **WAMR_BUILD_WASI_EPHEMERAL_NN**=1/0, default to disable if not set
### **Disable boundary check with hardware trap**
- **WAMR_DISABLE_HW_BOUND_CHECK**=1/0, default to enable if not set and supported by platform

View File

@ -6,7 +6,7 @@
### Pre-requisites
#### Install requirements
Before proceeding it is necessary to make sure your Python environment is correctly configured. To do this open a terminal session in this directory and perform the following:
Before proceeding it is necessary to make sure your Python environment is correctly configured. To do ths open a terminal session in this directory and perfom the following:
```shell

View File

@ -353,12 +353,12 @@ writable and needs to be copied into a ctype array.
#### variable arguments
A function with _variable arguments_ makes it hard to specify the required
A function with _variable arugments_ makes it hard to specify the required
argument types for the function prototype. It leaves us one way to call it
directly without any arguments type checking.
```python
libc.printf(b"Hello, an int %d, a float %f, a string %s\n", c_int(1), c_double(3.14), "World!")
libc.printf(b"Hello, an int %d, a float %f, a string %s\n", c_int(1), c_doulbe(3.14), "World!")
```
#### Use `c_bool` to represent `wasm_mutability_t `
@ -373,7 +373,7 @@ libc.printf(b"Hello, an int %d, a float %f, a string %s\n", c_int(1), c_double(3
### bindgen.py
`bindgen.py` is a tool to create WAMR python binding automatically. `binding.py`
`bindge.py` is a tool to create WAMR python binding automatically. `binding.py`
is generated. We should avoid modification on it. Additional helpers should go
to `ffi.py`.

View File

@ -114,12 +114,6 @@ set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS}")
set (CMAKE_MACOSX_RPATH True)
# if enable wasi-nn, both wasi-nn-backends and iwasm
# need to use same WAMR (dynamic) libraries
if (WAMR_BUILD_WASI_NN EQUAL 1)
set (BUILD_SHARED_LIBS ON)
endif ()
set (WAMR_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../..)
include (${WAMR_ROOT_DIR}/build-scripts/runtime_lib.cmake)

View File

@ -79,7 +79,7 @@ struct wamr_pal_create_process_args {
// Untrusted environment variable array pass to new process.
//
// The untrusted env vars to the command. And the last element of the array
// must be NULL to indicate the end of the array.
// must be NULL to indicate the length of array.
//
// Optional field.
const char **env;

View File

@ -72,7 +72,7 @@ def to_json(inst, cls):
class Fuzzing(db.Model):
__tablename__ = 'fuzzing_task'
__tablename__ = 'fazzing_task'
id = db.Column(db.Integer, autoincrement=True,
primary_key=True, nullable=False)
repo = db.Column(db.String(200), nullable=False, default='')
@ -96,7 +96,7 @@ class TaskError(db.Model):
__tablename__ = 'task_error'
id = db.Column(db.Integer, autoincrement=True,
primary_key=True, nullable=False)
fuzzing_id = db.Column(db.Integer, db.ForeignKey("fuzzing_task.id"))
fazzing_id = db.Column(db.Integer, db.ForeignKey("fazzing_task.id"))
name = db.Column(db.String(200), nullable=False, default='')
std_out = db.Column(db.Text, default='')
data = db.Column(db.JSON)
@ -119,9 +119,9 @@ def to_data(data):
def error_count(data):
error = len(TaskError.query.filter(
TaskError.fuzzing_id == data.get('id'), TaskError.status.in_([1, 2])).all())
TaskError.fazzing_id == data.get('id'), TaskError.status.in_([1, 2])).all())
end_error = len(TaskError.query.filter(
TaskError.fuzzing_id == data.get('id'), TaskError.status == 0).all())
TaskError.fazzing_id == data.get('id'), TaskError.status == 0).all())
data['error'] = error
data['end_error'] = end_error
return data
@ -159,11 +159,11 @@ def show_fuzz_list():
id = data.get('id')
if id:
all_error = TaskError.query.filter(
TaskError.fuzzing_id == id).with_entities(TaskError.id, TaskError.fuzzing_id,
TaskError.fazzing_id == id).with_entities(TaskError.id, TaskError.fazzing_id,
TaskError.create_time, TaskError.data,
TaskError.name, TaskError.status,
TaskError.update_time, TaskError.comment).order_by(TaskError.status.desc(), TaskError.update_time.desc(), TaskError.id.desc()).all()
data_message = [{'id': error['id'], "fuzzing_id": error['fuzzing_id'],
data_message = [{'id': error['id'], "fuzzing_id": error['fazzing_id'],
"name": error['name'], "data": error['data'],
'create_time': error['create_time'].strftime('%Y-%m-%d %H:%M:%S'),
'update_time': error['update_time'].strftime('%Y-%m-%d %H:%M:%S'),
@ -204,7 +204,7 @@ def New_fuzzing():
# curd.set_error_status_to(list(map(lambda x: x.id, error_list)), db)
# Fuzzing.query.filter_by(id=fuzz.id).delete()
fuzz.data = {'error': "Clone repo Error"}
db.session.commit()
db.commit()
return jsonify({"status": 0, "result": "", "msg": "Clone repo Error"})
wamr_path_parent = fuzz_dir.parent.parent
@ -277,7 +277,7 @@ def scheduler_run_task():
for fuzz in fuzz_query:
all_error = TaskError.query.filter(
TaskError.fuzzing_id == fuzz.id).with_entities(TaskError.name).all()
TaskError.fazzing_id == fuzz.id).with_entities(TaskError.name).all()
fuzz_cmd = wasm_mutator_dir / \
'workspace' / f'build_{fuzz.id}'
dir_list = filter(lambda x: x.startswith(
@ -287,7 +287,7 @@ def scheduler_run_task():
for dir in dir_list:
cmd = f'cd {fuzz_cmd} && ./wasm_mutator_fuzz {dir}'
status, resp = getstatusoutput(cmd)
task_error = TaskError(name=dir, std_out=resp, fuzzing_id=fuzz.id,
task_error = TaskError(name=dir, std_out=resp, fazzing_id=fuzz.id,
create_time=datetime.utcnow() + timedelta(hours=8))
db.session.add(task_error)
db.session.commit()
@ -312,7 +312,7 @@ def get_error_txt():
return jsonify({"status": 0, "results": [], 'msg': "Error"})
error = TaskError.query.get(id)
fuzz_cmd = wasm_mutator_dir / \
'workspace' / f'build_{error.fuzzing_id}'
'workspace' / f'build_{error.fazzing_id}'
file_cmd = fuzz_cmd / error.name
response = send_file(file_cmd, as_attachment=True,
@ -351,7 +351,7 @@ def get_cases_zip():
with ZipFile(memory_file, "w", ZIP_DEFLATED) as zf:
for task_error in task_query:
fuzz_cmd = wasm_mutator_dir / \
'workspace' / f'build_{task_error.fuzzing_id}'
'workspace' / f'build_{task_error.fazzing_id}'
file_cmd = fuzz_cmd / task_error.name
zf.write(str(file_cmd), arcname=task_error.name)
memory_file.seek(0)
@ -399,7 +399,7 @@ def error_restart():
if run_status:
return jsonify({"status": 0, "results": [], 'msg': "There are already tasks in progress"})
task_query = TaskError.query.filter(TaskError.id.in_(id_list)).all()
fuzzing_id = task_query[0].fuzzing_id
fuzzing_id = task_query[0].fazzing_id
fuzz_cmd = wasm_mutator_dir / \
'workspace' / f'build_{fuzzing_id}'
restart_cmd = wasm_mutator_dir / \
@ -412,7 +412,7 @@ def error_restart():
if not Path(restart_cmd / 'wamr').exists():
print('------ error: clone repo not folder exists ------')
# fuzz.data = {'error': "Clone repo Error"}
db.session.commit()
db.commit()
return jsonify({"status": 0, "result": "", "msg": "Clone repo Error"})
wamr_path_parent = fuzz_dir.parent.parent
wamr_path = wamr_path_parent / 'wamr'

View File

@ -17,7 +17,7 @@ git apply ../../../wamr-test-suites/spec-test-script/gc_ignore_cases.patch
# Set OCaml compiler environment
eval $(opam config env)
echo "compile the reference interpreter"
echo "compile the reference intepreter"
pushd interpreter
make
popd

View File

@ -9,7 +9,7 @@ import os
from collections import OrderedDict
def CLI_ARGS_GENERATOR(running_modes_supported: list[str]) -> list[str]:
def CLI_ARGS_GENREATOR(running_modes_supported: list[str]) -> list[str]:
res = []
list_2d = [["--default-running-mode={} --module-running-mode={}".format(i, j)
for i in running_modes_supported] for j in running_modes_supported]
@ -35,16 +35,16 @@ def main():
]
# Python 3.7+: Dictionary iteration order is guaranteed to be in order of insertion.
# just to be safe, using OrderedDict
# just to be safe, using orderreddict
# key: value -> compile mode, {"compile_flag": CMake compile flag, "iwasm_cli_args": array of CLI args tested}
test_options = OrderedDict({
"INTERP": {"compile_flag": COMPILE_FLAGS[0], "cli_args": CLI_ARGS_GENERATOR(RUNNING_MODES[:1])},
"FAST_JIT": {"compile_flag": COMPILE_FLAGS[1], "cli_args": CLI_ARGS_GENERATOR(RUNNING_MODES[:2])},
"INTERP": {"compile_flag": COMPILE_FLAGS[0], "cli_args": CLI_ARGS_GENREATOR(RUNNING_MODES[:1])},
"FAST_JIT": {"compile_flag": COMPILE_FLAGS[1], "cli_args": CLI_ARGS_GENREATOR(RUNNING_MODES[:2])},
"LLVM_JIT": {"compile_flag": COMPILE_FLAGS[2],
"cli_args": CLI_ARGS_GENERATOR([RUNNING_MODES[0], RUNNING_MODES[2]])},
"MULTI_TIER_JIT": {"compile_flag": COMPILE_FLAGS[3], "cli_args": CLI_ARGS_GENERATOR(RUNNING_MODES)},
"cli_args": CLI_ARGS_GENREATOR([RUNNING_MODES[0], RUNNING_MODES[2]])},
"MULTI_TIER_JIT": {"compile_flag": COMPILE_FLAGS[3], "cli_args": CLI_ARGS_GENREATOR(RUNNING_MODES)},
"EAGER_JIT_WITH_BOTH_JIT": {"compile_flag": COMPILE_FLAGS[4],
"cli_args": CLI_ARGS_GENERATOR(RUNNING_MODES[:3])}
"cli_args": CLI_ARGS_GENREATOR(RUNNING_MODES[:3])}
})
build_cmd = "./build_c_embed.sh \"{build_flag}\""

View File

@ -29,7 +29,7 @@ def main():
]
# Python 3.7+: Dictionary iteration order is guaranteed to be in order of insertion.
# just to be safe, using OrderedDict
# just to be safe, using orderreddict
# key: value -> compile mode, {"compile_flag": CMake compile flag, "iwasm_cli_args": array of CLI args tested}
test_options = OrderedDict({
"INTERP": {"compile_flag": COMPILE_FLAGS[0], "iwasm_cli_args": IWASM_CLI_ARGS[:1]},

View File

@ -31,7 +31,7 @@ class memory64_atomic_test_suite : public testing::TestWithParam<RunningMode>
return true;
fail:
if (module)
if (!module)
wasm_runtime_unload(module);
return false;
@ -56,13 +56,11 @@ class memory64_atomic_test_suite : public testing::TestWithParam<RunningMode>
if (exec_env)
wasm_runtime_destroy_exec_env(exec_env);
if (module_inst)
wasm_runtime_deinstantiate(module_inst);
if (module)
wasm_runtime_unload(module);
return false;
}
void destroy_exec_env()
void destory_exec_env()
{
wasm_runtime_destroy_exec_env(exec_env);
wasm_runtime_deinstantiate(module_inst);
@ -111,7 +109,7 @@ class memory64_atomic_test_suite : public testing::TestWithParam<RunningMode>
virtual void TearDown()
{
if (cleanup) {
destroy_exec_env();
destory_exec_env();
wasm_runtime_destroy();
cleanup = false;
}
@ -341,8 +339,8 @@ TEST_P(memory64_atomic_test_suite, atomic_opcodes_i64_rmw_cmpxchg)
PUT_I64_TO_ADDR(wasm_argv + 2, 0x100F0E0D0C0B0A09);
// new
PUT_I64_TO_ADDR(wasm_argv + 4, 0xdeadcafebeefdead);
ASSERT_TRUE(wasm_runtime_call_wasm(
exec_env, func_map["i64_atomic_rmw_cmpxchg"], 6, wasm_argv));
ASSERT_TRUE(wasm_runtime_call_wasm(exec_env, func_map["i64_atomic_rmw_cmpxchg"],
6, wasm_argv));
i64 = 0x100F0E0D0C0B0A09;
ASSERT_EQ(i64, GET_U64_FROM_ADDR(wasm_argv));

View File

@ -31,7 +31,7 @@ class memory64_test_suite : public testing::TestWithParam<RunningMode>
return true;
fail:
if (module)
if (!module)
wasm_runtime_unload(module);
return false;
@ -56,13 +56,11 @@ class memory64_test_suite : public testing::TestWithParam<RunningMode>
if (exec_env)
wasm_runtime_destroy_exec_env(exec_env);
if (module_inst)
wasm_runtime_deinstantiate(module_inst);
if (module)
wasm_runtime_unload(module);
return false;
}
void destroy_exec_env()
void destory_exec_env()
{
wasm_runtime_destroy_exec_env(exec_env);
wasm_runtime_deinstantiate(module_inst);
@ -203,7 +201,7 @@ TEST_P(memory64_test_suite, memory_8GB)
i64 = 0xbeefdead;
ASSERT_EQ(i64, GET_U64_FROM_ADDR(wasm_argv));
destroy_exec_env();
destory_exec_env();
}
TEST_P(memory64_test_suite, mem64_from_clang)
@ -230,7 +228,7 @@ TEST_P(memory64_test_suite, mem64_from_clang)
i32 = 0x109;
ASSERT_EQ(i32, wasm_argv[0]);
destroy_exec_env();
destory_exec_env();
}
INSTANTIATE_TEST_CASE_P(RunningMode, memory64_test_suite,

View File

@ -21,7 +21,7 @@ std::string TEST_WASM1 = "/hello.wasm";
std::string TEST_WASM2 = "/mytest.wasm";
char *WASM_FILE_1;
char *WASM_FILE_2;
std::vector<RunningMode> running_mode_supported = { Mode_Interp,
std::vector<RunningMode> running_mode_supportted = { Mode_Interp,
#if WASM_ENABLE_FAST_JIT != 0
Mode_Fast_JIT,
#endif
@ -76,7 +76,7 @@ class wasm_running_modes_test_suite : public testing::TestWithParam<RunningMode>
return true;
fail:
if (module)
if (!module)
wasm_runtime_unload(module);
return false;
@ -101,13 +101,11 @@ class wasm_running_modes_test_suite : public testing::TestWithParam<RunningMode>
if (exec_env)
wasm_runtime_destroy_exec_env(exec_env);
if (module_inst)
wasm_runtime_deinstantiate(module_inst);
if (module)
wasm_runtime_unload(module);
return false;
}
void destroy_exec_env()
void destory_exec_env()
{
wasm_runtime_destroy_exec_env(exec_env);
wasm_runtime_deinstantiate(module_inst);
@ -141,7 +139,7 @@ class wasm_running_modes_test_suite : public testing::TestWithParam<RunningMode>
ASSERT_TRUE(ret);
ASSERT_EQ(10, wasm_argv[0]);
destroy_exec_env();
destory_exec_env();
}
void run_wasm_complex(char *filename1, char *filename2,
@ -170,7 +168,7 @@ class wasm_running_modes_test_suite : public testing::TestWithParam<RunningMode>
ASSERT_TRUE(ret);
ASSERT_EQ(10, wasm_argv[0]);
destroy_exec_env();
destory_exec_env();
/* run wasm file 2 in running_mode */
ret = load_wasm_file(filename2);
@ -186,7 +184,7 @@ class wasm_running_modes_test_suite : public testing::TestWithParam<RunningMode>
ret = wasm_runtime_call_wasm(exec_env, main, 2, wasm_argv);
ASSERT_TRUE(ret);
destroy_exec_env();
destory_exec_env();
}
public:
@ -248,7 +246,7 @@ TEST_F(wasm_running_modes_test_suite, wasm_runtime_is_running_mode_supported)
// normal situation
ASSERT_EQ(true, wasm_runtime_is_running_mode_supported(
static_cast<RunningMode>(Mode_Default)));
for (auto running_mode : running_mode_supported) {
for (auto running_mode : running_mode_supportted) {
ASSERT_EQ(true, wasm_runtime_is_running_mode_supported(running_mode));
}
@ -266,7 +264,7 @@ TEST_F(wasm_running_modes_test_suite, wasm_runtime_set_default_running_mode)
// normal situation: only set up
ASSERT_EQ(true, wasm_runtime_set_default_running_mode(
static_cast<RunningMode>(Mode_Default)));
for (auto running_mode : running_mode_supported) {
for (auto running_mode : running_mode_supportted) {
ASSERT_EQ(true, wasm_runtime_set_default_running_mode(running_mode));
}
@ -298,13 +296,13 @@ TEST_P(wasm_running_modes_test_suite,
wasm_runtime_set_and_get_running_mode_complex)
{
RunningMode default_running_mode = GetParam();
for (auto running_mode : running_mode_supported) {
for (auto running_mode : running_mode_supportted) {
run_wasm_complex(WASM_FILE_1, WASM_FILE_2, default_running_mode,
running_mode);
}
}
INSTANTIATE_TEST_CASE_P(RunningMode, wasm_running_modes_test_suite,
testing::ValuesIn(running_mode_supported));
testing::ValuesIn(running_mode_supportted));
}

View File

@ -362,6 +362,10 @@ function sightglass_test()
function setup_wabt()
{
# please sync with .github/actions/install-wasi-sdk-wabt/action.yml
if [ ${WABT_BINARY_RELEASE} == "YES" ]; then
echo "download a binary release and install"
local WAT2WASM=${WORK_DIR}/wabt/out/gcc/Release/wat2wasm
if [ ! -f ${WAT2WASM} ]; then
case ${PLATFORM} in
cosmopolitan)
;;
@ -383,10 +387,6 @@ function setup_wabt()
;;
esac
if [ ${WABT_BINARY_RELEASE} == "YES" ]; then
echo "download a binary release and install"
local WAT2WASM=${WORK_DIR}/wabt/out/gcc/Release/wat2wasm
if [ ! -f ${WAT2WASM} ]; then
pushd /tmp
wget -O wabt-tar.gz --progress=dot:giga ${WABT_URL}
tar xf wabt-tar.gz
@ -414,7 +414,7 @@ function setup_wabt()
function compile_reference_interpreter()
{
echo "compile the reference interpreter"
echo "compile the reference intepreter"
pushd interpreter
make
if [ $? -ne 0 ]

View File

@ -6,4 +6,3 @@ cmake_minimum_required (VERSION 3.14)
project(wamr-wasi-extensions LANGUAGES C)
add_subdirectory(nn)
add_subdirectory(socket)

View File

@ -1,12 +0,0 @@
# Copyright (C) 2025 Midokura Japan KK. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
cmake_minimum_required(VERSION 3.14)
set(CMAKE_C_STANDARD 99)
set(CMAKE_C_STANDARD_REQUIRED YES)
project(nn-cli LANGUAGES C)
add_executable(nn-cli main.c fileio.c map.c)
find_package(wamr-wasi-nn REQUIRED)
target_link_libraries(nn-cli wamr-wasi-nn)

View File

@ -1,73 +0,0 @@
/*
* Copyright (C) 2025 Midokura Japan KK. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
/*
* modified copy-and-paste from:
* https://github.com/yamt/toywasm/blob/0eaad8cacd0cc7692946ff19b25994f106113be8/lib/fileio.c
*/
#include <sys/stat.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include "fileio.h"
/*
 * Read the whole file at `path` into a malloc'ed buffer.
 *
 * On success, returns 0 and stores the buffer in *pp and its size in
 * *sizep.  On failure, returns a positive errno value and leaves
 * *pp / *sizep untouched.  Release the buffer with unmap_file().
 *
 * (Despite the name, this is a plain read, not mmap; modified
 * copy-and-paste from toywasm, see header comment above.)
 */
int
map_file(const char *path, void **pp, size_t *sizep)
{
    void *p;
    size_t size;
    ssize_t ssz;
    int fd;
    int ret;

    fd = open(path, O_RDONLY);
    if (fd == -1) {
        ret = errno;
        assert(ret != 0);
        return ret;
    }
    struct stat st;
    ret = fstat(fd, &st);
    if (ret == -1) {
        ret = errno;
        assert(ret != 0);
        close(fd);
        return ret;
    }
    size = st.st_size;
    if (size > 0) {
        p = malloc(size);
    }
    else {
        /* malloc(0) may legally return NULL; allocate one byte so an
         * empty file doesn't produce a confusing ENOMEM. */
        p = malloc(1);
    }
    if (p == NULL) {
        close(fd);
        return ENOMEM;
    }
    ssz = read(fd, p, size);
    if (ssz == -1 || (size_t)ssz != size) {
        /*
         * Bug fixes vs. the original:
         * - a short read does not set errno, so the old `ret = errno`
         *   could return 0 (i.e. "success") on failure; report EIO.
         * - `p` was leaked on this path.
         */
        ret = (ssz == -1) ? errno : EIO;
        assert(ret != 0);
        free(p);
        close(fd);
        return ret;
    }
    close(fd);
    *pp = p;
    *sizep = size;
    return 0;
}
/*
 * Release a buffer obtained from map_file().  The size is accepted for
 * symmetry with a real mmap-based implementation but is unused by this
 * malloc-backed one.
 */
void
unmap_file(void *p, size_t sz)
{
    (void)sz;
    free(p);
}

View File

@ -1,14 +0,0 @@
/*
* Copyright (C) 2025 Midokura Japan KK. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
/*
* modified copy-and-paste from:
* https://github.com/yamt/toywasm/blob/0eaad8cacd0cc7692946ff19b25994f106113be8/lib/fileio.h
*/
int
map_file(const char *filename, void **pp, size_t *szp);
void
unmap_file(void *p, size_t sz);

View File

@ -1,497 +0,0 @@
/*
* Copyright (C) 2025 Midokura Japan KK. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <sys/stat.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <wamr/wasi_ephemeral_nn.h>
#include "fileio.h"
#include "map.h"
static struct map graphs;
static struct map contexts;
/*
 * Handle the --load-graph option.
 *
 * Parses a getsubopt()-style list (id=...,file=...,encoding=...,target=...),
 * maps each model file into memory as a graph builder, loads the graph via
 * wasi_ephemeral_nn_load, and records the handle in the global `graphs` map
 * under `id` ("default" when not given).  `file` may repeat for multi-file
 * models.  Exits 2 on usage errors, 1 on runtime failures.
 */
static void
load_graph(char *options)
{
    int target = wasi_ephemeral_nn_target_cpu;
    int encoding = wasi_ephemeral_nn_encoding_openvino;
    const char *id = "default";
    wasi_ephemeral_nn_graph_builder *builders = NULL;
    size_t nbuilders = 0;
    enum {
        opt_id,
        opt_file,
        opt_encoding,
        opt_target,
    };
    static char *const keylistp[] = {
        [opt_id] = "id",
        [opt_file] = "file",
        [opt_encoding] = "encoding",
        [opt_target] = "target",
        NULL,
    };
    while (*options) {
        extern char *suboptarg;
        char *value;
        const char *saved = options;
        switch (getsubopt(&options, keylistp, &value)) {
            case opt_id:
                if (value == NULL) {
                    fprintf(stderr, "no value for %s\n", saved);
                    exit(2);
                }
                id = value;
                break;
            case opt_file:
                if (value == NULL) {
                    fprintf(stderr, "no value for %s\n", saved);
                    exit(2);
                }
                builders =
                    realloc(builders, (nbuilders + 1) * sizeof(*builders));
                if (builders == NULL) {
                    exit(1);
                }
                wasi_ephemeral_nn_graph_builder *b = &builders[nbuilders++];
                /*
                 * map_file() reports the size as size_t; go through a
                 * temporary instead of casting &b->size, which silently
                 * corrupts memory if b->size is a narrower type.
                 */
                size_t mapped_sz;
                int ret = map_file(value, (void *)&b->buf, &mapped_sz);
                if (ret != 0) {
                    fprintf(stderr, "map_file \"%s\" failed: %s\n", value,
                            strerror(ret));
                    exit(1);
                }
                b->size = mapped_sz;
                break;
            case opt_encoding:
                if (value == NULL) {
                    fprintf(stderr, "no value for %s\n", saved);
                    exit(2);
                }
                encoding = atoi(value);
                break;
            case opt_target:
                if (value == NULL) {
                    fprintf(stderr, "no value for %s\n", saved);
                    exit(2);
                }
                target = atoi(value);
                break;
            case -1:
                fprintf(stderr, "unknown subopt %s\n", saved);
                exit(2);
        }
    }
    wasi_ephemeral_nn_error nnret;
    wasi_ephemeral_nn_graph g;
    nnret = wasi_ephemeral_nn_load(builders, nbuilders, encoding, target, &g);
    size_t i;
    for (i = 0; i < nbuilders; i++) {
        wasi_ephemeral_nn_graph_builder *b = &builders[i];
        unmap_file(b->buf, b->size);
    }
    free(builders); /* the builder array itself was previously leaked */
    if (nnret != wasi_ephemeral_nn_error_success) {
        fprintf(stderr, "load failed with %d\n", (int)nnret);
        exit(1);
    }
    map_set(&graphs, id, g);
}
/*
 * Handle the --init-execution-context option.
 *
 * Suboptions: id=NAME (key for the new context, default "default") and
 * graph-id=NAME (which previously loaded graph to use, default
 * "default").  Exits 2 on usage errors, 1 on wasi-nn failures.
 */
static void
init_execution_context(char *options)
{
    const char *id = "default";
    const char *graph_id = "default";
    enum {
        opt_id,
        opt_graph_id,
    };
    static char *const keylistp[] = {
        [opt_id] = "id",
        [opt_graph_id] = "graph-id",
        NULL,
    };
    while (*options) {
        extern char *suboptarg;
        char *value;
        const char *saved = options;
        int key = getsubopt(&options, keylistp, &value);
        if (key != opt_id && key != opt_graph_id) {
            fprintf(stderr, "unknown subopt %s\n", saved);
            exit(2);
        }
        if (value == NULL) {
            fprintf(stderr, "no value for %s\n", saved);
            exit(2);
        }
        if (key == opt_id)
            id = value;
        else
            graph_id = value;
    }
    wasi_ephemeral_nn_graph g = map_get(&graphs, graph_id);
    wasi_ephemeral_nn_graph_execution_context c;
    wasi_ephemeral_nn_error nnret =
        wasi_ephemeral_nn_init_execution_context(g, &c);
    if (nnret != wasi_ephemeral_nn_error_success) {
        fprintf(stderr, "init_execution_context failed with %d\n", (int)nnret);
        exit(1);
    }
    map_set(&contexts, id, c);
}
/*
 * Handle the --set-input option.
 *
 * Suboptions: context-id=NAME (default "default"), dim=N (repeatable;
 * one entry per tensor dimension), type=N (wasi_ephemeral_nn_type
 * value, default fp32), idx=N (input index, default 0), file=PATH
 * (raw tensor data; exactly one required).  Maps the file, attaches it
 * to the named execution context via wasi_ephemeral_nn_set_input, then
 * unmaps it.  Exits 2 on usage errors, 1 on runtime failures.
 */
static void
set_input(char *options)
{
    int ret;
    const char *context_id = "default";
    uint32_t idx = 0;
    wasi_ephemeral_nn_tensor tensor = {
        .dimensions = { .buf = NULL, .size = 0, },
        .type = wasi_ephemeral_nn_type_fp32,
        .data = NULL,
    };
    void *buf = NULL;
    size_t sz = 0;
    enum {
        opt_context_id,
        opt_dim,
        opt_type,
        opt_idx,
        opt_file,
    };
    static char *const keylistp[] = {
        [opt_context_id] = "context-id",
        [opt_dim] = "dim",
        [opt_type] = "type",
        [opt_idx] = "idx",
        [opt_file] = "file",
        NULL,
    };
    while (*options) {
        extern char *suboptarg;
        char *value;
        /* remember the start of the current suboption so error
         * messages can show what was rejected */
        const char *saved = options;
        switch (getsubopt(&options, keylistp, &value)) {
            case opt_context_id:
                if (value == NULL) {
                    fprintf(stderr, "no value for %s\n", saved);
                    exit(2);
                }
                context_id = value;
                break;
            case opt_dim:
                if (value == NULL) {
                    fprintf(stderr, "no value for %s\n", saved);
                    exit(2);
                }
                /* grow the dimensions array by one per dim= suboption;
                 * NOTE(review): on realloc failure the old buffer is
                 * leaked, but we exit(1) immediately anyway */
                wasi_ephemeral_nn_tensor_dimensions *dims = &tensor.dimensions;
                dims->buf =
                    realloc(dims->buf, (dims->size + 1) * sizeof(*dims->buf));
                if (dims->buf == NULL) {
                    exit(1);
                }
                dims->buf[dims->size++] = atoi(value);
                break;
            case opt_type:
                if (value == NULL) {
                    fprintf(stderr, "no value for %s\n", saved);
                    exit(2);
                }
                tensor.type = atoi(value);
                break;
            case opt_file:
                if (value == NULL) {
                    fprintf(stderr, "no value for %s\n", saved);
                    exit(2);
                }
                if (buf != NULL) {
                    fprintf(stderr, "duplicated tensor data\n");
                    exit(2);
                }
                ret = map_file(value, &buf, &sz);
                if (ret != 0) {
                    fprintf(stderr, "map_file \"%s\" failed: %s\n", value,
                            strerror(ret));
                    exit(1);
                }
                break;
            case opt_idx:
                if (value == NULL) {
                    fprintf(stderr, "no value for %s\n", saved);
                    exit(2);
                }
                idx = atoi(value);
                break;
            case -1:
                fprintf(stderr, "unknown subopt %s\n", saved);
                exit(2);
        }
    }
    if (tensor.dimensions.size == 0) {
        fprintf(stderr, "no dimension is given\n");
        exit(2);
    }
    if (buf == NULL) {
        fprintf(stderr, "no tensor is given\n");
        exit(2);
    }
    /*
     * REVISIT: we can check the tensor size against type/dimensions
     * and warn the user if unexpected.
     */
    wasi_nn_error nnret;
    wasi_ephemeral_nn_graph_execution_context c =
        map_get(&contexts, context_id);
    /* fill in the mapped file as the tensor payload; the backend copies
     * it during set_input, so it is safe to unmap right after the call */
    tensor.data.buf = buf;
    tensor.data.size = sz;
    nnret = wasi_ephemeral_nn_set_input(c, idx, &tensor);
    unmap_file(buf, sz);
    if (nnret != wasi_ephemeral_nn_error_success) {
        fprintf(stderr, "set_input failed with %d\n", (int)nnret);
        exit(1);
    }
}
static void
compute(char *options)
{
const char *context_id = "default";
enum {
opt_context_id,
};
static char *const keylistp[] = {
[opt_context_id] = "context-id",
NULL,
};
while (*options) {
extern char *suboptarg;
char *value;
const char *saved = options;
switch (getsubopt(&options, keylistp, &value)) {
case opt_context_id:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
context_id = value;
break;
case -1:
fprintf(stderr, "unknown subopt %s\n", saved);
exit(2);
}
}
wasi_ephemeral_nn_graph_execution_context c =
map_get(&contexts, context_id);
wasi_ephemeral_nn_error nnret;
nnret = wasi_ephemeral_nn_compute(c);
if (nnret != wasi_ephemeral_nn_error_success) {
fprintf(stderr, "compute failed with %d\n", (int)nnret);
exit(1);
}
}
/*
 * Handle the --get-output option.
 *
 * Suboptions: context-id=NAME (default "default"), idx=N (output
 * tensor index, default 0), file=PATH (write the raw tensor there;
 * without it the result is discarded with a warning).  Retries with a
 * doubled buffer while the backend reports "too large".  Exits 2 on
 * usage errors, 1 on runtime failures.
 */
static void
get_output(char *options)
{
    int ret;
    const char *outfile = NULL;
    const char *context_id = "default";
    uint32_t idx = 0;
    enum {
        opt_context_id,
        opt_idx,
        opt_file,
    };
    static char *const keylistp[] = {
        [opt_context_id] = "context-id",
        [opt_idx] = "idx",
        [opt_file] = "file",
        NULL,
    };
    while (*options) {
        extern char *suboptarg;
        char *value;
        const char *saved = options;
        switch (getsubopt(&options, keylistp, &value)) {
            case opt_context_id:
                if (value == NULL) {
                    fprintf(stderr, "no value for %s\n", saved);
                    exit(2);
                }
                context_id = value;
                break;
            case opt_file:
                if (value == NULL) {
                    fprintf(stderr, "no value for %s\n", saved);
                    exit(2);
                }
                outfile = value;
                break;
            case opt_idx:
                if (value == NULL) {
                    fprintf(stderr, "no value for %s\n", saved);
                    exit(2);
                }
                idx = atoi(value);
                break;
            case -1:
                fprintf(stderr, "unknown subopt %s\n", saved);
                exit(2);
        }
    }
    int outfd = -1;
    if (outfile != NULL) {
        /* O_CREAT requires the mode argument; omitting it is undefined
         * behavior (garbage permissions from the stack) */
        outfd = open(outfile, O_CREAT | O_TRUNC | O_WRONLY, 0666);
        if (outfd == -1) {
            fprintf(stderr, "failed to open output file \"%s\": %s\n", outfile,
                    strerror(errno));
            exit(1);
        }
    }
    wasi_ephemeral_nn_error nnret;
    wasi_ephemeral_nn_graph_execution_context c =
        map_get(&contexts, context_id);
    void *resultbuf = NULL;
    size_t resultbufsz = 256;
    uint32_t resultsz;
retry:
    resultbuf = realloc(resultbuf, resultbufsz);
    if (resultbuf == NULL) {
        exit(1);
    }
    /* pass the requested output index; it was parsed but the call
     * previously hardcoded 0 */
    nnret =
        wasi_ephemeral_nn_get_output(c, idx, resultbuf, resultbufsz, &resultsz);
    if (nnret == wasi_ephemeral_nn_error_too_large) {
        resultbufsz *= 2;
        goto retry;
    }
    if (nnret != wasi_ephemeral_nn_error_success) {
        fprintf(stderr, "get_output failed with %d\n", (int)nnret);
        exit(1);
    }
    if (outfd != -1) {
        ssize_t written = write(outfd, resultbuf, resultsz);
        if (written == -1) {
            fprintf(stderr, "failed to write: %s\n", strerror(errno));
            exit(1);
        }
        /* short-write check; this was a duplicated `written == -1` test
         * that could never fire */
        if ((size_t)written != (size_t)resultsz) {
            fprintf(stderr, "unexpected write length %zu (expected %zu)\n",
                    (size_t)written, (size_t)resultsz);
            exit(1);
        }
        ret = close(outfd);
        if (ret != 0) {
            fprintf(stderr, "failed to close: %s\n", strerror(errno));
            exit(1);
        }
    }
    else {
        fprintf(stderr, "WARNING: discarding %zu bytes output\n",
                (size_t)resultsz);
    }
    free(resultbuf); /* was leaked */
}
/*
 * Identifiers for the long-only command line options.
 * Values start at 0x100 so they cannot collide with any
 * single-character (printable ASCII) short option returned
 * by getopt_long.
 */
enum longopt {
    opt_load_graph = 0x100,
    opt_init_execution_context,
    opt_set_input,
    opt_compute,
    opt_get_output,
};

/*
 * getopt_long(3) option table. Each entry maps a long option name to
 * the enum longopt value dispatched in main(); a NULL-name entry
 * terminates the table. Options taking getsubopt-style arguments are
 * required_argument; the rest accept an optional argument string.
 */
static const struct option longopts[] = {
    {
        "load-graph",
        required_argument,
        NULL,
        opt_load_graph,
    },
    {
        "init-execution-context",
        optional_argument,
        NULL,
        opt_init_execution_context,
    },
    {
        "set-input",
        required_argument,
        NULL,
        opt_set_input,
    },
    {
        "compute",
        optional_argument,
        NULL,
        opt_compute,
    },
    {
        "get-output",
        optional_argument,
        NULL,
        opt_get_output,
    },
    {
        NULL,
        0,
        NULL,
        0,
    },
};
/*
 * Entry point: process the long options in command-line order,
 * dispatching each one to its subcommand handler. Options with an
 * optional argument pass an empty string when none was given.
 */
int
main(int argc, char **argv)
{
    extern char *optarg;
    int opt;
    int longidx;

    while ((opt = getopt_long(argc, argv, "", longopts, &longidx)) != -1) {
        if (opt == opt_load_graph) {
            load_graph(optarg);
        }
        else if (opt == opt_init_execution_context) {
            init_execution_context(optarg != NULL ? optarg : "");
        }
        else if (opt == opt_set_input) {
            set_input(optarg);
        }
        else if (opt == opt_compute) {
            compute(optarg != NULL ? optarg : "");
        }
        else if (opt == opt_get_output) {
            get_output(optarg != NULL ? optarg : "");
        }
        else {
            /* unrecognized option: usage error */
            exit(2);
        }
    }
    exit(0);
}

View File

@ -1,58 +0,0 @@
/*
* Copyright (C) 2025 Midokura Japan KK. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "map.h"
static uintmax_t *
map_find_slot(struct map *m, const char *name)
{
size_t i;
for (i = 0; i < m->nentries; i++) {
if (!strcmp(m->entries[i].k, name)) {
return &m->entries[i].v;
}
}
return NULL;
}
/*
 * Grow the entry array by one and store (k, v) at the end.
 * The key pointer is stored as-is (not copied); the caller must keep
 * it alive for the lifetime of the map. Exits on allocation failure.
 */
static void
map_append(struct map *m, const char *k, uintmax_t v)
{
    size_t newsize = (m->nentries + 1) * sizeof(*m->entries);
    struct map_entry *grown = realloc(m->entries, newsize);
    if (grown == NULL) {
        exit(1);
    }
    m->entries = grown;
    grown[m->nentries].k = k;
    grown[m->nentries].v = v;
    m->nentries++;
}
/*
 * Insert (k, v) into the map. Duplicate keys are a fatal error:
 * a diagnostic is printed and the process exits.
 */
void
map_set(struct map *m, const char *k, uintmax_t v)
{
    if (map_find_slot(m, k) != NULL) {
        fprintf(stderr, "duplicated id \"%s\"\n", k);
        exit(1);
    }
    map_append(m, k, v);
}
/*
 * Look up the value stored under key k. A missing key is a fatal
 * error: a diagnostic is printed and the process exits.
 */
uintmax_t
map_get(struct map *m, const char *k)
{
    uintmax_t *slot = map_find_slot(m, k);
    if (slot == NULL) {
        fprintf(stderr, "id \"%s\" not found\n", k);
        exit(1);
    }
    return *slot;
}

View File

@ -1,19 +0,0 @@
/*
* Copyright (C) 2025 Midokura Japan KK. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#if !defined(MAP_H)
#define MAP_H

#include <stddef.h> /* bugfix: size_t is used below but was never declared */
#include <stdint.h>

/*
 * A tiny append-only map from C-string keys to uintmax_t values,
 * backed by a linearly-scanned dynamic array. Keys are stored by
 * pointer (not copied); callers must keep them alive.
 */
struct map {
    struct map_entry {
        const char *k; /* key (borrowed pointer) */
        uintmax_t v;   /* associated value */
    } *entries;        /* growable array of nentries entries */
    size_t nentries;
};

/* Insert (k, v); exits the process if k is already present. */
void
map_set(struct map *m, const char *k, uintmax_t v);

/* Return the value for k; exits the process if k is not found. */
uintmax_t
map_get(struct map *m, const char *k);

#endif /* !defined(MAP_H) */

View File

@ -93,7 +93,7 @@ print_result(const float *result, size_t sz)
int
main(int argc, char **argv)
{
wasi_ephemeral_nn_error nnret;
wasi_nn_error nnret;
int ret;
void *xml;
size_t xmlsz;
@ -112,7 +112,7 @@ main(int argc, char **argv)
exit(1);
}
/* note: openvino takes two buffers, namely IR and weights */
wasi_ephemeral_nn_graph_builder builders[2] = { {
graph_builder builders[2] = { {
.buf = xml,
.size = xmlsz,
},
@ -120,19 +120,17 @@ main(int argc, char **argv)
.buf = weights,
.size = weightssz,
} };
wasi_ephemeral_nn_graph g;
nnret =
wasi_ephemeral_nn_load(builders, 2, wasi_ephemeral_nn_encoding_openvino,
wasi_ephemeral_nn_target_cpu, &g);
graph g;
nnret = load(builders, 2, openvino, cpu, &g);
unmap_file(xml, xmlsz);
unmap_file(weights, weightssz);
if (nnret != wasi_ephemeral_nn_error_success) {
if (nnret != success) {
fprintf(stderr, "load failed with %d\n", (int)nnret);
exit(1);
}
wasi_ephemeral_nn_graph_execution_context ctx;
nnret = wasi_ephemeral_nn_init_execution_context(g, &ctx);
if (nnret != wasi_ephemeral_nn_error_success) {
graph_execution_context ctx;
nnret = init_execution_context(g, &ctx);
if (nnret != success) {
fprintf(stderr, "init_execution_context failed with %d\n", (int)nnret);
exit(1);
}
@ -144,28 +142,26 @@ main(int argc, char **argv)
strerror(ret));
exit(1);
}
wasi_ephemeral_nn_tensor tensor = {
tensor tensor = {
.dimensions = { .buf = (uint32_t[]){1, 3, 224, 224,}, .size = 4, },
.type = wasi_ephemeral_nn_type_fp32,
.data.buf = tensordata,
.data.size = tensordatasz,
.type = fp32,
.data = tensordata,
};
nnret = wasi_ephemeral_nn_set_input(ctx, 0, &tensor);
nnret = set_input(ctx, 0, &tensor);
unmap_file(tensordata, tensordatasz);
if (nnret != wasi_ephemeral_nn_error_success) {
if (nnret != success) {
fprintf(stderr, "set_input failed with %d\n", (int)nnret);
exit(1);
}
nnret = wasi_ephemeral_nn_compute(ctx);
if (nnret != wasi_ephemeral_nn_error_success) {
nnret = compute(ctx);
if (nnret != success) {
fprintf(stderr, "compute failed with %d\n", (int)nnret);
exit(1);
}
float result[1001];
uint32_t resultsz;
nnret = wasi_ephemeral_nn_get_output(ctx, 0, (void *)result, sizeof(result),
&resultsz);
if (nnret != wasi_ephemeral_nn_error_success) {
nnret = get_output(ctx, 0, (void *)result, sizeof(result), &resultsz);
if (nnret != success) {
fprintf(stderr, "get_output failed with %d\n", (int)nnret);
exit(1);
}

View File

@ -1,11 +0,0 @@
cmake_minimum_required(VERSION 3.14)

# Build the nslookup sample strictly as C99, without GNU extensions.
set(CMAKE_C_STANDARD 99)
set(CMAKE_C_STANDARD_REQUIRED YES)
set(CMAKE_C_EXTENSIONS NO)

project(socket-nslookup LANGUAGES C)

# The sample source lives in the WAMR core tree, not next to this file.
add_executable(socket-nslookup ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/iwasm/libraries/lib-socket/test/nslookup.c)

find_package(wamr-wasi-socket REQUIRED)
# PRIVATE: the dependency is only needed to build this executable.
target_link_libraries(socket-nslookup PRIVATE wamr-wasi-socket)

# Cap the wasm linear memory to keep the module's footprint small.
target_link_options(socket-nslookup PRIVATE -Wl,--max-memory=262144)

View File

@ -1,10 +0,0 @@
cmake_minimum_required(VERSION 3.14)

# Build the tcp_udp sample strictly as C99, without GNU extensions
# (consistency fix: the sibling socket-nslookup sample already
# disables extensions).
set(CMAKE_C_STANDARD 99)
set(CMAKE_C_STANDARD_REQUIRED YES)
set(CMAKE_C_EXTENSIONS NO)

project(socket-tcp-udp LANGUAGES C)

# The sample source lives in the WAMR core tree, not next to this file.
add_executable(socket-tcp-udp ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/iwasm/libraries/lib-socket/test/tcp_udp.c)

find_package(wamr-wasi-socket REQUIRED)
# PRIVATE: the dependency is only needed to build this executable.
target_link_libraries(socket-tcp-udp PRIVATE wamr-wasi-socket)

# Cap the wasm linear memory to keep the module's footprint small.
target_link_options(socket-tcp-udp PRIVATE -Wl,--max-memory=262144)

View File

@ -1,26 +0,0 @@
# Paths into the WAMR tree where the lib-socket sources and public
# headers live.
set(wasi_socket_dir ${CMAKE_CURRENT_SOURCE_DIR}/../../core/iwasm/libraries/lib-socket)
set(wasi_socket_header_dir ${wasi_socket_dir}/inc)

set(srcs ${wasi_socket_dir}/src/wasi/wasi_socket_ext.c)
set(headers
  ${wasi_socket_header_dir}/wasi_socket_ext.h
)

# Static stub library exposing the wasi socket extension API.
add_library(wamr-wasi-socket STATIC ${srcs})
set_property(TARGET wamr-wasi-socket PROPERTY PUBLIC_HEADER ${headers})
# Consumers see the in-tree header dir when building against the build
# tree, and <prefix>/include once installed.
target_include_directories(wamr-wasi-socket
                           PUBLIC
                           $<BUILD_INTERFACE:${wasi_socket_header_dir}>
                           $<INSTALL_INTERFACE:include>)

# as this is a library, be extra conservative about wasm features
# to improve compatibilities. as this particular library is just a
# simple static stub, extra wasm features won't benefit us much anyway.
# note that LLVM-19 enables reference-types by default.
target_compile_options(wamr-wasi-socket PRIVATE -mno-reference-types)

# Install the archive plus headers, and export a find_package()-able
# config under lib/cmake/wamr-wasi-socket.
install(TARGETS wamr-wasi-socket
        EXPORT wamr-wasi-socket-config
        PUBLIC_HEADER DESTINATION include)
install(EXPORT wamr-wasi-socket-config
        DESTINATION lib/cmake/wamr-wasi-socket)

View File

@ -3,8 +3,6 @@
# Copyright (C) 2025 Midokura Japan KK. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set -e
PREFIX=/tmp/wamr
WASI_SDK=${WASI_SDK:-/opt/wasi-sdk}
@ -19,21 +17,3 @@ cmake -B build-app-nn \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/nn
cmake --build build-app-nn
cmake -B build-app-nn-cli \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/nn-cli
cmake --build build-app-nn-cli
cmake -B build-app-socket-nslookup \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk-pthread.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/socket-nslookup
cmake --build build-app-socket-nslookup
cmake -B build-app-socket-tcp-udp \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk-pthread.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/socket-tcp-udp
cmake --build build-app-socket-tcp-udp