Compare commits

..

2 Commits

Author SHA1 Message Date
liang.he@intel.com
aaa21a543e test zephyrproject-rtos/ci image 2025-06-18 02:37:06 +00:00
liang.he@intel.com
84c24e914e Add a new CI on Zephyr
to run product-mini/platforms/zephyr/simple and
product-mini/platforms/zephyr/user-mode
2025-06-18 01:45:26 +00:00
55 changed files with 532 additions and 764 deletions

View File

@ -23,7 +23,7 @@ on:
type: string
required: true
upload_url:
description: upload binary assets to the URL of release
description: a semantic version number. it is required when `release` is true.
type: string
required: false
ver_num:

View File

@ -23,7 +23,7 @@ on:
type: string
required: true
upload_url:
description: upload binary assets to the URL of release
description: a semantic version number. it is required when `release` is true.
type: string
required: false
ver_num:

View File

@ -53,7 +53,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3.29.1
uses: github/codeql-action/init@v3.29.0
with:
languages: ${{ matrix.language }}
@ -70,7 +70,7 @@ jobs:
- run: |
./.github/scripts/codeql_buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3.29.1
uses: github/codeql-action/analyze@v3.29.0
with:
category: "/language:${{matrix.language}}"
upload: false
@ -99,7 +99,7 @@ jobs:
output: ${{ steps.step1.outputs.sarif-output }}/cpp.sarif
- name: Upload CodeQL results to code scanning
uses: github/codeql-action/upload-sarif@v3.29.1
uses: github/codeql-action/upload-sarif@v3.29.0
with:
sarif_file: ${{ steps.step1.outputs.sarif-output }}
category: "/language:${{matrix.language}}"

View File

@ -290,6 +290,28 @@ jobs:
- name: run spec tests
run: |
set +e
source /opt/intel/sgxsdk/environment
./test_wamr.sh ${{ matrix.test_option }} -t ${{ matrix.running_mode }}
attempts=0
max_attempts=3
while [ $attempts -lt $max_attempts ]; do
./test_wamr.sh ${{ matrix.test_option }} -t ${{ matrix.running_mode }}
exitcode="$?"
if [ $exitcode -eq 0 ]; then
echo "Spec test passed"
exit 0
elif [ $exitcode -ne 143 ]; then
echo "Spec test failed with error code $exitcode"
exit 1
fi
echo "$exitcode is a known GitHub-hosted runner issue"
echo "::notice::Re-running the spec test due to error code 143"
attempts=$((attempts + 1))
done
echo "::notice::Report an error with code 143 in SGX CI after $max_attempts attempts"
exit 143
working-directory: ./tests/wamr-test-suites

View File

@ -56,74 +56,97 @@ permissions:
jobs:
smoke_test:
runs-on: ubuntu-22.04
runs-on: zephyr-runner
container:
# For Zephyr 3.7 LTS, use the v0.26-branch or the latest v0.26.x release Docker image.
# ci require a larger runner to avoid "no space left on device"
image: ghcr.io/zephyrproject-rtos/ci-base:v0.26-branch
image: ghcr.io/zephyrproject-rtos/ci:v0.26-branch
options: --user root
steps:
# https://docs.zephyrproject.org/latest/develop/application/index.html#zephyr-workspace-application
# zephyrproject/ --> CI ROOT
# zephyrproject/
# ├─── .west/
# │ └─── config
# ├─── zephyr/
# ├─── bootloader/
# ├─── zephyr/ --> Zephyr source code
# ├─── zephyr-sdk/
# ├─── modules/
# │ |─── wasm-micro-runtime --> WAMR source code
# ├─── tools/
# ├─── vendor/
# └─── application/ --> DUMMY. keep west_lite.yml here
# ├─── <vendor/private-repositories>/
# └─── applications/ <- wasm-micro-runtime HERE
# └── app/
- name: Checkout code
uses: actions/checkout@v3
with:
path: modules/wasm-micro-runtime
- name: Prepare Zephyr environment
shell: bash
run: |
mkdir -p application
cp modules/wasm-micro-runtime/product-mini/platforms/zephyr/simple/west_lite.yml application/west_lite.yml
path: application
- name: Setup Zephyr project
uses: zephyrproject-rtos/action-zephyr-setup@v1
with:
app-path: application
manifest-file-name: west_lite.yml
sdk-version: ${{ env.ZEPHYR_SDK_VERSION }}
toolchains: arc-zephyr-elf:arc64-zephyr-elf
- name: Build a sample application(simple)
- name: DBG#1
shell: bash
run: |
pushd product-mini/platforms/zephyr/simple
west build . -b qemu_arc/qemu_arc_hs -p always -- -DWAMR_BUILD_TARGET=ARC
popd
pwd
ls -l .
tree -d .
# west build -t run will fork several processes, which will cause the job to hang.
# run in the background and kill it after 5 seconds
.github/scripts/run_qemu_arc.sh \
../../zephyr-sdk \
product-mini/platforms/zephyr/simple/build/zephyr/zephyr.elf &
sleep 5
pkill qemu-system-arc
working-directory: modules/wasm-micro-runtime
# - name: Generate a minimum Zephyr project
# shell: bash
# run: |
# mkdir -p ./zephyrproject/modules/zephyr
# mkdir -p ./zephyrproject/smoke-test
# cp product-mini/platforms/zephyr/simple/west_lite.yml ./zephyrproject/smoke-test/west.yml
- name: Build a sample application(user-mode)
shell: bash
run: |
pushd product-mini/platforms/zephyr/user-mode
west build . -b qemu_arc/qemu_arc_hs -p always -- -DWAMR_BUILD_TARGET=ARC
popd
# - name: Initialize west
# shell: bash
# run: |
# west init -l .
# working-directory: ./zephyrproject/smoke-test
# west build -t run will fork several processes, which will cause the job to hang.
# run in the background and kill it after 5 seconds
.github/scripts/run_qemu_arc.sh \
../../zephyr-sdk \
product-mini/platforms/zephyr/user-mode/build/zephyr/zephyr.elf &
sleep 5
pkill qemu-system-arc
working-directory: modules/wasm-micro-runtime
# - name: Update west to fetch the Zephyr project
# shell: bash
# run: west update --stats
# working-directory: ./zephyrproject
# - name: Export Zephyr environment
# shell: bash
# run: |
# west zephyr-export
# pip3 install -r ./scripts/requirements.txt
# working-directory: ./zephyrproject/modules/zephyr
# - name: Set Environment Variables
# shell: bash
# run: |
# echo "ZEPHYR_BASE=$(realpath ./zephyrproject/modules/zephyr)" >> $GITHUB_ENV
# - name: Build a sample application(simple)
# run: |
# pushd product-mini/platforms/zephyr/simple
# west build . -b qemu_arc/qemu_arc_hs -p always -- -DWAMR_BUILD_TARGET=ARC
# popd
# # west build -t run will fork several processes, which will cause the job to hang.
# # run in the background and kill it after 5 seconds
# .github/scripts/run_qemu_arc.sh \
# /opt/zephyr-sdk-${{ env.ZEPHYR_SDK_VERSION }} \
# product-mini/platforms/zephyr/simple/build/zephyr/zephyr.elf &
# sleep 5
# pkill qemu-system-arc
# - name: Build a sample application(user-mode)
# run: |
# pushd product-mini/platforms/zephyr/user-mode
# west build . -b qemu_arc/qemu_arc_hs -p always -- -DWAMR_BUILD_TARGET=ARC
# popd
# # west build -t run will fork several processes, which will cause the job to hang.
# # run in the background and kill it after 5 seconds
# .github/scripts/run_qemu_arc.sh \
# /opt/zephyr-sdk-${{ env.ZEPHYR_SDK_VERSION }} \
# product-mini/platforms/zephyr/user-mode/build/zephyr/zephyr.elf &
# sleep 5
# pkill qemu-system-arc

View File

@ -36,11 +36,12 @@ env:
LLVM_EAGER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=0 -DWAMR_BUILD_FAST_JIT=0 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=0"
MULTI_TIER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=1 -DWAMR_BUILD_FAST_JIT=1 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=1"
# For Spec Test
DEFAULT_TEST_OPTIONS: "-s spec -b -P"
MULTI_MODULES_TEST_OPTIONS: "-s spec -b -P -M"
SIMD_TEST_OPTIONS: "-s spec -b -P -S"
THREADS_TEST_OPTIONS: "-s spec -b -P -p"
X86_32_TARGET_TEST_OPTIONS: "-m x86_32"
# FIXME: use binary release(adding -b) instead of building from source after upgrading to 22.04
DEFAULT_TEST_OPTIONS: "-s spec -P"
MULTI_MODULES_TEST_OPTIONS: "-s spec -M -P"
SIMD_TEST_OPTIONS: "-s spec -S -P"
THREADS_TEST_OPTIONS: "-s spec -p -P"
X86_32_TARGET_TEST_OPTIONS: "-m x86_32 -P"
WASI_TEST_OPTIONS: "-s wasi_certification -w"
permissions:

View File

@ -60,6 +60,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@4c57370d0304fbff638216539f81d9163f77712a
uses: github/codeql-action/upload-sarif@2847b7f7ab9f48fc49eca90a53fff6007285f399
with:
sarif_file: results.sarif

View File

@ -1,57 +0,0 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
name: wamr_wasi_extensions
on:
pull_request:
types:
- opened
- synchronize
paths:
- ".github/workflows/wamr_wasi_extensions.yml"
- "wamr-wasi-extensions/**"
- "core/iwasm/libraries/wasi-nn/include/**"
- "core/iwasm/libraries/lib-socket/**"
# allow to be triggered manually
workflow_dispatch:
# Cancel any in-flight jobs for the same PR/branch so there's only one active
# at a time
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_wamr_wasi_extensions:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-22.04, macos-13, macos-14]
steps:
- name: checkout
uses: actions/checkout@v4
- name: install-wasi-sdk-wabt
uses: ./.github/actions/install-wasi-sdk-wabt
with:
os: ${{ matrix.os }}
- name: Build wamr-wasi-extensions
run: |
mkdir dist
./build_libs.sh $(pwd)/dist/wamr-wasi-extensions
working-directory: wamr-wasi-extensions
- name: Build wamr-wasi-extensions samples
run: |
./build_samples.sh $(pwd)/dist/wamr-wasi-extensions
working-directory: wamr-wasi-extensions
- name: Upload artifacts
if: matrix.os == 'macos-14'
uses: actions/upload-artifact@v4
with:
name: wamr-wasi-extensions
path: wamr-wasi-extensions/dist
retention-days: 10

View File

@ -99,9 +99,9 @@ if (NOT DEFINED WAMR_BUILD_LIB_WASI_THREADS)
set (WAMR_BUILD_LIB_WASI_THREADS 0)
endif ()
if (NOT DEFINED WAMR_BUILD_COPY_CALL_STACK)
if (NOT DEFINED WAMR_ENABLE_COPY_CALLSTACK)
# Disable copy callstack by default
set (WAMR_BUILD_COPY_CALL_STACK 0)
set (WAMR_ENABLE_COPY_CALLSTACK 0)
endif()
if (NOT DEFINED WAMR_BUILD_MINI_LOADER)

View File

@ -334,10 +334,15 @@ if (WAMR_BUILD_SHARED_HEAP EQUAL 1)
add_definitions (-DWASM_ENABLE_SHARED_HEAP=1)
message (" Shared heap enabled")
endif()
if (WAMR_BUILD_COPY_CALL_STACK EQUAL 1)
add_definitions (-DWASM_ENABLE_COPY_CALL_STACK=1)
if (WAMR_ENABLE_COPY_CALLSTACK EQUAL 1)
add_definitions (-DWAMR_ENABLE_COPY_CALLSTACK=1)
message(" Copy callstack enabled")
else ()
add_definitions (-DWAMR_ENABLE_COPY_CALLSTACK=0)
message(" Copy callstack disabled")
endif()
if (WAMR_BUILD_MEMORY64 EQUAL 1)
# if native is 32-bit or cross-compiled to 32-bit
if (NOT WAMR_BUILD_TARGET MATCHES ".*64.*")
@ -534,9 +539,6 @@ if (WAMR_BUILD_WASI_NN EQUAL 1)
if (DEFINED WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH)
add_definitions (-DWASM_WASI_NN_EXTERNAL_DELEGATE_PATH="${WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH}")
endif ()
if (NOT DEFINED WAMR_BUILD_WASI_EPHEMERAL_NN)
set(WAMR_BUILD_WASI_EPHEMERAL_NN 1)
endif()
if (WAMR_BUILD_WASI_EPHEMERAL_NN EQUAL 1)
message (" WASI-NN: use 'wasi_ephemeral_nn' instead of 'wasi-nn'")
add_definitions (-DWASM_ENABLE_WASI_EPHEMERAL_NN=1)

View File

@ -106,7 +106,6 @@ endif ()
if (WAMR_BUILD_WASI_NN EQUAL 1)
include (${IWASM_DIR}/libraries/wasi-nn/cmake/wasi_nn.cmake)
set (WAMR_BUILD_MODULE_INST_CONTEXT 1)
endif ()
if (WAMR_BUILD_LIB_PTHREAD EQUAL 1)

View File

@ -4,6 +4,7 @@
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
import argparse
import re
from pathlib import Path
import re
import shlex
@ -38,7 +39,7 @@ INVALID_FILE_NAME_SEGMENT = r"([a-zA-Z0-9]+\-[a-zA-Z0-9]+)"
def locate_command(command: str) -> bool:
if not shutil.which(command):
print(f"Command '{command}' not found")
print(f"Command '{command}'' not found")
return False
return True

View File

@ -193,8 +193,8 @@
#error "Heap aux stack allocation must be enabled for WASI threads"
#endif
#ifndef WASM_ENABLE_COPY_CALL_STACK
#define WASM_ENABLE_COPY_CALL_STACK 0
#ifndef WAMR_ENABLE_COPY_CALLSTACK
#define WAMR_ENABLE_COPY_CALLSTACK 0
#endif
#ifndef WASM_ENABLE_BASE_LIB

View File

@ -1730,12 +1730,6 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
(void)u8;
read_uint32(buf, buf_end, j);
#if WASM_ENABLE_AOT_VALIDATOR != 0
if (j >= module->type_count) {
set_error_buf(error_buf, error_buf_size, "invalid type index");
goto fail;
}
#endif
if (module->types[j]->ref_count == UINT16_MAX) {
set_error_buf(error_buf, error_buf_size,
"wasm type's ref count too large");
@ -1999,13 +1993,6 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
AOTType *cur_type = module->types[j];
parent_type_idx = cur_type->parent_type_idx;
if (parent_type_idx != (uint32)-1) { /* has parent */
#if WASM_ENABLE_AOT_VALIDATOR != 0
if (parent_type_idx >= module->type_count) {
set_error_buf(error_buf, error_buf_size,
"invalid parent type index");
goto fail;
}
#endif
AOTType *parent_type = module->types[parent_type_idx];
module->types[j]->parent_type = parent_type;
@ -2029,13 +2016,6 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
AOTType *cur_type = module->types[j];
parent_type_idx = cur_type->parent_type_idx;
if (parent_type_idx != (uint32)-1) { /* has parent */
#if WASM_ENABLE_AOT_VALIDATOR != 0
if (parent_type_idx >= module->type_count) {
set_error_buf(error_buf, error_buf_size,
"invalid parent type index");
goto fail;
}
#endif
AOTType *parent_type = module->types[parent_type_idx];
/* subtyping has been checked during compilation */
bh_assert(wasm_type_is_subtype_of(

View File

@ -4137,9 +4137,9 @@ aot_frame_update_profile_info(WASMExecEnv *exec_env, bool alloc_frame)
}
#endif /* end of WASM_ENABLE_AOT_STACK_FRAME != 0 */
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n,
char *error_buf, uint32 error_buf_size)
{
@ -4193,7 +4193,7 @@ aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
}
uint32
aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n,
char *error_buf, uint32_t error_buf_size)
{
@ -4243,7 +4243,7 @@ aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
}
uint32
aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
aot_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf,
uint32_t error_buf_size)
{
@ -4265,7 +4265,7 @@ aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
error_buf, error_buf_size);
}
}
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
#if WASM_ENABLE_DUMP_CALL_STACK != 0
bool

View File

@ -787,12 +787,12 @@ aot_frame_update_profile_info(WASMExecEnv *exec_env, bool alloc_frame);
bool
aot_create_call_stack(struct WASMExecEnv *exec_env);
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
aot_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf,
uint32_t error_buf_size);
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
/**
* @brief Dump wasm call stack or get the size

View File

@ -1743,9 +1743,9 @@ wasm_runtime_destroy_exec_env(WASMExecEnv *exec_env)
wasm_exec_env_destroy(exec_env);
}
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer,
wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf,
uint32_t error_buf_size)
{
@ -1780,7 +1780,7 @@ wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer,
strncpy(error_buf, err_msg, error_buf_size);
return 0;
}
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
bool
wasm_runtime_init_thread_env(void)

View File

@ -758,12 +758,12 @@ wasm_runtime_create_exec_env(WASMModuleInstanceCommon *module_inst,
WASM_RUNTIME_API_EXTERN void
wasm_runtime_destroy_exec_env(WASMExecEnv *exec_env);
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
WASM_RUNTIME_API_EXTERN uint32_t
wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer,
wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf,
uint32 error_buf_size);
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN WASMModuleInstanceCommon *

View File

@ -48,7 +48,7 @@ typedef struct AOTSymbolList {
} AOTSymbolList;
/* AOT object data */
struct AOTObjectData {
typedef struct AOTObjectData {
AOTCompContext *comp_ctx;
LLVMMemoryBufferRef mem_buf;
@ -82,7 +82,7 @@ struct AOTObjectData {
const char *stack_sizes_section_name;
uint32 stack_sizes_offset;
uint32 *stack_sizes;
};
} AOTObjectData;
#if 0
static void dump_buf(uint8 *buf, uint32 size, char *title)

View File

@ -3999,7 +3999,7 @@ aot_get_func_from_table(const AOTCompContext *comp_ctx, LLVMValueRef base,
if (!(func =
LLVMBuildBitCast(comp_ctx->builder, func, func_type, "func"))) {
aot_set_last_error("cast function failed.");
aot_set_last_error("cast function fialed.");
goto fail;
}
@ -4068,7 +4068,7 @@ aot_load_const_from_table(AOTCompContext *comp_ctx, LLVMValueRef base,
if (!(const_addr = LLVMBuildBitCast(comp_ctx->builder, const_addr,
const_ptr_type, "const_addr"))) {
aot_set_last_error("cast const failed.");
aot_set_last_error("cast const fialed.");
return NULL;
}

View File

@ -139,6 +139,8 @@ typedef struct wasm_frame_t {
uint32_t *lp;
} WASMCApiFrame;
typedef WASMCApiFrame wasm_frame_t;
/* WASM section */
typedef struct wasm_section_t {
struct wasm_section_t *next;
@ -902,7 +904,7 @@ wasm_runtime_destroy_exec_env(wasm_exec_env_t exec_env);
* @return number of copied frames
*/
WASM_RUNTIME_API_EXTERN uint32_t
wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer,
wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer,
const uint32_t length, const uint32_t skip_n,
char *error_buf, uint32_t error_buf_size);

View File

@ -4088,7 +4088,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
case WASM_OP_STRING_ENCODE_LOSSY_UTF8_ARRAY:
case WASM_OP_STRING_ENCODE_WTF8_ARRAY:
{
uint32 start, array_len;
uint32 start, array_len, count;
int32 bytes_written;
EncodingFlag flag = WTF8;
WASMArrayType *array_type;

View File

@ -2042,9 +2042,9 @@ load_type_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module,
"recursive type count too large");
return false;
}
module->type_count += rec_count - 1;
new_total_size =
sizeof(WASMFuncType *)
* (uint64)(module->type_count + rec_count - 1);
sizeof(WASMFuncType *) * (uint64)module->type_count;
if (new_total_size > UINT32_MAX) {
set_error_buf(error_buf, error_buf_size,
"allocate memory failed");
@ -2052,7 +2052,6 @@ load_type_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module,
}
MEM_REALLOC(module->types, (uint32)total_size,
(uint32)new_total_size);
module->type_count += rec_count - 1;
total_size = new_total_size;
}
@ -3352,8 +3351,7 @@ load_import_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module,
/* valtype */
CHECK_BUF(p, p_end, 1);
global_type = read_uint8(p);
if (wasm_is_reftype_htref_nullable(global_type)
|| wasm_is_reftype_htref_non_nullable(global_type)) {
if (wasm_is_reftype_htref_nullable(global_type)) {
int32 heap_type;
read_leb_int32(p, p_end, heap_type);
(void)heap_type;
@ -15025,6 +15023,8 @@ re_scan:
case WASM_OP_STRING_NEW_LOSSY_UTF8:
case WASM_OP_STRING_NEW_WTF8:
{
uint32 memidx;
#if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0
func->has_memory_operations = true;
#endif
@ -15036,6 +15036,7 @@ re_scan:
POP_I32();
POP_I32();
PUSH_REF(REF_TYPE_STRINGREF);
(void)memidx;
break;
}
case WASM_OP_STRING_CONST:
@ -15063,6 +15064,8 @@ re_scan:
case WASM_OP_STRING_ENCODE_LOSSY_UTF8:
case WASM_OP_STRING_ENCODE_WTF8:
{
uint32 memidx;
#if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0
func->has_memory_operations = true;
#endif
@ -15074,6 +15077,7 @@ re_scan:
POP_I32();
POP_STRINGREF();
PUSH_I32();
(void)memidx;
break;
}
case WASM_OP_STRING_CONCAT:
@ -15114,6 +15118,8 @@ re_scan:
case WASM_OP_STRINGVIEW_WTF8_ENCODE_LOSSY_UTF8:
case WASM_OP_STRINGVIEW_WTF8_ENCODE_WTF8:
{
uint32 memidx;
#if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0
func->has_memory_operations = true;
#endif
@ -15128,6 +15134,7 @@ re_scan:
POP_REF(REF_TYPE_STRINGVIEWWTF8);
PUSH_I32();
PUSH_I32();
(void)memidx;
break;
}
case WASM_OP_STRINGVIEW_WTF8_SLICE:
@ -15159,6 +15166,8 @@ re_scan:
}
case WASM_OP_STRINGVIEW_WTF16_ENCODE:
{
uint32 memidx;
#if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0
func->has_memory_operations = true;
#endif
@ -15172,6 +15181,7 @@ re_scan:
POP_I32();
POP_REF(REF_TYPE_STRINGVIEWWTF16);
PUSH_I32();
(void)memidx;
break;
}
case WASM_OP_STRINGVIEW_WTF16_SLICE:

View File

@ -2668,7 +2668,7 @@ wasm_instantiate(WASMModule *module, WASMModuleInstance *parent,
}
STORE_PTR((void **)global_data, func_obj);
global_data += sizeof(void *);
/* Also update the initial_value since other globals may
/* Also update the inital_value since other globals may
* refer to this */
global->initial_value.gc_obj = (wasm_obj_t)func_obj;
break;
@ -4195,9 +4195,9 @@ wasm_get_module_inst_mem_consumption(const WASMModuleInstance *module_inst,
#endif /* end of (WASM_ENABLE_MEMORY_PROFILING != 0) \
|| (WASM_ENABLE_MEMORY_TRACING != 0) */
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
wasm_interp_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
uint32 length, uint32 skip_n, char *error_buf,
uint32_t error_buf_size)
{
@ -4242,7 +4242,7 @@ wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
}
return count >= skip_n ? count - skip_n : 0;
}
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
#if WASM_ENABLE_DUMP_CALL_STACK != 0
bool

View File

@ -731,12 +731,12 @@ wasm_get_table_inst(const WASMModuleInstance *module_inst, uint32 tbl_idx)
#if WASM_ENABLE_DUMP_CALL_STACK != 0
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
wasm_interp_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
uint32 length, uint32 skip_n, char *error_buf,
uint32_t error_buf_size);
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
bool
wasm_interp_create_call_stack(struct WASMExecEnv *exec_env);

View File

@ -21,7 +21,6 @@
#else
#define WASI_NN_IMPORT(name) \
__attribute__((import_module("wasi_nn"), import_name(name)))
#warning You are using "wasi_nn", which is a legacy WAMR-specific ABI. It's deprecated and will likely be removed in future versions of WAMR. Please use "wasi_ephemeral_nn" instead. (For a WASM module, use the wasi_ephemeral_nn.h header instead. For the runtime configurations, enable WASM_ENABLE_WASI_EPHEMERAL_NN/WAMR_BUILD_WASI_EPHEMERAL_NN.)
#endif
/**
@ -109,13 +108,14 @@ WASI_NN_NAME(compute)
WASI_NN_ERROR_TYPE
WASI_NN_NAME(get_output)
(WASI_NN_NAME(graph_execution_context) ctx, uint32_t index,
uint8_t *output_tensor, uint32_t output_tensor_max_size,
WASI_NN_NAME(tensor_data) output_tensor, uint32_t output_tensor_max_size,
uint32_t *output_tensor_size) WASI_NN_IMPORT("get_output");
#else
WASI_NN_ERROR_TYPE
WASI_NN_NAME(get_output)
(graph_execution_context ctx, uint32_t index, uint8_t *output_tensor,
uint32_t *output_tensor_size) WASI_NN_IMPORT("get_output");
(graph_execution_context ctx, uint32_t index,
WASI_NN_NAME(tensor_data) output_tensor, uint32_t *output_tensor_size)
WASI_NN_IMPORT("get_output");
#endif
#endif

View File

@ -99,14 +99,7 @@ typedef enum {
// 4-byte f32 elements would have a data array of length 16). Naturally, this
// representation requires some knowledge of how to lay out data in
// memory--e.g., using row-major ordering--and could perhaps be improved.
#if !defined(__wasm__) || WASM_ENABLE_WASI_EPHEMERAL_NN != 0
typedef struct {
uint8_t *buf;
uint32_t size;
} WASI_NN_NAME(tensor_data);
#else
typedef uint8_t *WASI_NN_NAME(tensor_data);
#endif
// A tensor.
typedef struct {

View File

@ -99,8 +99,7 @@ graph_builder_array_app_native(wasm_module_inst_t instance,
static wasi_nn_error
tensor_data_app_native(wasm_module_inst_t instance, uint32_t total_elements,
tensor_wasm *input_tensor_wasm, void **data,
uint32_t *size)
tensor_wasm *input_tensor_wasm, tensor_data *data)
{
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
#define data_size input_tensor_wasm->data_size
@ -114,9 +113,8 @@ tensor_data_app_native(wasm_module_inst_t instance, uint32_t total_elements,
NN_ERR_PRINTF("input_tensor_wasm->data_offset is invalid");
return invalid_argument;
}
*data = wasm_runtime_addr_app_to_native(
*data = (tensor_data)wasm_runtime_addr_app_to_native(
instance, (uint64)input_tensor_wasm->data_offset);
*size = data_size;
return success;
#undef data_size
}
@ -190,19 +188,16 @@ tensor_app_native(wasm_module_inst_t instance, tensor_wasm *input_tensor_wasm,
NN_DBG_PRINTF("Tensor type: %d", input_tensor_wasm->type);
NN_DBG_PRINTF("Total number of elements: %d", total_elements);
void *data = NULL;
uint32_t datasize;
tensor_data data = NULL;
if (success
!= (res =
tensor_data_app_native(instance, total_elements,
input_tensor_wasm, &data, &datasize))) {
!= (res = tensor_data_app_native(instance, total_elements,
input_tensor_wasm, &data))) {
wasm_runtime_free(dimensions);
return res;
}
input_tensor->type = input_tensor_wasm->type;
input_tensor->dimensions = dimensions;
input_tensor->data.buf = data;
input_tensor->data.size = datasize;
input_tensor->data = data;
return success;
}

View File

@ -20,10 +20,6 @@
#include "wasi_nn_types.h"
#include "wasm_export.h"
#if WASM_ENABLE_WASI_EPHEMERAL_NN == 0
#warning You are using "wasi_nn", which is a legacy WAMR-specific ABI. It's deprecated and will likely be removed in future versions of WAMR. Please use "wasi_ephemeral_nn" instead. (For a WASM module, use the wasi_ephemeral_nn.h header instead. For the runtime configurations, enable WASM_ENABLE_WASI_EPHEMERAL_NN/WAMR_BUILD_WASI_EPHEMERAL_NN.)
#endif
#define HASHMAP_INITIAL_SIZE 20
#if defined(__APPLE__)
#define LIB_EXTENTION ".dylib"
@ -55,21 +51,53 @@ struct backends_api_functions {
NN_ERR_PRINTF("Error %s() -> %d", #func, wasi_error); \
} while (0)
static void *wasi_nn_key;
/* HashMap utils */
static HashMap *hashmap;
static uint32
hash_func(const void *key)
{
// fnv1a_hash
const uint32 FNV_PRIME = 16777619;
const uint32 FNV_OFFSET_BASIS = 2166136261U;
uint32 hash = FNV_OFFSET_BASIS;
const unsigned char *bytes = (const unsigned char *)key;
for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
hash ^= bytes[i];
hash *= FNV_PRIME;
}
return hash;
}
static bool
key_equal_func(void *key1, void *key2)
{
return key1 == key2;
}
static void
key_destroy_func(void *key1)
{
/* key type is wasm_module_inst_t*. do nothing */
}
static void
wasi_nn_ctx_destroy(WASINNContext *wasi_nn_ctx)
{
NN_DBG_PRINTF("[WASI NN] DEINIT...");
if (wasi_nn_ctx == NULL) {
NN_ERR_PRINTF(
"Error when deallocating memory. WASI-NN context is NULL");
return;
}
NN_DBG_PRINTF("[WASI NN] DEINIT...");
NN_DBG_PRINTF("Freeing wasi-nn");
NN_DBG_PRINTF("-> is_model_loaded: %d", wasi_nn_ctx->is_model_loaded);
NN_DBG_PRINTF("-> current_encoding: %d", wasi_nn_ctx->backend);
bh_assert(!wasi_nn_ctx->busy);
/* deinit() the backend */
if (wasi_nn_ctx->is_backend_ctx_initialized) {
wasi_nn_error res;
@ -77,14 +105,13 @@ wasi_nn_ctx_destroy(WASINNContext *wasi_nn_ctx)
wasi_nn_ctx->backend_ctx);
}
os_mutex_destroy(&wasi_nn_ctx->lock);
wasm_runtime_free(wasi_nn_ctx);
}
static void
dtor(wasm_module_inst_t inst, void *ctx)
value_destroy_func(void *value)
{
wasi_nn_ctx_destroy(ctx);
wasi_nn_ctx_destroy((WASINNContext *)value);
}
bool
@ -97,9 +124,12 @@ wasi_nn_initialize()
return false;
}
wasi_nn_key = wasm_runtime_create_context_key(dtor);
if (wasi_nn_key == NULL) {
NN_ERR_PRINTF("Failed to create context key");
// hashmap { instance: wasi_nn_ctx }
hashmap = bh_hash_map_create(HASHMAP_INITIAL_SIZE, true, hash_func,
key_equal_func, key_destroy_func,
value_destroy_func);
if (hashmap == NULL) {
NN_ERR_PRINTF("Error while initializing hashmap");
os_mutex_destroy(&wasi_nn_lock);
return false;
}
@ -120,11 +150,6 @@ wasi_nn_initialize_context()
}
memset(wasi_nn_ctx, 0, sizeof(WASINNContext));
if (os_mutex_init(&wasi_nn_ctx->lock)) {
NN_ERR_PRINTF("Error when initializing a lock for WASI-NN context");
wasm_runtime_free(wasi_nn_ctx);
return NULL;
}
return wasi_nn_ctx;
}
@ -133,59 +158,29 @@ static WASINNContext *
wasm_runtime_get_wasi_nn_ctx(wasm_module_inst_t instance)
{
WASINNContext *wasi_nn_ctx =
wasm_runtime_get_context(instance, wasi_nn_key);
(WASINNContext *)bh_hash_map_find(hashmap, (void *)instance);
if (wasi_nn_ctx == NULL) {
WASINNContext *newctx = wasi_nn_initialize_context();
if (newctx == NULL)
wasi_nn_ctx = wasi_nn_initialize_context();
if (wasi_nn_ctx == NULL)
return NULL;
bool ok =
bh_hash_map_insert(hashmap, (void *)instance, (void *)wasi_nn_ctx);
if (!ok) {
NN_ERR_PRINTF("Error while storing context");
wasi_nn_ctx_destroy(wasi_nn_ctx);
return NULL;
os_mutex_lock(&wasi_nn_lock);
wasi_nn_ctx = wasm_runtime_get_context(instance, wasi_nn_key);
if (wasi_nn_ctx == NULL) {
wasm_runtime_set_context_spread(instance, wasi_nn_key, newctx);
wasi_nn_ctx = newctx;
newctx = NULL;
}
os_mutex_unlock(&wasi_nn_lock);
if (newctx != NULL) {
wasi_nn_ctx_destroy(newctx);
}
}
return wasi_nn_ctx;
}
static WASINNContext *
lock_ctx(wasm_module_inst_t instance)
{
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
if (wasi_nn_ctx == NULL) {
return NULL;
}
os_mutex_lock(&wasi_nn_ctx->lock);
if (wasi_nn_ctx->busy) {
os_mutex_unlock(&wasi_nn_ctx->lock);
return NULL;
}
wasi_nn_ctx->busy = true;
os_mutex_unlock(&wasi_nn_ctx->lock);
return wasi_nn_ctx;
}
static void
unlock_ctx(WASINNContext *wasi_nn_ctx)
{
if (wasi_nn_ctx == NULL) {
return;
}
os_mutex_lock(&wasi_nn_ctx->lock);
bh_assert(wasi_nn_ctx->busy);
wasi_nn_ctx->busy = false;
os_mutex_unlock(&wasi_nn_ctx->lock);
}
void
wasi_nn_destroy()
{
wasm_runtime_destroy_context_key(wasi_nn_key);
// destroy hashmap will destroy keys and values
bh_hash_map_destroy(hashmap);
// close backends' libraries and registered functions
for (unsigned i = 0; i < sizeof(lookup) / sizeof(lookup[0]); i++) {
@ -406,7 +401,7 @@ detect_and_load_backend(graph_encoding backend_hint,
static wasi_nn_error
ensure_backend(wasm_module_inst_t instance, graph_encoding encoding,
WASINNContext *wasi_nn_ctx)
WASINNContext **wasi_nn_ctx_ptr)
{
wasi_nn_error res;
@ -417,6 +412,7 @@ ensure_backend(wasm_module_inst_t instance, graph_encoding encoding,
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
if (wasi_nn_ctx->is_backend_ctx_initialized) {
if (wasi_nn_ctx->backend != loaded_backend) {
res = unsupported_operation;
@ -434,6 +430,7 @@ ensure_backend(wasm_module_inst_t instance, graph_encoding encoding,
wasi_nn_ctx->is_backend_ctx_initialized = true;
}
*wasi_nn_ctx_ptr = wasi_nn_ctx;
return success;
fail:
return res;
@ -461,23 +458,17 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
if (!instance)
return runtime_error;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
graph_builder_array builder_native = { 0 };
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
if (success
!= (res = graph_builder_array_app_native(
instance, builder, builder_wasm_size, &builder_native)))
goto fail;
return res;
#else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */
if (success
!= (res = graph_builder_array_app_native(instance, builder,
&builder_native)))
goto fail;
return res;
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
if (!wasm_runtime_validate_native_addr(instance, g,
@ -487,7 +478,8 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
goto fail;
}
res = ensure_backend(instance, encoding, wasi_nn_ctx);
WASINNContext *wasi_nn_ctx;
res = ensure_backend(instance, encoding, &wasi_nn_ctx);
if (res != success)
goto fail;
@ -502,7 +494,6 @@ fail:
// XXX: Free intermediate structure pointers
if (builder_native.buf)
wasm_runtime_free(builder_native.buf);
unlock_ctx(wasi_nn_ctx);
return res;
}
@ -536,26 +527,18 @@ wasi_nn_load_by_name(wasm_exec_env_t exec_env, char *name, uint32_t name_len,
NN_DBG_PRINTF("[WASI NN] LOAD_BY_NAME %s...", name);
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
res = ensure_backend(instance, autodetect, wasi_nn_ctx);
WASINNContext *wasi_nn_ctx;
res = ensure_backend(instance, autodetect, &wasi_nn_ctx);
if (res != success)
goto fail;
return res;
call_wasi_nn_func(wasi_nn_ctx->backend, load_by_name, res,
wasi_nn_ctx->backend_ctx, name, name_len, g);
if (res != success)
goto fail;
return res;
wasi_nn_ctx->is_model_loaded = true;
res = success;
fail:
unlock_ctx(wasi_nn_ctx);
return res;
return success;
}
wasi_nn_error
@ -593,28 +576,19 @@ wasi_nn_load_by_name_with_config(wasm_exec_env_t exec_env, char *name,
NN_DBG_PRINTF("[WASI NN] LOAD_BY_NAME_WITH_CONFIG %s %s...", name, config);
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
res = ensure_backend(instance, autodetect, wasi_nn_ctx);
WASINNContext *wasi_nn_ctx;
res = ensure_backend(instance, autodetect, &wasi_nn_ctx);
if (res != success)
goto fail;
;
return res;
call_wasi_nn_func(wasi_nn_ctx->backend, load_by_name_with_config, res,
wasi_nn_ctx->backend_ctx, name, name_len, config,
config_len, g);
if (res != success)
goto fail;
return res;
wasi_nn_ctx->is_model_loaded = true;
res = success;
fail:
unlock_ctx(wasi_nn_ctx);
return res;
return success;
}
wasi_nn_error
@ -628,27 +602,20 @@ wasi_nn_init_execution_context(wasm_exec_env_t exec_env, graph g,
return runtime_error;
}
wasi_nn_error res;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail;
return res;
if (!wasm_runtime_validate_native_addr(
instance, ctx, (uint64)sizeof(graph_execution_context))) {
NN_ERR_PRINTF("ctx is invalid");
res = invalid_argument;
goto fail;
return invalid_argument;
}
call_wasi_nn_func(wasi_nn_ctx->backend, init_execution_context, res,
wasi_nn_ctx->backend_ctx, g, ctx);
fail:
unlock_ctx(wasi_nn_ctx);
return res;
}
@ -663,21 +630,17 @@ wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx,
return runtime_error;
}
wasi_nn_error res;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail;
return res;
tensor input_tensor_native = { 0 };
if (success
!= (res = tensor_app_native(instance, input_tensor,
&input_tensor_native)))
goto fail;
return res;
call_wasi_nn_func(wasi_nn_ctx->backend, set_input, res,
wasi_nn_ctx->backend_ctx, ctx, index,
@ -685,8 +648,7 @@ wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx,
// XXX: Free intermediate structure pointers
if (input_tensor_native.dimensions)
wasm_runtime_free(input_tensor_native.dimensions);
fail:
unlock_ctx(wasi_nn_ctx);
return res;
}
@ -700,32 +662,26 @@ wasi_nn_compute(wasm_exec_env_t exec_env, graph_execution_context ctx)
return runtime_error;
}
wasi_nn_error res;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail;
return res;
call_wasi_nn_func(wasi_nn_ctx->backend, compute, res,
wasi_nn_ctx->backend_ctx, ctx);
fail:
unlock_ctx(wasi_nn_ctx);
return res;
}
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
wasi_nn_error
wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
uint32_t index, void *output_tensor,
uint32_t index, tensor_data output_tensor,
uint32_t output_tensor_len, uint32_t *output_tensor_size)
#else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */
wasi_nn_error
wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
uint32_t index, void *output_tensor,
uint32_t index, tensor_data output_tensor,
uint32_t *output_tensor_size)
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
{
@ -736,36 +692,28 @@ wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
return runtime_error;
}
wasi_nn_error res;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail;
return res;
if (!wasm_runtime_validate_native_addr(instance, output_tensor_size,
(uint64)sizeof(uint32_t))) {
NN_ERR_PRINTF("output_tensor_size is invalid");
res = invalid_argument;
goto fail;
return invalid_argument;
}
tensor_data tensor = {
.buf = output_tensor,
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
.size = output_tensor_len,
#else
.size = *output_tensor_size,
#endif
};
call_wasi_nn_func(wasi_nn_ctx->backend, get_output, res,
wasi_nn_ctx->backend_ctx, ctx, index, &tensor,
wasi_nn_ctx->backend_ctx, ctx, index, output_tensor,
&output_tensor_len);
*output_tensor_size = output_tensor_len;
#else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */
call_wasi_nn_func(wasi_nn_ctx->backend, get_output, res,
wasi_nn_ctx->backend_ctx, ctx, index, output_tensor,
output_tensor_size);
fail:
unlock_ctx(wasi_nn_ctx);
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
return res;
}

View File

@ -2,10 +2,7 @@
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stdlib.h>
#include "wasi_nn_backend.h"
#include "wasi_nn_types.h"
#include "utils/logger.h"
#include "llama.h"
#include "ggml.h"
@ -289,7 +286,7 @@ deinit_backend(void *ctx)
llama_backend_free();
free(backend_ctx);
os_free(backend_ctx);
return success;
}
@ -384,18 +381,18 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor *wasi_nn_tensor)
{
struct LlamaContext *backend_ctx = (struct LlamaContext *)ctx;
// tensor->data is the prompt string.
char *prompt_text = (char *)wasi_nn_tensor->data.buf;
uint32_t prompt_text_len = wasi_nn_tensor->data.size;
// tensor->data is the prompt string. ends with \0
char *prompt_text = (char *)wasi_nn_tensor->data;
#ifndef NDEBUG
NN_DBG_PRINTF("--------------------------------------------------");
NN_DBG_PRINTF("prompt_text: %.*s", (int)prompt_text_len, prompt_text);
NN_DBG_PRINTF("prompt_text: %s", prompt_text);
NN_DBG_PRINTF("--------------------------------------------------");
#endif
// tokenize the prompt
uint32_t n_token_max = llama_n_ctx(backend_ctx->ctx);
uint32_t prompt_text_len = strlen(prompt_text);
if (backend_ctx->prompt == NULL) {
backend_ctx->prompt = calloc(n_token_max, sizeof(llama_token));
@ -480,6 +477,7 @@ compute(void *ctx, graph_execution_context exec_ctx)
// main loop
int32_t n_cur = batch.n_tokens;
int n_decode = 0;
int32_t n_vocab = llama_n_vocab(backend_ctx->model);
llama_token_data *candidates = NULL;
@ -530,6 +528,7 @@ compute(void *ctx, graph_execution_context exec_ctx)
// push this new token for next evaluation
llama_batch_add(&batch, new_token_id, n_cur, seq_ids,
sizeof(seq_ids) / sizeof(seq_ids[0]), true);
n_decode++;
n_cur++;
if (llama_decode(backend_ctx->ctx, batch) != 0) {
@ -550,7 +549,7 @@ fail:
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor_data *output_tensor, uint32_t *output_tensor_size)
tensor_data output_tensor, uint32_t *output_tensor_size)
{
struct LlamaContext *backend_ctx = (struct LlamaContext *)ctx;
@ -569,7 +568,7 @@ get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
printf("%s\n", output_metadata);
}
memcpy(output_tensor->buf, output_metadata, strlen(output_metadata));
memcpy(output_tensor, output_metadata, strlen(output_metadata));
*output_tensor_size = strlen(output_metadata);
return success;
}
@ -589,7 +588,7 @@ get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
printf("%s", buf);
}
memcpy(output_tensor->buf + end_pos, buf, strlen(buf));
memcpy(output_tensor + end_pos, buf, strlen(buf));
end_pos += strlen(buf);
}

View File

@ -3,7 +3,8 @@
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "wasi_nn_backend.h"
#include "wasi_nn_types.h"
#include "wasi_nn_openvino.h"
#include "utils/logger.h"
#include "bh_platform.h"
@ -25,25 +26,15 @@
* from 4. to 6. is the Inference Loop
*/
/* these limits are arbitrary. */
#define MAX_GRAPHS 4
#define MAX_EXECUTION_CONTEXTS 4
typedef struct {
ov_core_t *core;
/* keep input model files */
struct OpenVINOGraph {
void *weight_data;
ov_tensor_t *weights_tensor;
ov_model_t *model;
ov_compiled_model_t *compiled_model;
} graphs[MAX_GRAPHS];
struct OpenVINOExecutionContext {
struct OpenVINOGraph *graph;
ov_infer_request_t *infer_request;
} execution_contexts[MAX_EXECUTION_CONTEXTS];
unsigned int n_graphs;
unsigned int n_execution_contexts;
void *weight_data;
ov_tensor_t *weights_tensor;
ov_model_t *model;
ov_compiled_model_t *compiled_model;
ov_infer_request_t *infer_request;
ov_tensor_t *input_tensor;
} OpenVINOContext;
/*
@ -188,29 +179,6 @@ wasi_nn_tensor_type_to_openvino_element_type(tensor_type wasi_nn_type)
return UNDEFINED;
}
static void
free_graph(struct OpenVINOGraph *graph)
{
if (graph->weight_data)
os_free(graph->weight_data);
if (graph->weights_tensor)
ov_tensor_free(graph->weights_tensor);
if (graph->model)
ov_model_free(graph->model);
if (graph->compiled_model)
ov_compiled_model_free(graph->compiled_model);
}
static void
free_execution_context(struct OpenVINOExecutionContext *c)
{
if (c->infer_request)
ov_infer_request_free(c->infer_request);
}
static wasi_nn_error
uint32_array_to_int64_array(uint32_t array_size, uint32_t *src, int64_t **dst)
{
@ -230,8 +198,6 @@ load(void *ctx, graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *g)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOGraph *graph;
unsigned int graph_idx;
wasi_nn_error ret = unsupported_operation;
if (encoding != openvino) {
@ -257,47 +223,33 @@ load(void *ctx, graph_builder_array *builder, graph_encoding encoding,
graph_builder xml = builder->buf[0];
graph_builder weight = builder->buf[1];
graph_idx = ov_ctx->n_graphs;
if (graph_idx >= MAX_GRAPHS) {
return runtime_error;
}
graph = &ov_ctx->graphs[graph_idx];
memset(graph, 0, sizeof(*graph));
/* transfer weight to an ov tensor */
{
graph->weight_data = os_malloc(weight.size);
if (!graph->weight_data)
ov_ctx->weight_data = os_malloc(weight.size);
if (!ov_ctx->weight_data)
goto fail;
memcpy(graph->weight_data, weight.buf, weight.size);
memcpy(ov_ctx->weight_data, weight.buf, weight.size);
ov_element_type_e type = U8;
int64_t dims[1] = { weight.size };
ov_shape_t shape = { 1, dims };
CHECK_OV_STATUS(ov_tensor_create_from_host_ptr(type, shape,
graph->weight_data,
&graph->weights_tensor),
ov_ctx->weight_data,
&ov_ctx->weights_tensor),
ret);
}
/* load model from buffer */
CHECK_OV_STATUS(ov_core_read_model_from_memory_buffer(
ov_ctx->core, (char *)xml.buf, xml.size,
graph->weights_tensor, &graph->model),
ov_ctx->weights_tensor, &ov_ctx->model),
ret);
#ifndef NDEBUG
print_model_input_output_info(graph->model);
print_model_input_output_info(ov_ctx->model);
#endif
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, graph->model, "CPU", 0,
&graph->compiled_model),
ret);
*g = graph_idx;
ov_ctx->n_graphs++;
return success;
ret = success;
fail:
free_graph(graph);
return ret;
}
@ -305,62 +257,20 @@ __attribute__((visibility("default"))) wasi_nn_error
load_by_name(void *ctx, const char *filename, uint32_t filename_len, graph *g)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOGraph *graph;
unsigned int graph_idx;
wasi_nn_error ret = unsupported_operation;
graph_idx = ov_ctx->n_graphs;
if (graph_idx >= MAX_GRAPHS) {
return runtime_error;
}
graph = &ov_ctx->graphs[graph_idx];
memset(graph, 0, sizeof(*graph));
CHECK_OV_STATUS(
ov_core_read_model(ov_ctx->core, filename, NULL, &graph->model), ret);
ov_core_read_model(ov_ctx->core, filename, NULL, &ov_ctx->model), ret);
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, graph->model, "CPU", 0,
&graph->compiled_model),
ret);
*g = graph_idx;
ov_ctx->n_graphs++;
return success;
ret = success;
fail:
free_graph(graph);
return ret;
}
__attribute__((visibility("default"))) wasi_nn_error
init_execution_context(void *ctx, graph g, graph_execution_context *exec_ctx)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOGraph *graph;
struct OpenVINOExecutionContext *exec;
unsigned int exec_idx;
wasi_nn_error ret;
if (g >= ov_ctx->n_graphs)
return runtime_error;
graph = &ov_ctx->graphs[g];
exec_idx = ov_ctx->n_execution_contexts;
if (exec_idx >= MAX_EXECUTION_CONTEXTS)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_idx];
memset(exec, 0, sizeof(*exec));
exec->graph = graph;
CHECK_OV_STATUS(ov_compiled_model_create_infer_request(
graph->compiled_model, &exec->infer_request),
ret);
*exec_ctx = exec_idx;
ov_ctx->n_execution_contexts++;
return success;
fail:
return ret;
}
__attribute__((visibility("default"))) wasi_nn_error
@ -368,16 +278,10 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor *wasi_nn_tensor)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOExecutionContext *exec;
wasi_nn_error ret = unsupported_operation;
ov_shape_t input_shape = { 0 };
ov_tensor_t *input_tensor = NULL;
int64_t *ov_dims = NULL;
if (exec_ctx >= ov_ctx->n_execution_contexts)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_ctx];
/* wasi_nn_tensor -> ov_tensor */
{
ret = uint32_array_to_int64_array(wasi_nn_tensor->dimensions->size,
@ -401,21 +305,28 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
shape_info);
CHECK_OV_STATUS(ov_tensor_create_from_host_ptr(input_type, input_shape,
wasi_nn_tensor->data.buf,
&input_tensor),
wasi_nn_tensor->data,
&ov_ctx->input_tensor),
ret);
}
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, ov_ctx->model, "CPU", 0,
&ov_ctx->compiled_model),
ret);
CHECK_OV_STATUS(ov_compiled_model_create_infer_request(
ov_ctx->compiled_model, &ov_ctx->infer_request),
ret);
/* install ov_tensor -> infer_request */
CHECK_OV_STATUS(ov_infer_request_set_input_tensor_by_index(
exec->infer_request, index, input_tensor),
ov_ctx->infer_request, index, ov_ctx->input_tensor),
ret);
ret = success;
fail:
if (ov_dims)
os_free(ov_dims);
if (input_tensor)
ov_tensor_free(input_tensor);
ov_shape_free(&input_shape);
return ret;
@ -425,14 +336,9 @@ __attribute__((visibility("default"))) wasi_nn_error
compute(void *ctx, graph_execution_context exec_ctx)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOExecutionContext *exec;
wasi_nn_error ret = unsupported_operation;
if (exec_ctx >= ov_ctx->n_execution_contexts)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_ctx];
CHECK_OV_STATUS(ov_infer_request_infer(exec->infer_request), ret);
CHECK_OV_STATUS(ov_infer_request_infer(ov_ctx->infer_request), ret);
ret = success;
fail:
return ret;
@ -440,33 +346,28 @@ fail:
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor_data *output_tensor, uint32_t *output_tensor_size)
tensor_data output_tensor, uint32_t *output_tensor_size)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOExecutionContext *exec;
wasi_nn_error ret = unsupported_operation;
ov_tensor_t *ov_tensor = NULL;
void *data = NULL;
size_t byte_size = 0;
if (exec_ctx >= ov_ctx->n_execution_contexts)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_ctx];
CHECK_OV_STATUS(ov_infer_request_get_output_tensor_by_index(
exec->infer_request, index, &ov_tensor),
ov_ctx->infer_request, index, &ov_tensor),
ret);
CHECK_OV_STATUS(ov_tensor_get_byte_size(ov_tensor, &byte_size), ret);
if (byte_size > output_tensor->size) {
if (byte_size > *output_tensor_size) {
ret = too_large;
goto fail;
}
CHECK_OV_STATUS(ov_tensor_data(ov_tensor, &data), ret);
memcpy(output_tensor->buf, data, byte_size);
memcpy(output_tensor, data, byte_size);
*output_tensor_size = (uint32_t)byte_size;
@ -520,16 +421,27 @@ __attribute__((visibility("default"))) wasi_nn_error
deinit_backend(void *ctx)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
unsigned int i;
if (!ov_ctx)
return invalid_argument;
for (i = 0; i < ov_ctx->n_execution_contexts; i++)
free_execution_context(&ov_ctx->execution_contexts[i]);
if (ov_ctx->weight_data)
os_free(ov_ctx->weight_data);
for (i = 0; i < ov_ctx->n_graphs; i++)
free_graph(&ov_ctx->graphs[i]);
if (ov_ctx->weights_tensor)
ov_tensor_free(ov_ctx->weights_tensor);
if (ov_ctx->input_tensor)
ov_tensor_free(ov_ctx->input_tensor);
if (ov_ctx->infer_request)
ov_infer_request_free(ov_ctx->infer_request);
if (ov_ctx->compiled_model)
ov_compiled_model_free(ov_ctx->compiled_model);
if (ov_ctx->model)
ov_model_free(ov_ctx->model);
if (ov_ctx->core)
ov_core_free(ov_ctx->core);

View File

@ -3,26 +3,15 @@
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_BACKEND_H
#define WASI_NN_BACKEND_H
#ifndef WASI_NN_OPENVINO_HPP
#define WASI_NN_OPENVINO_HPP
#include "wasi_nn_types.h"
#ifdef __cplusplus
extern "C" {
#endif
__attribute__((visibility("default"))) wasi_nn_error
load(void *ctx, graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *g);
__attribute__((visibility("default"))) wasi_nn_error
load_by_name(void *tflite_ctx, const char *name, uint32_t namelen, graph *g);
__attribute__((visibility("default"))) wasi_nn_error
load_by_name_with_config(void *ctx, const char *name, uint32_t namelen,
const char *config, uint32_t config_len, graph *g);
__attribute__((visibility("default"))) wasi_nn_error
init_execution_context(void *ctx, graph g, graph_execution_context *exec_ctx);
@ -35,7 +24,7 @@ compute(void *ctx, graph_execution_context exec_ctx);
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor_data *output_tensor, uint32_t *output_tensor_size);
tensor_data output_tensor, uint32_t *output_tensor_size);
__attribute__((visibility("default"))) wasi_nn_error
init_backend(void **ctx);
@ -43,8 +32,4 @@ init_backend(void **ctx);
__attribute__((visibility("default"))) wasi_nn_error
deinit_backend(void *ctx);
#ifdef __cplusplus
}
#endif
#endif /* WASI_NN_BACKEND_H */
#endif /* WASI_NN_OPENVINO_HPP */

View File

@ -9,11 +9,7 @@
#include "wasi_nn_types.h"
#include "wasm_export.h"
#include "bh_platform.h"
typedef struct {
korp_mutex lock;
bool busy;
bool is_backend_ctx_initialized;
bool is_model_loaded;
graph_encoding backend;
@ -32,7 +28,7 @@ typedef wasi_nn_error (*SET_INPUT)(void *, graph_execution_context, uint32_t,
tensor *);
typedef wasi_nn_error (*COMPUTE)(void *, graph_execution_context);
typedef wasi_nn_error (*GET_OUTPUT)(void *, graph_execution_context, uint32_t,
tensor_data *, uint32_t *);
tensor_data, uint32_t *);
/* wasi-nn general APIs */
typedef wasi_nn_error (*BACKEND_INITIALIZE)(void **);
typedef wasi_nn_error (*BACKEND_DEINITIALIZE)(void *);

View File

@ -3,10 +3,11 @@
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "wasi_nn_tensorflowlite.hpp"
#include "utils/logger.h"
#include "bh_platform.h"
#include "wasi_nn_backend.h"
#include "wasi_nn_types.h"
#include "wasm_export.h"
#include <tensorflow/lite/interpreter.h>
@ -280,11 +281,6 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
{
TFLiteContext *tfl_ctx = (TFLiteContext *)tflite_ctx;
if (input_tensor->type != fp32) {
NN_ERR_PRINTF("unsupported input tensor type %u", input_tensor->type);
return runtime_error;
}
wasi_nn_error res;
if (success != (res = is_valid_graph_execution_context(tfl_ctx, ctx)))
return res;
@ -323,7 +319,7 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
index);
int size = model_tensor_size * sizeof(float);
bh_memcpy_s(it, size, input_tensor->data.buf, size);
bh_memcpy_s(it, size, input_tensor->data, size);
}
else { // TODO: Assuming uint8 quantized networks.
TfLiteAffineQuantization *quant_info =
@ -341,7 +337,7 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
NN_DBG_PRINTF("input tensor: (scale, offset) = (%f, %f)", scale,
zero_point);
float *input_tensor_f = (float *)input_tensor->data.buf;
float *input_tensor_f = (float *)input_tensor->data;
for (uint32_t i = 0; i < model_tensor_size; ++i) {
it[i] = (uint8_t)(input_tensor_f[i] / scale + zero_point);
}
@ -365,7 +361,7 @@ compute(void *tflite_ctx, graph_execution_context ctx)
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
tensor_data *output_tensor, uint32_t *output_tensor_size)
tensor_data output_tensor, uint32_t *output_tensor_size)
{
TFLiteContext *tfl_ctx = (TFLiteContext *)tflite_ctx;
@ -388,34 +384,23 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
return too_large;
}
uint32_t model_tensor_size = 1;
for (int i = 0; i < (int)tensor->dims->size; ++i)
model_tensor_size *= (uint32_t)tensor->dims->data[i];
if (*output_tensor_size < model_tensor_size) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
if (tensor->quantization.type == kTfLiteNoQuantization) {
NN_DBG_PRINTF("No quantization information");
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
if (output_tensor->size < tensor->bytes) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
if (output_tensor->size < tensor->bytes / sizeof(float)) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#endif
bh_memcpy_s(output_tensor->buf, output_tensor->size, tensor->data.data,
tensor->bytes);
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
*output_tensor_size = tensor->bytes;
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
*output_tensor_size = tensor->bytes / sizeof(float);
#endif
float *ot =
tfl_ctx->interpreters[ctx].interpreter->typed_output_tensor<float>(
index);
int size = model_tensor_size * sizeof(float);
bh_memcpy_s(output_tensor, size, ot, size);
}
else { // TODO: Assuming uint8 quantized networks.
TfLiteAffineQuantization *quant_info =
@ -424,27 +409,6 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
NN_ERR_PRINTF("Quantization per channel is not supported");
return runtime_error;
}
uint32_t model_tensor_size = 1;
for (int i = 0; i < (int)tensor->dims->size; ++i)
model_tensor_size *= (uint32_t)tensor->dims->data[i];
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
if (output_tensor->size / sizeof(float) < model_tensor_size) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
if (output_tensor->size < model_tensor_size) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#endif
uint8_t *ot = tfl_ctx->interpreters[ctx]
.interpreter->typed_output_tensor<uint8_t>(index);
@ -453,22 +417,13 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
NN_DBG_PRINTF("output tensor: (scale, offset) = (%f, %f)", scale,
zero_point);
float *output_tensor_f = (float *)output_tensor->buf;
float *output_tensor_f = (float *)output_tensor;
for (uint32_t i = 0; i < model_tensor_size; ++i) {
output_tensor_f[i] = (ot[i] - zero_point) * scale;
}
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
*output_tensor_size = model_tensor_size * sizeof(float);
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
*output_tensor_size = model_tensor_size;
#endif
}
*output_tensor_size = model_tensor_size;
return success;
}

View File

@ -0,0 +1,47 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_TENSORFLOWLITE_HPP
#define WASI_NN_TENSORFLOWLITE_HPP
#include "wasi_nn_types.h"
#ifdef __cplusplus
extern "C" {
#endif
__attribute__((visibility("default"))) wasi_nn_error
load(void *tflite_ctx, graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *g);
__attribute__((visibility("default"))) wasi_nn_error
load_by_name(void *tflite_ctx, const char *filename, uint32_t filename_len,
graph *g);
__attribute__((visibility("default"))) wasi_nn_error
init_execution_context(void *tflite_ctx, graph g, graph_execution_context *ctx);
__attribute__((visibility("default"))) wasi_nn_error
set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
tensor *input_tensor);
__attribute__((visibility("default"))) wasi_nn_error
compute(void *tflite_ctx, graph_execution_context ctx);
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
tensor_data output_tensor, uint32_t *output_tensor_size);
__attribute__((visibility("default"))) wasi_nn_error
init_backend(void **tflite_ctx);
__attribute__((visibility("default"))) wasi_nn_error
deinit_backend(void *tflite_ctx);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -3,17 +3,6 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# on intel mac, this ends up with a lot of the following error.
#
# AttributeError: 'Sequential' object has no attribute '_get_save_spec'.
#
# * "pip install tensorflow" installs tensorflow 2.16.2 on intel mac.
# (because it's the last version before tf deprecated the target.)
# * keras 3 support in the version seems incomplete (thus the error)
# * a workaround: use keras 2 as mentioned in:
# https://github.com/tensorflow/tensorflow/releases/tag/v2.16.1
# https://blog.tensorflow.org/2024/03/whats-new-in-tensorflow-216.html
CURR_PATH=$(cd $(dirname $0) && pwd -P)
# WASM application that uses WASI-NN

View File

@ -3,7 +3,7 @@
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import AveragePooling2D, Conv2D
from keras.layers import AveragePooling2D, Conv2D
from tensorflow.keras import Input, Model

View File

@ -35,8 +35,8 @@ extend_vector(Vector *vector, size_t length)
if (length <= vector->max_elems)
return true;
if (length < vector->max_elems * 3 / 2)
length = vector->max_elems * 3 / 2;
if (length < vector->size_elem * 3 / 2)
length = vector->size_elem * 3 / 2;
if (!(data = alloc_vector_data(length, vector->size_elem))) {
return false;
@ -194,12 +194,12 @@ bh_vector_append(Vector *vector, const void *elem_buf)
goto just_return;
}
/* make sure one more slot is used by the thread who allocates it */
/* make sure one more slot is used by the thread who allocas it */
if (vector->lock)
os_mutex_lock(vector->lock);
if (!extend_vector(vector, vector->num_elems + 1)) {
LOG_ERROR("Append vector elem failed: extend vector failed.\n");
LOG_ERROR("Append ector elem failed: extend vector failed.\n");
goto unlock_return;
}

View File

@ -102,7 +102,6 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM
### **Enable lib wasi-nn**
- **WAMR_BUILD_WASI_NN**=1/0, default to disable if not set
> Note: WAMR_BUILD_WASI_NN without WAMR_BUILD_WASI_EPHEMERAL_NN is deprecated and will likely be removed in future versions of WAMR. Please consider to enable WAMR_BUILD_WASI_EPHEMERAL_NN as well.
> Note: See [WASI-NN](../core/iwasm/libraries/wasi-nn) for more details.
### **Enable lib wasi-nn GPU mode**
@ -114,7 +113,7 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM
- **WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH**=Path to the external delegate shared library (e.g. `libedgetpu.so.1.0` for Coral USB)
### **Enable lib wasi-nn with `wasi_ephemeral_nn` module support**
- **WAMR_BUILD_WASI_EPHEMERAL_NN**=1/0, default to enable if not set
- **WAMR_BUILD_WASI_EPHEMERAL_NN**=1/0, default to disable if not set
### **Disable boundary check with hardware trap**
- **WAMR_DISABLE_HW_BOUND_CHECK**=1/0, default to enable if not set and supported by platform
@ -361,4 +360,4 @@ For Valgrind, begin with the following configurations and add additional ones as
-DWAMR_DISABLE_HW_BOUND_CHECK=0 \
-DWAMR_DISABLE_WRITE_GS_BASE=0
#...
```
```

View File

@ -4,22 +4,9 @@
# Refer to https://docs.zephyrproject.org/3.7.0/develop/getting_started/index.html
# for more information on how to set up the Zephyr development environment.
# https://docs.zephyrproject.org/latest/develop/application/index.html#zephyr-workspace-application
# zephyrproject/ --> CI ROOT
# ├─── .west/
# │ └─── config
# ├─── bootloader/
# ├─── zephyr/ --> Zephyr source code
# ├─── zephyr-sdk/
# ├─── modules/
# │ |─── wasm-micro-runtime --> WAMR source code
# ├─── tools/
# ├─── vendor/
# └─── application/ --> DUMMY. keep west_lite.yml here
# If you modify this file, you may need to sync the modifications to the
# .github/actions/setup-zephyr/action.yml
FROM ghcr.io/zephyrproject-rtos/ci-base:v0.26-branch
FROM ubuntu:22.04
ARG DEBIAN_FRONTEND=noninteractive
ENV TZ=Asian/Shanghai
@ -27,30 +14,44 @@ ARG ZEPHYR_SDK_VERSION=0.16.9
# In west_lite.yml, the Zephyr version is set to v3.7.0
#ARG ZEPHYR_VERSION=3.7.0
# Install dependencies for Zephyr
# hadolint ignore=DL3008
RUN apt-get update && apt-get install -y --no-install-recommends git cmake ninja-build gperf \
ccache dfu-util device-tree-compiler wget \
python3-dev python3-pip python3-setuptools python3-tk python3-wheel xz-utils file \
make gcc gcc-multilib g++-multilib libsdl2-dev libmagic1 \
&& apt-get clean -y && rm -rf /var/lib/apt/lists/*
# Install the Zephyr Software Development Kit (SDK)
WORKDIR /root/zephyrproject/zephyr-sdk
WORKDIR /opt
# hadolint ignore=DL4006
RUN wget --progress=dot:giga https://github.com/zephyrproject-rtos/sdk-ng/releases/download/v${ZEPHYR_SDK_VERSION}/zephyr-sdk-${ZEPHYR_SDK_VERSION}_linux-x86_64.tar.xz \
&& wget --progress=dot:giga -O - https://github.com/zephyrproject-rtos/sdk-ng/releases/download/v${ZEPHYR_SDK_VERSION}/sha256.sum | shasum --check --ignore-missing \
&& tar --strip-components=1 -xf zephyr-sdk-${ZEPHYR_SDK_VERSION}_linux-x86_64.tar.xz && rm zephyr-sdk-${ZEPHYR_SDK_VERSION}_linux-x86_64.tar.xz
&& tar xf zephyr-sdk-${ZEPHYR_SDK_VERSION}_linux-x86_64.tar.xz && rm zephyr-sdk-${ZEPHYR_SDK_VERSION}_linux-x86_64.tar.xz
WORKDIR /opt/zephyr-sdk-${ZEPHYR_SDK_VERSION}
# hadolint ignore=DL4006
# Install host tools and Register Zephyr SDK CMake package
RUN ./setup.sh -h -c
# Install west
# hadolint ignore=DL3013,DL3059
RUN pip3 install --no-cache-dir west
# Get Zephyr
WORKDIR /root/zephyrproject/smoke-test
# Setup a T2(Star topology) workspace
WORKDIR /root/zephyrproject/application
COPY ./west_lite.yml ./west_lite.yml
# hadolint ignore=DL3013
RUN pip3 install --no-cache-dir west
COPY ./west_lite.yml ./west.yml
# init the west workspace with a minimal manifest
RUN west init -l --mf west_lite.yml .
RUN west init -l
WORKDIR /root/zephyrproject
RUN west update --stats
WORKDIR /root/zephyrproject/modules/zephyr
RUN west zephyr-export && pip install --no-cache-dir -r ./scripts/requirements.txt
ENV ZEPHYR_BASE="/root/zephyrproject/modules/zephyr"
# Git clone wamr
WORKDIR /root/zephyrproject/modules/
RUN git clone https://github.com/bytecodealliance/wasm-micro-runtime.git wasm-micro-runtime

View File

@ -8,8 +8,8 @@ manifest:
url: https://github.com/zephyrproject-rtos/zephyr
revision: v3.7.0
clone-depth: 1
path: zephyr
path: modules/zephyr
west-commands: scripts/west-commands.yml
self:
path: application
path: smoke-test

View File

@ -72,7 +72,7 @@ def to_json(inst, cls):
class Fuzzing(db.Model):
__tablename__ = 'fuzzing_task'
__tablename__ = 'fazzing_task'
id = db.Column(db.Integer, autoincrement=True,
primary_key=True, nullable=False)
repo = db.Column(db.String(200), nullable=False, default='')
@ -96,7 +96,7 @@ class TaskError(db.Model):
__tablename__ = 'task_error'
id = db.Column(db.Integer, autoincrement=True,
primary_key=True, nullable=False)
fuzzing_id = db.Column(db.Integer, db.ForeignKey("fuzzing_task.id"))
fazzing_id = db.Column(db.Integer, db.ForeignKey("fazzing_task.id"))
name = db.Column(db.String(200), nullable=False, default='')
std_out = db.Column(db.Text, default='')
data = db.Column(db.JSON)
@ -119,9 +119,9 @@ def to_data(data):
def error_count(data):
error = len(TaskError.query.filter(
TaskError.fuzzing_id == data.get('id'), TaskError.status.in_([1, 2])).all())
TaskError.fazzing_id == data.get('id'), TaskError.status.in_([1, 2])).all())
end_error = len(TaskError.query.filter(
TaskError.fuzzing_id == data.get('id'), TaskError.status == 0).all())
TaskError.fazzing_id == data.get('id'), TaskError.status == 0).all())
data['error'] = error
data['end_error'] = end_error
return data
@ -159,11 +159,11 @@ def show_fuzz_list():
id = data.get('id')
if id:
all_error = TaskError.query.filter(
TaskError.fuzzing_id == id).with_entities(TaskError.id, TaskError.fuzzing_id,
TaskError.fazzing_id == id).with_entities(TaskError.id, TaskError.fazzing_id,
TaskError.create_time, TaskError.data,
TaskError.name, TaskError.status,
TaskError.update_time, TaskError.comment).order_by(TaskError.status.desc(), TaskError.update_time.desc(), TaskError.id.desc()).all()
data_message = [{'id': error['id'], "fuzzing_id": error['fuzzing_id'],
data_message = [{'id': error['id'], "fuzzing_id": error['fazzing_id'],
"name": error['name'], "data": error['data'],
'create_time': error['create_time'].strftime('%Y-%m-%d %H:%M:%S'),
'update_time': error['update_time'].strftime('%Y-%m-%d %H:%M:%S'),
@ -204,7 +204,7 @@ def New_fuzzing():
# curd.set_error_status_to(list(map(lambda x: x.id, error_list)), db)
# Fuzzing.query.filter_by(id=fuzz.id).delete()
fuzz.data = {'error': "Clone repo Error"}
db.session.commit()
db.commit()
return jsonify({"status": 0, "result": "", "msg": "Clone repo Error"})
wamr_path_parent = fuzz_dir.parent.parent
@ -277,7 +277,7 @@ def scheduler_run_task():
for fuzz in fuzz_query:
all_error = TaskError.query.filter(
TaskError.fuzzing_id == fuzz.id).with_entities(TaskError.name).all()
TaskError.fazzing_id == fuzz.id).with_entities(TaskError.name).all()
fuzz_cmd = wasm_mutator_dir / \
'workspace' / f'build_{fuzz.id}'
dir_list = filter(lambda x: x.startswith(
@ -287,7 +287,7 @@ def scheduler_run_task():
for dir in dir_list:
cmd = f'cd {fuzz_cmd} && ./wasm_mutator_fuzz {dir}'
status, resp = getstatusoutput(cmd)
task_error = TaskError(name=dir, std_out=resp, fuzzing_id=fuzz.id,
task_error = TaskError(name=dir, std_out=resp, fazzing_id=fuzz.id,
create_time=datetime.utcnow() + timedelta(hours=8))
db.session.add(task_error)
db.session.commit()
@ -312,7 +312,7 @@ def get_error_txt():
return jsonify({"status": 0, "results": [], 'msg': "Error"})
error = TaskError.query.get(id)
fuzz_cmd = wasm_mutator_dir / \
'workspace' / f'build_{error.fuzzing_id}'
'workspace' / f'build_{error.fazzing_id}'
file_cmd = fuzz_cmd / error.name
response = send_file(file_cmd, as_attachment=True,
@ -351,7 +351,7 @@ def get_cases_zip():
with ZipFile(memory_file, "w", ZIP_DEFLATED) as zf:
for task_error in task_query:
fuzz_cmd = wasm_mutator_dir / \
'workspace' / f'build_{task_error.fuzzing_id}'
'workspace' / f'build_{task_error.fazzing_id}'
file_cmd = fuzz_cmd / task_error.name
zf.write(str(file_cmd), arcname=task_error.name)
memory_file.seek(0)
@ -399,7 +399,7 @@ def error_restart():
if run_status:
return jsonify({"status": 0, "results": [], 'msg': "There are already tasks in progress"})
task_query = TaskError.query.filter(TaskError.id.in_(id_list)).all()
fuzzing_id = task_query[0].fuzzing_id
fuzzing_id = task_query[0].fazzing_id
fuzz_cmd = wasm_mutator_dir / \
'workspace' / f'build_{fuzzing_id}'
restart_cmd = wasm_mutator_dir / \
@ -412,7 +412,7 @@ def error_restart():
if not Path(restart_cmd / 'wamr').exists():
print('------ error: clone repo not folder exists ------')
# fuzz.data = {'error': "Clone repo Error"}
db.session.commit()
db.commit()
return jsonify({"status": 0, "result": "", "msg": "Clone repo Error"})
wamr_path_parent = fuzz_dir.parent.parent
wamr_path = wamr_path_parent / 'wamr'

View File

@ -17,7 +17,7 @@ git apply ../../../wamr-test-suites/spec-test-script/gc_ignore_cases.patch
# Set OCaml compiler environment
eval $(opam config env)
echo "compile the reference interpreter"
echo "compile the reference intepreter"
pushd interpreter
make
popd
popd

View File

@ -9,7 +9,7 @@ import os
from collections import OrderedDict
def CLI_ARGS_GENERATOR(running_modes_supported: list[str]) -> list[str]:
def CLI_ARGS_GENREATOR(running_modes_supported: list[str]) -> list[str]:
res = []
list_2d = [["--default-running-mode={} --module-running-mode={}".format(i, j)
for i in running_modes_supported] for j in running_modes_supported]
@ -35,16 +35,16 @@ def main():
]
# Python 3.7+: Dictionary iteration order is guaranteed to be in order of insertion.
# just to be safe, using OrderedDict
# just to be safe, using orderreddict
# key: value -> compile mode, {"compile_flag": CMake compile flag, "iwasm_cli_args": array of CLI args tested}
test_options = OrderedDict({
"INTERP": {"compile_flag": COMPILE_FLAGS[0], "cli_args": CLI_ARGS_GENERATOR(RUNNING_MODES[:1])},
"FAST_JIT": {"compile_flag": COMPILE_FLAGS[1], "cli_args": CLI_ARGS_GENERATOR(RUNNING_MODES[:2])},
"INTERP": {"compile_flag": COMPILE_FLAGS[0], "cli_args": CLI_ARGS_GENREATOR(RUNNING_MODES[:1])},
"FAST_JIT": {"compile_flag": COMPILE_FLAGS[1], "cli_args": CLI_ARGS_GENREATOR(RUNNING_MODES[:2])},
"LLVM_JIT": {"compile_flag": COMPILE_FLAGS[2],
"cli_args": CLI_ARGS_GENERATOR([RUNNING_MODES[0], RUNNING_MODES[2]])},
"MULTI_TIER_JIT": {"compile_flag": COMPILE_FLAGS[3], "cli_args": CLI_ARGS_GENERATOR(RUNNING_MODES)},
"cli_args": CLI_ARGS_GENREATOR([RUNNING_MODES[0], RUNNING_MODES[2]])},
"MULTI_TIER_JIT": {"compile_flag": COMPILE_FLAGS[3], "cli_args": CLI_ARGS_GENREATOR(RUNNING_MODES)},
"EAGER_JIT_WITH_BOTH_JIT": {"compile_flag": COMPILE_FLAGS[4],
"cli_args": CLI_ARGS_GENERATOR(RUNNING_MODES[:3])}
"cli_args": CLI_ARGS_GENREATOR(RUNNING_MODES[:3])}
})
build_cmd = "./build_c_embed.sh \"{build_flag}\""

View File

@ -29,7 +29,7 @@ def main():
]
# Python 3.7+: Dictionary iteration order is guaranteed to be in order of insertion.
# just to be safe, using OrderedDict
# just to be safe, using orderreddict
# key: value -> compile mode, {"compile_flag": CMake compile flag, "iwasm_cli_args": array of CLI args tested}
test_options = OrderedDict({
"INTERP": {"compile_flag": COMPILE_FLAGS[0], "iwasm_cli_args": IWASM_CLI_ARGS[:1]},

View File

@ -31,7 +31,7 @@ class memory64_atomic_test_suite : public testing::TestWithParam<RunningMode>
return true;
fail:
if (module)
if (!module)
wasm_runtime_unload(module);
return false;
@ -56,8 +56,6 @@ class memory64_atomic_test_suite : public testing::TestWithParam<RunningMode>
if (exec_env)
wasm_runtime_destroy_exec_env(exec_env);
if (module_inst)
wasm_runtime_deinstantiate(module_inst);
if (module)
wasm_runtime_unload(module);
return false;
}

View File

@ -31,7 +31,7 @@ class memory64_test_suite : public testing::TestWithParam<RunningMode>
return true;
fail:
if (module)
if (!module)
wasm_runtime_unload(module);
return false;
@ -56,13 +56,11 @@ class memory64_test_suite : public testing::TestWithParam<RunningMode>
if (exec_env)
wasm_runtime_destroy_exec_env(exec_env);
if (module_inst)
wasm_runtime_deinstantiate(module_inst);
if (module)
wasm_runtime_unload(module);
return false;
}
void destroy_exec_env()
void destory_exec_env()
{
wasm_runtime_destroy_exec_env(exec_env);
wasm_runtime_deinstantiate(module_inst);
@ -203,7 +201,7 @@ TEST_P(memory64_test_suite, memory_8GB)
i64 = 0xbeefdead;
ASSERT_EQ(i64, GET_U64_FROM_ADDR(wasm_argv));
destroy_exec_env();
destory_exec_env();
}
TEST_P(memory64_test_suite, mem64_from_clang)
@ -230,7 +228,7 @@ TEST_P(memory64_test_suite, mem64_from_clang)
i32 = 0x109;
ASSERT_EQ(i32, wasm_argv[0]);
destroy_exec_env();
destory_exec_env();
}
INSTANTIATE_TEST_CASE_P(RunningMode, memory64_test_suite,

View File

@ -21,7 +21,7 @@ std::string TEST_WASM1 = "/hello.wasm";
std::string TEST_WASM2 = "/mytest.wasm";
char *WASM_FILE_1;
char *WASM_FILE_2;
std::vector<RunningMode> running_mode_supported = { Mode_Interp,
std::vector<RunningMode> running_mode_supportted = { Mode_Interp,
#if WASM_ENABLE_FAST_JIT != 0
Mode_Fast_JIT,
#endif
@ -76,7 +76,7 @@ class wasm_running_modes_test_suite : public testing::TestWithParam<RunningMode>
return true;
fail:
if (module)
if (!module)
wasm_runtime_unload(module);
return false;
@ -101,13 +101,11 @@ class wasm_running_modes_test_suite : public testing::TestWithParam<RunningMode>
if (exec_env)
wasm_runtime_destroy_exec_env(exec_env);
if (module_inst)
wasm_runtime_deinstantiate(module_inst);
if (module)
wasm_runtime_unload(module);
return false;
}
void destroy_exec_env()
void destory_exec_env()
{
wasm_runtime_destroy_exec_env(exec_env);
wasm_runtime_deinstantiate(module_inst);
@ -141,7 +139,7 @@ class wasm_running_modes_test_suite : public testing::TestWithParam<RunningMode>
ASSERT_TRUE(ret);
ASSERT_EQ(10, wasm_argv[0]);
destroy_exec_env();
destory_exec_env();
}
void run_wasm_complex(char *filename1, char *filename2,
@ -170,7 +168,7 @@ class wasm_running_modes_test_suite : public testing::TestWithParam<RunningMode>
ASSERT_TRUE(ret);
ASSERT_EQ(10, wasm_argv[0]);
destroy_exec_env();
destory_exec_env();
/* run wasm file 2 in running_mode */
ret = load_wasm_file(filename2);
@ -186,7 +184,7 @@ class wasm_running_modes_test_suite : public testing::TestWithParam<RunningMode>
ret = wasm_runtime_call_wasm(exec_env, main, 2, wasm_argv);
ASSERT_TRUE(ret);
destroy_exec_env();
destory_exec_env();
}
public:
@ -248,7 +246,7 @@ TEST_F(wasm_running_modes_test_suite, wasm_runtime_is_running_mode_supported)
// normal situation
ASSERT_EQ(true, wasm_runtime_is_running_mode_supported(
static_cast<RunningMode>(Mode_Default)));
for (auto running_mode : running_mode_supported) {
for (auto running_mode : running_mode_supportted) {
ASSERT_EQ(true, wasm_runtime_is_running_mode_supported(running_mode));
}
@ -266,7 +264,7 @@ TEST_F(wasm_running_modes_test_suite, wasm_runtime_set_default_running_mode)
// normal situation: only set up
ASSERT_EQ(true, wasm_runtime_set_default_running_mode(
static_cast<RunningMode>(Mode_Default)));
for (auto running_mode : running_mode_supported) {
for (auto running_mode : running_mode_supportted) {
ASSERT_EQ(true, wasm_runtime_set_default_running_mode(running_mode));
}
@ -298,13 +296,13 @@ TEST_P(wasm_running_modes_test_suite,
wasm_runtime_set_and_get_running_mode_complex)
{
RunningMode default_running_mode = GetParam();
for (auto running_mode : running_mode_supported) {
for (auto running_mode : running_mode_supportted) {
run_wasm_complex(WASM_FILE_1, WASM_FILE_2, default_running_mode,
running_mode);
}
}
INSTANTIATE_TEST_CASE_P(RunningMode, wasm_running_modes_test_suite,
testing::ValuesIn(running_mode_supported));
testing::ValuesIn(running_mode_supportted));
}
}

View File

@ -362,31 +362,31 @@ function sightglass_test()
function setup_wabt()
{
# please sync with .github/actions/install-wasi-sdk-wabt/action.yml
case ${PLATFORM} in
cosmopolitan)
;;
linux)
WABT_URL=https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-ubuntu-20.04.tar.gz
WABT_VERSION=1.0.37
;;
darwin)
WABT_URL=https://github.com/WebAssembly/wabt/releases/download/1.0.36/wabt-1.0.36-macos-12.tar.gz
WABT_VERSION=1.0.36
;;
windows)
WABT_URL=https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-windows.tar.gz
WABT_VERSION=1.0.37
;;
*)
echo "wabt platform for ${PLATFORM} in unknown"
exit 1
;;
esac
if [ ${WABT_BINARY_RELEASE} == "YES" ]; then
echo "download a binary release and install"
local WAT2WASM=${WORK_DIR}/wabt/out/gcc/Release/wat2wasm
if [ ! -f ${WAT2WASM} ]; then
case ${PLATFORM} in
cosmopolitan)
;;
linux)
WABT_URL=https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-ubuntu-20.04.tar.gz
WABT_VERSION=1.0.37
;;
darwin)
WABT_URL=https://github.com/WebAssembly/wabt/releases/download/1.0.36/wabt-1.0.36-macos-12.tar.gz
WABT_VERSION=1.0.36
;;
windows)
WABT_URL=https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-windows.tar.gz
WABT_VERSION=1.0.37
;;
*)
echo "wabt platform for ${PLATFORM} in unknown"
exit 1
;;
esac
pushd /tmp
wget -O wabt-tar.gz --progress=dot:giga ${WABT_URL}
tar xf wabt-tar.gz
@ -414,7 +414,7 @@ function setup_wabt()
function compile_reference_interpreter()
{
echo "compile the reference interpreter"
echo "compile the reference intepreter"
pushd interpreter
make
if [ $? -ne 0 ]

View File

@ -1,15 +0,0 @@
#! /bin/sh
# Copyright (C) 2025 Midokura Japan KK. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set -e
PREFIX=${1:-/tmp/wamr}
WASI_SDK=${WASI_SDK:-/opt/wasi-sdk}
cmake -B build-lib \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk.cmake \
-DCMAKE_INSTALL_PREFIX=${PREFIX} \
.
cmake --build build-lib -t install

View File

@ -1,33 +0,0 @@
#! /bin/sh
# Copyright (C) 2025 Midokura Japan KK. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set -e
PREFIX=${1:-/tmp/wamr}
WASI_SDK=${WASI_SDK:-/opt/wasi-sdk}
cmake -B build-app-nn \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/nn
cmake --build build-app-nn
cmake -B build-app-nn-cli \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/nn-cli
cmake --build build-app-nn-cli
cmake -B build-app-socket-nslookup \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk-pthread.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/socket-nslookup
cmake --build build-app-socket-nslookup
cmake -B build-app-socket-tcp-udp \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk-pthread.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/socket-tcp-udp
cmake --build build-app-socket-tcp-udp

View File

@ -266,8 +266,7 @@ set_input(char *options)
wasi_ephemeral_nn_error nnret;
wasi_ephemeral_nn_graph_execution_context c =
map_get(&contexts, context_id);
tensor.data.buf = buf;
tensor.data.size = sz;
tensor.data = buf;
nnret = wasi_ephemeral_nn_set_input(c, idx, &tensor);
unmap_file(buf, sz);
if (nnret != wasi_ephemeral_nn_error_success) {

View File

@ -147,8 +147,7 @@ main(int argc, char **argv)
wasi_ephemeral_nn_tensor tensor = {
.dimensions = { .buf = (uint32_t[]){1, 3, 224, 224,}, .size = 4, },
.type = wasi_ephemeral_nn_type_fp32,
.data.buf = tensordata,
.data.size = tensordatasz,
.data = tensordata,
};
nnret = wasi_ephemeral_nn_set_input(ctx, 0, &tensor);
unmap_file(tensordata, tensordatasz);

View File

@ -13,12 +13,6 @@ target_include_directories(wamr-wasi-socket
$<BUILD_INTERFACE:${wasi_socket_header_dir}>
$<INSTALL_INTERFACE:include>)
# as this is a library, be extra conservative about wasm features
# to improve compatibilities. as this particular library is just a
# simple static stub, extra wasm features won't benefit us much anyway.
# note that LLVM-19 enables reference-types by default.
target_compile_options(wamr-wasi-socket PRIVATE -mno-reference-types)
install(TARGETS wamr-wasi-socket
EXPORT wamr-wasi-socket-config
PUBLIC_HEADER DESTINATION include)

View File

@ -5,7 +5,35 @@
set -e
PREFIX=${1:-/tmp/wamr}
PREFIX=/tmp/wamr
WASI_SDK=${WASI_SDK:-/opt/wasi-sdk}
./build_libs.sh ${PREFIX}
./build_samples.sh ${PREFIX}
cmake -B build-lib \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk.cmake \
-DCMAKE_INSTALL_PREFIX=${PREFIX} \
.
cmake --build build-lib -t install
cmake -B build-app-nn \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/nn
cmake --build build-app-nn
cmake -B build-app-nn-cli \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/nn-cli
cmake --build build-app-nn-cli
cmake -B build-app-socket-nslookup \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk-pthread.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/socket-nslookup
cmake --build build-app-socket-nslookup
cmake -B build-app-socket-tcp-udp \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk-pthread.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/socket-tcp-udp
cmake --build build-app-socket-tcp-udp