Compare commits


1 Commit

Author      SHA1          Message                             Date
liang.he    86be34c387    Merge c1df02fddb into 5478d267f4    2025-06-12 10:52:13 +08:00
60 changed files with 711 additions and 1635 deletions

View File

@ -30,23 +30,14 @@ runs:
if: ${{ startsWith(inputs.os, 'ubuntu') }} if: ${{ startsWith(inputs.os, 'ubuntu') }}
shell: bash shell: bash
run: | run: |
echo "Downloading wasi-sdk for Ubuntu..."
sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-linux.tar.gz sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-linux.tar.gz
echo "Extracting wasi-sdk..."
sudo tar -xf wasi-sdk.tar.gz sudo tar -xf wasi-sdk.tar.gz
sudo ln -sf wasi-sdk-25.0-x86_64-linux/ wasi-sdk sudo ln -sf wasi-sdk-25.0-x86_64-linux/ wasi-sdk
echo "Downloading wabt for Ubuntu..."
sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-ubuntu-20.04.tar.gz sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-ubuntu-20.04.tar.gz
echo "Extracting wabt..."
sudo tar -xf wabt.tar.gz sudo tar -xf wabt.tar.gz
sudo ln -sf wabt-1.0.37 wabt sudo ln -sf wabt-1.0.37 wabt
/opt/wasi-sdk/bin/clang --version /opt/wasi-sdk/bin/clang --version
/opt/wabt/bin/wasm-interp --version /opt/wabt/bin/wasm-interp --version
echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on ubuntu" echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on ubuntu"
working-directory: /opt working-directory: /opt
@ -54,23 +45,14 @@ runs:
if: ${{ inputs.os == 'macos-13' }} if: ${{ inputs.os == 'macos-13' }}
shell: bash shell: bash
run: | run: |
echo "Downloading wasi-sdk for macOS-13..."
sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-macos.tar.gz sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-macos.tar.gz
echo "Extracting wasi-sdk..."
sudo tar -xf wasi-sdk.tar.gz sudo tar -xf wasi-sdk.tar.gz
sudo ln -sf wasi-sdk-25.0-x86_64-macos wasi-sdk sudo ln -sf wasi-sdk-25.0-x86_64-macos wasi-sdk
echo "Downloading wabt for macOS-13..."
sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.36/wabt-1.0.36-macos-12.tar.gz sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.36/wabt-1.0.36-macos-12.tar.gz
echo "Extracting wabt..."
sudo tar -xf wabt.tar.gz sudo tar -xf wabt.tar.gz
sudo ln -sf wabt-1.0.36 wabt sudo ln -sf wabt-1.0.36 wabt
/opt/wasi-sdk/bin/clang --version /opt/wasi-sdk/bin/clang --version
/opt/wabt/bin/wasm-interp --version /opt/wabt/bin/wasm-interp --version
echo "::notice::wasi-sdk-25 and wabt-1.0.36 installed on macos-13" echo "::notice::wasi-sdk-25 and wabt-1.0.36 installed on macos-13"
working-directory: /opt working-directory: /opt
@ -78,48 +60,21 @@ runs:
if: ${{ inputs.os == 'macos-14' }} if: ${{ inputs.os == 'macos-14' }}
shell: bash shell: bash
run: | run: |
echo "Downloading wasi-sdk for macOS-14..."
sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-arm64-macos.tar.gz sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-arm64-macos.tar.gz
echo "Extracting wasi-sdk..."
sudo tar -xf wasi-sdk.tar.gz sudo tar -xf wasi-sdk.tar.gz
sudo ln -sf wasi-sdk-25.0-arm64-macos wasi-sdk sudo ln -sf wasi-sdk-25.0-arm64-macos wasi-sdk
echo "Downloading wabt for macOS-14..."
sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-macos-14.tar.gz sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-macos-14.tar.gz
echo "Extracting wabt..."
sudo tar -xf wabt.tar.gz sudo tar -xf wabt.tar.gz
sudo ln -sf wabt-1.0.37 wabt sudo ln -sf wabt-1.0.37 wabt
/opt/wasi-sdk/bin/clang --version /opt/wasi-sdk/bin/clang --version
/opt/wabt/bin/wasm-interp --version /opt/wabt/bin/wasm-interp --version
echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on macos-14" echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on macos-14"
working-directory: /opt working-directory: /opt
#TODO: Add support for Windows
- name: Set up wasi-sdk and wabt on Windows - name: Set up wasi-sdk and wabt on Windows
if: ${{ startsWith(inputs.os, 'windows') }} if: ${{ startsWith(inputs.os, 'windows') }}
shell: bash shell: powershell
run: | run: |
choco install -y wget echo "::notice::Support for Windows is not implemented yet"
exit 1
mkdir -p /opt/wasi-sdk
mkdir -p /opt/wabt
echo "Downloading wasi-sdk for Windows..."
wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-windows.tar.gz
echo "Extracting wasi-sdk..."
tar --strip-components=1 -xf wasi-sdk.tar.gz -C /opt/wasi-sdk
echo "Downloading wabt for Windows..."
wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-windows.tar.gz
echo "Extracting wabt..."
tar --strip-components=1 -xf wabt.tar.gz -C /opt/wabt
/opt/wasi-sdk/bin/clang --version
/opt/wabt/bin/wasm-interp --version
echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on Windows"

View File

@ -23,7 +23,7 @@ on:
type: string type: string
required: true required: true
upload_url: upload_url:
description: upload binary assets to the URL of release description: a semantic version number. it is required when `release` is true.
type: string type: string
required: false required: false
ver_num: ver_num:

View File

@ -23,7 +23,7 @@ on:
type: string type: string
required: true required: true
upload_url: upload_url:
description: upload binary assets to the URL of release description: a semantic version number. it is required when `release` is true.
type: string type: string
required: false required: false
ver_num: ver_num:

View File

@ -53,7 +53,7 @@ jobs:
# Initializes the CodeQL tools for scanning. # Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL - name: Initialize CodeQL
uses: github/codeql-action/init@v3.29.0 uses: github/codeql-action/init@v3.28.19
with: with:
languages: ${{ matrix.language }} languages: ${{ matrix.language }}
@ -70,7 +70,7 @@ jobs:
- run: | - run: |
./.github/scripts/codeql_buildscript.sh ./.github/scripts/codeql_buildscript.sh
- name: Perform CodeQL Analysis - name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3.29.0 uses: github/codeql-action/analyze@v3.28.19
with: with:
category: "/language:${{matrix.language}}" category: "/language:${{matrix.language}}"
upload: false upload: false
@ -99,7 +99,7 @@ jobs:
output: ${{ steps.step1.outputs.sarif-output }}/cpp.sarif output: ${{ steps.step1.outputs.sarif-output }}/cpp.sarif
- name: Upload CodeQL results to code scanning - name: Upload CodeQL results to code scanning
uses: github/codeql-action/upload-sarif@v3.29.0 uses: github/codeql-action/upload-sarif@v3.28.19
with: with:
sarif_file: ${{ steps.step1.outputs.sarif-output }} sarif_file: ${{ steps.step1.outputs.sarif-output }}
category: "/language:${{matrix.language}}" category: "/language:${{matrix.language}}"

View File

@ -618,6 +618,49 @@ jobs:
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2204.outputs.cache_key }} llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2204.outputs.cache_key }}
running_mode: aot running_mode: aot
test_option: $WAMR_COMPILER_TEST_OPTIONS test_option: $WAMR_COMPILER_TEST_OPTIONS
exclude:
# incompatible modes and features
# classic-interp doesn't support simd
- running_mode: "classic-interp"
test_option: $SIMD_TEST_OPTIONS
# llvm jit doesn't support multi module
- running_mode: "jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
# fast-jit doesn't support multi module, simd
- running_mode: "fast-jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
- running_mode: "fast-jit"
test_option: $SIMD_TEST_OPTIONS
# multi-tier-jit doesn't support multi module, simd
- running_mode: "multi-tier-jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $SIMD_TEST_OPTIONS
# fast-jit and multi-tier-jit don't support GC
- running_mode: "fast-jit"
test_option: $GC_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $GC_TEST_OPTIONS
# fast-interp, fast-jit, llvm-jit, multi-tier-jit don't support Memory64
- running_mode: "fast-interp"
test_option: $MEMORY64_TEST_OPTIONS
- running_mode: "fast-jit"
test_option: $MEMORY64_TEST_OPTIONS
- running_mode: "jit"
test_option: $MEMORY64_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $MEMORY64_TEST_OPTIONS
# aot, fast-interp, fast-jit, llvm-jit, multi-tier-jit don't support Multi Memory
- running_mode: "aot"
test_option: $MULTI_MEMORY_TEST_OPTIONS
- running_mode: "fast-interp"
test_option: $MULTI_MEMORY_TEST_OPTIONS
- running_mode: "fast-jit"
test_option: $MULTI_MEMORY_TEST_OPTIONS
- running_mode: "jit"
test_option: $MULTI_MEMORY_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $MULTI_MEMORY_TEST_OPTIONS
steps: steps:
- name: checkout - name: checkout
@ -721,3 +764,123 @@ jobs:
eval $(opam env) eval $(opam env)
./test_wamr.sh ${{ env.X86_32_TARGET_TEST_OPTIONS }} ${{ matrix.test_option }} -t ${{ matrix.running_mode }} ./test_wamr.sh ${{ env.X86_32_TARGET_TEST_OPTIONS }} ${{ matrix.test_option }} -t ${{ matrix.running_mode }}
working-directory: ./tests/wamr-test-suites working-directory: ./tests/wamr-test-suites
test-wamr-ide:
needs:
[
build_iwasm
]
runs-on: ubuntu-22.04
env:
PYTHON_VERSION: '3.10'
PYTHON_UBUNTU_STANDALONE_BUILD: https://github.com/indygreg/python-build-standalone/releases/download/20230507/cpython-3.10.11+20230507-x86_64-unknown-linux-gnu-install_only.tar.gz
steps:
- name: checkout
uses: actions/checkout@v4
- name: install dependencies
run: |
rustup target add wasm32-wasip1
sudo apt update && sudo apt-get install -y lld ninja-build
npm install
working-directory: test-tools/wamr-ide/VSCode-Extension
- name: code style check
run: |
npm install --save-dev prettier
npm run prettier-format-check
working-directory: test-tools/wamr-ide/VSCode-Extension
- name: build iwasm with source debugging feature
run: |
mkdir build
cd build
cmake .. -DWAMR_BUILD_DEBUG_INTERP=1 -DWAMR_BUILD_REF_TYPES=1
make
working-directory: product-mini/platforms/linux
- name: Cache LLDB
id: cache-lldb
uses: actions/cache@v4
env:
cache-name: cache-lldb-vscode
with:
path: test-tools/wamr-ide/VSCode-Extension/resource/debug/linux
key: ${{ env.cache-name }}-${{ hashFiles('build-scripts/lldb_wasm.patch') }}-${{ env.PYTHON_UBUNTU_STANDALONE_BUILD }}
- if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }}
name: get stand-alone python ubuntu
run: |
wget ${{ env.PYTHON_UBUNTU_STANDALONE_BUILD }} -O python.tar.gz
tar -xvf python.tar.gz
working-directory: core/deps
- if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }}
name: download llvm
run: |
wget https://github.com/llvm/llvm-project/archive/1f27fe6128769f00197925c3b8f6abb9d0e5cd2e.zip
unzip -q 1f27fe6128769f00197925c3b8f6abb9d0e5cd2e.zip
mv llvm-project-1f27fe6128769f00197925c3b8f6abb9d0e5cd2e llvm-project
working-directory: core/deps
- if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }}
name: apply wamr patch
run: |
git init
git config user.email "action@github.com"
git config user.name "github action"
git apply ../../../build-scripts/lldb_wasm.patch
working-directory: core/deps/llvm-project
- if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }}
name: build lldb ubuntu
run: |
echo "start to build lldb..."
mkdir -p wamr-lldb
cmake -S ./llvm -B build \
-G Ninja \
-DCMAKE_INSTALL_PREFIX=../wamr-lldb \
-DCMAKE_BUILD_TYPE:STRING="Release" \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
-DLLVM_ENABLE_PROJECTS="clang;lldb" \
-DLLVM_TARGETS_TO_BUILD:STRING="X86;WebAssembly" \
-DLLVM_BUILD_BENCHMARKS:BOOL=OFF \
-DLLVM_BUILD_DOCS:BOOL=OFF \
-DLLVM_BUILD_EXAMPLES:BOOL=OFF \
-DLLVM_BUILD_LLVM_DYLIB:BOOL=OFF \
-DLLVM_BUILD_TESTS:BOOL=OFF \
-DLLVM_INCLUDE_BENCHMARKS:BOOL=OFF \
-DLLVM_INCLUDE_DOCS:BOOL=OFF \
-DLLVM_INCLUDE_EXAMPLES:BOOL=OFF \
-DLLVM_INCLUDE_TESTS:BOOL=OFF \
-DLLVM_ENABLE_BINDINGS:BOOL=OFF \
-DLLVM_ENABLE_LIBXML2:BOOL=ON \
-DLLVM_ENABLE_LLD:BOOL=ON \
-DLLDB_ENABLE_PYTHON:BOOL=ON \
-DLLDB_EMBED_PYTHON_HOME=ON \
-DLLDB_PYTHON_HOME=.. \
-DLLDB_PYTHON_RELATIVE_PATH=lib/lldb-python \
-DPython3_EXECUTABLE="$(pwd)/../python/bin/python${{ env.PYTHON_VERSION }}"
cmake --build build --target lldb install --parallel $(nproc)
working-directory: core/deps/llvm-project
- if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }}
name: copy lldb to extension folder
run: |
mkdir -p bin
mkdir -p lib
cp ../../../../../../core/deps/llvm-project/lldb/tools/lldb-vscode/package.json ./
cp -r ../../../../../../core/deps/llvm-project/lldb/tools/lldb-vscode/syntaxes/ ./
cp ../../../../../../core/deps/llvm-project/build/bin/lldb* bin
cp ../../../../../../core/deps/llvm-project/build/lib/liblldb*.so lib
cp ../../../../../../core/deps/llvm-project/build/lib/liblldb*.so.* lib
cp -R ../../../../../../core/deps/llvm-project/build/lib/lldb-python lib
cp -R ../../../../../../core/deps/python/lib/python* lib
cp ../../../../../../core/deps/python/lib/libpython${{ env.PYTHON_VERSION }}.so.1.0 lib
working-directory: test-tools/wamr-ide/VSCode-Extension/resource/debug/linux
- name: run tests
timeout-minutes: 5
run: xvfb-run npm run test
working-directory: test-tools/wamr-ide/VSCode-Extension

View File

@ -288,24 +288,30 @@ jobs:
sudo swapon /swapfile sudo swapon /swapfile
sudo swapon --show sudo swapon --show
- name: run spec tests with retry - name: run spec tests
id: run_spec_tests
uses: nick-fields/retry@v3
with:
command: |
cd ./tests/wamr-test-suites
source /opt/intel/sgxsdk/environment
./test_wamr.sh ${{ matrix.test_option }} -t ${{ matrix.running_mode }}
max_attempts: 3
retry_on: error
shell: bash
timeout_minutes: 10
- name: print test results
run: | run: |
echo "Test results:" set +e
echo "${{ steps.run_spec_tests.outputs.stdout }}" source /opt/intel/sgxsdk/environment
echo "${{ steps.run_spec_tests.outputs.stderr }}" attempts=0
echo "Exit code: ${{ steps.run_spec_tests.outputs.exit_code }}" max_attempts=3
echo "Exit error: ${{ steps.run_spec_tests.outputs.exit_error }}"
shell: bash while [ $attempts -lt $max_attempts ]; do
./test_wamr.sh ${{ matrix.test_option }} -t ${{ matrix.running_mode }}
exitcode="$?"
if [ $exitcode -eq 0 ]; then
echo "Spec test passed"
exit 0
elif [ $exitcode -ne 143 ]; then
echo "Spec test failed with error code $exitcode"
exit 1
fi
echo "$exitcode is a known GitHub-hosted runner issue"
echo "::notice::Re-running the spec test due to error code 143"
attempts=$((attempts + 1))
done
echo "::notice::Report an error with code 143 in SGX CI after $max_attempts attempts"
exit 143
working-directory: ./tests/wamr-test-suites

View File

@ -172,10 +172,6 @@ jobs:
run: ./build.sh run: ./build.sh
working-directory: ./core/iwasm/libraries/lib-wasi-threads/test/ working-directory: ./core/iwasm/libraries/lib-wasi-threads/test/
- name: install wget
shell: bash
run: choco install wget
- name: run tests - name: run tests
shell: bash shell: bash
timeout-minutes: 20 timeout-minutes: 20

View File

@ -36,11 +36,12 @@ env:
LLVM_EAGER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=0 -DWAMR_BUILD_FAST_JIT=0 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=0" LLVM_EAGER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=0 -DWAMR_BUILD_FAST_JIT=0 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=0"
MULTI_TIER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=1 -DWAMR_BUILD_FAST_JIT=1 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=1" MULTI_TIER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=1 -DWAMR_BUILD_FAST_JIT=1 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=1"
# For Spec Test # For Spec Test
DEFAULT_TEST_OPTIONS: "-s spec -b -P" # FIXME: use binary release(adding -b) instead of building from source after upgrading to 22.04
MULTI_MODULES_TEST_OPTIONS: "-s spec -b -P -M" DEFAULT_TEST_OPTIONS: "-s spec -P"
SIMD_TEST_OPTIONS: "-s spec -b -P -S" MULTI_MODULES_TEST_OPTIONS: "-s spec -M -P"
THREADS_TEST_OPTIONS: "-s spec -b -P -p" SIMD_TEST_OPTIONS: "-s spec -S -P"
X86_32_TARGET_TEST_OPTIONS: "-m x86_32" THREADS_TEST_OPTIONS: "-s spec -p -P"
X86_32_TARGET_TEST_OPTIONS: "-m x86_32 -P"
WASI_TEST_OPTIONS: "-s wasi_certification -w" WASI_TEST_OPTIONS: "-s wasi_certification -w"
permissions: permissions:
@ -616,11 +617,24 @@ jobs:
sanitizer: tsan sanitizer: tsan
- running_mode: "multi-tier-jit" - running_mode: "multi-tier-jit"
sanitizer: tsan sanitizer: tsan
# simd128.h brings ubsan errors # classic-interp and fast-interp don't support simd
# like: negation of XXXcannot be represented in type 'long int'; - running_mode: "classic-interp"
# cast to an unsigned type to negate this value to itself test_option: $SIMD_TEST_OPTIONS
- running_mode: "fast-interp" - running_mode: "fast-interp"
sanitizer: ubsan test_option: $SIMD_TEST_OPTIONS
# llvm jit doesn't support multi module
- running_mode: "jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
# fast-jit doesn't support multi module, simd
- running_mode: "fast-jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
- running_mode: "fast-jit"
test_option: $SIMD_TEST_OPTIONS
# multi-tier-jit doesn't support multi module, simd
- running_mode: "multi-tier-jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $SIMD_TEST_OPTIONS
steps: steps:
- name: checkout - name: checkout
uses: actions/checkout@v4 uses: actions/checkout@v4

View File

@ -60,6 +60,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard. # Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning" - name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@2847b7f7ab9f48fc49eca90a53fff6007285f399 uses: github/codeql-action/upload-sarif@b1e4dc3db58c9601794e22a9f6d28d45461b9dbf
with: with:
sarif_file: results.sarif sarif_file: results.sarif

View File

@ -99,9 +99,9 @@ if (NOT DEFINED WAMR_BUILD_LIB_WASI_THREADS)
set (WAMR_BUILD_LIB_WASI_THREADS 0) set (WAMR_BUILD_LIB_WASI_THREADS 0)
endif () endif ()
if (NOT DEFINED WAMR_BUILD_COPY_CALL_STACK) if (NOT DEFINED WAMR_ENABLE_COPY_CALLSTACK)
# Disable copy callstack by default # Disable copy callstack by default
set (WAMR_BUILD_COPY_CALL_STACK 0) set (WAMR_ENABLE_COPY_CALLSTACK 0)
endif() endif()
if (NOT DEFINED WAMR_BUILD_MINI_LOADER) if (NOT DEFINED WAMR_BUILD_MINI_LOADER)

View File

@ -497,7 +497,7 @@
- wasm loader: Fix handling if block without op else (#3404) - wasm loader: Fix handling if block without op else (#3404)
- ref-types: Correct default value for function local variables (#3397) - ref-types: Correct default value for function local variables (#3397)
- aot compiler: Fix the length type passed to aot_memmove/aot_memset (#3378) - aot compiler: Fix the length type passed to aot_memmove/aot_memset (#3378)
- Fix loader and mini-loader select potential error (#3374) - Fix loader and mini-loader select potiential error (#3374)
- Fix aot debugger compilation error on windows (#3370) - Fix aot debugger compilation error on windows (#3370)
- A few native stack detection fixes for macOS/arm64 (#3368) - A few native stack detection fixes for macOS/arm64 (#3368)
- Fix ESP32-S3 compiling error (#3359) - Fix ESP32-S3 compiling error (#3359)

View File

@ -334,10 +334,15 @@ if (WAMR_BUILD_SHARED_HEAP EQUAL 1)
add_definitions (-DWASM_ENABLE_SHARED_HEAP=1) add_definitions (-DWASM_ENABLE_SHARED_HEAP=1)
message (" Shared heap enabled") message (" Shared heap enabled")
endif() endif()
if (WAMR_BUILD_COPY_CALL_STACK EQUAL 1)
add_definitions (-DWASM_ENABLE_COPY_CALL_STACK=1) if (WAMR_ENABLE_COPY_CALLSTACK EQUAL 1)
add_definitions (-DWAMR_ENABLE_COPY_CALLSTACK=1)
message(" Copy callstack enabled") message(" Copy callstack enabled")
else ()
add_definitions (-DWAMR_ENABLE_COPY_CALLSTACK=0)
message(" Copy callstack disabled")
endif() endif()
if (WAMR_BUILD_MEMORY64 EQUAL 1) if (WAMR_BUILD_MEMORY64 EQUAL 1)
# if native is 32-bit or cross-compiled to 32-bit # if native is 32-bit or cross-compiled to 32-bit
if (NOT WAMR_BUILD_TARGET MATCHES ".*64.*") if (NOT WAMR_BUILD_TARGET MATCHES ".*64.*")
@ -534,9 +539,6 @@ if (WAMR_BUILD_WASI_NN EQUAL 1)
if (DEFINED WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH) if (DEFINED WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH)
add_definitions (-DWASM_WASI_NN_EXTERNAL_DELEGATE_PATH="${WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH}") add_definitions (-DWASM_WASI_NN_EXTERNAL_DELEGATE_PATH="${WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH}")
endif () endif ()
if (NOT DEFINED WAMR_BUILD_WASI_EPHEMERAL_NN)
set(WAMR_BUILD_WASI_EPHEMERAL_NN 1)
endif()
if (WAMR_BUILD_WASI_EPHEMERAL_NN EQUAL 1) if (WAMR_BUILD_WASI_EPHEMERAL_NN EQUAL 1)
message (" WASI-NN: use 'wasi_ephemeral_nn' instead of 'wasi-nn'") message (" WASI-NN: use 'wasi_ephemeral_nn' instead of 'wasi-nn'")
add_definitions (-DWASM_ENABLE_WASI_EPHEMERAL_NN=1) add_definitions (-DWASM_ENABLE_WASI_EPHEMERAL_NN=1)

View File

@ -106,7 +106,6 @@ endif ()
if (WAMR_BUILD_WASI_NN EQUAL 1) if (WAMR_BUILD_WASI_NN EQUAL 1)
include (${IWASM_DIR}/libraries/wasi-nn/cmake/wasi_nn.cmake) include (${IWASM_DIR}/libraries/wasi-nn/cmake/wasi_nn.cmake)
set (WAMR_BUILD_MODULE_INST_CONTEXT 1)
endif () endif ()
if (WAMR_BUILD_LIB_PTHREAD EQUAL 1) if (WAMR_BUILD_LIB_PTHREAD EQUAL 1)

View File

@ -193,8 +193,8 @@
#error "Heap aux stack allocation must be enabled for WASI threads" #error "Heap aux stack allocation must be enabled for WASI threads"
#endif #endif
#ifndef WASM_ENABLE_COPY_CALL_STACK #ifndef WAMR_ENABLE_COPY_CALLSTACK
#define WASM_ENABLE_COPY_CALL_STACK 0 #define WAMR_ENABLE_COPY_CALLSTACK 0
#endif #endif
#ifndef WASM_ENABLE_BASE_LIB #ifndef WASM_ENABLE_BASE_LIB

View File

@ -1309,13 +1309,6 @@ load_init_expr(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
read_uint32(buf, buf_end, type_idx); read_uint32(buf, buf_end, type_idx);
read_uint32(buf, buf_end, length); read_uint32(buf, buf_end, length);
if (type_idx >= module->type_count
|| !wasm_type_is_array_type(module->types[type_idx])) {
set_error_buf(error_buf, error_buf_size,
"invalid or non-array type index.");
goto fail;
}
if (init_expr_type == INIT_EXPR_TYPE_ARRAY_NEW_DEFAULT) { if (init_expr_type == INIT_EXPR_TYPE_ARRAY_NEW_DEFAULT) {
expr->u.array_new_default.type_index = type_idx; expr->u.array_new_default.type_index = type_idx;
expr->u.array_new_default.length = length; expr->u.array_new_default.length = length;
@ -1730,12 +1723,6 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
(void)u8; (void)u8;
read_uint32(buf, buf_end, j); read_uint32(buf, buf_end, j);
#if WASM_ENABLE_AOT_VALIDATOR != 0
if (j >= module->type_count) {
set_error_buf(error_buf, error_buf_size, "invalid type index");
goto fail;
}
#endif
if (module->types[j]->ref_count == UINT16_MAX) { if (module->types[j]->ref_count == UINT16_MAX) {
set_error_buf(error_buf, error_buf_size, set_error_buf(error_buf, error_buf_size,
"wasm type's ref count too large"); "wasm type's ref count too large");
@ -1999,13 +1986,6 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
AOTType *cur_type = module->types[j]; AOTType *cur_type = module->types[j];
parent_type_idx = cur_type->parent_type_idx; parent_type_idx = cur_type->parent_type_idx;
if (parent_type_idx != (uint32)-1) { /* has parent */ if (parent_type_idx != (uint32)-1) { /* has parent */
#if WASM_ENABLE_AOT_VALIDATOR != 0
if (parent_type_idx >= module->type_count) {
set_error_buf(error_buf, error_buf_size,
"invalid parent type index");
goto fail;
}
#endif
AOTType *parent_type = module->types[parent_type_idx]; AOTType *parent_type = module->types[parent_type_idx];
module->types[j]->parent_type = parent_type; module->types[j]->parent_type = parent_type;
@ -2029,13 +2009,6 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
AOTType *cur_type = module->types[j]; AOTType *cur_type = module->types[j];
parent_type_idx = cur_type->parent_type_idx; parent_type_idx = cur_type->parent_type_idx;
if (parent_type_idx != (uint32)-1) { /* has parent */ if (parent_type_idx != (uint32)-1) { /* has parent */
#if WASM_ENABLE_AOT_VALIDATOR != 0
if (parent_type_idx >= module->type_count) {
set_error_buf(error_buf, error_buf_size,
"invalid parent type index");
goto fail;
}
#endif
AOTType *parent_type = module->types[parent_type_idx]; AOTType *parent_type = module->types[parent_type_idx];
/* subtyping has been checked during compilation */ /* subtyping has been checked during compilation */
bh_assert(wasm_type_is_subtype_of( bh_assert(wasm_type_is_subtype_of(
@ -3350,7 +3323,7 @@ do_data_relocation(AOTModule *module, AOTRelocationGroup *group,
uint8 *data_addr; uint8 *data_addr;
uint32 data_size = 0, i; uint32 data_size = 0, i;
AOTRelocation *relocation = group->relocations; AOTRelocation *relocation = group->relocations;
void *symbol_addr = NULL; void *symbol_addr;
char *symbol, *data_section_name; char *symbol, *data_section_name;
if (!strncmp(group->section_name, ".rela.", 6)) { if (!strncmp(group->section_name, ".rela.", 6)) {

View File

@ -4137,9 +4137,9 @@ aot_frame_update_profile_info(WASMExecEnv *exec_env, bool alloc_frame)
} }
#endif /* end of WASM_ENABLE_AOT_STACK_FRAME != 0 */ #endif /* end of WASM_ENABLE_AOT_STACK_FRAME != 0 */
#if WASM_ENABLE_COPY_CALL_STACK != 0 #if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32 uint32
aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer, aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, const uint32 length, const uint32 skip_n,
char *error_buf, uint32 error_buf_size) char *error_buf, uint32 error_buf_size)
{ {
@ -4193,7 +4193,7 @@ aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
} }
uint32 uint32
aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer, aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, const uint32 length, const uint32 skip_n,
char *error_buf, uint32_t error_buf_size) char *error_buf, uint32_t error_buf_size)
{ {
@ -4243,7 +4243,7 @@ aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
} }
uint32 uint32
aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer, aot_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf, const uint32 length, const uint32 skip_n, char *error_buf,
uint32_t error_buf_size) uint32_t error_buf_size)
{ {
@ -4265,7 +4265,7 @@ aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
error_buf, error_buf_size); error_buf, error_buf_size);
} }
} }
#endif // WASM_ENABLE_COPY_CALL_STACK #endif // WAMR_ENABLE_COPY_CALLSTACK
#if WASM_ENABLE_DUMP_CALL_STACK != 0 #if WASM_ENABLE_DUMP_CALL_STACK != 0
bool bool
@ -4877,8 +4877,8 @@ aot_dump_pgo_prof_data_to_buf(AOTModuleInstance *module_inst, char *buf,
} }
prof_header.magic = 0xFF6C70726F667281LL; prof_header.magic = 0xFF6C70726F667281LL;
/* Version 9 */ /* Version 8 */
prof_header.version = 0x0000000000000009LL; prof_header.version = 0x0000000000000008LL;
/* with VARIANT_MASK_IR_PROF (IR Instrumentation) */ /* with VARIANT_MASK_IR_PROF (IR Instrumentation) */
prof_header.version |= 0x1ULL << 56; prof_header.version |= 0x1ULL << 56;
/* with VARIANT_MASK_MEMPROF (Memory Profile) */ /* with VARIANT_MASK_MEMPROF (Memory Profile) */
@ -4887,19 +4887,14 @@ aot_dump_pgo_prof_data_to_buf(AOTModuleInstance *module_inst, char *buf,
prof_header.num_prof_counters = num_prof_counters; prof_header.num_prof_counters = num_prof_counters;
prof_header.names_size = prof_names_size; prof_header.names_size = prof_names_size;
prof_header.value_kind_last = 1; prof_header.value_kind_last = 1;
/* __llvm_prf_bits won't be used in PGO, set dummy value here */
prof_header.num_prof_bitmaps = 0;
prof_header.bitmap_delta = 0;
if (!is_little_endian()) { if (!is_little_endian()) {
aot_exchange_uint64((uint8 *)&prof_header.magic); aot_exchange_uint64((uint8 *)&prof_header.magic);
aot_exchange_uint64((uint8 *)&prof_header.version); aot_exchange_uint64((uint8 *)&prof_header.version);
aot_exchange_uint64((uint8 *)&prof_header.num_prof_data); aot_exchange_uint64((uint8 *)&prof_header.num_prof_data);
aot_exchange_uint64((uint8 *)&prof_header.num_prof_counters); aot_exchange_uint64((uint8 *)&prof_header.num_prof_counters);
aot_exchange_uint64((uint8 *)&prof_header.num_prof_bitmaps);
aot_exchange_uint64((uint8 *)&prof_header.names_size); aot_exchange_uint64((uint8 *)&prof_header.names_size);
aot_exchange_uint64((uint8 *)&prof_header.counters_delta); aot_exchange_uint64((uint8 *)&prof_header.counters_delta);
aot_exchange_uint64((uint8 *)&prof_header.bitmap_delta);
aot_exchange_uint64((uint8 *)&prof_header.value_kind_last); aot_exchange_uint64((uint8 *)&prof_header.value_kind_last);
} }
@ -4917,23 +4912,19 @@ aot_dump_pgo_prof_data_to_buf(AOTModuleInstance *module_inst, char *buf,
prof_data_64->func_md5 = prof_data->func_md5; prof_data_64->func_md5 = prof_data->func_md5;
prof_data_64->func_hash = prof_data->func_hash; prof_data_64->func_hash = prof_data->func_hash;
prof_data_64->offset_counters = prof_data->offset_counters; prof_data_64->offset_counters = prof_data->offset_counters;
prof_data_64->offset_bitmaps = prof_data->offset_bitmaps;
prof_data_64->func_ptr = prof_data->func_ptr; prof_data_64->func_ptr = prof_data->func_ptr;
prof_data_64->values = (uint64)(uintptr_t)prof_data->values; prof_data_64->values = (uint64)(uintptr_t)prof_data->values;
prof_data_64->num_counters = prof_data->num_counters; prof_data_64->num_counters = prof_data->num_counters;
/* __llvm_prf_bits won't be used in PGO, set dummy value here */
prof_data_64->num_bitmaps = 0;
prof_data_64->num_value_sites[0] = prof_data->num_value_sites[0]; prof_data_64->num_value_sites[0] = prof_data->num_value_sites[0];
prof_data_64->num_value_sites[1] = prof_data->num_value_sites[1]; prof_data_64->num_value_sites[1] = prof_data->num_value_sites[1];
if (!is_little_endian()) { if (!is_little_endian()) {
aot_exchange_uint64((uint8 *)&prof_data_64->func_hash); aot_exchange_uint64((uint8 *)&prof_data_64->func_hash);
aot_exchange_uint64((uint8 *)&prof_data_64->offset_counters); aot_exchange_uint64((uint8 *)&prof_data_64->offset_counters);
aot_exchange_uint64((uint8 *)&prof_data_64->offset_bitmaps); aot_exchange_uint64((uint8 *)&prof_data_64->offset_counters);
aot_exchange_uint64((uint8 *)&prof_data_64->func_ptr); aot_exchange_uint64((uint8 *)&prof_data_64->func_ptr);
aot_exchange_uint64((uint8 *)&prof_data_64->values); aot_exchange_uint64((uint8 *)&prof_data_64->values);
aot_exchange_uint32((uint8 *)&prof_data_64->num_counters); aot_exchange_uint32((uint8 *)&prof_data_64->num_counters);
aot_exchange_uint32((uint8 *)&prof_data_64->num_bitmaps);
aot_exchange_uint16((uint8 *)&prof_data_64->num_value_sites[0]); aot_exchange_uint16((uint8 *)&prof_data_64->num_value_sites[0]);
aot_exchange_uint16((uint8 *)&prof_data_64->num_value_sites[1]); aot_exchange_uint16((uint8 *)&prof_data_64->num_value_sites[1]);
} }

View File

@ -437,9 +437,6 @@ typedef struct AOTFrame {
} AOTFrame; } AOTFrame;
#if WASM_ENABLE_STATIC_PGO != 0 #if WASM_ENABLE_STATIC_PGO != 0
/* The bitmaps fields in LLVMProfileRawHeader, LLVMProfileData,
* LLVMProfileData_64 all dummy fields, it's used in MC/DC code coverage
* instead of PGO. See https://llvm.org/docs/InstrProfileFormat.html#bitmap */
typedef struct LLVMProfileRawHeader { typedef struct LLVMProfileRawHeader {
uint64 magic; uint64 magic;
uint64 version; uint64 version;
@ -448,11 +445,8 @@ typedef struct LLVMProfileRawHeader {
uint64 padding_bytes_before_counters; uint64 padding_bytes_before_counters;
uint64 num_prof_counters; uint64 num_prof_counters;
uint64 padding_bytes_after_counters; uint64 padding_bytes_after_counters;
uint64 num_prof_bitmaps;
uint64 padding_bytes_after_bitmaps;
uint64 names_size; uint64 names_size;
uint64 counters_delta; uint64 counters_delta;
uint64 bitmap_delta;
uint64 names_delta; uint64 names_delta;
uint64 value_kind_last; uint64 value_kind_last;
} LLVMProfileRawHeader; } LLVMProfileRawHeader;
@ -470,12 +464,10 @@ typedef struct LLVMProfileData {
uint64 func_md5; uint64 func_md5;
uint64 func_hash; uint64 func_hash;
uint64 offset_counters; uint64 offset_counters;
uint64 offset_bitmaps;
uintptr_t func_ptr; uintptr_t func_ptr;
ValueProfNode **values; ValueProfNode **values;
uint32 num_counters; uint32 num_counters;
uint16 num_value_sites[2]; uint16 num_value_sites[2];
uint32 num_bitmaps;
} LLVMProfileData; } LLVMProfileData;
/* The profiling data for writing to the output file, the width of /* The profiling data for writing to the output file, the width of
@ -485,12 +477,10 @@ typedef struct LLVMProfileData_64 {
uint64 func_md5; uint64 func_md5;
uint64 func_hash; uint64 func_hash;
uint64 offset_counters; uint64 offset_counters;
uint64 offset_bitmaps;
uint64 func_ptr; uint64 func_ptr;
uint64 values; uint64 values;
uint32 num_counters; uint32 num_counters;
uint16 num_value_sites[2]; uint16 num_value_sites[2];
uint32 num_bitmaps;
} LLVMProfileData_64; } LLVMProfileData_64;
#endif /* end of WASM_ENABLE_STATIC_PGO != 0 */ #endif /* end of WASM_ENABLE_STATIC_PGO != 0 */
@ -787,12 +777,12 @@ aot_frame_update_profile_info(WASMExecEnv *exec_env, bool alloc_frame);
bool bool
aot_create_call_stack(struct WASMExecEnv *exec_env); aot_create_call_stack(struct WASMExecEnv *exec_env);
#if WASM_ENABLE_COPY_CALL_STACK != 0 #if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32 uint32
aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer, aot_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf, const uint32 length, const uint32 skip_n, char *error_buf,
uint32_t error_buf_size); uint32_t error_buf_size);
#endif // WASM_ENABLE_COPY_CALL_STACK #endif // WAMR_ENABLE_COPY_CALLSTACK
/** /**
* @brief Dump wasm call stack or get the size * @brief Dump wasm call stack or get the size

View File

@ -1145,7 +1145,7 @@ wasm_reftype_is_subtype_of(uint8 type1, const WASMRefType *ref_type1,
return true; return true;
else { else {
int32 heap_type = ref_type1->ref_ht_common.heap_type; int32 heap_type = ref_type1->ref_ht_common.heap_type;
// We don't care whether type2 is nullable or not. So // We dont care whether type2 is nullable or not. So
// we normalize it into its related one-byte type. // we normalize it into its related one-byte type.
if (type2 == REF_TYPE_HT_NULLABLE if (type2 == REF_TYPE_HT_NULLABLE
|| type2 == REF_TYPE_HT_NON_NULLABLE) { || type2 == REF_TYPE_HT_NON_NULLABLE) {

View File

@ -1743,9 +1743,9 @@ wasm_runtime_destroy_exec_env(WASMExecEnv *exec_env)
wasm_exec_env_destroy(exec_env); wasm_exec_env_destroy(exec_env);
} }
#if WASM_ENABLE_COPY_CALL_STACK != 0 #if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32 uint32
wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer, wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf, const uint32 length, const uint32 skip_n, char *error_buf,
uint32_t error_buf_size) uint32_t error_buf_size)
{ {
@ -1780,7 +1780,7 @@ wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer,
strncpy(error_buf, err_msg, error_buf_size); strncpy(error_buf, err_msg, error_buf_size);
return 0; return 0;
} }
#endif // WASM_ENABLE_COPY_CALL_STACK #endif // WAMR_ENABLE_COPY_CALLSTACK
bool bool
wasm_runtime_init_thread_env(void) wasm_runtime_init_thread_env(void)

View File

@ -758,12 +758,12 @@ wasm_runtime_create_exec_env(WASMModuleInstanceCommon *module_inst,
WASM_RUNTIME_API_EXTERN void WASM_RUNTIME_API_EXTERN void
wasm_runtime_destroy_exec_env(WASMExecEnv *exec_env); wasm_runtime_destroy_exec_env(WASMExecEnv *exec_env);
#if WASM_ENABLE_COPY_CALL_STACK != 0 #if WAMR_ENABLE_COPY_CALLSTACK != 0
WASM_RUNTIME_API_EXTERN uint32_t WASM_RUNTIME_API_EXTERN uint32_t
wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer, wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf, const uint32 length, const uint32 skip_n, char *error_buf,
uint32 error_buf_size); uint32 error_buf_size);
#endif // WASM_ENABLE_COPY_CALL_STACK #endif // WAMR_ENABLE_COPY_CALLSTACK
/* See wasm_export.h for description */ /* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN WASMModuleInstanceCommon * WASM_RUNTIME_API_EXTERN WASMModuleInstanceCommon *

View File

@ -3378,12 +3378,6 @@ aot_resolve_object_data_sections(AOTObjectData *obj_data)
bh_memcpy_s(data_section->name, size, buf, size); bh_memcpy_s(data_section->name, size, buf, size);
data_section->is_name_allocated = true; data_section->is_name_allocated = true;
} }
else if (obj_data->comp_ctx->enable_llvm_pgo
&& !strcmp(name, "__llvm_prf_bits")) {
LOG_WARNING("__llvm_prf_bits section is not supported and "
"shouldn't be used in PGO.");
return false;
}
if (obj_data->comp_ctx->enable_llvm_pgo if (obj_data->comp_ctx->enable_llvm_pgo
&& !strcmp(name, "__llvm_prf_names")) { && !strcmp(name, "__llvm_prf_names")) {

View File

@ -139,6 +139,8 @@ typedef struct wasm_frame_t {
uint32_t *lp; uint32_t *lp;
} WASMCApiFrame; } WASMCApiFrame;
typedef WASMCApiFrame wasm_frame_t;
/* WASM section */ /* WASM section */
typedef struct wasm_section_t { typedef struct wasm_section_t {
struct wasm_section_t *next; struct wasm_section_t *next;
@ -902,7 +904,7 @@ wasm_runtime_destroy_exec_env(wasm_exec_env_t exec_env);
* @return number of copied frames * @return number of copied frames
*/ */
WASM_RUNTIME_API_EXTERN uint32_t WASM_RUNTIME_API_EXTERN uint32_t
wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer, wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer,
const uint32_t length, const uint32_t skip_n, const uint32_t length, const uint32_t skip_n,
char *error_buf, uint32_t error_buf_size); char *error_buf, uint32_t error_buf_size);
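
The hunk above only changes how the frame-buffer parameter is spelled (WASMCApiFrame versus its wasm_frame_t alias); the call shape and the documented return value (number of copied frames) are the same in both variants. Purely as a hypothetical illustration of how an embedder might call this API when the copy-callstack feature is compiled in, here is a sketch; the function name, buffer sizes and error handling are invented for this example and are not part of the change:

/* Sketch only: assumes WAMR was built with the copy-callstack option
 * enabled (see the WAMR_ENABLE_COPY_CALLSTACK handling elsewhere in this
 * changeset) and that exec_env is a valid wasm_exec_env_t. */
#include <stdint.h>
#include <stdio.h>
#include "wasm_export.h"

#define MAX_FRAMES 32 /* arbitrary buffer size for this sketch */

static void
dump_callstack_sketch(wasm_exec_env_t exec_env)
{
    wasm_frame_t frames[MAX_FRAMES];
    char error_buf[128] = { 0 };

    /* Copy at most MAX_FRAMES frames, skipping none; per the declaration
     * above, the return value is the number of frames actually copied,
     * with error_buf filled in on failure. */
    uint32_t copied =
        wasm_copy_callstack(exec_env, frames, MAX_FRAMES, 0 /* skip_n */,
                            error_buf, (uint32_t)sizeof(error_buf));
    if (copied == 0) {
        fprintf(stderr, "wasm_copy_callstack failed: %s\n", error_buf);
        return;
    }
    printf("copied %u call-stack frame(s)\n", copied);
}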

View File

@ -2588,8 +2588,7 @@ load_table_import(const uint8 **p_buf, const uint8 *buf_end,
error_buf_size)) { error_buf_size)) {
return false; return false;
} }
if (!wasm_is_type_reftype(ref_type.ref_type) if (wasm_is_reftype_htref_non_nullable(ref_type.ref_type)) {
|| wasm_is_reftype_htref_non_nullable(ref_type.ref_type)) {
set_error_buf(error_buf, error_buf_size, "type mismatch"); set_error_buf(error_buf, error_buf_size, "type mismatch");
return false; return false;
} }
@ -3115,15 +3114,6 @@ load_table(const uint8 **p_buf, const uint8 *buf_end, WASMModule *module,
error_buf_size)) { error_buf_size)) {
return false; return false;
} }
/*
* TODO: add this validator
* `wasm_is_reftype_htref_non_nullable(ref_type.ref_type)`
* after sync up with the latest GC spec
*/
if (!wasm_is_type_reftype(ref_type.ref_type)) {
set_error_buf(error_buf, error_buf_size, "type mismatch");
return false;
}
table->table_type.elem_type = ref_type.ref_type; table->table_type.elem_type = ref_type.ref_type;
if (need_ref_type_map) { if (need_ref_type_map) {
if (!(table->table_type.elem_ref_type = if (!(table->table_type.elem_ref_type =
@ -3712,7 +3702,7 @@ load_function_section(const uint8 *buf, const uint8 *buf_end,
* we shall make a copy of code body [p_code, p_code + code_size] * we shall make a copy of code body [p_code, p_code + code_size]
* when we are worrying about inappropriate releasing behaviour. * when we are worrying about inappropriate releasing behaviour.
* all code bodies are actually in a buffer which user allocates in * all code bodies are actually in a buffer which user allocates in
* their embedding environment and we don't have power over them. * his embedding environment and we don't have power on them.
* it will be like: * it will be like:
* code_body_cp = malloc(code_size); * code_body_cp = malloc(code_size);
* memcpy(code_body_cp, p_code, code_size); * memcpy(code_body_cp, p_code, code_size);

View File

@ -1226,7 +1226,7 @@ load_function_section(const uint8 *buf, const uint8 *buf_end,
* we shall make a copy of code body [p_code, p_code + code_size] * we shall make a copy of code body [p_code, p_code + code_size]
* when we are worrying about inappropriate releasing behaviour. * when we are worrying about inappropriate releasing behaviour.
* all code bodies are actually in a buffer which user allocates in * all code bodies are actually in a buffer which user allocates in
* their embedding environment and we don't have power over them. * his embedding environment and we don't have power on them.
* it will be like: * it will be like:
* code_body_cp = malloc(code_size); * code_body_cp = malloc(code_size);
* memcpy(code_body_cp, p_code, code_size); * memcpy(code_body_cp, p_code, code_size);

View File

@ -4195,9 +4195,9 @@ wasm_get_module_inst_mem_consumption(const WASMModuleInstance *module_inst,
#endif /* end of (WASM_ENABLE_MEMORY_PROFILING != 0) \ #endif /* end of (WASM_ENABLE_MEMORY_PROFILING != 0) \
|| (WASM_ENABLE_MEMORY_TRACING != 0) */ || (WASM_ENABLE_MEMORY_TRACING != 0) */
#if WASM_ENABLE_COPY_CALL_STACK != 0 #if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32 uint32
wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer, wasm_interp_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
uint32 length, uint32 skip_n, char *error_buf, uint32 length, uint32 skip_n, char *error_buf,
uint32_t error_buf_size) uint32_t error_buf_size)
{ {
@ -4242,7 +4242,7 @@ wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
} }
return count >= skip_n ? count - skip_n : 0; return count >= skip_n ? count - skip_n : 0;
} }
#endif // WASM_ENABLE_COPY_CALL_STACK #endif // WAMR_ENABLE_COPY_CALLSTACK
#if WASM_ENABLE_DUMP_CALL_STACK != 0 #if WASM_ENABLE_DUMP_CALL_STACK != 0
bool bool

View File

@ -731,12 +731,12 @@ wasm_get_table_inst(const WASMModuleInstance *module_inst, uint32 tbl_idx)
#if WASM_ENABLE_DUMP_CALL_STACK != 0 #if WASM_ENABLE_DUMP_CALL_STACK != 0
#if WASM_ENABLE_COPY_CALL_STACK != 0 #if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32 uint32
wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer, wasm_interp_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
uint32 length, uint32 skip_n, char *error_buf, uint32 length, uint32 skip_n, char *error_buf,
uint32_t error_buf_size); uint32_t error_buf_size);
#endif // WASM_ENABLE_COPY_CALL_STACK #endif // WAMR_ENABLE_COPY_CALLSTACK
bool bool
wasm_interp_create_call_stack(struct WASMExecEnv *exec_env); wasm_interp_create_call_stack(struct WASMExecEnv *exec_env);

View File

@ -743,7 +743,7 @@ wasm_debug_instance_get_obj_mem(WASMDebugInstance *instance, uint64 offset,
module_inst = (WASMModuleInstance *)exec_env->module_inst; module_inst = (WASMModuleInstance *)exec_env->module_inst;
if (offset + *size > module_inst->module->load_size) { if (offset + *size > module_inst->module->load_size) {
LOG_VERBOSE("wasm_debug_instance_get_data_mem size overflow!\n"); LOG_VERBOSE("wasm_debug_instance_get_data_mem size over flow!\n");
*size = module_inst->module->load_size >= offset *size = module_inst->module->load_size >= offset
? module_inst->module->load_size - offset ? module_inst->module->load_size - offset
: 0; : 0;
@ -797,7 +797,7 @@ wasm_debug_instance_get_linear_mem(WASMDebugInstance *instance, uint64 offset,
num_bytes_per_page = memory->num_bytes_per_page; num_bytes_per_page = memory->num_bytes_per_page;
linear_mem_size = num_bytes_per_page * memory->cur_page_count; linear_mem_size = num_bytes_per_page * memory->cur_page_count;
if (offset + *size > linear_mem_size) { if (offset + *size > linear_mem_size) {
LOG_VERBOSE("wasm_debug_instance_get_linear_mem size overflow!\n"); LOG_VERBOSE("wasm_debug_instance_get_linear_mem size over flow!\n");
*size = linear_mem_size >= offset ? linear_mem_size - offset : 0; *size = linear_mem_size >= offset ? linear_mem_size - offset : 0;
} }
bh_memcpy_s(buf, (uint32)*size, memory->memory_data + offset, bh_memcpy_s(buf, (uint32)*size, memory->memory_data + offset,
@ -830,7 +830,7 @@ wasm_debug_instance_set_linear_mem(WASMDebugInstance *instance, uint64 offset,
num_bytes_per_page = memory->num_bytes_per_page; num_bytes_per_page = memory->num_bytes_per_page;
linear_mem_size = num_bytes_per_page * memory->cur_page_count; linear_mem_size = num_bytes_per_page * memory->cur_page_count;
if (offset + *size > linear_mem_size) { if (offset + *size > linear_mem_size) {
LOG_VERBOSE("wasm_debug_instance_get_linear_mem size overflow!\n"); LOG_VERBOSE("wasm_debug_instance_get_linear_mem size over flow!\n");
*size = linear_mem_size >= offset ? linear_mem_size - offset : 0; *size = linear_mem_size >= offset ? linear_mem_size - offset : 0;
} }
bh_memcpy_s(memory->memory_data + offset, (uint32)*size, buf, bh_memcpy_s(memory->memory_data + offset, (uint32)*size, buf,

View File

@ -175,19 +175,6 @@ process_wasm_global(WASMGDBServer *server, char *args)
os_mutex_unlock(&tmpbuf_lock); os_mutex_unlock(&tmpbuf_lock);
} }
/* TODO: let server send an empty/error reply.
Original issue: 4265
Not tested yet, but it should work.
*/
static void
send_reply(WASMGDBServer *server, const char *err)
{
if (!err || !*err)
write_packet(server, "");
else
write_packet(server, err);
}
void void
handle_general_query(WASMGDBServer *server, char *payload) handle_general_query(WASMGDBServer *server, char *payload)
{ {
@ -227,7 +214,6 @@ handle_general_query(WASMGDBServer *server, char *payload)
if (!args) { if (!args) {
LOG_ERROR("payload parse error during handle_general_query"); LOG_ERROR("payload parse error during handle_general_query");
send_reply(server, "");
return; return;
} }
@ -398,7 +384,7 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
if (status == 0) { if (status == 0) {
os_mutex_lock(&tmpbuf_lock); os_mutex_lock(&tmpbuf_lock);
(void)snprintf(tmpbuf, MAX_PACKET_SIZE, "W%02" PRIx32, status); (void)snprintf(tmpbuf, MAX_PACKET_SIZE, "W%02" PRIx32, status);
send_reply(server, tmpbuf); write_packet(server, tmpbuf);
os_mutex_unlock(&tmpbuf_lock); os_mutex_unlock(&tmpbuf_lock);
return; return;
} }
@ -417,7 +403,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
"T%02" PRIx32 "thread:%" PRIx64 ";name:%s;", gdb_status, "T%02" PRIx32 "thread:%" PRIx64 ";name:%s;", gdb_status,
(uint64)(uintptr_t)tid, "nobody"); (uint64)(uintptr_t)tid, "nobody");
if (len < 0 || len >= MAX_PACKET_SIZE) { if (len < 0 || len >= MAX_PACKET_SIZE) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock); os_mutex_unlock(&tmpbuf_lock);
return; return;
} }
@ -425,7 +410,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
if (tids_count > 0) { if (tids_count > 0) {
int n = snprintf(tmpbuf + len, MAX_PACKET_SIZE - len, "threads:"); int n = snprintf(tmpbuf + len, MAX_PACKET_SIZE - len, "threads:");
if (n < 0 || n >= MAX_PACKET_SIZE - len) { if (n < 0 || n >= MAX_PACKET_SIZE - len) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock); os_mutex_unlock(&tmpbuf_lock);
return; return;
} }
@ -442,7 +426,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
} }
if (n < 0 || n >= MAX_PACKET_SIZE - len) { if (n < 0 || n >= MAX_PACKET_SIZE - len) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock); os_mutex_unlock(&tmpbuf_lock);
return; return;
} }
@ -469,7 +452,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
"thread-pcs:%" PRIx64 ";00:%s;reason:%s;description:", pc, "thread-pcs:%" PRIx64 ";00:%s;reason:%s;description:", pc,
pc_string, "exception"); pc_string, "exception");
if (n < 0 || n >= MAX_PACKET_SIZE - len) { if (n < 0 || n >= MAX_PACKET_SIZE - len) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock); os_mutex_unlock(&tmpbuf_lock);
return; return;
} }
@ -480,7 +462,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
n = snprintf(tmpbuf + len, MAX_PACKET_SIZE - len, "%02x", n = snprintf(tmpbuf + len, MAX_PACKET_SIZE - len, "%02x",
exception[i]); exception[i]);
if (n < 0 || n >= MAX_PACKET_SIZE - len) { if (n < 0 || n >= MAX_PACKET_SIZE - len) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock); os_mutex_unlock(&tmpbuf_lock);
return; return;
} }
@ -611,7 +592,7 @@ handle_get_register(WASMGDBServer *server, char *payload)
int32 i = strtol(payload, NULL, 16); int32 i = strtol(payload, NULL, 16);
if (i != 0) { if (i != 0) {
send_reply(server, "E01"); write_packet(server, "E01");
return; return;
} }
regdata = wasm_debug_instance_get_pc( regdata = wasm_debug_instance_get_pc(
@ -767,7 +748,7 @@ handle_add_break(WASMGDBServer *server, char *payload)
if ((arg_c = sscanf(payload, "%zx,%" SCNx64 ",%zx", &type, &addr, &length)) if ((arg_c = sscanf(payload, "%zx,%" SCNx64 ",%zx", &type, &addr, &length))
!= 3) { != 3) {
LOG_ERROR("Unsupported number of add break arguments %d", arg_c); LOG_ERROR("Unsupported number of add break arguments %d", arg_c);
send_reply(server, ""); write_packet(server, "");
return; return;
} }
@ -802,7 +783,7 @@ handle_remove_break(WASMGDBServer *server, char *payload)
if ((arg_c = sscanf(payload, "%zx,%" SCNx64 ",%zx", &type, &addr, &length)) if ((arg_c = sscanf(payload, "%zx,%" SCNx64 ",%zx", &type, &addr, &length))
!= 3) { != 3) {
LOG_ERROR("Unsupported number of remove break arguments %d", arg_c); LOG_ERROR("Unsupported number of remove break arguments %d", arg_c);
send_reply(server, ""); write_packet(server, "");
return; return;
} }
@ -854,7 +835,6 @@ handle_malloc(WASMGDBServer *server, char *payload)
} }
else { else {
LOG_ERROR("Payload parse error during handle malloc"); LOG_ERROR("Payload parse error during handle malloc");
send_reply(server, "");
return; return;
} }

View File

@ -4,9 +4,4 @@
*/ */
#define WASM_ENABLE_WASI_EPHEMERAL_NN 1 #define WASM_ENABLE_WASI_EPHEMERAL_NN 1
#define WASI_NN_NAME(name) wasi_ephemeral_nn_##name
#include "wasi_nn.h" #include "wasi_nn.h"
#undef WASM_ENABLE_WASI_EPHEMERAL_NN
#undef WASI_NN_NAME

View File

@ -21,7 +21,6 @@
#else #else
#define WASI_NN_IMPORT(name) \ #define WASI_NN_IMPORT(name) \
__attribute__((import_module("wasi_nn"), import_name(name))) __attribute__((import_module("wasi_nn"), import_name(name)))
#warning You are using "wasi_nn", which is a legacy WAMR-specific ABI. It's deperecated and will likely be removed in future versions of WAMR. Please use "wasi_ephemeral_nn" instead. (For a WASM module, use the wasi_ephemeral_nn.h header instead. For the runtime configurations, enable WASM_ENABLE_WASI_EPHEMERAL_NN/WAMR_BUILD_WASI_EPHEMERAL_NN.)
#endif #endif
/** /**
@ -35,22 +34,17 @@
* @return wasi_nn_error Execution status. * @return wasi_nn_error Execution status.
*/ */
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
WASI_NN_ERROR_TYPE wasi_nn_error
WASI_NN_NAME(load) load(graph_builder *builder, uint32_t builder_len, graph_encoding encoding,
(WASI_NN_NAME(graph_builder) * builder, uint32_t builder_len, execution_target target, graph *g) WASI_NN_IMPORT("load");
WASI_NN_NAME(graph_encoding) encoding, WASI_NN_NAME(execution_target) target,
WASI_NN_NAME(graph) * g) WASI_NN_IMPORT("load");
#else #else
WASI_NN_ERROR_TYPE wasi_nn_error
WASI_NN_NAME(load) load(graph_builder_array *builder, graph_encoding encoding,
(WASI_NN_NAME(graph_builder_array) * builder, execution_target target, graph *g) WASI_NN_IMPORT("load");
WASI_NN_NAME(graph_encoding) encoding, WASI_NN_NAME(execution_target) target,
WASI_NN_NAME(graph) * g) WASI_NN_IMPORT("load");
#endif #endif
WASI_NN_ERROR_TYPE wasi_nn_error
WASI_NN_NAME(load_by_name) load_by_name(const char *name, uint32_t name_len, graph *g)
(const char *name, uint32_t name_len, WASI_NN_NAME(graph) * g)
WASI_NN_IMPORT("load_by_name"); WASI_NN_IMPORT("load_by_name");
/** /**
@ -65,9 +59,8 @@ WASI_NN_NAME(load_by_name)
* @param ctx Execution context. * @param ctx Execution context.
* @return wasi_nn_error Execution status. * @return wasi_nn_error Execution status.
*/ */
WASI_NN_ERROR_TYPE wasi_nn_error
WASI_NN_NAME(init_execution_context) init_execution_context(graph g, graph_execution_context *ctx)
(WASI_NN_NAME(graph) g, WASI_NN_NAME(graph_execution_context) * ctx)
WASI_NN_IMPORT("init_execution_context"); WASI_NN_IMPORT("init_execution_context");
/** /**
@ -78,10 +71,9 @@ WASI_NN_NAME(init_execution_context)
* @param tensor Input tensor. * @param tensor Input tensor.
* @return wasi_nn_error Execution status. * @return wasi_nn_error Execution status.
*/ */
WASI_NN_ERROR_TYPE wasi_nn_error
WASI_NN_NAME(set_input) set_input(graph_execution_context ctx, uint32_t index, tensor *tensor)
(WASI_NN_NAME(graph_execution_context) ctx, uint32_t index, WASI_NN_IMPORT("set_input");
WASI_NN_NAME(tensor) * tensor) WASI_NN_IMPORT("set_input");
/** /**
* @brief Compute the inference on the given inputs. * @brief Compute the inference on the given inputs.
@ -89,9 +81,8 @@ WASI_NN_NAME(set_input)
* @param ctx Execution context. * @param ctx Execution context.
* @return wasi_nn_error Execution status. * @return wasi_nn_error Execution status.
*/ */
WASI_NN_ERROR_TYPE wasi_nn_error
WASI_NN_NAME(compute) compute(graph_execution_context ctx) WASI_NN_IMPORT("compute");
(WASI_NN_NAME(graph_execution_context) ctx) WASI_NN_IMPORT("compute");
/** /**
* @brief Extract the outputs after inference. * @brief Extract the outputs after inference.
@ -106,16 +97,15 @@ WASI_NN_NAME(compute)
* @return wasi_nn_error Execution status. * @return wasi_nn_error Execution status.
*/ */
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
WASI_NN_ERROR_TYPE wasi_nn_error
WASI_NN_NAME(get_output) get_output(graph_execution_context ctx, uint32_t index,
(WASI_NN_NAME(graph_execution_context) ctx, uint32_t index, tensor_data output_tensor, uint32_t output_tensor_max_size,
uint8_t *output_tensor, uint32_t output_tensor_max_size, uint32_t *output_tensor_size) WASI_NN_IMPORT("get_output");
uint32_t *output_tensor_size) WASI_NN_IMPORT("get_output");
#else #else
WASI_NN_ERROR_TYPE wasi_nn_error
WASI_NN_NAME(get_output) get_output(graph_execution_context ctx, uint32_t index,
(graph_execution_context ctx, uint32_t index, uint8_t *output_tensor, tensor_data output_tensor, uint32_t *output_tensor_size)
uint32_t *output_tensor_size) WASI_NN_IMPORT("get_output"); WASI_NN_IMPORT("get_output");
#endif #endif
#endif #endif
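
Taken together, the declarations above describe the same wasi-nn call sequence in two spellings: one routed through the WASI_NN_NAME() prefix macro and one with plain identifiers. Purely as a hypothetical end-to-end sketch of that sequence, using the plain spelling and the non-ephemeral load/get_output shapes shown above (i.e. assuming the header is compiled without WASM_ENABLE_WASI_EPHEMERAL_NN); the model bytes, encoding and target values are placeholders supplied by the caller, not taken from this change:

/* Guest-side sketch only: identifiers follow the unprefixed declarations
 * above; model contents, encoding and target are caller-provided. */
#include <stdint.h>
#include <stdio.h>
#include "wasi_nn.h"

static int
run_inference_sketch(uint8_t *model, uint32_t model_len,
                     graph_encoding encoding, execution_target target,
                     tensor *input, uint8_t *output /* sized by the caller */)
{
    graph g;
    graph_execution_context ctx;
    uint32_t output_size = 0;

    graph_builder builder = { .buf = model, .size = model_len };
    graph_builder_array builders = { .buf = &builder, .size = 1 };

    /* load -> init_execution_context -> set_input -> compute -> get_output */
    if (load(&builders, encoding, target, &g) != success)
        return -1;
    if (init_execution_context(g, &ctx) != success)
        return -1;
    if (set_input(ctx, 0, input) != success)
        return -1;
    if (compute(ctx) != success)
        return -1;
    if (get_output(ctx, 0, output, &output_size) != success)
        return -1;

    printf("inference produced %u output byte(s)\n", output_size);
    return 0;
}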

View File

@ -13,23 +13,6 @@
extern "C" { extern "C" {
#endif #endif
/* our host logic doesn't use any prefix. neither legacy wasi_nn.h does. */
#if !defined(__wasm__) || !defined(WASI_NN_NAME)
#define WASI_NN_NAME(name) name
#define WASI_NN_ERROR_NAME(name) name
#define WASI_NN_TYPE_NAME(name) name
#define WASI_NN_ENCODING_NAME(name) name
#define WASI_NN_TARGET_NAME(name) name
#define WASI_NN_ERROR_TYPE wasi_nn_error
#else
#define WASI_NN_ERROR_NAME(name) WASI_NN_NAME(error_##name)
#define WASI_NN_TYPE_NAME(name) WASI_NN_NAME(type_##name)
#define WASI_NN_ENCODING_NAME(name) WASI_NN_NAME(encoding_##name)
#define WASI_NN_TARGET_NAME(name) WASI_NN_NAME(target_##name)
#define WASI_NN_ERROR_TYPE WASI_NN_NAME(error);
#endif
/** /**
* ERRORS * ERRORS
* *
@ -39,22 +22,22 @@ extern "C" {
// https://github.com/WebAssembly/wasi-nn/blob/71320d95b8c6d43f9af7f44e18b1839db85d89b4/wasi-nn.witx#L5-L17 // https://github.com/WebAssembly/wasi-nn/blob/71320d95b8c6d43f9af7f44e18b1839db85d89b4/wasi-nn.witx#L5-L17
// Error codes returned by functions in this API. // Error codes returned by functions in this API.
typedef enum { typedef enum {
WASI_NN_ERROR_NAME(success) = 0, success = 0,
WASI_NN_ERROR_NAME(invalid_argument), invalid_argument,
WASI_NN_ERROR_NAME(invalid_encoding), invalid_encoding,
WASI_NN_ERROR_NAME(missing_memory), missing_memory,
WASI_NN_ERROR_NAME(busy), busy,
WASI_NN_ERROR_NAME(runtime_error), runtime_error,
WASI_NN_ERROR_NAME(unsupported_operation), unsupported_operation,
WASI_NN_ERROR_NAME(too_large), too_large,
WASI_NN_ERROR_NAME(not_found), not_found,
// for WasmEdge-wasi-nn // for WasmEdge-wasi-nn
WASI_NN_ERROR_NAME(end_of_sequence) = 100, // End of Sequence Found. end_of_sequence = 100, // End of Sequence Found.
WASI_NN_ERROR_NAME(context_full) = 101, // Context Full. context_full = 101, // Context Full.
WASI_NN_ERROR_NAME(prompt_tool_long) = 102, // Prompt Too Long. prompt_tool_long = 102, // Prompt Too Long.
WASI_NN_ERROR_NAME(model_not_found) = 103, // Model Not Found. model_not_found = 103, // Model Not Found.
} WASI_NN_ERROR_TYPE; } wasi_nn_error;
/** /**
* TENSOR * TENSOR
@ -68,27 +51,15 @@ typedef enum {
typedef struct { typedef struct {
uint32_t *buf; uint32_t *buf;
uint32_t size; uint32_t size;
} WASI_NN_NAME(tensor_dimensions); } tensor_dimensions;
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
// sync up with // sync up with
// https://github.com/WebAssembly/wasi-nn/blob/71320d95b8c6d43f9af7f44e18b1839db85d89b4/wasi-nn.witx#L19-L28 // https://github.com/WebAssembly/wasi-nn/blob/71320d95b8c6d43f9af7f44e18b1839db85d89b4/wasi-nn.witx#L19-L28
// The type of the elements in a tensor. // The type of the elements in a tensor.
typedef enum { typedef enum { fp16 = 0, fp32, fp64, u8, i32, i64 } tensor_type;
WASI_NN_TYPE_NAME(fp16) = 0,
WASI_NN_TYPE_NAME(fp32),
WASI_NN_TYPE_NAME(fp64),
WASI_NN_TYPE_NAME(u8),
WASI_NN_TYPE_NAME(i32),
WASI_NN_TYPE_NAME(i64),
} WASI_NN_NAME(tensor_type);
#else #else
typedef enum { typedef enum { fp16 = 0, fp32, up8, ip32 } tensor_type;
WASI_NN_TYPE_NAME(fp16) = 0,
WASI_NN_TYPE_NAME(fp32),
WASI_NN_TYPE_NAME(up8),
WASI_NN_TYPE_NAME(ip32),
} WASI_NN_NAME(tensor_type);
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */ #endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
// The tensor data. // The tensor data.
@ -99,14 +70,7 @@ typedef enum {
// 4-byte f32 elements would have a data array of length 16). Naturally, this // 4-byte f32 elements would have a data array of length 16). Naturally, this
// representation requires some knowledge of how to lay out data in // representation requires some knowledge of how to lay out data in
// memory--e.g., using row-major ordering--and could perhaps be improved. // memory--e.g., using row-major ordering--and could perhaps be improved.
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 && defined(__wasm__) typedef uint8_t *tensor_data;
typedef struct {
uint8_t *buf;
uint32_t size;
} WASI_NN_NAME(tensor_data);
#else
typedef uint8_t *WASI_NN_NAME(tensor_data);
#endif
// A tensor. // A tensor.
typedef struct { typedef struct {
@ -114,16 +78,16 @@ typedef struct {
// represent a tensor containing a single value, use `[1]` for the tensor // represent a tensor containing a single value, use `[1]` for the tensor
// dimensions. // dimensions.
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 && defined(__wasm__) #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 && defined(__wasm__)
WASI_NN_NAME(tensor_dimensions) dimensions; tensor_dimensions dimensions;
#else #else
WASI_NN_NAME(tensor_dimensions) * dimensions; tensor_dimensions *dimensions;
#endif #endif
// Describe the type of element in the tensor (e.g., f32). // Describe the type of element in the tensor (e.g., f32).
uint8_t type; uint8_t type;
uint8_t _pad[3]; uint8_t _pad[3];
// Contains the tensor data. // Contains the tensor data.
WASI_NN_NAME(tensor_data) data; tensor_data data;
} WASI_NN_NAME(tensor); } tensor;
/** /**
* GRAPH * GRAPH
@ -138,15 +102,15 @@ typedef struct {
typedef struct { typedef struct {
uint8_t *buf; uint8_t *buf;
uint32_t size; uint32_t size;
} WASI_NN_NAME(graph_builder); } graph_builder;
typedef struct { typedef struct {
WASI_NN_NAME(graph_builder) * buf; graph_builder *buf;
uint32_t size; uint32_t size;
} WASI_NN_NAME(graph_builder_array); } graph_builder_array;
// An execution graph for performing inference (i.e., a model). // An execution graph for performing inference (i.e., a model).
typedef uint32_t WASI_NN_NAME(graph); typedef uint32_t graph;
// sync up with // sync up with
// https://github.com/WebAssembly/wasi-nn/blob/main/wit/wasi-nn.wit#L75 // https://github.com/WebAssembly/wasi-nn/blob/main/wit/wasi-nn.wit#L75
@ -154,25 +118,21 @@ typedef uint32_t WASI_NN_NAME(graph);
// various backends that encode (i.e., serialize) their graph IR with different // various backends that encode (i.e., serialize) their graph IR with different
// formats. // formats.
typedef enum { typedef enum {
WASI_NN_ENCODING_NAME(openvino) = 0, openvino = 0,
WASI_NN_ENCODING_NAME(onnx), onnx,
WASI_NN_ENCODING_NAME(tensorflow), tensorflow,
WASI_NN_ENCODING_NAME(pytorch), pytorch,
WASI_NN_ENCODING_NAME(tensorflowlite), tensorflowlite,
WASI_NN_ENCODING_NAME(ggml), ggml,
WASI_NN_ENCODING_NAME(autodetect), autodetect,
WASI_NN_ENCODING_NAME(unknown_backend), unknown_backend,
} WASI_NN_NAME(graph_encoding); } graph_encoding;
// Define where the graph should be executed. // Define where the graph should be executed.
typedef enum WASI_NN_NAME(execution_target) { typedef enum execution_target { cpu = 0, gpu, tpu } execution_target;
WASI_NN_TARGET_NAME(cpu) = 0,
WASI_NN_TARGET_NAME(gpu),
WASI_NN_TARGET_NAME(tpu),
} WASI_NN_NAME(execution_target);
// Bind a `graph` to the input and output tensors for an inference. // Bind a `graph` to the input and output tensors for an inference.
typedef uint32_t WASI_NN_NAME(graph_execution_context); typedef uint32_t graph_execution_context;
#ifdef __cplusplus #ifdef __cplusplus
} }
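A short sketch of filling the `tensor` struct defined above, assuming the non-`__wasm__` layout (pointer `dimensions`, `uint8_t *` data); `fill_tensor`, the 1x2x2x1 shape, and the caller-owned buffers are illustrative.

```c
#include <stdint.h>
#include <string.h>

/* Illustrative only: wrap a caller-owned fp32 buffer in a tensor value. */
static void
fill_tensor(tensor *t, tensor_dimensions *dims, uint32_t *dims_buf,
            uint8_t *fp32_bytes)
{
    const uint32_t shape[4] = { 1, 2, 2, 1 };
    memcpy(dims_buf, shape, sizeof(shape));
    dims->buf = dims_buf;
    dims->size = 4;

    memset(t, 0, sizeof(*t)); /* also zeroes the _pad bytes */
    t->dimensions = dims;     /* pointer form of the legacy layout */
    t->type = fp32;           /* element type from tensor_type */
    t->data = fp32_bytes;     /* row-major raw bytes */
}
```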

View File

@ -20,10 +20,6 @@
#include "wasi_nn_types.h" #include "wasi_nn_types.h"
#include "wasm_export.h" #include "wasm_export.h"
#if WASM_ENABLE_WASI_EPHEMERAL_NN == 0
#warning You are using "wasi_nn", which is a legacy WAMR-specific ABI. It's deprecated and will likely be removed in future versions of WAMR. Please use "wasi_ephemeral_nn" instead. (For a WASM module, use the wasi_ephemeral_nn.h header instead. For the runtime configurations, enable WASM_ENABLE_WASI_EPHEMERAL_NN/WAMR_BUILD_WASI_EPHEMERAL_NN.)
#endif
#define HASHMAP_INITIAL_SIZE 20 #define HASHMAP_INITIAL_SIZE 20
#if defined(__APPLE__) #if defined(__APPLE__)
#define LIB_EXTENTION ".dylib" #define LIB_EXTENTION ".dylib"
@ -55,36 +51,65 @@ struct backends_api_functions {
NN_ERR_PRINTF("Error %s() -> %d", #func, wasi_error); \ NN_ERR_PRINTF("Error %s() -> %d", #func, wasi_error); \
} while (0) } while (0)
static void *wasi_nn_key; /* HashMap utils */
static HashMap *hashmap;
static uint32
hash_func(const void *key)
{
// fnv1a_hash
const uint32 FNV_PRIME = 16777619;
const uint32 FNV_OFFSET_BASIS = 2166136261U;
uint32 hash = FNV_OFFSET_BASIS;
const unsigned char *bytes = (const unsigned char *)key;
for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
hash ^= bytes[i];
hash *= FNV_PRIME;
}
return hash;
}
static bool
key_equal_func(void *key1, void *key2)
{
return key1 == key2;
}
static void
key_destroy_func(void *key1)
{
/* key type is wasm_module_inst_t*. do nothing */
}
static void static void
wasi_nn_ctx_destroy(WASINNContext *wasi_nn_ctx) wasi_nn_ctx_destroy(WASINNContext *wasi_nn_ctx)
{ {
NN_DBG_PRINTF("[WASI NN] DEINIT...");
if (wasi_nn_ctx == NULL) { if (wasi_nn_ctx == NULL) {
NN_ERR_PRINTF(
"Error when deallocating memory. WASI-NN context is NULL");
return; return;
} }
NN_DBG_PRINTF("[WASI NN] DEINIT...");
NN_DBG_PRINTF("Freeing wasi-nn"); NN_DBG_PRINTF("Freeing wasi-nn");
NN_DBG_PRINTF("-> is_model_loaded: %d", wasi_nn_ctx->is_model_loaded); NN_DBG_PRINTF("-> is_model_loaded: %d", wasi_nn_ctx->is_model_loaded);
NN_DBG_PRINTF("-> current_encoding: %d", wasi_nn_ctx->backend); NN_DBG_PRINTF("-> current_encoding: %d", wasi_nn_ctx->backend);
bh_assert(!wasi_nn_ctx->busy);
/* deinit() the backend */ /* deinit() the backend */
if (wasi_nn_ctx->is_backend_ctx_initialized) { wasi_nn_error res;
wasi_nn_error res; call_wasi_nn_func(wasi_nn_ctx->backend, deinit, res,
call_wasi_nn_func(wasi_nn_ctx->backend, deinit, res, wasi_nn_ctx->backend_ctx);
wasi_nn_ctx->backend_ctx);
}
os_mutex_destroy(&wasi_nn_ctx->lock);
wasm_runtime_free(wasi_nn_ctx); wasm_runtime_free(wasi_nn_ctx);
} }
static void static void
dtor(wasm_module_inst_t inst, void *ctx) value_destroy_func(void *value)
{ {
wasi_nn_ctx_destroy(ctx); wasi_nn_ctx_destroy((WASINNContext *)value);
} }
bool bool
@ -97,9 +122,12 @@ wasi_nn_initialize()
return false; return false;
} }
wasi_nn_key = wasm_runtime_create_context_key(dtor); // hashmap { instance: wasi_nn_ctx }
if (wasi_nn_key == NULL) { hashmap = bh_hash_map_create(HASHMAP_INITIAL_SIZE, true, hash_func,
NN_ERR_PRINTF("Failed to create context key"); key_equal_func, key_destroy_func,
value_destroy_func);
if (hashmap == NULL) {
NN_ERR_PRINTF("Error while initializing hashmap");
os_mutex_destroy(&wasi_nn_lock); os_mutex_destroy(&wasi_nn_lock);
return false; return false;
} }
@ -120,11 +148,6 @@ wasi_nn_initialize_context()
} }
memset(wasi_nn_ctx, 0, sizeof(WASINNContext)); memset(wasi_nn_ctx, 0, sizeof(WASINNContext));
if (os_mutex_init(&wasi_nn_ctx->lock)) {
NN_ERR_PRINTF("Error when initializing a lock for WASI-NN context");
wasm_runtime_free(wasi_nn_ctx);
return NULL;
}
return wasi_nn_ctx; return wasi_nn_ctx;
} }
@ -133,59 +156,29 @@ static WASINNContext *
wasm_runtime_get_wasi_nn_ctx(wasm_module_inst_t instance) wasm_runtime_get_wasi_nn_ctx(wasm_module_inst_t instance)
{ {
WASINNContext *wasi_nn_ctx = WASINNContext *wasi_nn_ctx =
wasm_runtime_get_context(instance, wasi_nn_key); (WASINNContext *)bh_hash_map_find(hashmap, (void *)instance);
if (wasi_nn_ctx == NULL) { if (wasi_nn_ctx == NULL) {
WASINNContext *newctx = wasi_nn_initialize_context(); wasi_nn_ctx = wasi_nn_initialize_context();
if (newctx == NULL) if (wasi_nn_ctx == NULL)
return NULL;
bool ok =
bh_hash_map_insert(hashmap, (void *)instance, (void *)wasi_nn_ctx);
if (!ok) {
NN_ERR_PRINTF("Error while storing context");
wasi_nn_ctx_destroy(wasi_nn_ctx);
return NULL; return NULL;
os_mutex_lock(&wasi_nn_lock);
wasi_nn_ctx = wasm_runtime_get_context(instance, wasi_nn_key);
if (wasi_nn_ctx == NULL) {
wasm_runtime_set_context_spread(instance, wasi_nn_key, newctx);
wasi_nn_ctx = newctx;
newctx = NULL;
}
os_mutex_unlock(&wasi_nn_lock);
if (newctx != NULL) {
wasi_nn_ctx_destroy(newctx);
} }
} }
return wasi_nn_ctx;
}
static WASINNContext *
lock_ctx(wasm_module_inst_t instance)
{
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
if (wasi_nn_ctx == NULL) {
return NULL;
}
os_mutex_lock(&wasi_nn_ctx->lock);
if (wasi_nn_ctx->busy) {
os_mutex_unlock(&wasi_nn_ctx->lock);
return NULL;
}
wasi_nn_ctx->busy = true;
os_mutex_unlock(&wasi_nn_ctx->lock);
return wasi_nn_ctx; return wasi_nn_ctx;
} }
static void
unlock_ctx(WASINNContext *wasi_nn_ctx)
{
if (wasi_nn_ctx == NULL) {
return;
}
os_mutex_lock(&wasi_nn_ctx->lock);
bh_assert(wasi_nn_ctx->busy);
wasi_nn_ctx->busy = false;
os_mutex_unlock(&wasi_nn_ctx->lock);
}
void void
wasi_nn_destroy() wasi_nn_destroy()
{ {
wasm_runtime_destroy_context_key(wasi_nn_key); // destroy hashmap will destroy keys and values
bh_hash_map_destroy(hashmap);
// close backends' libraries and registered functions // close backends' libraries and registered functions
for (unsigned i = 0; i < sizeof(lookup) / sizeof(lookup[0]); i++) { for (unsigned i = 0; i < sizeof(lookup) / sizeof(lookup[0]); i++) {
@ -404,41 +397,6 @@ detect_and_load_backend(graph_encoding backend_hint,
return ret; return ret;
} }
static wasi_nn_error
ensure_backend(wasm_module_inst_t instance, graph_encoding encoding,
WASINNContext *wasi_nn_ctx)
{
wasi_nn_error res;
graph_encoding loaded_backend = autodetect;
if (!detect_and_load_backend(encoding, &loaded_backend)) {
res = invalid_encoding;
NN_ERR_PRINTF("load backend failed");
goto fail;
}
if (wasi_nn_ctx->is_backend_ctx_initialized) {
if (wasi_nn_ctx->backend != loaded_backend) {
res = unsupported_operation;
goto fail;
}
}
else {
wasi_nn_ctx->backend = loaded_backend;
/* init() the backend */
call_wasi_nn_func(wasi_nn_ctx->backend, init, res,
&wasi_nn_ctx->backend_ctx);
if (res != success)
goto fail;
wasi_nn_ctx->is_backend_ctx_initialized = true;
}
return success;
fail:
return res;
}
/* WASI-NN implementation */ /* WASI-NN implementation */
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
@ -452,8 +410,6 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
graph_encoding encoding, execution_target target, graph *g) graph_encoding encoding, execution_target target, graph *g)
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */ #endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
{ {
wasi_nn_error res;
NN_DBG_PRINTF("[WASI NN] LOAD [encoding=%d, target=%d]...", encoding, NN_DBG_PRINTF("[WASI NN] LOAD [encoding=%d, target=%d]...", encoding,
target); target);
@ -461,23 +417,18 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
if (!instance) if (!instance)
return runtime_error; return runtime_error;
WASINNContext *wasi_nn_ctx = lock_ctx(instance); wasi_nn_error res;
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
graph_builder_array builder_native = { 0 }; graph_builder_array builder_native = { 0 };
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
if (success if (success
!= (res = graph_builder_array_app_native( != (res = graph_builder_array_app_native(
instance, builder, builder_wasm_size, &builder_native))) instance, builder, builder_wasm_size, &builder_native)))
goto fail; return res;
#else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */ #else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */
if (success if (success
!= (res = graph_builder_array_app_native(instance, builder, != (res = graph_builder_array_app_native(instance, builder,
&builder_native))) &builder_native)))
goto fail; return res;
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */ #endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
if (!wasm_runtime_validate_native_addr(instance, g, if (!wasm_runtime_validate_native_addr(instance, g,
@ -487,7 +438,19 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
goto fail; goto fail;
} }
res = ensure_backend(instance, encoding, wasi_nn_ctx); graph_encoding loaded_backend = autodetect;
if (!detect_and_load_backend(encoding, &loaded_backend)) {
res = invalid_encoding;
NN_ERR_PRINTF("load backend failed");
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_ctx->backend = loaded_backend;
/* init() the backend */
call_wasi_nn_func(wasi_nn_ctx->backend, init, res,
&wasi_nn_ctx->backend_ctx);
if (res != success) if (res != success)
goto fail; goto fail;
@ -502,7 +465,6 @@ fail:
// XXX: Free intermediate structure pointers // XXX: Free intermediate structure pointers
if (builder_native.buf) if (builder_native.buf)
wasm_runtime_free(builder_native.buf); wasm_runtime_free(builder_native.buf);
unlock_ctx(wasi_nn_ctx);
return res; return res;
} }
@ -511,8 +473,6 @@ wasi_nn_error
wasi_nn_load_by_name(wasm_exec_env_t exec_env, char *name, uint32_t name_len, wasi_nn_load_by_name(wasm_exec_env_t exec_env, char *name, uint32_t name_len,
graph *g) graph *g)
{ {
wasi_nn_error res;
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env); wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
if (!instance) { if (!instance) {
return runtime_error; return runtime_error;
@ -536,26 +496,29 @@ wasi_nn_load_by_name(wasm_exec_env_t exec_env, char *name, uint32_t name_len,
NN_DBG_PRINTF("[WASI NN] LOAD_BY_NAME %s...", name); NN_DBG_PRINTF("[WASI NN] LOAD_BY_NAME %s...", name);
WASINNContext *wasi_nn_ctx = lock_ctx(instance); graph_encoding loaded_backend = autodetect;
if (wasi_nn_ctx == NULL) { if (!detect_and_load_backend(autodetect, &loaded_backend)) {
res = busy; NN_ERR_PRINTF("load backend failed");
goto fail; return invalid_encoding;
} }
res = ensure_backend(instance, autodetect, wasi_nn_ctx); WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_ctx->backend = loaded_backend;
wasi_nn_error res;
/* init() the backend */
call_wasi_nn_func(wasi_nn_ctx->backend, init, res,
&wasi_nn_ctx->backend_ctx);
if (res != success) if (res != success)
goto fail; return res;
call_wasi_nn_func(wasi_nn_ctx->backend, load_by_name, res, call_wasi_nn_func(wasi_nn_ctx->backend, load_by_name, res,
wasi_nn_ctx->backend_ctx, name, name_len, g); wasi_nn_ctx->backend_ctx, name, name_len, g);
if (res != success) if (res != success)
goto fail; return res;
wasi_nn_ctx->is_model_loaded = true; wasi_nn_ctx->is_model_loaded = true;
res = success; return success;
fail:
unlock_ctx(wasi_nn_ctx);
return res;
} }
wasi_nn_error wasi_nn_error
@ -563,8 +526,6 @@ wasi_nn_load_by_name_with_config(wasm_exec_env_t exec_env, char *name,
int32_t name_len, char *config, int32_t name_len, char *config,
int32_t config_len, graph *g) int32_t config_len, graph *g)
{ {
wasi_nn_error res;
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env); wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
if (!instance) { if (!instance) {
return runtime_error; return runtime_error;
@ -593,28 +554,30 @@ wasi_nn_load_by_name_with_config(wasm_exec_env_t exec_env, char *name,
NN_DBG_PRINTF("[WASI NN] LOAD_BY_NAME_WITH_CONFIG %s %s...", name, config); NN_DBG_PRINTF("[WASI NN] LOAD_BY_NAME_WITH_CONFIG %s %s...", name, config);
WASINNContext *wasi_nn_ctx = lock_ctx(instance); graph_encoding loaded_backend = autodetect;
if (wasi_nn_ctx == NULL) { if (!detect_and_load_backend(autodetect, &loaded_backend)) {
res = busy; NN_ERR_PRINTF("load backend failed");
goto fail; return invalid_encoding;
} }
res = ensure_backend(instance, autodetect, wasi_nn_ctx); WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_ctx->backend = loaded_backend;
wasi_nn_error res;
/* init() the backend */
call_wasi_nn_func(wasi_nn_ctx->backend, init, res,
&wasi_nn_ctx->backend_ctx);
if (res != success) if (res != success)
goto fail; return res;
;
call_wasi_nn_func(wasi_nn_ctx->backend, load_by_name_with_config, res, call_wasi_nn_func(wasi_nn_ctx->backend, load_by_name_with_config, res,
wasi_nn_ctx->backend_ctx, name, name_len, config, wasi_nn_ctx->backend_ctx, name, name_len, config,
config_len, g); config_len, g);
if (res != success) if (res != success)
goto fail; return res;
wasi_nn_ctx->is_model_loaded = true; wasi_nn_ctx->is_model_loaded = true;
res = success; return success;
fail:
unlock_ctx(wasi_nn_ctx);
return res;
} }
wasi_nn_error wasi_nn_error
@ -628,27 +591,20 @@ wasi_nn_init_execution_context(wasm_exec_env_t exec_env, graph g,
return runtime_error; return runtime_error;
} }
wasi_nn_error res; WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx))) if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail; return res;
if (!wasm_runtime_validate_native_addr( if (!wasm_runtime_validate_native_addr(
instance, ctx, (uint64)sizeof(graph_execution_context))) { instance, ctx, (uint64)sizeof(graph_execution_context))) {
NN_ERR_PRINTF("ctx is invalid"); NN_ERR_PRINTF("ctx is invalid");
res = invalid_argument; return invalid_argument;
goto fail;
} }
call_wasi_nn_func(wasi_nn_ctx->backend, init_execution_context, res, call_wasi_nn_func(wasi_nn_ctx->backend, init_execution_context, res,
wasi_nn_ctx->backend_ctx, g, ctx); wasi_nn_ctx->backend_ctx, g, ctx);
fail:
unlock_ctx(wasi_nn_ctx);
return res; return res;
} }
@ -663,21 +619,17 @@ wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx,
return runtime_error; return runtime_error;
} }
wasi_nn_error res; WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx))) if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail; return res;
tensor input_tensor_native = { 0 }; tensor input_tensor_native = { 0 };
if (success if (success
!= (res = tensor_app_native(instance, input_tensor, != (res = tensor_app_native(instance, input_tensor,
&input_tensor_native))) &input_tensor_native)))
goto fail; return res;
call_wasi_nn_func(wasi_nn_ctx->backend, set_input, res, call_wasi_nn_func(wasi_nn_ctx->backend, set_input, res,
wasi_nn_ctx->backend_ctx, ctx, index, wasi_nn_ctx->backend_ctx, ctx, index,
@ -685,8 +637,7 @@ wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx,
// XXX: Free intermediate structure pointers // XXX: Free intermediate structure pointers
if (input_tensor_native.dimensions) if (input_tensor_native.dimensions)
wasm_runtime_free(input_tensor_native.dimensions); wasm_runtime_free(input_tensor_native.dimensions);
fail:
unlock_ctx(wasi_nn_ctx);
return res; return res;
} }
@ -700,20 +651,14 @@ wasi_nn_compute(wasm_exec_env_t exec_env, graph_execution_context ctx)
return runtime_error; return runtime_error;
} }
wasi_nn_error res; WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx))) if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail; return res;
call_wasi_nn_func(wasi_nn_ctx->backend, compute, res, call_wasi_nn_func(wasi_nn_ctx->backend, compute, res,
wasi_nn_ctx->backend_ctx, ctx); wasi_nn_ctx->backend_ctx, ctx);
fail:
unlock_ctx(wasi_nn_ctx);
return res; return res;
} }
@ -736,21 +681,16 @@ wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
return runtime_error; return runtime_error;
} }
wasi_nn_error res; WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx))) if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail; return res;
if (!wasm_runtime_validate_native_addr(instance, output_tensor_size, if (!wasm_runtime_validate_native_addr(instance, output_tensor_size,
(uint64)sizeof(uint32_t))) { (uint64)sizeof(uint32_t))) {
NN_ERR_PRINTF("output_tensor_size is invalid"); NN_ERR_PRINTF("output_tensor_size is invalid");
res = invalid_argument; return invalid_argument;
goto fail;
} }
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
@ -763,8 +703,6 @@ wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
wasi_nn_ctx->backend_ctx, ctx, index, output_tensor, wasi_nn_ctx->backend_ctx, ctx, index, output_tensor,
output_tensor_size); output_tensor_size);
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */ #endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
fail:
unlock_ctx(wasi_nn_ctx);
return res; return res;
} }
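The `lock_ctx`/`unlock_ctx` pair above reduces a host call to an acquire/work/release shape; a sketch of that pattern, with `do_backend_work` standing in for the validation and backend calls the real functions perform.

```c
/* Illustrative only: a host call guarded by lock_ctx/unlock_ctx. */
static wasi_nn_error
guarded_host_call(wasm_exec_env_t exec_env)
{
    wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
    if (!instance)
        return runtime_error;

    WASINNContext *wasi_nn_ctx = lock_ctx(instance); /* NULL when busy */
    if (wasi_nn_ctx == NULL)
        return busy;

    wasi_nn_error res = do_backend_work(wasi_nn_ctx); /* placeholder */

    unlock_ctx(wasi_nn_ctx); /* clears the busy flag under the lock */
    return res;
}
```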

View File

@ -26,25 +26,17 @@
* from 4. to 6. is the Inference Loop * from 4. to 6. is the Inference Loop
*/ */
/* these limits are arbitrary. */
#define MAX_GRAPHS 4
#define MAX_EXECUTION_CONTEXTS 4
typedef struct { typedef struct {
ov_core_t *core; ov_core_t *core;
/* keep input model files */ /* keep input model files */
struct OpenVINOGraph { void *weight_data;
void *weight_data; ov_tensor_t *weights_tensor;
ov_tensor_t *weights_tensor; ov_model_t *model;
ov_model_t *model; /* add prepostprocess */
ov_compiled_model_t *compiled_model; ov_model_t *new_model;
} graphs[MAX_GRAPHS]; ov_compiled_model_t *compiled_model;
struct OpenVINOExecutionContext { ov_infer_request_t *infer_request;
struct OpenVINOGraph *graph; ov_tensor_t *input_tensor;
ov_infer_request_t *infer_request;
} execution_contexts[MAX_EXECUTION_CONTEXTS];
unsigned int n_graphs;
unsigned int n_execution_contexts;
} OpenVINOContext; } OpenVINOContext;
/* /*
@ -189,29 +181,6 @@ wasi_nn_tensor_type_to_openvino_element_type(tensor_type wasi_nn_type)
return UNDEFINED; return UNDEFINED;
} }
static void
free_graph(struct OpenVINOGraph *graph)
{
if (graph->weight_data)
os_free(graph->weight_data);
if (graph->weights_tensor)
ov_tensor_free(graph->weights_tensor);
if (graph->model)
ov_model_free(graph->model);
if (graph->compiled_model)
ov_compiled_model_free(graph->compiled_model);
}
static void
free_execution_context(struct OpenVINOExecutionContext *c)
{
if (c->infer_request)
ov_infer_request_free(c->infer_request);
}
static wasi_nn_error static wasi_nn_error
uint32_array_to_int64_array(uint32_t array_size, uint32_t *src, int64_t **dst) uint32_array_to_int64_array(uint32_t array_size, uint32_t *src, int64_t **dst)
{ {
@ -231,8 +200,6 @@ load(void *ctx, graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *g) execution_target target, graph *g)
{ {
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx; OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOGraph *graph;
unsigned int graph_idx;
wasi_nn_error ret = unsupported_operation; wasi_nn_error ret = unsupported_operation;
if (encoding != openvino) { if (encoding != openvino) {
@ -258,47 +225,39 @@ load(void *ctx, graph_builder_array *builder, graph_encoding encoding,
graph_builder xml = builder->buf[0]; graph_builder xml = builder->buf[0];
graph_builder weight = builder->buf[1]; graph_builder weight = builder->buf[1];
graph_idx = ov_ctx->n_graphs; /* if xml is a String with a model in IR */
if (graph_idx >= MAX_GRAPHS) { if (!(xml.buf[xml.size] == '\0' && xml.buf[xml.size - 1] != '\0')) {
return runtime_error; NN_ERR_PRINTF("Invalid xml string.");
return invalid_argument;
} }
graph = &ov_ctx->graphs[graph_idx];
memset(graph, 0, sizeof(*graph));
/* transfer weight to an ov tensor */ /* transfer weight to an ov tensor */
{ {
graph->weight_data = os_malloc(weight.size); ov_ctx->weight_data = os_malloc(weight.size);
if (!graph->weight_data) if (!ov_ctx->weight_data)
goto fail; goto fail;
memcpy(graph->weight_data, weight.buf, weight.size); memcpy(ov_ctx->weight_data, weight.buf, weight.size);
ov_element_type_e type = U8; ov_element_type_e type = U8;
int64_t dims[1] = { weight.size }; int64_t dims[1] = { weight.size };
ov_shape_t shape = { 1, dims }; ov_shape_t shape = { 1, dims };
CHECK_OV_STATUS(ov_tensor_create_from_host_ptr(type, shape, CHECK_OV_STATUS(ov_tensor_create_from_host_ptr(type, shape,
graph->weight_data, ov_ctx->weight_data,
&graph->weights_tensor), &ov_ctx->weights_tensor),
ret); ret);
} }
/* load model from buffer */ /* load model from buffer */
CHECK_OV_STATUS(ov_core_read_model_from_memory_buffer( CHECK_OV_STATUS(ov_core_read_model_from_memory_buffer(
ov_ctx->core, (char *)xml.buf, xml.size, ov_ctx->core, (char *)xml.buf, xml.size,
graph->weights_tensor, &graph->model), ov_ctx->weights_tensor, &ov_ctx->model),
ret); ret);
#ifndef NDEBUG #ifndef NDEBUG
print_model_input_output_info(ov_ctx->model); print_model_input_output_info(ov_ctx->model);
#endif #endif
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, graph->model, "CPU", 0, ret = success;
&graph->compiled_model),
ret);
*g = graph_idx;
ov_ctx->n_graphs++;
return success;
fail: fail:
free_graph(graph);
return ret; return ret;
} }
@ -306,62 +265,20 @@ __attribute__((visibility("default"))) wasi_nn_error
load_by_name(void *ctx, const char *filename, uint32_t filename_len, graph *g) load_by_name(void *ctx, const char *filename, uint32_t filename_len, graph *g)
{ {
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx; OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOGraph *graph;
unsigned int graph_idx;
wasi_nn_error ret = unsupported_operation; wasi_nn_error ret = unsupported_operation;
graph_idx = ov_ctx->n_graphs;
if (graph_idx >= MAX_GRAPHS) {
return runtime_error;
}
graph = &ov_ctx->graphs[graph_idx];
memset(graph, 0, sizeof(*graph));
CHECK_OV_STATUS( CHECK_OV_STATUS(
ov_core_read_model(ov_ctx->core, filename, NULL, &graph->model), ret); ov_core_read_model(ov_ctx->core, filename, NULL, &ov_ctx->model), ret);
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, graph->model, "CPU", 0, ret = success;
&graph->compiled_model),
ret);
*g = graph_idx;
ov_ctx->n_graphs++;
return success;
fail: fail:
free_graph(graph);
return ret; return ret;
} }
__attribute__((visibility("default"))) wasi_nn_error __attribute__((visibility("default"))) wasi_nn_error
init_execution_context(void *ctx, graph g, graph_execution_context *exec_ctx) init_execution_context(void *ctx, graph g, graph_execution_context *exec_ctx)
{ {
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOGraph *graph;
struct OpenVINOExecutionContext *exec;
unsigned int exec_idx;
wasi_nn_error ret;
if (g >= ov_ctx->n_graphs)
return runtime_error;
graph = &ov_ctx->graphs[g];
exec_idx = ov_ctx->n_execution_contexts;
if (exec_idx >= MAX_EXECUTION_CONTEXTS)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_idx];
memset(exec, 0, sizeof(*exec));
exec->graph = graph;
CHECK_OV_STATUS(ov_compiled_model_create_infer_request(
graph->compiled_model, &exec->infer_request),
ret);
*exec_ctx = exec_idx;
ov_ctx->n_execution_contexts++;
return success; return success;
fail:
return ret;
} }
__attribute__((visibility("default"))) wasi_nn_error __attribute__((visibility("default"))) wasi_nn_error
@ -369,15 +286,19 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor *wasi_nn_tensor) tensor *wasi_nn_tensor)
{ {
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx; OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOExecutionContext *exec;
wasi_nn_error ret = unsupported_operation; wasi_nn_error ret = unsupported_operation;
ov_shape_t input_shape = { 0 }; ov_shape_t input_shape = { 0 };
ov_tensor_t *input_tensor = NULL;
int64_t *ov_dims = NULL; int64_t *ov_dims = NULL;
if (exec_ctx >= ov_ctx->n_execution_contexts) ov_preprocess_prepostprocessor_t *ppp = NULL;
return runtime_error; ov_preprocess_input_info_t *input_info = NULL;
exec = &ov_ctx->execution_contexts[exec_ctx]; ov_preprocess_input_tensor_info_t *input_tensor_info = NULL;
ov_layout_t *input_layout = NULL;
ov_preprocess_preprocess_steps_t *input_process = NULL;
ov_preprocess_input_model_info_t *p_input_model = NULL;
ov_layout_t *model_layout = NULL;
ov_preprocess_output_info_t *output_info = NULL;
ov_preprocess_output_tensor_info_t *output_tensor_info = NULL;
/* wasi_nn_tensor -> ov_tensor */ /* wasi_nn_tensor -> ov_tensor */
{ {
@ -403,21 +324,95 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
CHECK_OV_STATUS(ov_tensor_create_from_host_ptr(input_type, input_shape, CHECK_OV_STATUS(ov_tensor_create_from_host_ptr(input_type, input_shape,
wasi_nn_tensor->data, wasi_nn_tensor->data,
&input_tensor), &ov_ctx->input_tensor),
ret); ret);
} }
/* set preprocess based on wasi_nn_tensor */
{
CHECK_OV_STATUS(
ov_preprocess_prepostprocessor_create(ov_ctx->model, &ppp), ret);
/* reuse user' created tensor's info */
CHECK_OV_STATUS(ov_preprocess_prepostprocessor_get_input_info_by_index(
ppp, index, &input_info),
ret);
CHECK_OV_STATUS(ov_preprocess_input_info_get_tensor_info(
input_info, &input_tensor_info),
ret);
CHECK_OV_STATUS(ov_preprocess_input_tensor_info_set_from(
input_tensor_info, ov_ctx->input_tensor),
ret);
/* add RESIZE */
CHECK_OV_STATUS(ov_preprocess_input_info_get_preprocess_steps(
input_info, &input_process),
ret);
CHECK_OV_STATUS(
ov_preprocess_preprocess_steps_resize(input_process, RESIZE_LINEAR),
ret);
/* input model */
CHECK_OV_STATUS(
ov_preprocess_input_info_get_model_info(input_info, &p_input_model),
ret);
// TODO: what if not?
CHECK_OV_STATUS(ov_layout_create("NCHW", &model_layout), ret);
CHECK_OV_STATUS(ov_preprocess_input_model_info_set_layout(p_input_model,
model_layout),
ret);
/* output -> F32(possibility) */
CHECK_OV_STATUS(ov_preprocess_prepostprocessor_get_output_info_by_index(
ppp, index, &output_info),
ret);
CHECK_OV_STATUS(ov_preprocess_output_info_get_tensor_info(
output_info, &output_tensor_info),
ret);
CHECK_OV_STATUS(
ov_preprocess_output_set_element_type(output_tensor_info, F32),
ret);
CHECK_OV_STATUS(
ov_preprocess_prepostprocessor_build(ppp, &ov_ctx->new_model), ret);
}
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, ov_ctx->new_model,
"CPU", 0, &ov_ctx->compiled_model),
ret);
CHECK_OV_STATUS(ov_compiled_model_create_infer_request(
ov_ctx->compiled_model, &ov_ctx->infer_request),
ret);
/* install ov_tensor -> infer_request */ /* install ov_tensor -> infer_request */
CHECK_OV_STATUS(ov_infer_request_set_input_tensor_by_index( CHECK_OV_STATUS(ov_infer_request_set_input_tensor_by_index(
exec->infer_request, index, input_tensor), ov_ctx->infer_request, index, ov_ctx->input_tensor),
ret); ret);
ret = success; ret = success;
fail: fail:
if (ov_dims) if (ov_dims)
os_free(ov_dims); os_free(ov_dims);
if (input_tensor)
ov_tensor_free(input_tensor);
ov_shape_free(&input_shape); ov_shape_free(&input_shape);
if (ppp)
ov_preprocess_prepostprocessor_free(ppp);
if (input_info)
ov_preprocess_input_info_free(input_info);
if (input_tensor_info)
ov_preprocess_input_tensor_info_free(input_tensor_info);
if (input_layout)
ov_layout_free(input_layout);
if (input_process)
ov_preprocess_preprocess_steps_free(input_process);
if (p_input_model)
ov_preprocess_input_model_info_free(p_input_model);
if (model_layout)
ov_layout_free(model_layout);
if (output_info)
ov_preprocess_output_info_free(output_info);
if (output_tensor_info)
ov_preprocess_output_tensor_info_free(output_tensor_info);
return ret; return ret;
} }
@ -426,14 +421,9 @@ __attribute__((visibility("default"))) wasi_nn_error
compute(void *ctx, graph_execution_context exec_ctx) compute(void *ctx, graph_execution_context exec_ctx)
{ {
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx; OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOExecutionContext *exec;
wasi_nn_error ret = unsupported_operation; wasi_nn_error ret = unsupported_operation;
if (exec_ctx >= ov_ctx->n_execution_contexts) CHECK_OV_STATUS(ov_infer_request_infer(ov_ctx->infer_request), ret);
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_ctx];
CHECK_OV_STATUS(ov_infer_request_infer(exec->infer_request), ret);
ret = success; ret = success;
fail: fail:
return ret; return ret;
@ -444,27 +434,17 @@ get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor_data output_tensor, uint32_t *output_tensor_size) tensor_data output_tensor, uint32_t *output_tensor_size)
{ {
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx; OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOExecutionContext *exec;
wasi_nn_error ret = unsupported_operation; wasi_nn_error ret = unsupported_operation;
ov_tensor_t *ov_tensor = NULL; ov_tensor_t *ov_tensor = NULL;
void *data = NULL; void *data = NULL;
size_t byte_size = 0; size_t byte_size = 0;
if (exec_ctx >= ov_ctx->n_execution_contexts)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_ctx];
CHECK_OV_STATUS(ov_infer_request_get_output_tensor_by_index( CHECK_OV_STATUS(ov_infer_request_get_output_tensor_by_index(
exec->infer_request, index, &ov_tensor), ov_ctx->infer_request, index, &ov_tensor),
ret); ret);
CHECK_OV_STATUS(ov_tensor_get_byte_size(ov_tensor, &byte_size), ret); CHECK_OV_STATUS(ov_tensor_get_byte_size(ov_tensor, &byte_size), ret);
if (byte_size > *output_tensor_size) {
ret = too_large;
goto fail;
}
CHECK_OV_STATUS(ov_tensor_data(ov_tensor, &data), ret); CHECK_OV_STATUS(ov_tensor_data(ov_tensor, &data), ret);
memcpy(output_tensor, data, byte_size); memcpy(output_tensor, data, byte_size);
@ -521,16 +501,27 @@ __attribute__((visibility("default"))) wasi_nn_error
deinit_backend(void *ctx) deinit_backend(void *ctx)
{ {
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx; OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
unsigned int i;
if (!ov_ctx) if (!ov_ctx)
return invalid_argument; return invalid_argument;
for (i = 0; i < ov_ctx->n_execution_contexts; i++) if (ov_ctx->weight_data)
free_execution_context(&ov_ctx->execution_contexts[i]); os_free(ov_ctx->weight_data);
for (i = 0; i < ov_ctx->n_graphs; i++) if (ov_ctx->weights_tensor)
free_graph(&ov_ctx->graphs[i]); ov_tensor_free(ov_ctx->weights_tensor);
if (ov_ctx->input_tensor)
ov_tensor_free(ov_ctx->input_tensor);
if (ov_ctx->infer_request)
ov_infer_request_free(ov_ctx->infer_request);
if (ov_ctx->compiled_model)
ov_compiled_model_free(ov_ctx->compiled_model);
if (ov_ctx->model)
ov_model_free(ov_ctx->model);
if (ov_ctx->core) if (ov_ctx->core)
ov_core_free(ov_ctx->core); ov_core_free(ov_ctx->core);
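For reference, both variants of this backend drive the same OpenVINO C API sequence: read a model, compile it for a device, create an infer request, run it, and read the output tensor. A trimmed sketch, with "model.xml" as a placeholder path and error handling collapsed to early returns (resources are not released on the error paths in this sketch).

```c
#include "openvino/c/openvino.h"

/* Illustrative only: the minimal load -> compile -> infer -> output flow. */
static int
minimal_openvino_flow(void)
{
    ov_core_t *core = NULL;
    ov_model_t *model = NULL;
    ov_compiled_model_t *compiled = NULL;
    ov_infer_request_t *req = NULL;
    ov_tensor_t *out = NULL;

    if (ov_core_create(&core) != OK)
        return -1;
    if (ov_core_read_model(core, "model.xml", NULL, &model) != OK)
        return -1;
    if (ov_core_compile_model(core, model, "CPU", 0, &compiled) != OK)
        return -1;
    if (ov_compiled_model_create_infer_request(compiled, &req) != OK)
        return -1;
    /* inputs would be attached here via
       ov_infer_request_set_input_tensor_by_index() */
    if (ov_infer_request_infer(req) != OK)
        return -1;
    if (ov_infer_request_get_output_tensor_by_index(req, 0, &out) != OK)
        return -1;

    ov_tensor_free(out);
    ov_infer_request_free(req);
    ov_compiled_model_free(compiled);
    ov_model_free(model);
    ov_core_free(core);
    return 0;
}
```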

View File

@ -9,12 +9,7 @@
#include "wasi_nn_types.h" #include "wasi_nn_types.h"
#include "wasm_export.h" #include "wasm_export.h"
#include "bh_platform.h"
typedef struct { typedef struct {
korp_mutex lock;
bool busy;
bool is_backend_ctx_initialized;
bool is_model_loaded; bool is_model_loaded;
graph_encoding backend; graph_encoding backend;
void *backend_ctx; void *backend_ctx;

View File

@ -281,11 +281,6 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
{ {
TFLiteContext *tfl_ctx = (TFLiteContext *)tflite_ctx; TFLiteContext *tfl_ctx = (TFLiteContext *)tflite_ctx;
if (input_tensor->type != fp32) {
NN_ERR_PRINTF("unsupported input tensor type %u", input_tensor->type);
return runtime_error;
}
wasi_nn_error res; wasi_nn_error res;
if (success != (res = is_valid_graph_execution_context(tfl_ctx, ctx))) if (success != (res = is_valid_graph_execution_context(tfl_ctx, ctx)))
return res; return res;
@ -389,34 +384,23 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
return too_large; return too_large;
} }
uint32_t model_tensor_size = 1;
for (int i = 0; i < (int)tensor->dims->size; ++i)
model_tensor_size *= (uint32_t)tensor->dims->data[i];
if (*output_tensor_size < model_tensor_size) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
if (tensor->quantization.type == kTfLiteNoQuantization) { if (tensor->quantization.type == kTfLiteNoQuantization) {
NN_DBG_PRINTF("No quantization information"); NN_DBG_PRINTF("No quantization information");
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 float *ot =
if (*output_tensor_size < tensor->bytes) { tfl_ctx->interpreters[ctx].interpreter->typed_output_tensor<float>(
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index); index);
return too_large;
} int size = model_tensor_size * sizeof(float);
#else bh_memcpy_s(output_tensor, size, ot, size);
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
if (*output_tensor_size < tensor->bytes / sizeof(float)) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#endif
bh_memcpy_s(output_tensor, *output_tensor_size, tensor->data.data,
tensor->bytes);
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
*output_tensor_size = tensor->bytes;
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
*output_tensor_size = tensor->bytes / sizeof(float);
#endif
} }
else { // TODO: Assuming uint8 quantized networks. else { // TODO: Assuming uint8 quantized networks.
TfLiteAffineQuantization *quant_info = TfLiteAffineQuantization *quant_info =
@ -425,27 +409,6 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
NN_ERR_PRINTF("Quantization per channel is not supported"); NN_ERR_PRINTF("Quantization per channel is not supported");
return runtime_error; return runtime_error;
} }
uint32_t model_tensor_size = 1;
for (int i = 0; i < (int)tensor->dims->size; ++i)
model_tensor_size *= (uint32_t)tensor->dims->data[i];
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
if (*output_tensor_size / sizeof(float) < model_tensor_size) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
if (*output_tensor_size < model_tensor_size) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#endif
uint8_t *ot = tfl_ctx->interpreters[ctx] uint8_t *ot = tfl_ctx->interpreters[ctx]
.interpreter->typed_output_tensor<uint8_t>(index); .interpreter->typed_output_tensor<uint8_t>(index);
@ -458,18 +421,9 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
for (uint32_t i = 0; i < model_tensor_size; ++i) { for (uint32_t i = 0; i < model_tensor_size; ++i) {
output_tensor_f[i] = (ot[i] - zero_point) * scale; output_tensor_f[i] = (ot[i] - zero_point) * scale;
} }
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
*output_tensor_size = model_tensor_size * sizeof(float);
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
*output_tensor_size = model_tensor_size;
#endif
} }
*output_tensor_size = model_tensor_size;
return success; return success;
} }
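The quantized branch above recovers floats from the uint8 output with the affine mapping `f = (q - zero_point) * scale`; a standalone sketch of that loop, with the parameters purely illustrative.

```c
#include <stdint.h>

/* Illustrative only: dequantize n uint8 values into caller-provided floats. */
static void
dequantize_u8(const uint8_t *q, float *out, uint32_t n, float scale,
              int32_t zero_point)
{
    for (uint32_t i = 0; i < n; ++i)
        out[i] = (q[i] - zero_point) * scale;
}
```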

View File

@ -3,17 +3,6 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved. # Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# on intel mac, this ends up with a lot of the following error.
#
# AttributeError: 'Sequential' object has no attribute '_get_save_spec'.
#
# * "pip install tensorflow" installs tensorflow 2.16.2 on intel mac.
# (because it's the last version before tf deprecated the target.)
# * keras 3 support in the version seems incomplete (thus the error)
# * a workaround: use keras 2 as mentioned in:
# https://github.com/tensorflow/tensorflow/releases/tag/v2.16.1
# https://blog.tensorflow.org/2024/03/whats-new-in-tensorflow-216.html
CURR_PATH=$(cd $(dirname $0) && pwd -P) CURR_PATH=$(cd $(dirname $0) && pwd -P)
# WASM application that uses WASI-NN # WASM application that uses WASI-NN

View File

@ -3,7 +3,7 @@
import tensorflow as tf import tensorflow as tf
import numpy as np import numpy as np
from tensorflow.keras.layers import AveragePooling2D, Conv2D from keras.layers import AveragePooling2D, Conv2D
from tensorflow.keras import Input, Model from tensorflow.keras import Input, Model

View File

@ -201,20 +201,10 @@ openat(int fd, const char *pathname, int flags, ...)
int ret; int ret;
char dir_path[DIR_PATH_LEN]; char dir_path[DIR_PATH_LEN];
char *full_path; char *full_path;
mode_t mode = 0;
bool has_mode = false;
if (flags & O_CREAT) {
va_list ap;
va_start(ap, flags);
mode = (mode_t)va_arg(ap, int);
va_end(ap);
has_mode = true;
}
ret = fcntl(fd, F_GETPATH, dir_path); ret = fcntl(fd, F_GETPATH, dir_path);
if (ret != 0) { if (ret != 0) {
errno = EINVAL; errno = -EINVAL;
return -1; return -1;
} }
@ -224,7 +214,7 @@ openat(int fd, const char *pathname, int flags, ...)
return -1; return -1;
} }
new_fd = has_mode ? open(full_path, flags, mode) : open(full_path, flags); new_fd = open(full_path, flags);
free(full_path); free(full_path);
return new_fd; return new_fd;

View File

@ -102,7 +102,6 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM
### **Enable lib wasi-nn** ### **Enable lib wasi-nn**
- **WAMR_BUILD_WASI_NN**=1/0, default to disable if not set - **WAMR_BUILD_WASI_NN**=1/0, default to disable if not set
> Note: WAMR_BUILD_WASI_NN without WAMR_BUILD_WASI_EPHEMERAL_NN is deprecated and will likely be removed in future versions of WAMR. Please consider to enable WAMR_BUILD_WASI_EPHEMERAL_NN as well.
> Note: See [WASI-NN](../core/iwasm/libraries/wasi-nn) for more details. > Note: See [WASI-NN](../core/iwasm/libraries/wasi-nn) for more details.
### **Enable lib wasi-nn GPU mode** ### **Enable lib wasi-nn GPU mode**
@ -114,7 +113,7 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM
- **WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH**=Path to the external delegate shared library (e.g. `libedgetpu.so.1.0` for Coral USB) - **WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH**=Path to the external delegate shared library (e.g. `libedgetpu.so.1.0` for Coral USB)
### **Enable lib wasi-nn with `wasi_ephemeral_nn` module support** ### **Enable lib wasi-nn with `wasi_ephemeral_nn` module support**
- **WAMR_BUILD_WASI_EPHEMERAL_NN**=1/0, default to enable if not set - **WAMR_BUILD_WASI_EPHEMERAL_NN**=1/0, default to disable if not set
### **Disable boundary check with hardware trap** ### **Disable boundary check with hardware trap**
- **WAMR_DISABLE_HW_BOUND_CHECK**=1/0, default to enable if not set and supported by platform - **WAMR_DISABLE_HW_BOUND_CHECK**=1/0, default to enable if not set and supported by platform
@ -361,4 +360,4 @@ For Valgrind, begin with the following configurations and add additional ones as
-DWAMR_DISABLE_HW_BOUND_CHECK=0 \ -DWAMR_DISABLE_HW_BOUND_CHECK=0 \
-DWAMR_DISABLE_WRITE_GS_BASE=0 -DWAMR_DISABLE_WRITE_GS_BASE=0
#... #...
``` ```

View File

@ -6,7 +6,7 @@
### Pre-requisites ### Pre-requisites
#### Install requirements #### Install requirements
Before proceeding it is necessary to make sure your Python environment is correctly configured. To do this open a terminal session in this directory and perform the following: Before proceeding it is necessary to make sure your Python environment is correctly configured. To do ths open a terminal session in this directory and perfom the following:
```shell ```shell

View File

@ -353,12 +353,12 @@ writable and needs to be copied into a ctype array.
#### variable arguments #### variable arguments
A function with _variable arguments_ makes it hard to specify the required A function with _variable arugments_ makes it hard to specify the required
argument types for the function prototype. It leaves us one way to call it argument types for the function prototype. It leaves us one way to call it
directly without any arguments type checking. directly without any arguments type checking.
```python ```python
libc.printf(b"Hello, an int %d, a float %f, a string %s\n", c_int(1), c_double(3.14), "World!") libc.printf(b"Hello, an int %d, a float %f, a string %s\n", c_int(1), c_doulbe(3.14), "World!")
``` ```
#### Use `c_bool` to represent `wasm_mutability_t ` #### Use `c_bool` to represent `wasm_mutability_t `
@ -373,7 +373,7 @@ libc.printf(b"Hello, an int %d, a float %f, a string %s\n", c_int(1), c_double(3
### bindgen.py ### bindgen.py
`bindgen.py` is a tool to create WAMR python binding automatically. `binding.py` `bindge.py` is a tool to create WAMR python binding automatically. `binding.py`
is generated. We should avoid modification on it. Additional helpers should go is generated. We should avoid modification on it. Additional helpers should go
to `ffi.py`. to `ffi.py`.

View File

@ -114,12 +114,6 @@ set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS}")
set (CMAKE_MACOSX_RPATH True) set (CMAKE_MACOSX_RPATH True)
# if enable wasi-nn, both wasi-nn-backends and iwasm
# need to use same WAMR (dynamic) libraries
if (WAMR_BUILD_WASI_NN EQUAL 1)
set (BUILD_SHARED_LIBS ON)
endif ()
set (WAMR_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../..) set (WAMR_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../..)
include (${WAMR_ROOT_DIR}/build-scripts/runtime_lib.cmake) include (${WAMR_ROOT_DIR}/build-scripts/runtime_lib.cmake)

View File

@ -79,7 +79,7 @@ struct wamr_pal_create_process_args {
// Untrusted environment variable array pass to new process. // Untrusted environment variable array pass to new process.
// //
// The untrusted env vars to the command. And the last element of the array // The untrusted env vars to the command. And the last element of the array
// must be NULL to indicate the end of the array. // must be NULL to indicate the length of array.
// //
// Optional field. // Optional field.
const char **env; const char **env;

View File

@ -8,8 +8,6 @@ Refer to the `README.md` under each folder for how to build and run the benchmar
## Install `llvm-profdata` ## Install `llvm-profdata`
> PS: the `llvm-profdata` version needs to be the same major version as the llvm libraries used to build wamrc.
The tool `llvm-profdata` is used when running the `test_pgo.sh` script under the benchmark folder. There are two ways to install it: The tool `llvm-profdata` is used when running the `test_pgo.sh` script under the benchmark folder. There are two ways to install it:
1. Refer to https://apt.llvm.org/, e.g. in Ubuntu 20.04, add lines below to /etc/apt/source.list 1. Refer to https://apt.llvm.org/, e.g. in Ubuntu 20.04, add lines below to /etc/apt/source.list
@ -20,22 +18,19 @@ deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal main
# 15 # 15
deb http://apt.llvm.org/focal/ llvm-toolchain-focal-15 main deb http://apt.llvm.org/focal/ llvm-toolchain-focal-15 main
deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-15 main deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-15 main
# 18
deb http://apt.llvm.org/focal/ llvm-toolchain-focal-18 main
deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-18 main
``` ```
Then run `sudo apt update`, `sudo apt install llvm`. And after installing: Then run `sudo apt update`, `sudo apt install llvm`. And after installing:
```bash ```bash
cd /usr/bin cd /usr/bin
sudo ln -s llvm-profdata-18 llvm-profdata sudo ln -s llvm-profdata-15 llvm-profdata
``` ```
2. Build manually 2. Build manually
```bash ```bash
git clone --depth 1 --branch release/18.x https://github.com/llvm/llvm-project.git git clone --depth 1 --branch release/15.x https://github.com/llvm/llvm-project.git
cd llvm-project cd llvm-project
mkdir build && cd build mkdir build && cd build
cmake ../llvm \ cmake ../llvm \

View File

@ -2,7 +2,6 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved. # Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set -e
PLATFORM=$(uname -s | tr A-Z a-z) PLATFORM=$(uname -s | tr A-Z a-z)

View File

@ -2,7 +2,6 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved. # Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set -e
PLATFORM=$(uname -s | tr A-Z a-z) PLATFORM=$(uname -s | tr A-Z a-z)

View File

@ -60,7 +60,7 @@ class memory64_atomic_test_suite : public testing::TestWithParam<RunningMode>
return false; return false;
} }
void destroy_exec_env() void destory_exec_env()
{ {
wasm_runtime_destroy_exec_env(exec_env); wasm_runtime_destroy_exec_env(exec_env);
wasm_runtime_deinstantiate(module_inst); wasm_runtime_deinstantiate(module_inst);
@ -109,7 +109,7 @@ class memory64_atomic_test_suite : public testing::TestWithParam<RunningMode>
virtual void TearDown() virtual void TearDown()
{ {
if (cleanup) { if (cleanup) {
destroy_exec_env(); destory_exec_env();
wasm_runtime_destroy(); wasm_runtime_destroy();
cleanup = false; cleanup = false;
} }
@ -339,8 +339,8 @@ TEST_P(memory64_atomic_test_suite, atomic_opcodes_i64_rmw_cmpxchg)
PUT_I64_TO_ADDR(wasm_argv + 2, 0x100F0E0D0C0B0A09); PUT_I64_TO_ADDR(wasm_argv + 2, 0x100F0E0D0C0B0A09);
// new // new
PUT_I64_TO_ADDR(wasm_argv + 4, 0xdeadcafebeefdead); PUT_I64_TO_ADDR(wasm_argv + 4, 0xdeadcafebeefdead);
ASSERT_TRUE(wasm_runtime_call_wasm( ASSERT_TRUE(wasm_runtime_call_wasm(exec_env, func_map["i64_atomic_rmw_cmpxchg"],
exec_env, func_map["i64_atomic_rmw_cmpxchg"], 6, wasm_argv)); 6, wasm_argv));
i64 = 0x100F0E0D0C0B0A09; i64 = 0x100F0E0D0C0B0A09;
ASSERT_EQ(i64, GET_U64_FROM_ADDR(wasm_argv)); ASSERT_EQ(i64, GET_U64_FROM_ADDR(wasm_argv));

View File

@ -361,39 +361,39 @@ function sightglass_test()
function setup_wabt() function setup_wabt()
{ {
# please sync with .github/actions/install-wasi-sdk-wabt/action.yml WABT_VERSION=1.0.37
case ${PLATFORM} in
cosmopolitan)
;;
linux)
WABT_URL=https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-ubuntu-20.04.tar.gz
WABT_VERSION=1.0.37
;;
darwin)
WABT_URL=https://github.com/WebAssembly/wabt/releases/download/1.0.36/wabt-1.0.36-macos-12.tar.gz
WABT_VERSION=1.0.36
;;
windows)
WABT_URL=https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-windows.tar.gz
WABT_VERSION=1.0.37
;;
*)
echo "wabt platform for ${PLATFORM} in unknown"
exit 1
;;
esac
if [ ${WABT_BINARY_RELEASE} == "YES" ]; then if [ ${WABT_BINARY_RELEASE} == "YES" ]; then
echo "download a binary release and install" echo "download a binary release and install"
local WAT2WASM=${WORK_DIR}/wabt/out/gcc/Release/wat2wasm local WAT2WASM=${WORK_DIR}/wabt/out/gcc/Release/wat2wasm
if [ ! -f ${WAT2WASM} ]; then if [ ! -f ${WAT2WASM} ]; then
pushd /tmp case ${PLATFORM} in
wget -O wabt-tar.gz --progress=dot:giga ${WABT_URL} cosmopolitan)
tar xf wabt-tar.gz ;;
popd linux)
WABT_PLATFORM=ubuntu-20.04
;;
darwin)
WABT_PLATFORM=macos-12
;;
windows)
WABT_PLATFORM=windows
;;
*)
echo "wabt platform for ${PLATFORM} in unknown"
exit 1
;;
esac
if [ ! -f /tmp/wabt-${WABT_VERSION}-${WABT_PLATFORM}.tar.gz ]; then
curl -L \
https://github.com/WebAssembly/wabt/releases/download/${WABT_VERSION}/wabt-${WABT_VERSION}-${WABT_PLATFORM}.tar.gz \
-o /tmp/wabt-${WABT_VERSION}-${WABT_PLATFORM}.tar.gz
fi
mkdir -p ${WORK_DIR}/wabt/out/gcc/Release/ cd /tmp \
cp /tmp/wabt-${WABT_VERSION}/bin/* ${WORK_DIR}/wabt/out/gcc/Release/ && tar zxf wabt-${WABT_VERSION}-${WABT_PLATFORM}.tar.gz \
&& mkdir -p ${WORK_DIR}/wabt/out/gcc/Release/ \
&& install wabt-${WABT_VERSION}/bin/* ${WORK_DIR}/wabt/out/gcc/Release/ \
&& cd -
fi fi
else else
echo "download source code and compile and install" echo "download source code and compile and install"
@ -478,9 +478,9 @@ function spec_test()
fi fi
# As of version 1.0.36, wabt is still unable to correctly handle the GC proposal. # As of version 1.0.36, wabt is still unable to correctly handle the GC proposal.
# #
# $ $ /opt/wabt-1.0.36/bin/wast2json --enable-all ../spec/test/core/br_if.wast # $ $ /opt/wabt-1.0.36/bin/wast2json --enable-all ../spec/test/core/br_if.wast
# #
# ../spec/test/core/br_if.wast:670:26: error: unexpected token "null", expected a numeric index or a name (e.g. 12 or $foo). # ../spec/test/core/br_if.wast:670:26: error: unexpected token "null", expected a numeric index or a name (e.g. 12 or $foo).
# (func $f (param (ref null $t)) (result funcref) (local.get 0)) # (func $f (param (ref null $t)) (result funcref) (local.get 0))
# #
@ -536,9 +536,6 @@ function spec_test()
popd popd
echo $(pwd) echo $(pwd)
#TODO: remove it when we can assume wabt is installed
# especially for CI Or there is installation script in the project
# that we can rely on
setup_wabt setup_wabt
ln -sf ${WORK_DIR}/../spec-test-script/all.py . ln -sf ${WORK_DIR}/../spec-test-script/all.py .
@ -625,8 +622,8 @@ function spec_test()
function wamr_compiler_test() function wamr_compiler_test()
{ {
if [[ $1 != "aot" ]]; then if [[ $1 != "aot" ]]; then
echo "WAMR compiler tests only support AOT mode, skip $1" echo "WAMR compiler tests only support AOT mode"
return 0 exit 1
fi fi
echo "Now start WAMR compiler tests" echo "Now start WAMR compiler tests"
@ -880,12 +877,51 @@ function do_execute_in_running_mode()
{ {
local RUNNING_MODE="$1" local RUNNING_MODE="$1"
# filter out uncompatible running mode based on targeting proposal features if [[ ${ENABLE_MULTI_MEMORY} -eq 1 ]]; then
# keep alpha order if [[ "${RUNNING_MODE}" != "classic-interp" \
&& "${RUNNING_MODE}" != "aot" ]]; then
echo "support multi-memory in classic-interp mode and aot mode"
return 0
fi
fi
if [[ ${ENABLE_EH} -eq 1 ]]; then if [[ ${ENABLE_MEMORY64} -eq 1 ]]; then
if [[ "${RUNNING_MODE}" != "classic-interp" ]]; then if [[ "${RUNNING_MODE}" != "classic-interp" \
echo "support exception handling in classic-interp" && "${RUNNING_MODE}" != "aot" ]]; then
echo "support memory64(wasm64) in classic-interp mode and aot mode"
return 0
fi
fi
if [[ ${ENABLE_MULTI_MODULE} -eq 1 ]]; then
if [[ "${RUNNING_MODE}" != "classic-interp" \
&& "${RUNNING_MODE}" != "fast-interp" \
&& "${RUNNING_MODE}" != "aot" ]]; then
echo "support multi-module in both interp modes"
return 0
fi
fi
if [[ ${SGX_OPT} == "--sgx" ]]; then
if [[ "${RUNNING_MODE}" != "classic-interp" \
&& "${RUNNING_MODE}" != "fast-interp" \
&& "${RUNNING_MODE}" != "aot" \
&& "${RUNNING_MODE}" != "fast-jit" ]]; then
echo "support sgx in both interp modes, fast-jit mode and aot mode"
return 0
fi
fi
if [[ ${ENABLE_SIMD} -eq 1 ]]; then
if [[ "${RUNNING_MODE}" != "jit" && "${RUNNING_MODE}" != "aot" && "${RUNNING_MODE}" != "fast-interp" ]]; then
echo "support simd in llvm-jit, aot and fast-interp mode"
return 0;
fi
fi
if [[ ${TARGET} == "X86_32" ]]; then
if [[ "${RUNNING_MODE}" == "jit" || "${RUNNING_MODE}" == "fast-jit" ]]; then
echo "both llvm-jit mode and fast-jit mode do not support X86_32 target"
return 0; return 0;
fi fi
fi fi
@ -900,67 +936,9 @@ function do_execute_in_running_mode()
fi fi
fi fi
if [[ ${ENABLE_MEMORY64} -eq 1 ]]; then if [[ ${ENABLE_EH} -eq 1 ]]; then
if [[ "${RUNNING_MODE}" != "classic-interp" \
&& "${RUNNING_MODE}" != "aot" ]]; then
echo "support memory64(wasm64) in classic-interp mode and aot mode"
return 0
fi
fi
if [[ ${ENABLE_MULTI_MEMORY} -eq 1 ]]; then
if [[ "${RUNNING_MODE}" != "classic-interp" ]]; then if [[ "${RUNNING_MODE}" != "classic-interp" ]]; then
echo "support multi-memory in classic-interp mode mode" echo "support exception handling in classic-interp"
return 0
fi
fi
if [[ ${ENABLE_MULTI_MODULE} -eq 1 ]]; then
if [[ "${RUNNING_MODE}" != "classic-interp" \
&& "${RUNNING_MODE}" != "fast-interp" \
&& "${RUNNING_MODE}" != "aot" ]]; then
echo "support multi-module in both interp modes"
return 0
fi
fi
if [[ ${ENABLE_SIMD} -eq 1 ]]; then
if [[ "${RUNNING_MODE}" != "jit" && "${RUNNING_MODE}" != "aot" && "${RUNNING_MODE}" != "fast-interp" ]]; then
echo "support simd in llvm-jit, aot and fast-interp mode"
return 0;
fi
fi
# filter out uncompatible running mode based on SGX support
if [[ ${SGX_OPT} == "--sgx" ]]; then
if [[ "${RUNNING_MODE}" != "classic-interp" \
&& "${RUNNING_MODE}" != "fast-interp" \
&& "${RUNNING_MODE}" != "aot" \
&& "${RUNNING_MODE}" != "fast-jit" ]]; then
echo "support sgx in both interp modes, fast-jit mode and aot mode"
return 0
fi
fi
# filter out incompatible running modes based on architecture
if [[ ${TARGET} == "X86_32" ]]; then
if [[ "${RUNNING_MODE}" == "jit" || "${RUNNING_MODE}" == "fast-jit" || "${RUNNING_MODE}" == "multi-tier-jit" ]]; then
echo "both llvm-jit, fast-jit and multi-tier-jit mode do not support X86_32 target"
return 0;
fi
if [[ ${ENABLE_MEMORY64} -eq 1 ]]; then
echo "memory64 does not support X86_32 target"
return 0;
fi
if [[ ${ENABLE_MULTI_MEMORY} -eq 1 ]]; then
echo "multi-memory does not support X86_32 target"
return 0;
fi
if [[ ${ENABLE_SIMD} -eq 1 ]]; then
echo "simd does not support X86_32 target"
return 0; return 0;
fi fi
fi fi

View File

@ -6,4 +6,3 @@ cmake_minimum_required (VERSION 3.14)
project(wamr-wasi-extensions LANGUAGES C) project(wamr-wasi-extensions LANGUAGES C)
add_subdirectory(nn) add_subdirectory(nn)
add_subdirectory(socket)

View File

@ -1,12 +0,0 @@
# Copyright (C) 2025 Midokura Japan KK. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
cmake_minimum_required(VERSION 3.14)
set(CMAKE_C_STANDARD 99)
set(CMAKE_C_STANDARD_REQUIRED YES)
project(nn-cli LANGUAGES C)
add_executable(nn-cli main.c fileio.c map.c)
find_package(wamr-wasi-nn REQUIRED)
target_link_libraries(nn-cli wamr-wasi-nn)

View File

@ -1,73 +0,0 @@
/*
* Copyright (C) 2025 Midokura Japan KK. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
/*
* modified copy-and-paste from:
* https://github.com/yamt/toywasm/blob/0eaad8cacd0cc7692946ff19b25994f106113be8/lib/fileio.c
*/
#include <sys/stat.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>
#include "fileio.h"
int
map_file(const char *path, void **pp, size_t *sizep)
{
void *p;
size_t size;
ssize_t ssz;
int fd;
int ret;
fd = open(path, O_RDONLY);
if (fd == -1) {
ret = errno;
assert(ret != 0);
return ret;
}
struct stat st;
ret = fstat(fd, &st);
if (ret == -1) {
ret = errno;
assert(ret != 0);
close(fd);
return ret;
}
size = st.st_size;
if (size > 0) {
p = malloc(size);
}
else {
/* avoid malloc(0), which may return NULL and be misreported as ENOMEM */
p = malloc(1);
}
if (p == NULL) {
close(fd);
return ENOMEM;
}
ssz = read(fd, p, size);
if (ssz == -1 || (size_t)ssz != size) {
ret = errno != 0 ? errno : EIO;
free(p);
close(fd);
return ret;
}
close(fd);
*pp = p;
*sizep = size;
return 0;
}
void
unmap_file(void *p, size_t sz)
{
free(p);
}

View File

@ -1,14 +0,0 @@
/*
* Copyright (C) 2025 Midokura Japan KK. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
/*
* modified copy-and-paste from:
* https://github.com/yamt/toywasm/blob/0eaad8cacd0cc7692946ff19b25994f106113be8/lib/fileio.h
*/
int
map_file(const char *filename, void **pp, size_t *szp);
void
unmap_file(void *p, size_t sz);

View File

@ -1,497 +0,0 @@
/*
* Copyright (C) 2025 Midokura Japan KK. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <sys/stat.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <wamr/wasi_ephemeral_nn.h>
#include "fileio.h"
#include "map.h"
static struct map graphs;
static struct map contexts;
static void
load_graph(char *options)
{
int target = wasi_ephemeral_nn_target_cpu;
int encoding = wasi_ephemeral_nn_encoding_openvino;
const char *id = "default";
wasi_ephemeral_nn_graph_builder *builders = NULL;
size_t nbuilders = 0;
enum {
opt_id,
opt_file,
opt_encoding,
opt_target,
};
static char *const keylistp[] = {
[opt_id] = "id",
[opt_file] = "file",
[opt_encoding] = "encoding",
[opt_target] = "target",
NULL,
};
while (*options) {
extern char *suboptarg;
char *value;
const char *saved = options;
switch (getsubopt(&options, keylistp, &value)) {
case opt_id:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
id = value;
break;
case opt_file:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
builders =
realloc(builders, (nbuilders + 1) * sizeof(*builders));
if (builders == NULL) {
exit(1);
}
wasi_ephemeral_nn_graph_builder *b = &builders[nbuilders++];
int ret = map_file(value, (void *)&b->buf, (void *)&b->size);
if (ret != 0) {
fprintf(stderr, "map_file \"%s\" failed: %s\n", value,
strerror(ret));
exit(1);
}
break;
case opt_encoding:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
encoding = atoi(value);
break;
case opt_target:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
target = atoi(value);
break;
case -1:
fprintf(stderr, "unknown subopt %s\n", saved);
exit(2);
}
}
wasi_ephemeral_nn_error nnret;
wasi_ephemeral_nn_graph g;
nnret = wasi_ephemeral_nn_load(builders, nbuilders, encoding, target, &g);
size_t i;
for (i = 0; i < nbuilders; i++) {
wasi_ephemeral_nn_graph_builder *b = &builders[i];
unmap_file(b->buf, b->size);
}
if (nnret != wasi_ephemeral_nn_error_success) {
fprintf(stderr, "load failed with %d\n", (int)nnret);
exit(1);
}
map_set(&graphs, id, g);
}
static void
init_execution_context(char *options)
{
const char *id = "default";
const char *graph_id = "default";
enum {
opt_id,
opt_graph_id,
};
static char *const keylistp[] = {
[opt_id] = "id",
[opt_graph_id] = "graph-id",
NULL,
};
while (*options) {
extern char *suboptarg;
char *value;
const char *saved = options;
switch (getsubopt(&options, keylistp, &value)) {
case opt_id:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
id = value;
break;
case opt_graph_id:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
graph_id = value;
break;
case -1:
fprintf(stderr, "unknown subopt %s\n", saved);
exit(2);
}
}
wasi_ephemeral_nn_graph g = map_get(&graphs, graph_id);
wasi_ephemeral_nn_graph_execution_context c;
wasi_ephemeral_nn_error nnret;
nnret = wasi_ephemeral_nn_init_execution_context(g, &c);
if (nnret != wasi_ephemeral_nn_error_success) {
fprintf(stderr, "init_execution_context failed with %d\n", (int)nnret);
exit(1);
}
map_set(&contexts, id, c);
}
static void
set_input(char *options)
{
int ret;
const char *context_id = "default";
uint32_t idx = 0;
wasi_ephemeral_nn_tensor tensor = {
.dimensions = { .buf = NULL, .size = 0, },
.type = wasi_ephemeral_nn_type_fp32,
.data = NULL,
};
void *buf = NULL;
size_t sz = 0;
enum {
opt_context_id,
opt_dim,
opt_type,
opt_idx,
opt_file,
};
static char *const keylistp[] = {
[opt_context_id] = "context-id",
[opt_dim] = "dim",
[opt_type] = "type",
[opt_idx] = "idx",
[opt_file] = "file",
NULL,
};
while (*options) {
extern char *suboptarg;
char *value;
const char *saved = options;
switch (getsubopt(&options, keylistp, &value)) {
case opt_context_id:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
context_id = value;
break;
case opt_dim:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
wasi_ephemeral_nn_tensor_dimensions *dims = &tensor.dimensions;
dims->buf =
realloc(dims->buf, (dims->size + 1) * sizeof(*dims->buf));
if (dims->buf == NULL) {
exit(1);
}
dims->buf[dims->size++] = atoi(value);
break;
case opt_type:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
tensor.type = atoi(value);
break;
case opt_file:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
if (buf != NULL) {
fprintf(stderr, "duplicated tensor data\n");
exit(2);
}
ret = map_file(value, &buf, &sz);
if (ret != 0) {
fprintf(stderr, "map_file \"%s\" failed: %s\n", value,
strerror(ret));
exit(1);
}
break;
case opt_idx:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
idx = atoi(value);
break;
case -1:
fprintf(stderr, "unknown subopt %s\n", saved);
exit(2);
}
}
if (tensor.dimensions.size == 0) {
fprintf(stderr, "no dimension is given\n");
exit(2);
}
if (buf == NULL) {
fprintf(stderr, "no tensor is given\n");
exit(2);
}
/*
* REVISIT: we can check the tensor size against type/dimensions
* and warn the user if unexpected.
*/
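/*
* Illustrative sketch, not part of the original tool: a minimal form of
* the check suggested above, covering only the fp32 case; other tensor
* types would need their element sizes handled as well.
*/
if (tensor.type == wasi_ephemeral_nn_type_fp32) {
size_t expected = sizeof(float);
for (size_t j = 0; j < tensor.dimensions.size; j++) {
expected *= tensor.dimensions.buf[j];
}
if (sz != expected) {
fprintf(stderr, "warning: tensor data is %zu bytes, expected %zu\n",
sz, expected);
}
}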
wasi_ephemeral_nn_error nnret;
wasi_ephemeral_nn_graph_execution_context c =
map_get(&contexts, context_id);
tensor.data.buf = buf;
tensor.data.size = sz;
nnret = wasi_ephemeral_nn_set_input(c, idx, &tensor);
unmap_file(buf, sz);
if (nnret != wasi_ephemeral_nn_error_success) {
fprintf(stderr, "set_input failed with %d\n", (int)nnret);
exit(1);
}
}
static void
compute(char *options)
{
const char *context_id = "default";
enum {
opt_context_id,
};
static char *const keylistp[] = {
[opt_context_id] = "context-id",
NULL,
};
while (*options) {
extern char *suboptarg;
char *value;
const char *saved = options;
switch (getsubopt(&options, keylistp, &value)) {
case opt_context_id:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
context_id = value;
break;
case -1:
fprintf(stderr, "unknown subopt %s\n", saved);
exit(2);
}
}
wasi_ephemeral_nn_graph_execution_context c =
map_get(&contexts, context_id);
wasi_ephemeral_nn_error nnret;
nnret = wasi_ephemeral_nn_compute(c);
if (nnret != wasi_ephemeral_nn_error_success) {
fprintf(stderr, "compute failed with %d\n", (int)nnret);
exit(1);
}
}
static void
get_output(char *options)
{
int ret;
const char *outfile = NULL;
const char *context_id = "default";
uint32_t idx = 0;
enum {
opt_context_id,
opt_idx,
opt_file,
};
static char *const keylistp[] = {
[opt_context_id] = "context-id",
[opt_idx] = "idx",
[opt_file] = "file",
NULL,
};
while (*options) {
extern char *suboptarg;
char *value;
const char *saved = options;
switch (getsubopt(&options, keylistp, &value)) {
case opt_context_id:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
context_id = value;
break;
case opt_file:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
outfile = value;
break;
case opt_idx:
if (value == NULL) {
fprintf(stderr, "no value for %s\n", saved);
exit(2);
}
idx = atoi(value);
break;
case -1:
fprintf(stderr, "unknown subopt %s\n", saved);
exit(2);
}
}
int outfd = -1;
if (outfile != NULL) {
outfd = open(outfile, O_CREAT | O_TRUNC | O_WRONLY, 0644);
if (outfd == -1) {
fprintf(stderr, "failed to open output file \"%s\": %s\n", outfile,
strerror(errno));
exit(1);
}
}
wasi_ephemeral_nn_error nnret;
wasi_ephemeral_nn_graph_execution_context c =
map_get(&contexts, context_id);
void *resultbuf = NULL;
size_t resultbufsz = 256;
uint32_t resultsz;
retry:
resultbuf = realloc(resultbuf, resultbufsz);
if (resultbuf == NULL) {
exit(1);
}
nnret =
wasi_ephemeral_nn_get_output(c, idx, resultbuf, resultbufsz, &resultsz);
if (nnret == wasi_ephemeral_nn_error_too_large) {
resultbufsz *= 2;
goto retry;
}
if (nnret != wasi_ephemeral_nn_error_success) {
fprintf(stderr, "get_output failed with %d\n", (int)nnret);
exit(1);
}
if (outfd != -1) {
ssize_t written = write(outfd, resultbuf, resultsz);
if (written == -1) {
fprintf(stderr, "failed to write: %s\n", strerror(errno));
exit(1);
}
if ((size_t)written != resultsz) {
fprintf(stderr, "unexpected write length %zd (expected %zu)\n",
written, (size_t)resultsz);
exit(1);
}
ret = close(outfd);
if (ret != 0) {
fprintf(stderr, "failed to close: %s\n", strerror(errno));
exit(1);
}
}
else {
fprintf(stderr, "WARNING: discarding %zu bytes output\n",
(size_t)resultsz);
}
}
enum longopt {
opt_load_graph = 0x100,
opt_init_execution_context,
opt_set_input,
opt_compute,
opt_get_output,
};
static const struct option longopts[] = {
{
"load-graph",
required_argument,
NULL,
opt_load_graph,
},
{
"init-execution-context",
optional_argument,
NULL,
opt_init_execution_context,
},
{
"set-input",
required_argument,
NULL,
opt_set_input,
},
{
"compute",
optional_argument,
NULL,
opt_compute,
},
{
"get-output",
optional_argument,
NULL,
opt_get_output,
},
{
NULL,
0,
NULL,
0,
},
};
int
main(int argc, char **argv)
{
extern char *optarg;
int ch;
int longidx;
while ((ch = getopt_long(argc, argv, "", longopts, &longidx)) != -1) {
switch (ch) {
case opt_load_graph:
load_graph(optarg);
break;
case opt_init_execution_context:
init_execution_context(optarg ? optarg : "");
break;
case opt_set_input:
set_input(optarg);
break;
case opt_compute:
compute(optarg ? optarg : "");
break;
case opt_get_output:
get_output(optarg ? optarg : "");
break;
default:
exit(2);
}
}
exit(0);
}

View File

@ -1,58 +0,0 @@
/*
* Copyright (C) 2025 Midokura Japan KK. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "map.h"
static uintmax_t *
map_find_slot(struct map *m, const char *name)
{
size_t i;
for (i = 0; i < m->nentries; i++) {
if (!strcmp(m->entries[i].k, name)) {
return &m->entries[i].v;
}
}
return NULL;
}
static void
map_append(struct map *m, const char *k, uintmax_t v)
{
m->entries = realloc(m->entries, (m->nentries + 1) * sizeof(*m->entries));
if (m->entries == NULL) {
exit(1);
}
struct map_entry *e = &m->entries[m->nentries++];
e->k = k;
e->v = v;
}
void
map_set(struct map *m, const char *k, uintmax_t v)
{
uintmax_t *p = map_find_slot(m, k);
if (p != NULL) {
fprintf(stderr, "duplicated id \"%s\"\n", k);
exit(1);
}
map_append(m, k, v);
}
uintmax_t
map_get(struct map *m, const char *k)
{
uintmax_t *p = map_find_slot(m, k);
if (p == NULL) {
fprintf(stderr, "id \"%s\" not found\n", k);
exit(1);
}
return *p;
}

View File

@ -1,19 +0,0 @@
/*
* Copyright (C) 2025 Midokura Japan KK. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stddef.h>
#include <stdint.h>
struct map {
struct map_entry {
const char *k;
uintmax_t v;
} * entries;
size_t nentries;
};
void
map_set(struct map *m, const char *k, uintmax_t v);
uintmax_t
map_get(struct map *m, const char *k);

View File

@ -93,7 +93,7 @@ print_result(const float *result, size_t sz)
int int
main(int argc, char **argv) main(int argc, char **argv)
{ {
wasi_ephemeral_nn_error nnret; wasi_nn_error nnret;
int ret; int ret;
void *xml; void *xml;
size_t xmlsz; size_t xmlsz;
@ -112,27 +112,25 @@ main(int argc, char **argv)
exit(1); exit(1);
} }
/* note: openvino takes two buffers, namely IR and weights */ /* note: openvino takes two buffers, namely IR and weights */
wasi_ephemeral_nn_graph_builder builders[2] = { { graph_builder builders[2] = { {
.buf = xml, .buf = xml,
.size = xmlsz, .size = xmlsz,
}, },
{ {
.buf = weights, .buf = weights,
.size = weightssz, .size = weightssz,
} }; } };
wasi_ephemeral_nn_graph g; graph g;
nnret = nnret = load(builders, 2, openvino, cpu, &g);
wasi_ephemeral_nn_load(builders, 2, wasi_ephemeral_nn_encoding_openvino,
wasi_ephemeral_nn_target_cpu, &g);
unmap_file(xml, xmlsz); unmap_file(xml, xmlsz);
unmap_file(weights, weightssz); unmap_file(weights, weightssz);
if (nnret != wasi_ephemeral_nn_error_success) { if (nnret != success) {
fprintf(stderr, "load failed with %d\n", (int)nnret); fprintf(stderr, "load failed with %d\n", (int)nnret);
exit(1); exit(1);
} }
wasi_ephemeral_nn_graph_execution_context ctx; graph_execution_context ctx;
nnret = wasi_ephemeral_nn_init_execution_context(g, &ctx); nnret = init_execution_context(g, &ctx);
if (nnret != wasi_ephemeral_nn_error_success) { if (nnret != success) {
fprintf(stderr, "init_execution_context failed with %d\n", (int)nnret); fprintf(stderr, "init_execution_context failed with %d\n", (int)nnret);
exit(1); exit(1);
} }
@ -144,28 +142,26 @@ main(int argc, char **argv)
strerror(ret)); strerror(ret));
exit(1); exit(1);
} }
wasi_ephemeral_nn_tensor tensor = { tensor tensor = {
.dimensions = { .buf = (uint32_t[]){1, 3, 224, 224,}, .size = 4, }, .dimensions = { .buf = (uint32_t[]){1, 3, 224, 224,}, .size = 4, },
.type = wasi_ephemeral_nn_type_fp32, .type = fp32,
.data.buf = tensordata, .data = tensordata,
.data.size = tensordatasz,
}; };
nnret = wasi_ephemeral_nn_set_input(ctx, 0, &tensor); nnret = set_input(ctx, 0, &tensor);
unmap_file(tensordata, tensordatasz); unmap_file(tensordata, tensordatasz);
if (nnret != wasi_ephemeral_nn_error_success) { if (nnret != success) {
fprintf(stderr, "set_input failed with %d\n", (int)nnret); fprintf(stderr, "set_input failed with %d\n", (int)nnret);
exit(1); exit(1);
} }
nnret = wasi_ephemeral_nn_compute(ctx); nnret = compute(ctx);
if (nnret != wasi_ephemeral_nn_error_success) { if (nnret != success) {
fprintf(stderr, "compute failed with %d\n", (int)nnret); fprintf(stderr, "compute failed with %d\n", (int)nnret);
exit(1); exit(1);
} }
float result[1001]; float result[1001];
uint32_t resultsz; uint32_t resultsz;
nnret = wasi_ephemeral_nn_get_output(ctx, 0, (void *)result, sizeof(result), nnret = get_output(ctx, 0, (void *)result, sizeof(result), &resultsz);
&resultsz); if (nnret != success) {
if (nnret != wasi_ephemeral_nn_error_success) {
fprintf(stderr, "get_output failed with %d\n", (int)nnret); fprintf(stderr, "get_output failed with %d\n", (int)nnret);
exit(1); exit(1);
} }

View File

@ -1,11 +0,0 @@
cmake_minimum_required(VERSION 3.14)
set(CMAKE_C_STANDARD 99)
set(CMAKE_C_STANDARD_REQUIRED YES)
set(CMAKE_C_EXTENSIONS NO)
project(socket-nslookup LANGUAGES C)
add_executable(socket-nslookup ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/iwasm/libraries/lib-socket/test/nslookup.c)
find_package(wamr-wasi-socket REQUIRED)
target_link_libraries(socket-nslookup wamr-wasi-socket)
target_link_options(socket-nslookup PRIVATE -Wl,--max-memory=262144)

View File

@ -1,10 +0,0 @@
cmake_minimum_required(VERSION 3.14)
set(CMAKE_C_STANDARD 99)
set(CMAKE_C_STANDARD_REQUIRED YES)
project(socket-tcp-udp LANGUAGES C)
add_executable(socket-tcp-udp ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/iwasm/libraries/lib-socket/test/tcp_udp.c)
find_package(wamr-wasi-socket REQUIRED)
target_link_libraries(socket-tcp-udp wamr-wasi-socket)
target_link_options(socket-tcp-udp PRIVATE -Wl,--max-memory=262144)

View File

@ -1,26 +0,0 @@
set(wasi_socket_dir ${CMAKE_CURRENT_SOURCE_DIR}/../../core/iwasm/libraries/lib-socket)
set(wasi_socket_header_dir ${wasi_socket_dir}/inc)
set(srcs ${wasi_socket_dir}/src/wasi/wasi_socket_ext.c)
set(headers
${wasi_socket_header_dir}/wasi_socket_ext.h
)
add_library(wamr-wasi-socket STATIC ${srcs})
set_property(TARGET wamr-wasi-socket PROPERTY PUBLIC_HEADER ${headers})
target_include_directories(wamr-wasi-socket
PUBLIC
$<BUILD_INTERFACE:${wasi_socket_header_dir}>
$<INSTALL_INTERFACE:include>)
# as this is a library, be extra conservative about wasm features
# to improve compatibility. as this particular library is just a
# simple static stub, extra wasm features won't benefit us much anyway.
# note that LLVM-19 enables reference-types by default.
target_compile_options(wamr-wasi-socket PRIVATE -mno-reference-types)
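# (illustrative sketch, not part of this file: a consumer with the same
# compatibility concern could pass the identical option for its own,
# hypothetical, application target, e.g.
#   target_compile_options(my-app PRIVATE -mno-reference-types)
# so that the application's own objects avoid reference-types as well.)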
install(TARGETS wamr-wasi-socket
EXPORT wamr-wasi-socket-config
PUBLIC_HEADER DESTINATION include)
install(EXPORT wamr-wasi-socket-config
DESTINATION lib/cmake/wamr-wasi-socket)

View File

@ -3,8 +3,6 @@
# Copyright (C) 2025 Midokura Japan KK. All rights reserved. # Copyright (C) 2025 Midokura Japan KK. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set -e
PREFIX=/tmp/wamr PREFIX=/tmp/wamr
WASI_SDK=${WASI_SDK:-/opt/wasi-sdk} WASI_SDK=${WASI_SDK:-/opt/wasi-sdk}
@ -19,21 +17,3 @@ cmake -B build-app-nn \
-DCMAKE_PREFIX_PATH=${PREFIX} \ -DCMAKE_PREFIX_PATH=${PREFIX} \
samples/nn samples/nn
cmake --build build-app-nn cmake --build build-app-nn
cmake -B build-app-nn-cli \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/nn-cli
cmake --build build-app-nn-cli
cmake -B build-app-socket-nslookup \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk-pthread.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/socket-nslookup
cmake --build build-app-socket-nslookup
cmake -B build-app-socket-tcp-udp \
-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk-pthread.cmake \
-DCMAKE_PREFIX_PATH=${PREFIX} \
samples/socket-tcp-udp
cmake --build build-app-socket-tcp-udp