Compare commits

..

2 Commits

Author SHA1 Message Date
liang.he@intel.com
a09b6ad6b9 for test 2025-05-19 01:53:50 +00:00
liang.he@intel.com
8413c13115 Use a customized codeql configuration
- Specifying directories to scan
- Refactor build script for WAMR project
  - add functions for wamrc and iwasm builds
  - streamline options handling
  - include LLVM installation steps.
- Filter out source code related to dependencies, testing,
  and wasm applications
- Exclude unimportant issues and coding style problems
2025-05-19 01:47:47 +00:00
127 changed files with 1228 additions and 3141 deletions

View File

@ -30,23 +30,14 @@ runs:
if: ${{ startsWith(inputs.os, 'ubuntu') }}
shell: bash
run: |
echo "Downloading wasi-sdk for Ubuntu..."
sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-linux.tar.gz
echo "Extracting wasi-sdk..."
sudo tar -xf wasi-sdk.tar.gz
sudo ln -sf wasi-sdk-25.0-x86_64-linux/ wasi-sdk
echo "Downloading wabt for Ubuntu..."
sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-ubuntu-20.04.tar.gz
echo "Extracting wabt..."
sudo tar -xf wabt.tar.gz
sudo ln -sf wabt-1.0.37 wabt
/opt/wasi-sdk/bin/clang --version
/opt/wabt/bin/wasm-interp --version
echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on ubuntu"
working-directory: /opt
@ -54,23 +45,14 @@ runs:
if: ${{ inputs.os == 'macos-13' }}
shell: bash
run: |
echo "Downloading wasi-sdk for macOS-13..."
sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-macos.tar.gz
echo "Extracting wasi-sdk..."
sudo tar -xf wasi-sdk.tar.gz
sudo ln -sf wasi-sdk-25.0-x86_64-macos wasi-sdk
echo "Downloading wabt for macOS-13..."
sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.36/wabt-1.0.36-macos-12.tar.gz
echo "Extracting wabt..."
sudo tar -xf wabt.tar.gz
sudo ln -sf wabt-1.0.36 wabt
/opt/wasi-sdk/bin/clang --version
/opt/wabt/bin/wasm-interp --version
echo "::notice::wasi-sdk-25 and wabt-1.0.36 installed on macos-13"
working-directory: /opt
@ -78,48 +60,21 @@ runs:
if: ${{ inputs.os == 'macos-14' }}
shell: bash
run: |
echo "Downloading wasi-sdk for macOS-14..."
sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-arm64-macos.tar.gz
echo "Extracting wasi-sdk..."
sudo tar -xf wasi-sdk.tar.gz
sudo ln -sf wasi-sdk-25.0-arm64-macos wasi-sdk
echo "Downloading wabt for macOS-14..."
sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-macos-14.tar.gz
echo "Extracting wabt..."
sudo tar -xf wabt.tar.gz
sudo ln -sf wabt-1.0.37 wabt
/opt/wasi-sdk/bin/clang --version
/opt/wabt/bin/wasm-interp --version
echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on macos-14"
working-directory: /opt
#TODO: Add support for Windows
- name: Set up wasi-sdk and wabt on Windows
if: ${{ startsWith(inputs.os, 'windows') }}
shell: bash
shell: powershell
run: |
choco install -y wget
mkdir -p /opt/wasi-sdk
mkdir -p /opt/wabt
echo "Downloading wasi-sdk for Windows..."
wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-windows.tar.gz
echo "Extracting wasi-sdk..."
tar --strip-components=1 -xf wasi-sdk.tar.gz -C /opt/wasi-sdk
echo "Downloading wabt for Windows..."
wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-windows.tar.gz
echo "Extracting wabt..."
tar --strip-components=1 -xf wabt.tar.gz -C /opt/wabt
/opt/wasi-sdk/bin/clang --version
/opt/wabt/bin/wasm-interp --version
echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on Windows"
echo "::notice::Support for Windows is not implemented yet"
exit 1

View File

@ -11,18 +11,13 @@
sudo apt -qq update
sudo apt install -y -qq build-essential cmake g++-multilib libgcc-12-dev lib32gcc-12-dev ccache ninja-build
LLVM_VER=18.1.8
LLVM_VER=15.0.6
pushd /opt
sudo wget --progress=dot:giga -O clang+llvm-x86_64-linux-gnu.tar.xz https://github.com/llvm/llvm-project/releases/download/llvmorg-${LLVM_VER}/clang+llvm-${LLVM_VER}-x86_64-linux-gnu-ubuntu-18.04.tar.xz \
&& tar -xf clang+llvm-x86_64-linux-gnu.tar.xz \
&& mv clang+llvm-${LLVM_VER}-x86_64-linux-gnu-ubuntu-18.04 llvm-${LLVM_VER}
popd
# libtinfo.so.5 for /opt/llvm-18.1.8/lib/libomptarget.rtl.amdgpu.so.18.1
sudo apt -qq update
wget http://security.ubuntu.com/ubuntu/pool/universe/n/ncurses/libtinfo5_6.3-2ubuntu0.1_amd64.deb
sudo apt install -y -qq ./libtinfo5_6.3-2ubuntu0.1_amd64.deb
# Start the build process
WAMR_DIR=${PWD}
LLVM_DIR=/opt/llvm-${LLVM_VER}/lib/cmake/llvm

View File

@ -23,7 +23,7 @@ on:
type: string
required: true
upload_url:
description: upload binary assets to the URL of release
description: a semantic version number. it is required when `release` is true.
type: string
required: false
ver_num:
@ -137,8 +137,7 @@ jobs:
- name: compress the binary on non-Windows
if: inputs.runner != 'windows-latest'
run: |
# Follow the symlink to the actual binary file
tar --dereference -czf iwasm${{ matrix.suffix }}-${{ inputs.ver_num }}-${{ inputs.runner }}.tar.gz iwasm
tar czf iwasm${{ matrix.suffix }}-${{ inputs.ver_num }}-${{ inputs.runner }}.tar.gz iwasm
zip iwasm${{ matrix.suffix }}-${{ inputs.ver_num }}-${{ inputs.runner }}.zip iwasm
working-directory: ${{ inputs.cwd }}/build

View File

@ -1,57 +0,0 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
name: build wamr_wasi_extensions release
on:
workflow_call:
inputs:
upload_url:
description: upload binary assets to the URL of release
type: string
required: false
ver_num:
description: a semantic version number. it is required when `release` is true.
type: string
required: false
permissions:
contents: read
jobs:
build_wamr_wasi_extensions:
runs-on: ${{ matrix.os }}
permissions:
contents: write # for uploading release artifacts
strategy:
matrix:
os: [ubuntu-22.04]
steps:
- name: checkout
uses: actions/checkout@v4
- name: install-wasi-sdk-wabt
uses: ./.github/actions/install-wasi-sdk-wabt
with:
os: ${{ matrix.os }}
- name: Build wamr-wasi-extensions
run: |
mkdir dist
./build_libs.sh $(pwd)/dist/wamr-wasi-extensions
working-directory: wamr-wasi-extensions
- name: Compress the binary
run: |
zip -r wamr-wasi-extensions-${{ inputs.ver_num }}.zip wamr-wasi-extensions
working-directory: wamr-wasi-extensions/dist
- name: Upload release zip
uses: actions/upload-release-asset@v1
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
with:
upload_url: ${{ inputs.upload_url }}
asset_path: wamr-wasi-extensions/dist/wamr-wasi-extensions-${{ inputs.ver_num }}.zip
asset_name: wamr-wasi-extensions-${{ inputs.ver_num }}.zip
asset_content_type: application/zip

View File

@ -23,7 +23,7 @@ on:
type: string
required: true
upload_url:
description: upload binary assets to the URL of release
description: a semantic version number. it is required when `release` is true.
type: string
required: false
ver_num:
@ -73,8 +73,7 @@ jobs:
- name: compress the binary on non-Windows
if: inputs.runner != 'windows-latest' && inputs.release
run: |
# Follow the symlink to the actual binary file
tar --dereference -czf wamrc-${{ inputs.ver_num }}-${{ inputs.runner }}.tar.gz wamrc
tar czf wamrc-${{ inputs.ver_num }}-${{ inputs.runner }}.tar.gz wamrc
zip wamrc-${{ inputs.ver_num }}-${{ inputs.runner }}.zip wamrc
working-directory: wamr-compiler/build

View File

@ -19,14 +19,13 @@ jobs:
analyze:
# only run this job if the repository is not a fork
# if want to run this job on a fork, please remove the if condition
if: github.repository == 'bytecodealliance/wasm-micro-runtime'
# if: github.repository == 'bytecodealliance/wasm-micro-runtime'
name: Analyze
# Runner size impacts CodeQL analysis time. To learn more, please see:
# - https://gh.io/recommended-hardware-resources-for-running-codeql
# - https://gh.io/supported-runners-and-hardware-resources
# - https://gh.io/using-larger-runners
# Consider using larger runners for possible analysis time improvements.
# But it is not free, so please be aware of the cost.
runs-on: ubuntu-22.04
timeout-minutes: 360
@ -49,7 +48,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v3.29.1
uses: github/codeql-action/init@v3.28.17
with:
languages: ${{ matrix.language }}
# For more details on CodeQL's query packs, refer to: https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning#using-queries-in-ql-packs
@ -61,36 +60,29 @@ jobs:
./.github/scripts/codeql_buildscript.sh
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v3.29.1
uses: github/codeql-action/analyze@v3.28.17
with:
category: "/language:${{matrix.language}}"
upload: false
id: step1
# - cpp/alloca-in-loop is about touch_pages() which is intended to
# - cpp/command-line-injection is about bh_system() which is used to
# - cpp/path-injection is used in bh_read_file_to_buffer() to load a .wasm.
# or operate a stack usage file which is not sensitive or generate a .aot
# - cpp/suspicious-pointer-scaling
# - wasm_runtime_invoke_native() used to trivial registers
# - cpp/uncontrolled-process-operation is about dlopen() which is used by
# native libraries registrations.
# - cpp/world-writable-file-creation is about fopen() a temporary file
# for perf-PID.map or .aot(wamrc). The permission isn't sensitive.
# file.
# TODO: need to reconsider whether to filter out
# those rules after cpp/use-of-goto
#
# cpp/uncontrolled-process-operation is about dlopen() which is used by
# native libraries registrations.
#
# cpp/alloca-in-loop is about touch_pages() which is intended to
#
# execute customized compiler
- name: Filter out unwanted errors and warnings
uses: advanced-security/filter-sarif@v1
with:
patterns: |
## Exclude files and directories
-**/build/**
-**/core/deps/**
-**/cmake*/Modules/**
-**/test*/**
-**/wasm-app*/**
## Exclude rules 1. Related to formatting, style
-**:cpp/commented-out-code
-**:cpp/complex-condition
-**:cpp/empty-if
@ -103,18 +95,24 @@ jobs:
-**:cpp/unused-static-function
-**:cpp/unused-static-variable
-**:cpp/use-of-goto
## Exclude rules 2. Related to special usage of APIs
-**:cpp/alloca-in-loop
-**:cpp/command-line-injection
-product-mini/platforms/posix/main.c:cpp/uncontrolled-process-operation
-wamr-compiler/main.c:cpp/uncontrolled-process-operation
-core/shared/platform/common/posix/posix_thread.c:cpp/alloca-in-loop
-**:cpp/path-injection
-core/iwasm/common/wasm_runtime_common.c:cpp/suspicious-pointer-scaling
-**:cpp/uncontrolled-process-operation
-**:cpp/world-writable-file-creation
-**:cpp/potentially-dangerous-function
-**:cpp/integer-multiplication-cast-to-long
-**:cpp/comparison-with-wider-type
-**:cpp/leap-year/*
-**:cpp/ambiguously-signed-bit-field
-**:cpp/suspicious-pointer-scaling
-**:cpp/suspicious-pointer-scaling-void
-**:cpp/unsigne-comparison-zero
input: ${{ steps.step1.outputs.sarif-output }}/cpp.sarif
output: ${{ steps.step1.outputs.sarif-output }}/cpp.sarif
- name: Upload CodeQL results to code scanning
uses: github/codeql-action/upload-sarif@v3.29.1
uses: github/codeql-action/upload-sarif@v3.28.17
with:
sarif_file: ${{ steps.step1.outputs.sarif-output }}
category: "/language:${{matrix.language}}"

View File

@ -315,10 +315,6 @@ jobs:
fail-fast: false
matrix:
os: [ubuntu-22.04]
build_target: [
"X86_64",
"X86_32",
]
include:
- os: ubuntu-22.04
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2204.outputs.cache_key }}
@ -355,16 +351,10 @@ jobs:
cmake --build . --config Release --parallel 4
working-directory: wamr-compiler
- name: Install dependencies for X86_32
if: matrix.build_target == 'X86_32'
run: |
sudo apt-get update
sudo apt-get install -y g++-multilib
- name: Build and run unit tests
run: |
mkdir build && cd build
cmake .. -DWAMR_BUILD_TARGET=${{ matrix.build_target }}
cmake ..
cmake --build . --config Release --parallel 4
ctest
working-directory: tests/unit
@ -437,11 +427,6 @@ jobs:
ctest --test-dir build --output-on-failure
working-directory: samples/wasm-c-api
- name: Build Sample [printversion]
run: |
./test.sh
working-directory: samples/printversion
build_samples_others:
needs:
[
@ -618,6 +603,49 @@ jobs:
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2204.outputs.cache_key }}
running_mode: aot
test_option: $WAMR_COMPILER_TEST_OPTIONS
exclude:
# incompatible modes and features
# classic-interp doesn't support simd
- running_mode: "classic-interp"
test_option: $SIMD_TEST_OPTIONS
# llvm jit doesn't support multi module
- running_mode: "jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
# fast-jit doesn't support multi module, simd
- running_mode: "fast-jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
- running_mode: "fast-jit"
test_option: $SIMD_TEST_OPTIONS
# multi-tier-jit doesn't support multi module, simd
- running_mode: "multi-tier-jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $SIMD_TEST_OPTIONS
# fast-jit and multi-tier-jit don't support GC
- running_mode: "fast-jit"
test_option: $GC_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $GC_TEST_OPTIONS
# fast-interp, fast-jit, llvm-jit, multi-tier-jit don't support Memory64
- running_mode: "fast-interp"
test_option: $MEMORY64_TEST_OPTIONS
- running_mode: "fast-jit"
test_option: $MEMORY64_TEST_OPTIONS
- running_mode: "jit"
test_option: $MEMORY64_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $MEMORY64_TEST_OPTIONS
# aot, fast-interp, fast-jit, llvm-jit, multi-tier-jit don't support Multi Memory
- running_mode: "aot"
test_option: $MULTI_MEMORY_TEST_OPTIONS
- running_mode: "fast-interp"
test_option: $MULTI_MEMORY_TEST_OPTIONS
- running_mode: "fast-jit"
test_option: $MULTI_MEMORY_TEST_OPTIONS
- running_mode: "jit"
test_option: $MULTI_MEMORY_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $MULTI_MEMORY_TEST_OPTIONS
steps:
- name: checkout
@ -639,6 +667,15 @@ jobs:
with:
os: ${{ matrix.os }}
# It is a temporary solution until new wasi-sdk that includes bug fixes is released
- name: build wasi-libc from source
if: matrix.test_option == '$WASI_TEST_OPTIONS'
run: |
git clone https://github.com/WebAssembly/wasi-libc
cd wasi-libc
make -j AR=/opt/wasi-sdk/bin/llvm-ar NM=/opt/wasi-sdk/bin/llvm-nm CC=/opt/wasi-sdk/bin/clang THREAD_MODEL=posix
echo "SYSROOT_PATH=$PWD/sysroot" >> $GITHUB_ENV
- name: set env variable(if llvm are used)
if: matrix.running_mode == 'aot' || matrix.running_mode == 'jit' || matrix.running_mode == 'multi-tier-jit'
run: echo "USE_LLVM=true" >> $GITHUB_ENV
@ -675,7 +712,7 @@ jobs:
- name: Build WASI thread tests
if: matrix.test_option == '$WASI_TEST_OPTIONS'
run: bash build.sh
run: bash build.sh --sysroot "$SYSROOT_PATH"
working-directory: ./core/iwasm/libraries/lib-wasi-threads/test/
- name: build socket api tests
@ -721,3 +758,123 @@ jobs:
eval $(opam env)
./test_wamr.sh ${{ env.X86_32_TARGET_TEST_OPTIONS }} ${{ matrix.test_option }} -t ${{ matrix.running_mode }}
working-directory: ./tests/wamr-test-suites
test-wamr-ide:
needs:
[
build_iwasm
]
runs-on: ubuntu-22.04
env:
PYTHON_VERSION: '3.10'
PYTHON_UBUNTU_STANDALONE_BUILD: https://github.com/indygreg/python-build-standalone/releases/download/20230507/cpython-3.10.11+20230507-x86_64-unknown-linux-gnu-install_only.tar.gz
steps:
- name: checkout
uses: actions/checkout@v4
- name: install dependencies
run: |
rustup target add wasm32-wasip1
sudo apt update && sudo apt-get install -y lld ninja-build
npm install
working-directory: test-tools/wamr-ide/VSCode-Extension
- name: code style check
run: |
npm install --save-dev prettier
npm run prettier-format-check
working-directory: test-tools/wamr-ide/VSCode-Extension
- name: build iwasm with source debugging feature
run: |
mkdir build
cd build
cmake .. -DWAMR_BUILD_DEBUG_INTERP=1 -DWAMR_BUILD_REF_TYPES=1
make
working-directory: product-mini/platforms/linux
- name: Cache LLDB
id: cache-lldb
uses: actions/cache@v4
env:
cache-name: cache-lldb-vscode
with:
path: test-tools/wamr-ide/VSCode-Extension/resource/debug/linux
key: ${{ env.cache-name }}-${{ hashFiles('build-scripts/lldb_wasm.patch') }}-${{ env.PYTHON_UBUNTU_STANDALONE_BUILD }}
- if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }}
name: get stand-alone python ubuntu
run: |
wget ${{ env.PYTHON_UBUNTU_STANDALONE_BUILD }} -O python.tar.gz
tar -xvf python.tar.gz
working-directory: core/deps
- if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }}
name: download llvm
run: |
wget https://github.com/llvm/llvm-project/archive/1f27fe6128769f00197925c3b8f6abb9d0e5cd2e.zip
unzip -q 1f27fe6128769f00197925c3b8f6abb9d0e5cd2e.zip
mv llvm-project-1f27fe6128769f00197925c3b8f6abb9d0e5cd2e llvm-project
working-directory: core/deps
- if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }}
name: apply wamr patch
run: |
git init
git config user.email "action@github.com"
git config user.name "github action"
git apply ../../../build-scripts/lldb_wasm.patch
working-directory: core/deps/llvm-project
- if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }}
name: build lldb ubuntu
run: |
echo "start to build lldb..."
mkdir -p wamr-lldb
cmake -S ./llvm -B build \
-G Ninja \
-DCMAKE_INSTALL_PREFIX=../wamr-lldb \
-DCMAKE_BUILD_TYPE:STRING="Release" \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
-DLLVM_ENABLE_PROJECTS="clang;lldb" \
-DLLVM_TARGETS_TO_BUILD:STRING="X86;WebAssembly" \
-DLLVM_BUILD_BENCHMARKS:BOOL=OFF \
-DLLVM_BUILD_DOCS:BOOL=OFF \
-DLLVM_BUILD_EXAMPLES:BOOL=OFF \
-DLLVM_BUILD_LLVM_DYLIB:BOOL=OFF \
-DLLVM_BUILD_TESTS:BOOL=OFF \
-DLLVM_INCLUDE_BENCHMARKS:BOOL=OFF \
-DLLVM_INCLUDE_DOCS:BOOL=OFF \
-DLLVM_INCLUDE_EXAMPLES:BOOL=OFF \
-DLLVM_INCLUDE_TESTS:BOOL=OFF \
-DLLVM_ENABLE_BINDINGS:BOOL=OFF \
-DLLVM_ENABLE_LIBXML2:BOOL=ON \
-DLLVM_ENABLE_LLD:BOOL=ON \
-DLLDB_ENABLE_PYTHON:BOOL=ON \
-DLLDB_EMBED_PYTHON_HOME=ON \
-DLLDB_PYTHON_HOME=.. \
-DLLDB_PYTHON_RELATIVE_PATH=lib/lldb-python \
-DPython3_EXECUTABLE="$(pwd)/../python/bin/python${{ env.PYTHON_VERSION }}"
cmake --build build --target lldb install --parallel $(nproc)
working-directory: core/deps/llvm-project
- if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }}
name: copy lldb to extension folder
run: |
mkdir -p bin
mkdir -p lib
cp ../../../../../../core/deps/llvm-project/lldb/tools/lldb-vscode/package.json ./
cp -r ../../../../../../core/deps/llvm-project/lldb/tools/lldb-vscode/syntaxes/ ./
cp ../../../../../../core/deps/llvm-project/build/bin/lldb* bin
cp ../../../../../../core/deps/llvm-project/build/lib/liblldb*.so lib
cp ../../../../../../core/deps/llvm-project/build/lib/liblldb*.so.* lib
cp -R ../../../../../../core/deps/llvm-project/build/lib/lldb-python lib
cp -R ../../../../../../core/deps/python/lib/python* lib
cp ../../../../../../core/deps/python/lib/libpython${{ env.PYTHON_VERSION }}.so.1.0 lib
working-directory: test-tools/wamr-ide/VSCode-Extension/resource/debug/linux
- name: run tests
timeout-minutes: 5
run: xvfb-run npm run test
working-directory: test-tools/wamr-ide/VSCode-Extension

View File

@ -282,11 +282,6 @@ jobs:
ctest --test-dir build --output-on-failure
working-directory: samples/wasm-c-api
- name: Build Sample [printversion]
run: |
./test.sh
working-directory: samples/printversion
build_samples_others:
needs: [build_iwasm, build_wamrc, build_llvm_libraries_on_intel_macos, build_llvm_libraries_on_arm_macos]
runs-on: ${{ matrix.os }}

View File

@ -290,6 +290,28 @@ jobs:
- name: run spec tests
run: |
set +e
source /opt/intel/sgxsdk/environment
attempts=0
max_attempts=3
while [ $attempts -lt $max_attempts ]; do
./test_wamr.sh ${{ matrix.test_option }} -t ${{ matrix.running_mode }}
exitcode="$?"
if [ $exitcode -eq 0 ]; then
echo "Spec test passed"
exit 0
elif [ $exitcode -ne 143 ]; then
echo "Spec test failed with error code $exitcode"
exit 1
fi
echo "$exitcode is a known GitHub-hosted runner issue"
echo "::notice::Re-running the spec test due to error code 143"
attempts=$((attempts + 1))
done
echo "::notice::Report an error with code 143 in SGX CI after $max_attempts attempts"
exit 143
working-directory: ./tests/wamr-test-suites

View File

@ -57,21 +57,11 @@ permissions:
contents: read
jobs:
build_llvm_libraries_on_windows:
permissions:
contents: read
actions: write
uses: ./.github/workflows/build_llvm_libraries.yml
with:
os: "windows-latest"
arch: "AArch64 ARM Mips RISCV X86"
build_iwasm:
build:
runs-on: windows-latest
strategy:
matrix:
build_options:
[
build_options: [
"-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_INTERP=0",
"-DWAMR_BUILD_AOT=0",
"-DWAMR_BUILD_TAIL_CALL=1",
@ -82,7 +72,7 @@ jobs:
"-DWAMR_BUILD_DEBUG_INTERP=1",
"-DWAMR_BUILD_LIB_PTHREAD=1",
"-DWAMR_BUILD_LIB_WASI_THREADS=1",
"-DWAMR_BUILD_LIBC_UVWASI=0 -DWAMR_BUILD_LIBC_WASI=1",
"-DWAMR_BUILD_LIBC_UVWASI=0 -DWAMR_BUILD_LIBC_WASI=1"
]
steps:
- uses: actions/checkout@v4
@ -99,49 +89,17 @@ jobs:
cmake .. ${{ matrix.build_options }}
cmake --build . --config Release --parallel 4
build_wamrc:
needs: [build_llvm_libraries_on_windows]
runs-on: ${{ matrix.os }}
strategy:
matrix:
include:
- os: windows-latest
llvm_cache_key: ${{ needs.build_llvm_libraries_on_windows.outputs.cache_key }}
steps:
- name: checkout
uses: actions/checkout@v4
# since jobs.id can't contain the dot character
# it is hard to use `format` to assemble the cache key
- name: Get LLVM libraries
id: retrieve_llvm_libs
uses: actions/cache@v4
with:
path: |
./core/deps/llvm/build/bin
./core/deps/llvm/build/include
./core/deps/llvm/build/lib
./core/deps/llvm/build/libexec
./core/deps/llvm/build/share
key: ${{ matrix.llvm_cache_key }}
- name: Quit if cache miss
if: steps.retrieve_llvm_libs.outputs.cache-hit != 'true'
run: echo "::error::can not get prebuilt llvm libraries" && exit 1
- name: Build wamrc
run: |
cmake -S . -B build
cmake --build build --config Release --parallel 4
working-directory: wamr-compiler
test:
runs-on: windows-latest
needs: [build_iwasm, build_wamrc]
needs: [build]
strategy:
fail-fast: false
matrix:
running_mode: ["classic-interp", "fast-interp"]
running_mode:
[
"classic-interp",
"fast-interp",
]
test_option:
[
$DEFAULT_TEST_OPTIONS,
@ -172,10 +130,6 @@ jobs:
run: ./build.sh
working-directory: ./core/iwasm/libraries/lib-wasi-threads/test/
- name: install wget
shell: bash
run: choco install wget
- name: run tests
shell: bash
timeout-minutes: 20

View File

@ -36,11 +36,12 @@ env:
LLVM_EAGER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=0 -DWAMR_BUILD_FAST_JIT=0 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=0"
MULTI_TIER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=1 -DWAMR_BUILD_FAST_JIT=1 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=1"
# For Spec Test
DEFAULT_TEST_OPTIONS: "-s spec -b -P"
MULTI_MODULES_TEST_OPTIONS: "-s spec -b -P -M"
SIMD_TEST_OPTIONS: "-s spec -b -P -S"
THREADS_TEST_OPTIONS: "-s spec -b -P -p"
X86_32_TARGET_TEST_OPTIONS: "-m x86_32"
# FIXME: use binary release(adding -b) instead of building from source after upgrading to 22.04
DEFAULT_TEST_OPTIONS: "-s spec -P"
MULTI_MODULES_TEST_OPTIONS: "-s spec -M -P"
SIMD_TEST_OPTIONS: "-s spec -S -P"
THREADS_TEST_OPTIONS: "-s spec -p -P"
X86_32_TARGET_TEST_OPTIONS: "-m x86_32 -P"
WASI_TEST_OPTIONS: "-s wasi_certification -w"
permissions:
@ -616,21 +617,42 @@ jobs:
sanitizer: tsan
- running_mode: "multi-tier-jit"
sanitizer: tsan
# simd128.h brings ubsan errors
# like: negation of XXXcannot be represented in type 'long int';
# cast to an unsigned type to negate this value to itself
# classic-interp and fast-interp don't support simd
- running_mode: "classic-interp"
test_option: $SIMD_TEST_OPTIONS
- running_mode: "fast-interp"
sanitizer: ubsan
test_option: $SIMD_TEST_OPTIONS
# llvm jit doesn't support multi module
- running_mode: "jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
# fast-jit doesn't support multi module, simd
- running_mode: "fast-jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
- running_mode: "fast-jit"
test_option: $SIMD_TEST_OPTIONS
# multi-tier-jit doesn't support multi module, simd
- running_mode: "multi-tier-jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $SIMD_TEST_OPTIONS
steps:
- name: checkout
uses: actions/checkout@v4
- name: install-wasi-sdk-wabt
if: matrix.test_option == '$WASI_TEST_OPTIONS'
uses: ./.github/actions/install-wasi-sdk-wabt
with:
os: ${{ matrix.os }}
# It is a temporary solution until new wasi-sdk that includes bug fixes is released
- name: build wasi-libc from source
if: matrix.test_option == '$WASI_TEST_OPTIONS'
run: |
git clone https://github.com/WebAssembly/wasi-libc
cd wasi-libc
make -j AR=/opt/wasi-sdk/bin/llvm-ar NM=/opt/wasi-sdk/bin/llvm-nm CC=/opt/wasi-sdk/bin/clang THREAD_MODEL=posix
echo "SYSROOT_PATH=$PWD/sysroot" >> $GITHUB_ENV
- name: set env variable(if llvm are used)
if: matrix.running_mode == 'aot' || matrix.running_mode == 'jit' || matrix.running_mode == 'multi-tier-jit'
run: echo "USE_LLVM=true" >> $GITHUB_ENV
@ -675,12 +697,12 @@ jobs:
- name: Build WASI thread tests
if: matrix.test_option == '$WASI_TEST_OPTIONS'
run: bash build.sh
run: bash build.sh --sysroot "$SYSROOT_PATH"
working-directory: ./core/iwasm/libraries/lib-wasi-threads/test/
- name: Build WASI thread stress tests
if: matrix.test_option == '$WASI_TEST_OPTIONS'
run: bash build.sh
run: bash build.sh --sysroot "$SYSROOT_PATH"
working-directory: ./core/iwasm/libraries/lib-wasi-threads/stress-test/
- name: build socket api tests

View File

@ -239,12 +239,3 @@ jobs:
arch: universal
upload_url: ${{ needs.create_release.outputs.upload_url }}
ver_num: ${{ needs.create_tag.outputs.new_ver}}
release_wamr_wasi_extensions:
permissions:
contents: write # upload release artifact
needs: [create_tag, create_release]
uses: ./.github/workflows/build_wamr_wasi_extensions.yml
with:
upload_url: ${{ needs.create_release.outputs.upload_url }}
ver_num: ${{ needs.create_tag.outputs.new_ver }}

View File

@ -39,7 +39,7 @@ jobs:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
with:
results_file: results.sarif
results_format: sarif
@ -60,6 +60,6 @@ jobs:
# Upload the results to GitHub's code scanning dashboard.
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@4c57370d0304fbff638216539f81d9163f77712a
uses: github/codeql-action/upload-sarif@5eb3ed6614230b1931d5c08df9e096e4ba524f21
with:
sarif_file: results.sarif

View File

@ -1,57 +0,0 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
name: wamr_wasi_extensions
on:
pull_request:
types:
- opened
- synchronize
paths:
- ".github/workflows/wamr_wasi_extensions.yml"
- "wamr_wasi_extensios/**"
- "core/iwasm/libraries/wasi-nn/include/**"
- "core/iwasm/libraries/lib-socket/**"
# allow to be triggered manually
workflow_dispatch:
# Cancel any in-flight jobs for the same PR/branch so there's only one active
# at a time
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build_wamr_wasi_extensions:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-22.04, macos-13, macos-14]
steps:
- name: checkout
uses: actions/checkout@v4
- name: install-wasi-sdk-wabt
uses: ./.github/actions/install-wasi-sdk-wabt
with:
os: ${{ matrix.os }}
- name: Build wamr-wasi-extensions
run: |
mkdir dist
./build_libs.sh $(pwd)/dist/wamr-wasi-extensions
working-directory: wamr-wasi-extensions
- name: Build wamr-wasi-extensions samples
run: |
./build_samples.sh $(pwd)/dist/wamr-wasi-extensions
working-directory: wamr-wasi-extensions
- name: Upload artifacts
if: matrix.os == 'macos-14'
uses: actions/upload-artifact@v4
with:
name: wamr-wasi-extensions
path: wamr-wasi-extensions/dist
retention-days: 10

View File

@ -99,9 +99,9 @@ if (NOT DEFINED WAMR_BUILD_LIB_WASI_THREADS)
set (WAMR_BUILD_LIB_WASI_THREADS 0)
endif ()
if (NOT DEFINED WAMR_BUILD_COPY_CALL_STACK)
if (NOT DEFINED WAMR_ENABLE_COPY_CALLSTACK)
# Disable copy callstack by default
set (WAMR_BUILD_COPY_CALL_STACK 0)
set (WAMR_ENABLE_COPY_CALLSTACK 0)
endif()
if (NOT DEFINED WAMR_BUILD_MINI_LOADER)
@ -152,11 +152,15 @@ include (${SHARED_DIR}/utils/uncommon/shared_uncommon.cmake)
set (THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
if (MSVC)
add_definitions(-DCOMPILING_WASM_RUNTIME_API=1)
endif ()
add_library (vmlib ${WAMR_RUNTIME_LIB_SOURCE})
set_target_properties (vmlib PROPERTIES OUTPUT_NAME iwasm)
target_include_directories(vmlib INTERFACE
$<BUILD_INTERFACE:${WAMR_ROOT_DIR}/core/iwasm/include>
$<INSTALL_INTERFACE:include>
$<INSTALL_INTERFACE:include/iwasm>
)
target_link_libraries (vmlib PUBLIC ${LLVM_AVAILABLE_LIBS} ${UV_A_LIBS} -lm -ldl ${CMAKE_THREAD_LIBS_INIT})
@ -185,7 +189,7 @@ set_version_info (vmlib)
install (TARGETS vmlib
EXPORT iwasmTargets
LIBRARY DESTINATION lib
PUBLIC_HEADER DESTINATION include
PUBLIC_HEADER DESTINATION include/iwasm
)
install_iwasm_package ()

View File

@ -21,7 +21,7 @@ WebAssembly Micro Runtime (WAMR) is a lightweight standalone WebAssembly (Wasm)
### Key features
- Full compliant to the W3C Wasm MVP
- Small runtime binary size (core vmlib on cortex-m4f with tail-call/bulk memory/shared memory support, text size from bloaty)
- Small runtime binary size (core vmlib on cortex-m4f with tail-call/bulk memroy/shared memroy support, text size from bloaty)
* ~58.9K for fast interpreter
* ~56.3K for classic interpreter
* ~29.4K for aot runtime

View File

@ -1,35 +1,3 @@
## WAMR-2.3.1
### Breaking Changes
- Revert the location to install public headers (#4295). This restores compatibility (of installed headers) with WAMR-2.2.0 and earlier.
### New Features
- feat: Add instruction metering for interpreter (#4122)
### Bug Fixes
- updating WASI stdio handle initialization and build options for UVWASI (#4260)
- Fix SIMD load lane to avoid incompatible pointer types (#4278)
- Fixed unit tests on X86_32 (#4279)
- Improve Embedding WAMR guideline (#4284)
- Fix Compiler Error C2491 (#4286)
- Enhance type checking for function types in loader and improve error handling (#4294)
- Dockerfile.vx-delegate build error fix (#4273)
- Enable runtime API exposure for MSVC builds (#4287)
### Enhancements
- feat(yml): Add ESP32-P4 and ESP32-C5 support (#4270)
- add a sample to use cmake package (#4291)
### Others
- build(deps): Bump github/codeql-action from 3.28.17 to 3.28.18 (#4285)
---
## WAMR-2.3.0
### Breaking changes
@ -497,7 +465,7 @@
- wasm loader: Fix handling if block without op else (#3404)
- ref-types: Correct default value for function local variables (#3397)
- aot compiler: Fix the length type passed to aot_memmove/aot_memset (#3378)
- Fix loader and mini-loader select potential error (#3374)
- Fix loader and mini-loader select potiential error (#3374)
- Fix aot debugger compilation error on windows (#3370)
- A few native stack detection fixes for macOS/arm64 (#3368)
- Fix ESP32-S3 compiling error (#3359)

View File

@ -334,10 +334,15 @@ if (WAMR_BUILD_SHARED_HEAP EQUAL 1)
add_definitions (-DWASM_ENABLE_SHARED_HEAP=1)
message (" Shared heap enabled")
endif()
if (WAMR_BUILD_COPY_CALL_STACK EQUAL 1)
add_definitions (-DWASM_ENABLE_COPY_CALL_STACK=1)
if (WAMR_ENABLE_COPY_CALLSTACK EQUAL 1)
add_definitions (-DWAMR_ENABLE_COPY_CALLSTACK=1)
message(" Copy callstack enabled")
else ()
add_definitions (-DWAMR_ENABLE_COPY_CALLSTACK=0)
message(" Copy callstack disabled")
endif()
if (WAMR_BUILD_MEMORY64 EQUAL 1)
# if native is 32-bit or cross-compiled to 32-bit
if (NOT WAMR_BUILD_TARGET MATCHES ".*64.*")
@ -534,9 +539,6 @@ if (WAMR_BUILD_WASI_NN EQUAL 1)
if (DEFINED WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH)
add_definitions (-DWASM_WASI_NN_EXTERNAL_DELEGATE_PATH="${WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH}")
endif ()
if (NOT DEFINED WAMR_BUILD_WASI_EPHEMERAL_NN)
set(WAMR_BUILD_WASI_EPHEMERAL_NN 1)
endif()
if (WAMR_BUILD_WASI_EPHEMERAL_NN EQUAL 1)
message (" WASI-NN: use 'wasi_ephemeral_nn' instead of 'wasi-nn'")
add_definitions (-DWASM_ENABLE_WASI_EPHEMERAL_NN=1)
@ -669,10 +671,6 @@ if (WAMR_BUILD_AOT_VALIDATOR EQUAL 1)
message (" AOT validator enabled")
add_definitions (-DWASM_ENABLE_AOT_VALIDATOR=1)
endif ()
if (WAMR_BUILD_INSTRUCTION_METERING EQUAL 1)
message (" Instruction metering enabled")
add_definitions (-DWASM_ENABLE_INSTRUCTION_METERING=1)
endif ()
########################################
# Show Phase4 Wasm proposals status.

View File

@ -5,11 +5,7 @@
if (NOT CMAKE_BUILD_EARLY_EXPANSION)
if (CONFIG_IDF_TARGET_ARCH_RISCV)
if (CONFIG_IDF_TARGET_ESP32P4)
set (WAMR_BUILD_TARGET "RISCV32_ILP32F")
else ()
set (WAMR_BUILD_TARGET "RISCV32_ILP32")
endif ()
set (WAMR_BUILD_TARGET "RISCV32")
elseif (CONFIG_IDF_TARGET_ARCH_XTENSA)
set (WAMR_BUILD_TARGET "XTENSA")
else ()
@ -93,11 +89,7 @@ idf_component_register(SRCS ${srcs}
target_compile_options(${COMPONENT_LIB} PRIVATE "-Wno-format")
if (CONFIG_IDF_TARGET_ARCH_RISCV)
if (CONFIG_IDF_TARGET_ESP32P4)
target_compile_definitions(${COMPONENT_LIB} PUBLIC -DBUILD_TARGET_RISCV32_ILP32F=1)
else ()
target_compile_definitions(${COMPONENT_LIB} PUBLIC -DBUILD_TARGET_RISCV32_ILP32=1)
endif ()
elseif (CONFIG_IDF_TARGET_ARCH_XTENSA)
target_compile_definitions(${COMPONENT_LIB} PUBLIC -DBUILD_TARGET_XTENSA=1)
endif ()

View File

@ -1 +1 @@
requests==2.32.4
requests==2.32.3

View File

@ -106,7 +106,6 @@ endif ()
if (WAMR_BUILD_WASI_NN EQUAL 1)
include (${IWASM_DIR}/libraries/wasi-nn/cmake/wasi_nn.cmake)
set (WAMR_BUILD_MODULE_INST_CONTEXT 1)
endif ()
if (WAMR_BUILD_LIB_PTHREAD EQUAL 1)
@ -194,13 +193,6 @@ else()
enable_language (ASM)
endif()
# it will expose the runtime APIs.
# you'll use the following command to check the exported APIs
# dumpbin.exe /EXPORTS xxx
if (MSVC)
add_compile_definitions(COMPILING_WASM_RUNTIME_API=1)
endif ()
include (${SHARED_PLATFORM_CONFIG})
include (${SHARED_DIR}/mem-alloc/mem_alloc.cmake)
include (${IWASM_DIR}/common/iwasm_common.cmake)

View File

@ -8,7 +8,7 @@ endif()
set(WAMR_VERSION_MAJOR 2)
set(WAMR_VERSION_MINOR 3)
set(WAMR_VERSION_PATCH 1)
set(WAMR_VERSION_PATCH 0)
message("-- WAMR version: ${WAMR_VERSION_MAJOR}.${WAMR_VERSION_MINOR}.${WAMR_VERSION_PATCH}")

View File

@ -4,6 +4,7 @@
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
import argparse
import re
from pathlib import Path
import re
import shlex
@ -38,7 +39,7 @@ INVALID_FILE_NAME_SEGMENT = r"([a-zA-Z0-9]+\-[a-zA-Z0-9]+)"
def locate_command(command: str) -> bool:
if not shutil.which(command):
print(f"Command '{command}' not found")
print(f"Command '{command}'' not found")
return False
return True

View File

@ -193,8 +193,8 @@
#error "Heap aux stack allocation must be enabled for WASI threads"
#endif
#ifndef WASM_ENABLE_COPY_CALL_STACK
#define WASM_ENABLE_COPY_CALL_STACK 0
#ifndef WAMR_ENABLE_COPY_CALLSTACK
#define WAMR_ENABLE_COPY_CALLSTACK 0
#endif
#ifndef WASM_ENABLE_BASE_LIB
@ -716,8 +716,4 @@ unless used elsewhere */
#define WASM_ENABLE_AOT_VALIDATOR 0
#endif
#ifndef WASM_ENABLE_INSTRUCTION_METERING
#define WASM_ENABLE_INSTRUCTION_METERING 0
#endif
#endif /* end of _CONFIG_H_ */

View File

@ -1309,13 +1309,6 @@ load_init_expr(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
read_uint32(buf, buf_end, type_idx);
read_uint32(buf, buf_end, length);
if (type_idx >= module->type_count
|| !wasm_type_is_array_type(module->types[type_idx])) {
set_error_buf(error_buf, error_buf_size,
"invalid or non-array type index.");
goto fail;
}
if (init_expr_type == INIT_EXPR_TYPE_ARRAY_NEW_DEFAULT) {
expr->u.array_new_default.type_index = type_idx;
expr->u.array_new_default.length = length;
@ -1730,12 +1723,6 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
(void)u8;
read_uint32(buf, buf_end, j);
#if WASM_ENABLE_AOT_VALIDATOR != 0
if (j >= module->type_count) {
set_error_buf(error_buf, error_buf_size, "invalid type index");
goto fail;
}
#endif
if (module->types[j]->ref_count == UINT16_MAX) {
set_error_buf(error_buf, error_buf_size,
"wasm type's ref count too large");
@ -1999,13 +1986,6 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
AOTType *cur_type = module->types[j];
parent_type_idx = cur_type->parent_type_idx;
if (parent_type_idx != (uint32)-1) { /* has parent */
#if WASM_ENABLE_AOT_VALIDATOR != 0
if (parent_type_idx >= module->type_count) {
set_error_buf(error_buf, error_buf_size,
"invalid parent type index");
goto fail;
}
#endif
AOTType *parent_type = module->types[parent_type_idx];
module->types[j]->parent_type = parent_type;
@ -2029,13 +2009,6 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module,
AOTType *cur_type = module->types[j];
parent_type_idx = cur_type->parent_type_idx;
if (parent_type_idx != (uint32)-1) { /* has parent */
#if WASM_ENABLE_AOT_VALIDATOR != 0
if (parent_type_idx >= module->type_count) {
set_error_buf(error_buf, error_buf_size,
"invalid parent type index");
goto fail;
}
#endif
AOTType *parent_type = module->types[parent_type_idx];
/* subtyping has been checked during compilation */
bh_assert(wasm_type_is_subtype_of(
@ -3350,7 +3323,7 @@ do_data_relocation(AOTModule *module, AOTRelocationGroup *group,
uint8 *data_addr;
uint32 data_size = 0, i;
AOTRelocation *relocation = group->relocations;
void *symbol_addr = NULL;
void *symbol_addr;
char *symbol, *data_section_name;
if (!strncmp(group->section_name, ".rela.", 6)) {
@ -4155,16 +4128,10 @@ create_module(char *name, char *error_buf, uint32 error_buf_size)
#endif
#if WASM_ENABLE_LIBC_WASI != 0
#if WASM_ENABLE_UVWASI == 0
module->wasi_args.stdio[0] = os_invalid_raw_handle();
module->wasi_args.stdio[1] = os_invalid_raw_handle();
module->wasi_args.stdio[2] = os_invalid_raw_handle();
#else
module->wasi_args.stdio[0] = os_get_invalid_handle();
module->wasi_args.stdio[1] = os_get_invalid_handle();
module->wasi_args.stdio[2] = os_get_invalid_handle();
#endif /* WASM_ENABLE_UVWASI == 0 */
#endif /* WASM_ENABLE_LIBC_WASI != 0 */
#endif
return module;
#if WASM_ENABLE_GC != 0

View File

@ -3639,7 +3639,7 @@ aot_get_module_inst_mem_consumption(const AOTModuleInstance *module_inst,
for (i = 0; i < module_inst->memory_count; i++) {
AOTMemoryInstance *mem_inst = module_inst->memories[i];
mem_conspn->memories_size +=
(uint64)mem_inst->num_bytes_per_page * mem_inst->cur_page_count;
mem_inst->num_bytes_per_page * mem_inst->cur_page_count;
mem_conspn->app_heap_size =
mem_inst->heap_data_end - mem_inst->heap_data;
/* size of app heap structure */
@ -4137,9 +4137,9 @@ aot_frame_update_profile_info(WASMExecEnv *exec_env, bool alloc_frame)
}
#endif /* end of WASM_ENABLE_AOT_STACK_FRAME != 0 */
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n,
char *error_buf, uint32 error_buf_size)
{
@ -4193,7 +4193,7 @@ aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
}
uint32
aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n,
char *error_buf, uint32_t error_buf_size)
{
@ -4243,7 +4243,7 @@ aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
}
uint32
aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
aot_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf,
uint32_t error_buf_size)
{
@ -4265,7 +4265,7 @@ aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
error_buf, error_buf_size);
}
}
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
#if WASM_ENABLE_DUMP_CALL_STACK != 0
bool
@ -4877,8 +4877,8 @@ aot_dump_pgo_prof_data_to_buf(AOTModuleInstance *module_inst, char *buf,
}
prof_header.magic = 0xFF6C70726F667281LL;
/* Version 9 */
prof_header.version = 0x0000000000000009LL;
/* Version 8 */
prof_header.version = 0x0000000000000008LL;
/* with VARIANT_MASK_IR_PROF (IR Instrumentation) */
prof_header.version |= 0x1ULL << 56;
/* with VARIANT_MASK_MEMPROF (Memory Profile) */
@ -4887,19 +4887,14 @@ aot_dump_pgo_prof_data_to_buf(AOTModuleInstance *module_inst, char *buf,
prof_header.num_prof_counters = num_prof_counters;
prof_header.names_size = prof_names_size;
prof_header.value_kind_last = 1;
/* __llvm_prf_bits won't be used in PGO, set dummy value here */
prof_header.num_prof_bitmaps = 0;
prof_header.bitmap_delta = 0;
if (!is_little_endian()) {
aot_exchange_uint64((uint8 *)&prof_header.magic);
aot_exchange_uint64((uint8 *)&prof_header.version);
aot_exchange_uint64((uint8 *)&prof_header.num_prof_data);
aot_exchange_uint64((uint8 *)&prof_header.num_prof_counters);
aot_exchange_uint64((uint8 *)&prof_header.num_prof_bitmaps);
aot_exchange_uint64((uint8 *)&prof_header.names_size);
aot_exchange_uint64((uint8 *)&prof_header.counters_delta);
aot_exchange_uint64((uint8 *)&prof_header.bitmap_delta);
aot_exchange_uint64((uint8 *)&prof_header.value_kind_last);
}
@ -4917,23 +4912,19 @@ aot_dump_pgo_prof_data_to_buf(AOTModuleInstance *module_inst, char *buf,
prof_data_64->func_md5 = prof_data->func_md5;
prof_data_64->func_hash = prof_data->func_hash;
prof_data_64->offset_counters = prof_data->offset_counters;
prof_data_64->offset_bitmaps = prof_data->offset_bitmaps;
prof_data_64->func_ptr = prof_data->func_ptr;
prof_data_64->values = (uint64)(uintptr_t)prof_data->values;
prof_data_64->num_counters = prof_data->num_counters;
/* __llvm_prf_bits won't be used in PGO, set dummy value here */
prof_data_64->num_bitmaps = 0;
prof_data_64->num_value_sites[0] = prof_data->num_value_sites[0];
prof_data_64->num_value_sites[1] = prof_data->num_value_sites[1];
if (!is_little_endian()) {
aot_exchange_uint64((uint8 *)&prof_data_64->func_hash);
aot_exchange_uint64((uint8 *)&prof_data_64->offset_counters);
aot_exchange_uint64((uint8 *)&prof_data_64->offset_bitmaps);
aot_exchange_uint64((uint8 *)&prof_data_64->offset_counters);
aot_exchange_uint64((uint8 *)&prof_data_64->func_ptr);
aot_exchange_uint64((uint8 *)&prof_data_64->values);
aot_exchange_uint32((uint8 *)&prof_data_64->num_counters);
aot_exchange_uint32((uint8 *)&prof_data_64->num_bitmaps);
aot_exchange_uint16((uint8 *)&prof_data_64->num_value_sites[0]);
aot_exchange_uint16((uint8 *)&prof_data_64->num_value_sites[1]);
}

View File

@ -437,9 +437,6 @@ typedef struct AOTFrame {
} AOTFrame;
#if WASM_ENABLE_STATIC_PGO != 0
/* The bitmaps fields in LLVMProfileRawHeader, LLVMProfileData,
* LLVMProfileData_64 all dummy fields, it's used in MC/DC code coverage
* instead of PGO. See https://llvm.org/docs/InstrProfileFormat.html#bitmap */
typedef struct LLVMProfileRawHeader {
uint64 magic;
uint64 version;
@ -448,11 +445,8 @@ typedef struct LLVMProfileRawHeader {
uint64 padding_bytes_before_counters;
uint64 num_prof_counters;
uint64 padding_bytes_after_counters;
uint64 num_prof_bitmaps;
uint64 padding_bytes_after_bitmaps;
uint64 names_size;
uint64 counters_delta;
uint64 bitmap_delta;
uint64 names_delta;
uint64 value_kind_last;
} LLVMProfileRawHeader;
@ -470,12 +464,10 @@ typedef struct LLVMProfileData {
uint64 func_md5;
uint64 func_hash;
uint64 offset_counters;
uint64 offset_bitmaps;
uintptr_t func_ptr;
ValueProfNode **values;
uint32 num_counters;
uint16 num_value_sites[2];
uint32 num_bitmaps;
} LLVMProfileData;
/* The profiling data for writing to the output file, the width of
@ -485,12 +477,10 @@ typedef struct LLVMProfileData_64 {
uint64 func_md5;
uint64 func_hash;
uint64 offset_counters;
uint64 offset_bitmaps;
uint64 func_ptr;
uint64 values;
uint32 num_counters;
uint16 num_value_sites[2];
uint32 num_bitmaps;
} LLVMProfileData_64;
#endif /* end of WASM_ENABLE_STATIC_PGO != 0 */
@ -787,12 +777,12 @@ aot_frame_update_profile_info(WASMExecEnv *exec_env, bool alloc_frame);
bool
aot_create_call_stack(struct WASMExecEnv *exec_env);
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
aot_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf,
uint32_t error_buf_size);
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
/**
* @brief Dump wasm call stack or get the size

View File

@ -1145,7 +1145,7 @@ wasm_reftype_is_subtype_of(uint8 type1, const WASMRefType *ref_type1,
return true;
else {
int32 heap_type = ref_type1->ref_ht_common.heap_type;
// We don't care whether type2 is nullable or not. So
// We dont care whether type2 is nullable or not. So
// we normalize it into its related one-byte type.
if (type2 == REF_TYPE_HT_NULLABLE
|| type2 == REF_TYPE_HT_NON_NULLABLE) {

View File

@ -85,10 +85,6 @@ wasm_exec_env_create_internal(struct WASMModuleInstanceCommon *module_inst,
wasm_runtime_dump_exec_env_mem_consumption(exec_env);
#endif
#if WASM_ENABLE_INSTRUCTION_METERING != 0
exec_env->instructions_to_execute = -1;
#endif
return exec_env;
#ifdef OS_ENABLE_HW_BOUND_CHECK

View File

@ -87,11 +87,6 @@ typedef struct WASMExecEnv {
uint8 *bottom;
} wasm_stack;
#if WASM_ENABLE_INSTRUCTION_METERING != 0
/* instructions to execute */
int instructions_to_execute;
#endif
#if WASM_ENABLE_FAST_JIT != 0
/**
* Cache for

View File

@ -1504,7 +1504,7 @@ wasm_runtime_load_ex(uint8 *buf, uint32 size, const LoadArgs *args,
error_buf_size);
}
bool
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_resolve_symbols(WASMModuleCommon *module)
{
#if WASM_ENABLE_INTERP != 0
@ -1743,9 +1743,9 @@ wasm_runtime_destroy_exec_env(WASMExecEnv *exec_env)
wasm_exec_env_destroy(exec_env);
}
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer,
wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf,
uint32_t error_buf_size)
{
@ -1780,7 +1780,7 @@ wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer,
strncpy(error_buf, err_msg, error_buf_size);
return 0;
}
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
bool
wasm_runtime_init_thread_env(void)
@ -2285,15 +2285,6 @@ wasm_runtime_access_exce_check_guard_page()
}
#endif
#if WASM_ENABLE_INSTRUCTION_METERING != 0
void
wasm_runtime_set_instruction_count_limit(WASMExecEnv *exec_env,
int instructions_to_execute)
{
exec_env->instructions_to_execute = instructions_to_execute;
}
#endif
WASMFuncType *
wasm_runtime_get_function_type(const WASMFunctionInstanceCommon *function,
uint32 module_type)
@ -3886,15 +3877,11 @@ wasm_runtime_init_wasi(WASMModuleInstanceCommon *module_inst,
init_options.allocator = &uvwasi_allocator;
init_options.argc = argc;
init_options.argv = (const char **)argv;
init_options.in = (stdinfd != os_get_invalid_handle())
? (uvwasi_fd_t)stdinfd
: init_options.in;
init_options.out = (stdoutfd != os_get_invalid_handle())
? (uvwasi_fd_t)stdoutfd
: init_options.out;
init_options.err = (stderrfd != os_get_invalid_handle())
? (uvwasi_fd_t)stderrfd
: init_options.err;
init_options.in = (stdinfd != -1) ? (uvwasi_fd_t)stdinfd : init_options.in;
init_options.out =
(stdoutfd != -1) ? (uvwasi_fd_t)stdoutfd : init_options.out;
init_options.err =
(stderrfd != -1) ? (uvwasi_fd_t)stderrfd : init_options.err;
if (dir_count > 0) {
init_options.preopenc = dir_count;
@ -7849,7 +7836,7 @@ wasm_runtime_detect_native_stack_overflow_size(WASMExecEnv *exec_env,
return true;
}
bool
WASM_RUNTIME_API_EXTERN bool
wasm_runtime_is_underlying_binary_freeable(WASMModuleCommon *const module)
{
#if WASM_ENABLE_INTERP != 0

View File

@ -758,12 +758,12 @@ wasm_runtime_create_exec_env(WASMModuleInstanceCommon *module_inst,
WASM_RUNTIME_API_EXTERN void
wasm_runtime_destroy_exec_env(WASMExecEnv *exec_env);
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
WASM_RUNTIME_API_EXTERN uint32_t
wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer,
wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer,
const uint32 length, const uint32 skip_n, char *error_buf,
uint32 error_buf_size);
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN WASMModuleInstanceCommon *
@ -791,17 +791,9 @@ WASM_RUNTIME_API_EXTERN void
wasm_runtime_set_native_stack_boundary(WASMExecEnv *exec_env,
uint8 *native_stack_boundary);
#if WASM_ENABLE_INSTRUCTION_METERING != 0
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN void
wasm_runtime_set_instruction_count_limit(WASMExecEnv *exec_env,
int instructions_to_execute);
#endif
#if WASM_CONFIGURABLE_BOUNDS_CHECKS != 0
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN
void
WASM_RUNTIME_API_EXTERN void
wasm_runtime_set_bounds_checks(WASMModuleInstanceCommon *module_inst,
bool enable);

View File

@ -48,7 +48,7 @@ typedef struct AOTSymbolList {
} AOTSymbolList;
/* AOT object data */
struct AOTObjectData {
typedef struct AOTObjectData {
AOTCompContext *comp_ctx;
LLVMMemoryBufferRef mem_buf;
@ -82,7 +82,7 @@ struct AOTObjectData {
const char *stack_sizes_section_name;
uint32 stack_sizes_offset;
uint32 *stack_sizes;
};
} AOTObjectData;
#if 0
static void dump_buf(uint8 *buf, uint32 size, char *title)
@ -302,8 +302,8 @@ get_init_expr_size(const AOTCompContext *comp_ctx, const AOTCompData *comp_data,
/* array_elem_type + type_index + len + elems */
size += sizeof(uint32) * 3
+ (uint64)wasm_value_type_size_internal(
array_type->elem_type, comp_ctx->pointer_size)
+ wasm_value_type_size_internal(array_type->elem_type,
comp_ctx->pointer_size)
* value_count;
break;
}
@ -3378,12 +3378,6 @@ aot_resolve_object_data_sections(AOTObjectData *obj_data)
bh_memcpy_s(data_section->name, size, buf, size);
data_section->is_name_allocated = true;
}
else if (obj_data->comp_ctx->enable_llvm_pgo
&& !strcmp(name, "__llvm_prf_bits")) {
LOG_WARNING("__llvm_prf_bits section is not supported and "
"shouldn't be used in PGO.");
return false;
}
if (obj_data->comp_ctx->enable_llvm_pgo
&& !strcmp(name, "__llvm_prf_names")) {

View File

@ -347,8 +347,7 @@ call_aot_invoke_c_api_native(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
/* Get &c_api_func_imports[func_idx], note size of CApiFuncImport
is pointer_size * 3 */
offset = I32_CONST((unsigned long long)comp_ctx->pointer_size * 3
* import_func_idx);
offset = I32_CONST((comp_ctx->pointer_size * 3) * import_func_idx);
CHECK_LLVM_CONST(offset);
c_api_func_import =
LLVMBuildInBoundsGEP2(comp_ctx->builder, INT8_TYPE, c_api_func_imports,

View File

@ -3204,21 +3204,6 @@ aot_create_comp_context(const AOTCompData *comp_data, aot_comp_option_t option)
#if WASM_ENABLE_WAMR_COMPILER != 0
WASMModule *wasm_module = (WASMModule *)comp_data->wasm_module;
bool is_memory64 = false;
/* TODO: multi-memories for now assuming the memory64 flag of a memory is
* consistent across multi-memories */
if (wasm_module->import_memory_count > 0)
is_memory64 = !!(wasm_module->import_memories[0].u.memory.mem_type.flags
& MEMORY64_FLAG);
else if (wasm_module->memory_count > 0)
is_memory64 = !!(wasm_module->memories[0].flags & MEMORY64_FLAG);
if (!(option->bounds_checks == 1 || option->bounds_checks == 0)
&& is_memory64) {
/* For memory64, the boundary check default value is true */
comp_ctx->enable_bound_check = true;
}
/* Return error if SIMD is disabled by command line but SIMD instructions
* are used */
@ -3999,7 +3984,7 @@ aot_get_func_from_table(const AOTCompContext *comp_ctx, LLVMValueRef base,
if (!(func =
LLVMBuildBitCast(comp_ctx->builder, func, func_type, "func"))) {
aot_set_last_error("cast function failed.");
aot_set_last_error("cast function fialed.");
goto fail;
}
@ -4068,7 +4053,7 @@ aot_load_const_from_table(AOTCompContext *comp_ctx, LLVMValueRef base,
if (!(const_addr = LLVMBuildBitCast(comp_ctx->builder, const_addr,
const_ptr_type, "const_addr"))) {
aot_set_last_error("cast const failed.");
aot_set_last_error("cast const fialed.");
return NULL;
}

View File

@ -139,6 +139,8 @@ typedef struct wasm_frame_t {
uint32_t *lp;
} WASMCApiFrame;
typedef WASMCApiFrame wasm_frame_t;
/* WASM section */
typedef struct wasm_section_t {
struct wasm_section_t *next;
@ -902,7 +904,7 @@ wasm_runtime_destroy_exec_env(wasm_exec_env_t exec_env);
* @return number of copied frames
*/
WASM_RUNTIME_API_EXTERN uint32_t
wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer,
wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer,
const uint32_t length, const uint32_t skip_n,
char *error_buf, uint32_t error_buf_size);
@ -1819,20 +1821,6 @@ WASM_RUNTIME_API_EXTERN void
wasm_runtime_set_native_stack_boundary(wasm_exec_env_t exec_env,
uint8_t *native_stack_boundary);
/**
* Set the instruction count limit to the execution environment.
* By default the instruction count limit is -1, which means no limit.
* However, if the instruction count limit is set to a positive value,
* the execution will be terminated when the instruction count reaches
* the limit.
*
* @param exec_env the execution environment
* @param instruction_count the instruction count limit
*/
WASM_RUNTIME_API_EXTERN void
wasm_runtime_set_instruction_count_limit(wasm_exec_env_t exec_env,
int instruction_count);
/**
* Dump runtime memory consumption, including:
* Exec env memory consumption

View File

@ -1243,7 +1243,7 @@ wasm_value_type_size_internal(uint8 value_type, uint8 pointer_size)
return sizeof(int16);
#endif
else {
bh_assert(0 && "Unknown value type. It should be handled ahead.");
bh_assert(0);
}
#if WASM_ENABLE_GC == 0
(void)pointer_size;

View File

@ -1516,13 +1516,10 @@ wasm_interp_call_func_import(WASMModuleInstance *module_inst,
} \
os_mutex_unlock(&exec_env->wait_lock); \
} \
CHECK_INSTRUCTION_LIMIT(); \
goto *handle_table[*frame_ip++]; \
} while (0)
#else
#define HANDLE_OP_END() \
CHECK_INSTRUCTION_LIMIT(); \
FETCH_OPCODE_AND_DISPATCH()
#define HANDLE_OP_END() FETCH_OPCODE_AND_DISPATCH()
#endif
#else /* else of WASM_ENABLE_LABELS_AS_VALUES */
@ -1545,12 +1542,9 @@ wasm_interp_call_func_import(WASMModuleInstance *module_inst,
} \
os_mutex_unlock(&exec_env->wait_lock); \
} \
CHECK_INSTRUCTION_LIMIT(); \
continue;
#else
#define HANDLE_OP_END() \
CHECK_INSTRUCTION_LIMIT(); \
continue;
#define HANDLE_OP_END() continue
#endif
#endif /* end of WASM_ENABLE_LABELS_AS_VALUES */
@ -1568,18 +1562,6 @@ get_global_addr(uint8 *global_data, WASMGlobalInstance *global)
#endif
}
#if WASM_ENABLE_INSTRUCTION_METERING != 0
#define CHECK_INSTRUCTION_LIMIT() \
if (instructions_left == 0) { \
wasm_set_exception(module, "instruction limit exceeded"); \
goto got_exception; \
} \
else if (instructions_left > 0) \
instructions_left--;
#else
#define CHECK_INSTRUCTION_LIMIT() (void)0
#endif
static void
wasm_interp_call_func_bytecode(WASMModuleInstance *module,
WASMExecEnv *exec_env,
@ -1623,14 +1605,6 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
uint32 local_idx, local_offset, global_idx;
uint8 local_type, *global_addr;
uint32 cache_index, type_index, param_cell_num, cell_num;
#if WASM_ENABLE_INSTRUCTION_METERING != 0
int instructions_left = -1;
if (exec_env) {
instructions_left = exec_env->instructions_to_execute;
}
#endif
#if WASM_ENABLE_EXCE_HANDLING != 0
int32_t exception_tag_index;
#endif
@ -4088,7 +4062,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
case WASM_OP_STRING_ENCODE_LOSSY_UTF8_ARRAY:
case WASM_OP_STRING_ENCODE_WTF8_ARRAY:
{
uint32 start, array_len;
uint32 start, array_len, count;
int32 bytes_written;
EncodingFlag flag = WTF8;
WASMArrayType *array_type;

View File

@ -105,19 +105,6 @@ typedef float64 CellType_F64;
goto unaligned_atomic; \
} while (0)
#if WASM_ENABLE_INSTRUCTION_METERING != 0
#define CHECK_INSTRUCTION_LIMIT() \
if (instructions_left == 0) { \
wasm_set_exception(module, "instruction limit exceeded"); \
goto got_exception; \
} \
else if (instructions_left > 0) \
instructions_left--;
#else
#define CHECK_INSTRUCTION_LIMIT() (void)0
#endif
static inline uint32
rotl32(uint32 n, uint32 c)
{
@ -1454,7 +1441,6 @@ wasm_interp_dump_op_count()
do { \
const void *p_label_addr = *(void **)frame_ip; \
frame_ip += sizeof(void *); \
CHECK_INSTRUCTION_LIMIT(); \
goto *p_label_addr; \
} while (0)
#else
@ -1466,7 +1452,6 @@ wasm_interp_dump_op_count()
/* int32 relative offset was emitted in 64-bit target */ \
p_label_addr = label_base + (int32)LOAD_U32_WITH_2U16S(frame_ip); \
frame_ip += sizeof(int32); \
CHECK_INSTRUCTION_LIMIT(); \
goto *p_label_addr; \
} while (0)
#else
@ -1477,7 +1462,6 @@ wasm_interp_dump_op_count()
/* uint32 label address was emitted in 32-bit target */ \
p_label_addr = (void *)(uintptr_t)LOAD_U32_WITH_2U16S(frame_ip); \
frame_ip += sizeof(int32); \
CHECK_INSTRUCTION_LIMIT(); \
goto *p_label_addr; \
} while (0)
#endif
@ -1554,13 +1538,6 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
uint8 *maddr = NULL;
uint32 local_idx, local_offset, global_idx;
uint8 opcode = 0, local_type, *global_addr;
#if WASM_ENABLE_INSTRUCTION_METERING != 0
int instructions_left = -1;
if (exec_env) {
instructions_left = exec_env->instructions_to_execute;
}
#endif
#if !defined(OS_ENABLE_HW_BOUND_CHECK) \
|| WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0
#if WASM_CONFIGURABLE_BOUNDS_CHECKS != 0
@ -6124,9 +6101,8 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
#if WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS != 0
#define SIMD_LANE_HANDLE_UNALIGNED_ACCESS()
#else
#define SIMD_LANE_HANDLE_UNALIGNED_ACCESS() (void)*frame_ip++
#endif /* WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS != 0 */
#define SIMD_LANE_HANDLE_UNALIGNED_ACCESS() *frame_ip++;
#endif
#define SIMD_EXTRACT_LANE_OP(register, return_type, push_elem) \
do { \
uint8 lane = *frame_ip++; \
@ -6543,7 +6519,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
addr_ret = GET_OFFSET(); \
CHECK_MEMORY_OVERFLOW(width / 8); \
if (width == 64) { \
vec.register[lane] = GET_I64_FROM_ADDR((uint32 *)maddr); \
vec.register[lane] = GET_I64_FROM_ADDR(maddr); \
} \
else { \
vec.register[lane] = *(uint##width *)(maddr); \

View File

@ -379,6 +379,7 @@ memory_realloc(void *mem_old, uint32 size_old, uint32 size_new, char *error_buf,
mem = mem_new; \
} while (0)
#if WASM_ENABLE_GC != 0
static bool
check_type_index(const WASMModule *module, uint32 type_count, uint32 type_index,
char *error_buf, uint32 error_buf_size)
@ -391,7 +392,6 @@ check_type_index(const WASMModule *module, uint32 type_count, uint32 type_index,
return true;
}
#if WASM_ENABLE_GC != 0
static bool
check_array_type(const WASMModule *module, uint32 type_index, char *error_buf,
uint32 error_buf_size)
@ -409,29 +409,6 @@ check_array_type(const WASMModule *module, uint32 type_index, char *error_buf,
}
#endif
/*
* if no GC is enabled, an valid type is always a function type.
* but if GC is enabled, we need to check the type flag
*/
static bool
check_function_type(const WASMModule *module, uint32 type_index,
char *error_buf, uint32 error_buf_size)
{
if (!check_type_index(module, module->type_count, type_index, error_buf,
error_buf_size)) {
return false;
}
#if WASM_ENABLE_GC != 0
if (module->types[type_index]->type_flag != WASM_TYPE_FUNC) {
set_error_buf(error_buf, error_buf_size, "unknown function type");
return false;
}
#endif
return true;
}
static bool
check_function_index(const WASMModule *module, uint32 function_index,
char *error_buf, uint32 error_buf_size)
@ -831,24 +808,19 @@ load_init_expr(WASMModule *module, const uint8 **p_buf, const uint8 *buf_end,
{
uint8 type1;
#if WASM_ENABLE_GC == 0
CHECK_BUF(p, p_end, 1);
type1 = read_uint8(p);
#if WASM_ENABLE_GC == 0
cur_value.ref_index = NULL_REF;
if (!push_const_expr_stack(&const_expr_ctx, flag, type1,
&cur_value, error_buf,
error_buf_size))
goto fail;
#else
int32 heap_type;
read_leb_int32(p, p_end, heap_type);
type1 = (uint8)((int32)0x80 + heap_type);
cur_value.gc_obj = NULL_REF;
if (!is_byte_a_type(type1)
|| !wasm_is_valid_heap_type(heap_type)
|| wasm_is_type_multi_byte_type(type1)) {
p--;
read_leb_uint32(p, p_end, type_idx);
@ -2042,9 +2014,9 @@ load_type_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module,
"recursive type count too large");
return false;
}
module->type_count += rec_count - 1;
new_total_size =
sizeof(WASMFuncType *)
* (uint64)(module->type_count + rec_count - 1);
sizeof(WASMFuncType *) * (uint64)module->type_count;
if (new_total_size > UINT32_MAX) {
set_error_buf(error_buf, error_buf_size,
"allocate memory failed");
@ -2052,7 +2024,6 @@ load_type_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module,
}
MEM_REALLOC(module->types, (uint32)total_size,
(uint32)new_total_size);
module->type_count += rec_count - 1;
total_size = new_total_size;
}
@ -2508,8 +2479,8 @@ load_function_import(const uint8 **p_buf, const uint8 *buf_end,
read_leb_uint32(p, p_end, declare_type_index);
*p_buf = p;
if (!check_function_type(parent_module, declare_type_index, error_buf,
error_buf_size)) {
if (declare_type_index >= parent_module->type_count) {
set_error_buf(error_buf, error_buf_size, "unknown type");
return false;
}
@ -2589,8 +2560,7 @@ load_table_import(const uint8 **p_buf, const uint8 *buf_end,
error_buf_size)) {
return false;
}
if (!wasm_is_type_reftype(ref_type.ref_type)
|| wasm_is_reftype_htref_non_nullable(ref_type.ref_type)) {
if (wasm_is_reftype_htref_non_nullable(ref_type.ref_type)) {
set_error_buf(error_buf, error_buf_size, "type mismatch");
return false;
}
@ -2923,8 +2893,8 @@ load_tag_import(const uint8 **p_buf, const uint8 *buf_end,
/* get type */
read_leb_uint32(p, p_end, declare_type_index);
/* compare against module->types */
if (!check_function_type(parent_module, declare_type_index, error_buf,
error_buf_size)) {
if (declare_type_index >= parent_module->type_count) {
set_error_buf(error_buf, error_buf_size, "unknown tag type");
goto fail;
}
@ -3116,15 +3086,6 @@ load_table(const uint8 **p_buf, const uint8 *buf_end, WASMModule *module,
error_buf_size)) {
return false;
}
/*
* TODO: add this validator
* `wasm_is_reftype_htref_non_nullable(ref_type.ref_type)`
* after sync up with the latest GC spec
*/
if (!wasm_is_type_reftype(ref_type.ref_type)) {
set_error_buf(error_buf, error_buf_size, "type mismatch");
return false;
}
table->table_type.elem_type = ref_type.ref_type;
if (need_ref_type_map) {
if (!(table->table_type.elem_ref_type =
@ -3298,13 +3259,6 @@ load_import_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module,
CHECK_BUF(p, p_end, 1);
/* 0x70 */
u8 = read_uint8(p);
#if WASM_ENABLE_GC != 0
if (wasm_is_reftype_htref_nullable(u8)) {
int32 heap_type;
read_leb_int32(p, p_end, heap_type);
(void)heap_type;
}
#endif
read_leb_uint32(p, p_end, flags);
read_leb_uint32(p, p_end, u32);
if (flags & 1)
@ -3352,8 +3306,7 @@ load_import_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module,
/* valtype */
CHECK_BUF(p, p_end, 1);
global_type = read_uint8(p);
if (wasm_is_reftype_htref_nullable(global_type)
|| wasm_is_reftype_htref_non_nullable(global_type)) {
if (wasm_is_type_multi_byte_type(global_type)) {
int32 heap_type;
read_leb_int32(p, p_end, heap_type);
(void)heap_type;
@ -3610,9 +3563,8 @@ load_function_section(const uint8 *buf, const uint8 *buf_end,
for (i = 0; i < func_count; i++) {
/* Resolve function type */
read_leb_uint32(p, p_end, type_index);
if (!check_function_type(module, type_index, error_buf,
error_buf_size)) {
if (type_index >= module->type_count) {
set_error_buf(error_buf, error_buf_size, "unknown type");
return false;
}
@ -3714,7 +3666,7 @@ load_function_section(const uint8 *buf, const uint8 *buf_end,
* we shall make a copy of code body [p_code, p_code + code_size]
* when we are worrying about inappropriate releasing behaviour.
* all code bodies are actually in a buffer which user allocates in
* their embedding environment and we don't have power over them.
* his embedding environment and we don't have power on them.
* it will be like:
* code_body_cp = malloc(code_size);
* memcpy(code_body_cp, p_code, code_size);
@ -5018,8 +4970,8 @@ load_tag_section(const uint8 *buf, const uint8 *buf_end, const uint8 *buf_code,
/* get type */
read_leb_uint32(p, p_end, tag_type);
/* compare against module->types */
if (!check_function_type(module, tag_type, error_buf,
error_buf_size)) {
if (tag_type >= module->type_count) {
set_error_buf(error_buf, error_buf_size, "unknown type");
return false;
}
@ -6428,16 +6380,10 @@ create_module(char *name, char *error_buf, uint32 error_buf_size)
#endif
#if WASM_ENABLE_LIBC_WASI != 0
#if WASM_ENABLE_UVWASI == 0
module->wasi_args.stdio[0] = os_invalid_raw_handle();
module->wasi_args.stdio[1] = os_invalid_raw_handle();
module->wasi_args.stdio[2] = os_invalid_raw_handle();
#else
module->wasi_args.stdio[0] = os_get_invalid_handle();
module->wasi_args.stdio[1] = os_get_invalid_handle();
module->wasi_args.stdio[2] = os_get_invalid_handle();
#endif /* WASM_ENABLE_UVWASI == 0 */
#endif /* WASM_ENABLE_LIBC_WASI != 0 */
#endif
(void)ret;
return module;
@ -10531,7 +10477,7 @@ wasm_loader_check_br(WASMLoaderContext *loader_ctx, uint32 depth, uint8 opcode,
* match block type. */
if (cur_block->is_stack_polymorphic) {
#if WASM_ENABLE_GC != 0
int32 j = (int32)reftype_map_count - 1;
int32 j = reftype_map_count - 1;
#endif
for (i = (int32)arity - 1; i >= 0; i--) {
#if WASM_ENABLE_GC != 0
@ -10834,7 +10780,7 @@ check_block_stack(WASMLoaderContext *loader_ctx, BranchBlock *block,
* match block type. */
if (block->is_stack_polymorphic) {
#if WASM_ENABLE_GC != 0
int32 j = (int32)return_reftype_map_count - 1;
int32 j = return_reftype_map_count - 1;
#endif
for (i = (int32)return_count - 1; i >= 0; i--) {
#if WASM_ENABLE_GC != 0
@ -11603,17 +11549,15 @@ re_scan:
}
else {
int32 type_index;
/* Resolve the leb128 encoded type index as block type */
p--;
p_org = p - 1;
pb_read_leb_int32(p, p_end, type_index);
if (!check_function_type(module, type_index, error_buf,
error_buf_size)) {
if ((uint32)type_index >= module->type_count) {
set_error_buf(error_buf, error_buf_size,
"unknown type");
goto fail;
}
block_type.is_value_type = false;
block_type.u.type =
(WASMFuncType *)module->types[type_index];
@ -12663,8 +12607,8 @@ re_scan:
/* skip elem idx */
POP_TBL_ELEM_IDX();
if (!check_function_type(module, type_idx, error_buf,
error_buf_size)) {
if (type_idx >= module->type_count) {
set_error_buf(error_buf, error_buf_size, "unknown type");
goto fail;
}
@ -15025,6 +14969,8 @@ re_scan:
case WASM_OP_STRING_NEW_LOSSY_UTF8:
case WASM_OP_STRING_NEW_WTF8:
{
uint32 memidx;
#if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0
func->has_memory_operations = true;
#endif
@ -15036,6 +14982,7 @@ re_scan:
POP_I32();
POP_I32();
PUSH_REF(REF_TYPE_STRINGREF);
(void)memidx;
break;
}
case WASM_OP_STRING_CONST:
@ -15063,6 +15010,8 @@ re_scan:
case WASM_OP_STRING_ENCODE_LOSSY_UTF8:
case WASM_OP_STRING_ENCODE_WTF8:
{
uint32 memidx;
#if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0
func->has_memory_operations = true;
#endif
@ -15074,6 +15023,7 @@ re_scan:
POP_I32();
POP_STRINGREF();
PUSH_I32();
(void)memidx;
break;
}
case WASM_OP_STRING_CONCAT:
@ -15114,6 +15064,8 @@ re_scan:
case WASM_OP_STRINGVIEW_WTF8_ENCODE_LOSSY_UTF8:
case WASM_OP_STRINGVIEW_WTF8_ENCODE_WTF8:
{
uint32 memidx;
#if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0
func->has_memory_operations = true;
#endif
@ -15128,6 +15080,7 @@ re_scan:
POP_REF(REF_TYPE_STRINGVIEWWTF8);
PUSH_I32();
PUSH_I32();
(void)memidx;
break;
}
case WASM_OP_STRINGVIEW_WTF8_SLICE:
@ -15159,6 +15112,8 @@ re_scan:
}
case WASM_OP_STRINGVIEW_WTF16_ENCODE:
{
uint32 memidx;
#if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0
func->has_memory_operations = true;
#endif
@ -15172,6 +15127,7 @@ re_scan:
POP_I32();
POP_REF(REF_TYPE_STRINGVIEWWTF16);
PUSH_I32();
(void)memidx;
break;
}
case WASM_OP_STRINGVIEW_WTF16_SLICE:

View File

@ -1226,7 +1226,7 @@ load_function_section(const uint8 *buf, const uint8 *buf_end,
* we shall make a copy of code body [p_code, p_code + code_size]
* when we are worrying about inappropriate releasing behaviour.
* all code bodies are actually in a buffer which user allocates in
* their embedding environment and we don't have power over them.
* his embedding environment and we don't have power on them.
* it will be like:
* code_body_cp = malloc(code_size);
* memcpy(code_body_cp, p_code, code_size);
@ -3140,16 +3140,10 @@ create_module(char *name, char *error_buf, uint32 error_buf_size)
#endif
#if WASM_ENABLE_LIBC_WASI != 0
#if WASM_ENABLE_LIBC_UVWASI == 0
module->wasi_args.stdio[0] = os_invalid_raw_handle();
module->wasi_args.stdio[1] = os_invalid_raw_handle();
module->wasi_args.stdio[2] = os_invalid_raw_handle();
#else
module->wasi_args.stdio[0] = os_get_invalid_handle();
module->wasi_args.stdio[1] = os_get_invalid_handle();
module->wasi_args.stdio[2] = os_get_invalid_handle();
#endif /* WASM_ENABLE_UVWASI == 0 */
#endif /* WASM_ENABLE_LIBC_WASI != 0 */
#endif
(void)ret;
return module;

View File

@ -2668,7 +2668,7 @@ wasm_instantiate(WASMModule *module, WASMModuleInstance *parent,
}
STORE_PTR((void **)global_data, func_obj);
global_data += sizeof(void *);
/* Also update the initial_value since other globals may
/* Also update the inital_value since other globals may
* refer to this */
global->initial_value.gc_obj = (wasm_obj_t)func_obj;
break;
@ -4161,7 +4161,7 @@ wasm_get_module_inst_mem_consumption(const WASMModuleInstance *module_inst,
sizeof(WASMMemoryInstance *) * module_inst->memory_count;
for (i = 0; i < module_inst->memory_count; i++) {
WASMMemoryInstance *memory = module_inst->memories[i];
size = (uint64)memory->num_bytes_per_page * memory->cur_page_count;
size = memory->num_bytes_per_page * memory->cur_page_count;
mem_conspn->memories_size += size;
mem_conspn->app_heap_size += memory->heap_data_end - memory->heap_data;
/* size of app heap structure */
@ -4195,9 +4195,9 @@ wasm_get_module_inst_mem_consumption(const WASMModuleInstance *module_inst,
#endif /* end of (WASM_ENABLE_MEMORY_PROFILING != 0) \
|| (WASM_ENABLE_MEMORY_TRACING != 0) */
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
wasm_interp_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
uint32 length, uint32 skip_n, char *error_buf,
uint32_t error_buf_size)
{
@ -4242,7 +4242,7 @@ wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
}
return count >= skip_n ? count - skip_n : 0;
}
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
#if WASM_ENABLE_DUMP_CALL_STACK != 0
bool

View File

@ -731,12 +731,12 @@ wasm_get_table_inst(const WASMModuleInstance *module_inst, uint32 tbl_idx)
#if WASM_ENABLE_DUMP_CALL_STACK != 0
#if WASM_ENABLE_COPY_CALL_STACK != 0
#if WAMR_ENABLE_COPY_CALLSTACK != 0
uint32
wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer,
wasm_interp_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer,
uint32 length, uint32 skip_n, char *error_buf,
uint32_t error_buf_size);
#endif // WASM_ENABLE_COPY_CALL_STACK
#endif // WAMR_ENABLE_COPY_CALLSTACK
bool
wasm_interp_create_call_stack(struct WASMExecEnv *exec_env);

View File

@ -743,7 +743,7 @@ wasm_debug_instance_get_obj_mem(WASMDebugInstance *instance, uint64 offset,
module_inst = (WASMModuleInstance *)exec_env->module_inst;
if (offset + *size > module_inst->module->load_size) {
LOG_VERBOSE("wasm_debug_instance_get_data_mem size overflow!\n");
LOG_VERBOSE("wasm_debug_instance_get_data_mem size over flow!\n");
*size = module_inst->module->load_size >= offset
? module_inst->module->load_size - offset
: 0;
@ -797,7 +797,7 @@ wasm_debug_instance_get_linear_mem(WASMDebugInstance *instance, uint64 offset,
num_bytes_per_page = memory->num_bytes_per_page;
linear_mem_size = num_bytes_per_page * memory->cur_page_count;
if (offset + *size > linear_mem_size) {
LOG_VERBOSE("wasm_debug_instance_get_linear_mem size overflow!\n");
LOG_VERBOSE("wasm_debug_instance_get_linear_mem size over flow!\n");
*size = linear_mem_size >= offset ? linear_mem_size - offset : 0;
}
bh_memcpy_s(buf, (uint32)*size, memory->memory_data + offset,
@ -830,7 +830,7 @@ wasm_debug_instance_set_linear_mem(WASMDebugInstance *instance, uint64 offset,
num_bytes_per_page = memory->num_bytes_per_page;
linear_mem_size = num_bytes_per_page * memory->cur_page_count;
if (offset + *size > linear_mem_size) {
LOG_VERBOSE("wasm_debug_instance_get_linear_mem size overflow!\n");
LOG_VERBOSE("wasm_debug_instance_get_linear_mem size over flow!\n");
*size = linear_mem_size >= offset ? linear_mem_size - offset : 0;
}
bh_memcpy_s(memory->memory_data + offset, (uint32)*size, buf,

View File

@ -175,19 +175,6 @@ process_wasm_global(WASMGDBServer *server, char *args)
os_mutex_unlock(&tmpbuf_lock);
}
/* TODO: let server send an empty/error reply.
Original issue: 4265
Not tested yet, but it should work.
*/
static void
send_reply(WASMGDBServer *server, const char *err)
{
if (!err || !*err)
write_packet(server, "");
else
write_packet(server, err);
}
void
handle_general_query(WASMGDBServer *server, char *payload)
{
@ -227,7 +214,6 @@ handle_general_query(WASMGDBServer *server, char *payload)
if (!args) {
LOG_ERROR("payload parse error during handle_general_query");
send_reply(server, "");
return;
}
@ -398,7 +384,7 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
if (status == 0) {
os_mutex_lock(&tmpbuf_lock);
(void)snprintf(tmpbuf, MAX_PACKET_SIZE, "W%02" PRIx32, status);
send_reply(server, tmpbuf);
write_packet(server, tmpbuf);
os_mutex_unlock(&tmpbuf_lock);
return;
}
@ -417,7 +403,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
"T%02" PRIx32 "thread:%" PRIx64 ";name:%s;", gdb_status,
(uint64)(uintptr_t)tid, "nobody");
if (len < 0 || len >= MAX_PACKET_SIZE) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock);
return;
}
@ -425,7 +410,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
if (tids_count > 0) {
int n = snprintf(tmpbuf + len, MAX_PACKET_SIZE - len, "threads:");
if (n < 0 || n >= MAX_PACKET_SIZE - len) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock);
return;
}
@ -442,7 +426,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
}
if (n < 0 || n >= MAX_PACKET_SIZE - len) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock);
return;
}
@ -469,7 +452,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
"thread-pcs:%" PRIx64 ";00:%s;reason:%s;description:", pc,
pc_string, "exception");
if (n < 0 || n >= MAX_PACKET_SIZE - len) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock);
return;
}
@ -480,7 +462,6 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid)
n = snprintf(tmpbuf + len, MAX_PACKET_SIZE - len, "%02x",
exception[i]);
if (n < 0 || n >= MAX_PACKET_SIZE - len) {
send_reply(server, "E01");
os_mutex_unlock(&tmpbuf_lock);
return;
}
@ -611,7 +592,7 @@ handle_get_register(WASMGDBServer *server, char *payload)
int32 i = strtol(payload, NULL, 16);
if (i != 0) {
send_reply(server, "E01");
write_packet(server, "E01");
return;
}
regdata = wasm_debug_instance_get_pc(
@ -767,7 +748,7 @@ handle_add_break(WASMGDBServer *server, char *payload)
if ((arg_c = sscanf(payload, "%zx,%" SCNx64 ",%zx", &type, &addr, &length))
!= 3) {
LOG_ERROR("Unsupported number of add break arguments %d", arg_c);
send_reply(server, "");
write_packet(server, "");
return;
}
@ -802,7 +783,7 @@ handle_remove_break(WASMGDBServer *server, char *payload)
if ((arg_c = sscanf(payload, "%zx,%" SCNx64 ",%zx", &type, &addr, &length))
!= 3) {
LOG_ERROR("Unsupported number of remove break arguments %d", arg_c);
send_reply(server, "");
write_packet(server, "");
return;
}
@ -854,7 +835,6 @@ handle_malloc(WASMGDBServer *server, char *payload)
}
else {
LOG_ERROR("Payload parse error during handle malloc");
send_reply(server, "");
return;
}

View File

@ -12,26 +12,6 @@
#include <wasi/api.h>
#include <wasi_socket_ext.h>
/*
* Avoid direct TLS access to allow a single library to be
* linked to both of threaded and non-threaded applications.
*
* wasi-libc's errno is a TLS variable, exposed directly via
* errno.h. if we use it here, LLVM may lower it differently,
* depending on enabled features like atomcs and bulk-memory.
* we tweak the way to access errno here in order to make us
* compatible with both of threaded and non-threaded applications.
* __errno_location() should be reasonably stable because
* it was introduced as an alternative ABI for non-C software.
* https://github.com/WebAssembly/wasi-libc/pull/347
*/
#if defined(errno)
#undef errno
#endif
int *
__errno_location(void);
#define errno (*__errno_location())
#define HANDLE_ERROR(error) \
if (error != __WASI_ERRNO_SUCCESS) { \
errno = error; \

View File

@ -43,7 +43,7 @@ else()
FetchContent_Declare(
uvwasi
GIT_REPOSITORY https://github.com/nodejs/uvwasi.git
GIT_TAG 392e1f1c1c8a2d2102c9f2e0b9f35959a149d133
GIT_TAG v0.0.21
)
FetchContent_MakeAvailable(uvwasi)
include_directories("${uvwasi_SOURCE_DIR}/include")

View File

@ -890,6 +890,24 @@ wasi_path_symlink(wasm_exec_env_t exec_env, const char *old_path,
if (!uvwasi)
return (wasi_errno_t)-1;
/*
* check if old_path is valid.
* if it is a symlink, follow it.
*
* this is a workaround for the fact that
* uvwasi_path_symlink does not check if the old_path is valid
*
* the goal is trigger uvwasi__resolve_path() to check
*/
{
uvwasi_filestat_t filestat = { 0 };
wasi_errno_t err =
uvwasi_path_filestat_get(uvwasi, fd, UVWASI_LOOKUP_SYMLINK_FOLLOW,
old_path, old_path_len, &filestat);
if (err)
return err;
}
return uvwasi_path_symlink(uvwasi, old_path, old_path_len, fd, new_path,
new_path_len);
}

View File

@ -301,8 +301,7 @@ wasm_cluster_create(WASMExecEnv *exec_env)
aux_stack_start -= cluster->stack_size;
for (i = 0; i < cluster_max_thread_num; i++) {
cluster->stack_tops[i] =
aux_stack_start - (uint64)cluster->stack_size * i;
cluster->stack_tops[i] = aux_stack_start - cluster->stack_size * i;
}
}
#endif

View File

@ -37,7 +37,7 @@ There is a big difference between the two sets of functions, `tensor_type`.
```c
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
typedef enum { fp16 = 0, fp32, fp64, u8, i32, i64 } tensor_type;
typedef enum { fp16 = 0, fp32, fp64, bf16, u8, i32, i64 } tensor_type;
#else
typedef enum { fp16 = 0, fp32, up8, ip32 } tensor_type;
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */

View File

@ -1,12 +0,0 @@
/*
* Copyright (C) 2025 Midokura Japan KK. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#define WASM_ENABLE_WASI_EPHEMERAL_NN 1
#define WASI_NN_NAME(name) wasi_ephemeral_nn_##name
#include "wasi_nn.h"
#undef WASM_ENABLE_WASI_EPHEMERAL_NN
#undef WASI_NN_NAME

View File

@ -15,43 +15,23 @@
#include <stdint.h>
#include "wasi_nn_types.h"
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
#define WASI_NN_IMPORT(name) \
__attribute__((import_module("wasi_ephemeral_nn"), import_name(name)))
#else
#define WASI_NN_IMPORT(name) \
__attribute__((import_module("wasi_nn"), import_name(name)))
#warning You are using "wasi_nn", which is a legacy WAMR-specific ABI. It's deperecated and will likely be removed in future versions of WAMR. Please use "wasi_ephemeral_nn" instead. (For a WASM module, use the wasi_ephemeral_nn.h header instead. For the runtime configurations, enable WASM_ENABLE_WASI_EPHEMERAL_NN/WAMR_BUILD_WASI_EPHEMERAL_NN.)
#endif
/**
* @brief Load an opaque sequence of bytes to use for inference.
*
* @param builder Model builder.
* @param builder_len The size of model builder.
* @param encoding Model encoding.
* @param target Execution target.
* @param g Graph.
* @return wasi_nn_error Execution status.
*/
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
WASI_NN_ERROR_TYPE
WASI_NN_NAME(load)
(WASI_NN_NAME(graph_builder) * builder, uint32_t builder_len,
WASI_NN_NAME(graph_encoding) encoding, WASI_NN_NAME(execution_target) target,
WASI_NN_NAME(graph) * g) WASI_NN_IMPORT("load");
#else
WASI_NN_ERROR_TYPE
WASI_NN_NAME(load)
(WASI_NN_NAME(graph_builder_array) * builder,
WASI_NN_NAME(graph_encoding) encoding, WASI_NN_NAME(execution_target) target,
WASI_NN_NAME(graph) * g) WASI_NN_IMPORT("load");
#endif
wasi_nn_error
load(graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *g)
__attribute__((import_module("wasi_nn")));
WASI_NN_ERROR_TYPE
WASI_NN_NAME(load_by_name)
(const char *name, uint32_t name_len, WASI_NN_NAME(graph) * g)
WASI_NN_IMPORT("load_by_name");
wasi_nn_error
load_by_name(const char *name, graph *g)
__attribute__((import_module("wasi_nn")));
/**
* INFERENCE
@ -65,10 +45,9 @@ WASI_NN_NAME(load_by_name)
* @param ctx Execution context.
* @return wasi_nn_error Execution status.
*/
WASI_NN_ERROR_TYPE
WASI_NN_NAME(init_execution_context)
(WASI_NN_NAME(graph) g, WASI_NN_NAME(graph_execution_context) * ctx)
WASI_NN_IMPORT("init_execution_context");
wasi_nn_error
init_execution_context(graph g, graph_execution_context *ctx)
__attribute__((import_module("wasi_nn")));
/**
* @brief Define the inputs to use for inference.
@ -78,10 +57,9 @@ WASI_NN_NAME(init_execution_context)
* @param tensor Input tensor.
* @return wasi_nn_error Execution status.
*/
WASI_NN_ERROR_TYPE
WASI_NN_NAME(set_input)
(WASI_NN_NAME(graph_execution_context) ctx, uint32_t index,
WASI_NN_NAME(tensor) * tensor) WASI_NN_IMPORT("set_input");
wasi_nn_error
set_input(graph_execution_context ctx, uint32_t index, tensor *tensor)
__attribute__((import_module("wasi_nn")));
/**
* @brief Compute the inference on the given inputs.
@ -89,9 +67,8 @@ WASI_NN_NAME(set_input)
* @param ctx Execution context.
* @return wasi_nn_error Execution status.
*/
WASI_NN_ERROR_TYPE
WASI_NN_NAME(compute)
(WASI_NN_NAME(graph_execution_context) ctx) WASI_NN_IMPORT("compute");
wasi_nn_error
compute(graph_execution_context ctx) __attribute__((import_module("wasi_nn")));
/**
* @brief Extract the outputs after inference.
@ -105,17 +82,9 @@ WASI_NN_NAME(compute)
* copied number of bytes.
* @return wasi_nn_error Execution status.
*/
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
WASI_NN_ERROR_TYPE
WASI_NN_NAME(get_output)
(WASI_NN_NAME(graph_execution_context) ctx, uint32_t index,
uint8_t *output_tensor, uint32_t output_tensor_max_size,
uint32_t *output_tensor_size) WASI_NN_IMPORT("get_output");
#else
WASI_NN_ERROR_TYPE
WASI_NN_NAME(get_output)
(graph_execution_context ctx, uint32_t index, uint8_t *output_tensor,
uint32_t *output_tensor_size) WASI_NN_IMPORT("get_output");
#endif
wasi_nn_error
get_output(graph_execution_context ctx, uint32_t index,
tensor_data output_tensor, uint32_t *output_tensor_size)
__attribute__((import_module("wasi_nn")));
#endif

View File

@ -13,48 +13,42 @@
extern "C" {
#endif
/* our host logic doesn't use any prefix. neither legacy wasi_nn.h does. */
#if !defined(__wasm__) || !defined(WASI_NN_NAME)
#define WASI_NN_NAME(name) name
#define WASI_NN_ERROR_NAME(name) name
#define WASI_NN_TYPE_NAME(name) name
#define WASI_NN_ENCODING_NAME(name) name
#define WASI_NN_TARGET_NAME(name) name
#define WASI_NN_ERROR_TYPE wasi_nn_error
#else
#define WASI_NN_ERROR_NAME(name) WASI_NN_NAME(error_##name)
#define WASI_NN_TYPE_NAME(name) WASI_NN_NAME(type_##name)
#define WASI_NN_ENCODING_NAME(name) WASI_NN_NAME(encoding_##name)
#define WASI_NN_TARGET_NAME(name) WASI_NN_NAME(target_##name)
#define WASI_NN_ERROR_TYPE WASI_NN_NAME(error);
#endif
/**
* ERRORS
*
*/
// sync up with
// https://github.com/WebAssembly/wasi-nn/blob/71320d95b8c6d43f9af7f44e18b1839db85d89b4/wasi-nn.witx#L5-L17
// Error codes returned by functions in this API.
// https://github.com/WebAssembly/wasi-nn/blob/main/wit/wasi-nn.wit#L136 Error
// codes returned by functions in this API.
typedef enum {
WASI_NN_ERROR_NAME(success) = 0,
WASI_NN_ERROR_NAME(invalid_argument),
WASI_NN_ERROR_NAME(invalid_encoding),
WASI_NN_ERROR_NAME(missing_memory),
WASI_NN_ERROR_NAME(busy),
WASI_NN_ERROR_NAME(runtime_error),
WASI_NN_ERROR_NAME(unsupported_operation),
WASI_NN_ERROR_NAME(too_large),
WASI_NN_ERROR_NAME(not_found),
// No error occurred.
success = 0,
// Caller module passed an invalid argument.
invalid_argument,
// Invalid encoding.
invalid_encoding,
// The operation timed out.
timeout,
// Runtime Error.
runtime_error,
// Unsupported operation.
unsupported_operation,
// Graph is too large.
too_large,
// Graph not found.
not_found,
// The operation is insecure or has insufficient privilege to be performed.
// e.g., cannot access a hardware feature requested
security,
// The operation failed for an unspecified reason.
unknown,
// for WasmEdge-wasi-nn
WASI_NN_ERROR_NAME(end_of_sequence) = 100, // End of Sequence Found.
WASI_NN_ERROR_NAME(context_full) = 101, // Context Full.
WASI_NN_ERROR_NAME(prompt_tool_long) = 102, // Prompt Too Long.
WASI_NN_ERROR_NAME(model_not_found) = 103, // Model Not Found.
} WASI_NN_ERROR_TYPE;
end_of_sequence = 100, // End of Sequence Found.
context_full = 101, // Context Full.
prompt_tool_long = 102, // Prompt Too Long.
model_not_found = 103, // Model Not Found.
} wasi_nn_error;
/**
* TENSOR
@ -68,27 +62,15 @@ typedef enum {
typedef struct {
uint32_t *buf;
uint32_t size;
} WASI_NN_NAME(tensor_dimensions);
} tensor_dimensions;
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
// sync up with
// https://github.com/WebAssembly/wasi-nn/blob/71320d95b8c6d43f9af7f44e18b1839db85d89b4/wasi-nn.witx#L19-L28
// https://github.com/WebAssembly/wasi-nn/blob/main/wit/wasi-nn.wit#L27
// The type of the elements in a tensor.
typedef enum {
WASI_NN_TYPE_NAME(fp16) = 0,
WASI_NN_TYPE_NAME(fp32),
WASI_NN_TYPE_NAME(fp64),
WASI_NN_TYPE_NAME(u8),
WASI_NN_TYPE_NAME(i32),
WASI_NN_TYPE_NAME(i64),
} WASI_NN_NAME(tensor_type);
typedef enum { fp16 = 0, fp32, fp64, bf16, u8, i32, i64 } tensor_type;
#else
typedef enum {
WASI_NN_TYPE_NAME(fp16) = 0,
WASI_NN_TYPE_NAME(fp32),
WASI_NN_TYPE_NAME(up8),
WASI_NN_TYPE_NAME(ip32),
} WASI_NN_NAME(tensor_type);
typedef enum { fp16 = 0, fp32, up8, ip32 } tensor_type;
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
// The tensor data.
@ -99,31 +81,19 @@ typedef enum {
// 4-byte f32 elements would have a data array of length 16). Naturally, this
// representation requires some knowledge of how to lay out data in
// memory--e.g., using row-major ordering--and could perhaps be improved.
#if !defined(__wasm__) || WASM_ENABLE_WASI_EPHEMERAL_NN != 0
typedef struct {
uint8_t *buf;
uint32_t size;
} WASI_NN_NAME(tensor_data);
#else
typedef uint8_t *WASI_NN_NAME(tensor_data);
#endif
typedef uint8_t *tensor_data;
// A tensor.
typedef struct {
// Describe the size of the tensor (e.g., 2x2x2x2 -> [2, 2, 2, 2]). To
// represent a tensor containing a single value, use `[1]` for the tensor
// dimensions.
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 && defined(__wasm__)
WASI_NN_NAME(tensor_dimensions) dimensions;
#else
WASI_NN_NAME(tensor_dimensions) * dimensions;
#endif
tensor_dimensions *dimensions;
// Describe the type of element in the tensor (e.g., f32).
uint8_t type;
uint8_t _pad[3];
tensor_type type;
// Contains the tensor data.
WASI_NN_NAME(tensor_data) data;
} WASI_NN_NAME(tensor);
tensor_data data;
} tensor;
/**
* GRAPH
@ -138,15 +108,15 @@ typedef struct {
typedef struct {
uint8_t *buf;
uint32_t size;
} WASI_NN_NAME(graph_builder);
} graph_builder;
typedef struct {
WASI_NN_NAME(graph_builder) * buf;
graph_builder *buf;
uint32_t size;
} WASI_NN_NAME(graph_builder_array);
} graph_builder_array;
// An execution graph for performing inference (i.e., a model).
typedef uint32_t WASI_NN_NAME(graph);
typedef uint32_t graph;
// sync up with
// https://github.com/WebAssembly/wasi-nn/blob/main/wit/wasi-nn.wit#L75
@ -154,25 +124,56 @@ typedef uint32_t WASI_NN_NAME(graph);
// various backends that encode (i.e., serialize) their graph IR with different
// formats.
typedef enum {
WASI_NN_ENCODING_NAME(openvino) = 0,
WASI_NN_ENCODING_NAME(onnx),
WASI_NN_ENCODING_NAME(tensorflow),
WASI_NN_ENCODING_NAME(pytorch),
WASI_NN_ENCODING_NAME(tensorflowlite),
WASI_NN_ENCODING_NAME(ggml),
WASI_NN_ENCODING_NAME(autodetect),
WASI_NN_ENCODING_NAME(unknown_backend),
} WASI_NN_NAME(graph_encoding);
openvino = 0,
onnx,
tensorflow,
pytorch,
tensorflowlite,
ggml,
autodetect,
unknown_backend,
} graph_encoding;
// Define where the graph should be executed.
typedef enum WASI_NN_NAME(execution_target) {
WASI_NN_TARGET_NAME(cpu) = 0,
WASI_NN_TARGET_NAME(gpu),
WASI_NN_TARGET_NAME(tpu),
} WASI_NN_NAME(execution_target);
typedef enum execution_target { cpu = 0, gpu, tpu } execution_target;
// Bind a `graph` to the input and output tensors for an inference.
typedef uint32_t WASI_NN_NAME(graph_execution_context);
typedef uint32_t graph_execution_context;
/* Definition of 'wasi_nn.h' structs in WASM app format (using offset) */
typedef wasi_nn_error (*LOAD)(void *, graph_builder_array *, graph_encoding,
execution_target, graph *);
typedef wasi_nn_error (*LOAD_BY_NAME)(void *, const char *, uint32_t, graph *);
typedef wasi_nn_error (*LOAD_BY_NAME_WITH_CONFIG)(void *, const char *,
uint32_t, void *, uint32_t,
graph *);
typedef wasi_nn_error (*INIT_EXECUTION_CONTEXT)(void *, graph,
graph_execution_context *);
typedef wasi_nn_error (*SET_INPUT)(void *, graph_execution_context, uint32_t,
tensor *);
typedef wasi_nn_error (*COMPUTE)(void *, graph_execution_context);
typedef wasi_nn_error (*GET_OUTPUT)(void *, graph_execution_context, uint32_t,
tensor_data, uint32_t *);
/* wasi-nn general APIs */
typedef wasi_nn_error (*BACKEND_INITIALIZE)(void **);
typedef wasi_nn_error (*BACKEND_DEINITIALIZE)(void *);
typedef struct {
LOAD load;
LOAD_BY_NAME load_by_name;
LOAD_BY_NAME_WITH_CONFIG load_by_name_with_config;
INIT_EXECUTION_CONTEXT init_execution_context;
SET_INPUT set_input;
COMPUTE compute;
GET_OUTPUT get_output;
BACKEND_INITIALIZE init;
BACKEND_DEINITIALIZE deinit;
} api_function;
void
wasi_nn_dump_tensor_dimension(tensor_dimensions *dim, int32_t output_len,
char *output);
#ifdef __cplusplus
}

View File

@ -99,8 +99,7 @@ graph_builder_array_app_native(wasm_module_inst_t instance,
static wasi_nn_error
tensor_data_app_native(wasm_module_inst_t instance, uint32_t total_elements,
tensor_wasm *input_tensor_wasm, void **data,
uint32_t *size)
tensor_wasm *input_tensor_wasm, tensor_data *data)
{
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
#define data_size input_tensor_wasm->data_size
@ -114,9 +113,8 @@ tensor_data_app_native(wasm_module_inst_t instance, uint32_t total_elements,
NN_ERR_PRINTF("input_tensor_wasm->data_offset is invalid");
return invalid_argument;
}
*data = wasm_runtime_addr_app_to_native(
*data = (tensor_data)wasm_runtime_addr_app_to_native(
instance, (uint64)input_tensor_wasm->data_offset);
*size = data_size;
return success;
#undef data_size
}
@ -190,19 +188,16 @@ tensor_app_native(wasm_module_inst_t instance, tensor_wasm *input_tensor_wasm,
NN_DBG_PRINTF("Tensor type: %d", input_tensor_wasm->type);
NN_DBG_PRINTF("Total number of elements: %d", total_elements);
void *data = NULL;
uint32_t datasize;
tensor_data data = NULL;
if (success
!= (res =
tensor_data_app_native(instance, total_elements,
input_tensor_wasm, &data, &datasize))) {
!= (res = tensor_data_app_native(instance, total_elements,
input_tensor_wasm, &data))) {
wasm_runtime_free(dimensions);
return res;
}
input_tensor->type = input_tensor_wasm->type;
input_tensor->dimensions = dimensions;
input_tensor->data.buf = data;
input_tensor->data.size = datasize;
input_tensor->data = data;
return success;
}

View File

@ -20,29 +20,12 @@
#include "wasi_nn_types.h"
#include "wasm_export.h"
#if WASM_ENABLE_WASI_EPHEMERAL_NN == 0
#warning You are using "wasi_nn", which is a legacy WAMR-specific ABI. It's deperecated and will likely be removed in future versions of WAMR. Please use "wasi_ephemeral_nn" instead. (For a WASM module, use the wasi_ephemeral_nn.h header instead. For the runtime configurations, enable WASM_ENABLE_WASI_EPHEMERAL_NN/WAMR_BUILD_WASI_EPHEMERAL_NN.)
#endif
#define HASHMAP_INITIAL_SIZE 20
#if defined(__APPLE__)
#define LIB_EXTENTION ".dylib"
#else
#define LIB_EXTENTION ".so"
#endif
#define TFLITE_BACKEND_LIB "libwasi_nn_tflite" LIB_EXTENTION
#define OPENVINO_BACKEND_LIB "libwasi_nn_openvino" LIB_EXTENTION
#define LLAMACPP_BACKEND_LIB "libwasi_nn_llamacpp" LIB_EXTENTION
#define TFLITE_BACKEND_LIB "libwasi_nn_tflite.so"
#define OPENVINO_BACKEND_LIB "libwasi_nn_openvino.so"
#define LLAMACPP_BACKEND_LIB "libwasi_nn_llamacpp.so"
/* Global variables */
static korp_mutex wasi_nn_lock;
/*
* the "lookup" table is protected by wasi_nn_lock.
*
* an exception: during wasm_runtime_destroy, wasi_nn_destroy tears down
* the table without acquiring the lock. it's ok because there should be
* no other threads using the runtime at this point.
*/
struct backends_api_functions {
void *backend_handle;
api_function functions;
@ -55,36 +38,65 @@ struct backends_api_functions {
NN_ERR_PRINTF("Error %s() -> %d", #func, wasi_error); \
} while (0)
static void *wasi_nn_key;
/* HashMap utils */
static HashMap *hashmap;
static uint32
hash_func(const void *key)
{
// fnv1a_hash
const uint32 FNV_PRIME = 16777619;
const uint32 FNV_OFFSET_BASIS = 2166136261U;
uint32 hash = FNV_OFFSET_BASIS;
const unsigned char *bytes = (const unsigned char *)key;
for (size_t i = 0; i < sizeof(uintptr_t); ++i) {
hash ^= bytes[i];
hash *= FNV_PRIME;
}
return hash;
}
static bool
key_equal_func(void *key1, void *key2)
{
return key1 == key2;
}
static void
key_destroy_func(void *key1)
{
/* key type is wasm_module_inst_t*. do nothing */
}
static void
wasi_nn_ctx_destroy(WASINNContext *wasi_nn_ctx)
{
NN_DBG_PRINTF("[WASI NN] DEINIT...");
if (wasi_nn_ctx == NULL) {
NN_ERR_PRINTF(
"Error when deallocating memory. WASI-NN context is NULL");
return;
}
NN_DBG_PRINTF("[WASI NN] DEINIT...");
NN_DBG_PRINTF("Freeing wasi-nn");
NN_DBG_PRINTF("-> is_model_loaded: %d", wasi_nn_ctx->is_model_loaded);
NN_DBG_PRINTF("-> current_encoding: %d", wasi_nn_ctx->backend);
bh_assert(!wasi_nn_ctx->busy);
/* deinit() the backend */
if (wasi_nn_ctx->is_backend_ctx_initialized) {
wasi_nn_error res;
call_wasi_nn_func(wasi_nn_ctx->backend, deinit, res,
wasi_nn_ctx->backend_ctx);
}
os_mutex_destroy(&wasi_nn_ctx->lock);
wasm_runtime_free(wasi_nn_ctx);
}
static void
dtor(wasm_module_inst_t inst, void *ctx)
value_destroy_func(void *value)
{
wasi_nn_ctx_destroy(ctx);
wasi_nn_ctx_destroy((WASINNContext *)value);
}
bool
@ -92,15 +104,12 @@ wasi_nn_initialize()
{
NN_DBG_PRINTF("[WASI NN General] Initializing wasi-nn");
if (os_mutex_init(&wasi_nn_lock)) {
NN_ERR_PRINTF("Error while initializing global lock");
return false;
}
wasi_nn_key = wasm_runtime_create_context_key(dtor);
if (wasi_nn_key == NULL) {
NN_ERR_PRINTF("Failed to create context key");
os_mutex_destroy(&wasi_nn_lock);
// hashmap { instance: wasi_nn_ctx }
hashmap = bh_hash_map_create(HASHMAP_INITIAL_SIZE, true, hash_func,
key_equal_func, key_destroy_func,
value_destroy_func);
if (hashmap == NULL) {
NN_ERR_PRINTF("Error while initializing hashmap");
return false;
}
@ -120,11 +129,6 @@ wasi_nn_initialize_context()
}
memset(wasi_nn_ctx, 0, sizeof(WASINNContext));
if (os_mutex_init(&wasi_nn_ctx->lock)) {
NN_ERR_PRINTF("Error when initializing a lock for WASI-NN context");
wasm_runtime_free(wasi_nn_ctx);
return NULL;
}
return wasi_nn_ctx;
}
@ -133,59 +137,29 @@ static WASINNContext *
wasm_runtime_get_wasi_nn_ctx(wasm_module_inst_t instance)
{
WASINNContext *wasi_nn_ctx =
wasm_runtime_get_context(instance, wasi_nn_key);
(WASINNContext *)bh_hash_map_find(hashmap, (void *)instance);
if (wasi_nn_ctx == NULL) {
WASINNContext *newctx = wasi_nn_initialize_context();
if (newctx == NULL)
wasi_nn_ctx = wasi_nn_initialize_context();
if (wasi_nn_ctx == NULL)
return NULL;
os_mutex_lock(&wasi_nn_lock);
wasi_nn_ctx = wasm_runtime_get_context(instance, wasi_nn_key);
if (wasi_nn_ctx == NULL) {
wasm_runtime_set_context_spread(instance, wasi_nn_key, newctx);
wasi_nn_ctx = newctx;
newctx = NULL;
}
os_mutex_unlock(&wasi_nn_lock);
if (newctx != NULL) {
wasi_nn_ctx_destroy(newctx);
}
}
return wasi_nn_ctx;
}
static WASINNContext *
lock_ctx(wasm_module_inst_t instance)
{
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
if (wasi_nn_ctx == NULL) {
bool ok =
bh_hash_map_insert(hashmap, (void *)instance, (void *)wasi_nn_ctx);
if (!ok) {
NN_ERR_PRINTF("Error while storing context");
wasi_nn_ctx_destroy(wasi_nn_ctx);
return NULL;
}
os_mutex_lock(&wasi_nn_ctx->lock);
if (wasi_nn_ctx->busy) {
os_mutex_unlock(&wasi_nn_ctx->lock);
return NULL;
}
wasi_nn_ctx->busy = true;
os_mutex_unlock(&wasi_nn_ctx->lock);
return wasi_nn_ctx;
}
static void
unlock_ctx(WASINNContext *wasi_nn_ctx)
{
if (wasi_nn_ctx == NULL) {
return;
}
os_mutex_lock(&wasi_nn_ctx->lock);
bh_assert(wasi_nn_ctx->busy);
wasi_nn_ctx->busy = false;
os_mutex_unlock(&wasi_nn_ctx->lock);
return wasi_nn_ctx;
}
void
wasi_nn_destroy()
{
wasm_runtime_destroy_context_key(wasi_nn_key);
// destroy hashmap will destroy keys and values
bh_hash_map_destroy(hashmap);
// close backends' libraries and registered functions
for (unsigned i = 0; i < sizeof(lookup) / sizeof(lookup[0]); i++) {
@ -196,8 +170,6 @@ wasi_nn_destroy()
memset(&lookup[i].functions, 0, sizeof(api_function));
}
os_mutex_destroy(&wasi_nn_lock);
}
/* Utils */
@ -370,10 +342,9 @@ graph_encoding_to_backend_lib_name(graph_encoding encoding)
static bool
detect_and_load_backend(graph_encoding backend_hint,
struct backends_api_functions *backends,
graph_encoding *loaded_backend)
{
bool ret;
if (backend_hint > autodetect)
return false;
@ -385,58 +356,16 @@ detect_and_load_backend(graph_encoding backend_hint,
*loaded_backend = backend_hint;
os_mutex_lock(&wasi_nn_lock);
/* if already loaded */
if (lookup[backend_hint].backend_handle) {
os_mutex_unlock(&wasi_nn_lock);
if (lookup[backend_hint].backend_handle)
return true;
}
const char *backend_lib_name =
graph_encoding_to_backend_lib_name(backend_hint);
if (!backend_lib_name) {
os_mutex_unlock(&wasi_nn_lock);
if (!backend_lib_name)
return false;
}
ret = prepare_backend(backend_lib_name, lookup + backend_hint);
os_mutex_unlock(&wasi_nn_lock);
return ret;
}
static wasi_nn_error
ensure_backend(wasm_module_inst_t instance, graph_encoding encoding,
WASINNContext *wasi_nn_ctx)
{
wasi_nn_error res;
graph_encoding loaded_backend = autodetect;
if (!detect_and_load_backend(encoding, &loaded_backend)) {
res = invalid_encoding;
NN_ERR_PRINTF("load backend failed");
goto fail;
}
if (wasi_nn_ctx->is_backend_ctx_initialized) {
if (wasi_nn_ctx->backend != loaded_backend) {
res = unsupported_operation;
goto fail;
}
}
else {
wasi_nn_ctx->backend = loaded_backend;
/* init() the backend */
call_wasi_nn_func(wasi_nn_ctx->backend, init, res,
&wasi_nn_ctx->backend_ctx);
if (res != success)
goto fail;
wasi_nn_ctx->is_backend_ctx_initialized = true;
}
return success;
fail:
return res;
return prepare_backend(backend_lib_name, backends + backend_hint);
}
/* WASI-NN implementation */
@ -452,8 +381,6 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
graph_encoding encoding, execution_target target, graph *g)
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
{
wasi_nn_error res;
NN_DBG_PRINTF("[WASI NN] LOAD [encoding=%d, target=%d]...", encoding,
target);
@ -461,23 +388,18 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
if (!instance)
return runtime_error;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
wasi_nn_error res;
graph_builder_array builder_native = { 0 };
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
if (success
!= (res = graph_builder_array_app_native(
instance, builder, builder_wasm_size, &builder_native)))
goto fail;
return res;
#else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */
if (success
!= (res = graph_builder_array_app_native(instance, builder,
&builder_native)))
goto fail;
return res;
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
if (!wasm_runtime_validate_native_addr(instance, g,
@ -487,7 +409,19 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder,
goto fail;
}
res = ensure_backend(instance, encoding, wasi_nn_ctx);
graph_encoding loaded_backend = autodetect;
if (!detect_and_load_backend(encoding, lookup, &loaded_backend)) {
res = invalid_encoding;
NN_ERR_PRINTF("load backend failed");
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_ctx->backend = loaded_backend;
/* init() the backend */
call_wasi_nn_func(wasi_nn_ctx->backend, init, res,
&wasi_nn_ctx->backend_ctx);
if (res != success)
goto fail;
@ -502,7 +436,6 @@ fail:
// XXX: Free intermediate structure pointers
if (builder_native.buf)
wasm_runtime_free(builder_native.buf);
unlock_ctx(wasi_nn_ctx);
return res;
}
@ -511,8 +444,6 @@ wasi_nn_error
wasi_nn_load_by_name(wasm_exec_env_t exec_env, char *name, uint32_t name_len,
graph *g)
{
wasi_nn_error res;
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
if (!instance) {
return runtime_error;
@ -536,26 +467,30 @@ wasi_nn_load_by_name(wasm_exec_env_t exec_env, char *name, uint32_t name_len,
NN_DBG_PRINTF("[WASI NN] LOAD_BY_NAME %s...", name);
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
graph_encoding loaded_backend = autodetect;
if (!detect_and_load_backend(autodetect, lookup, &loaded_backend)) {
NN_ERR_PRINTF("load backend failed");
return invalid_encoding;
}
res = ensure_backend(instance, autodetect, wasi_nn_ctx);
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_ctx->backend = loaded_backend;
wasi_nn_error res;
/* init() the backend */
call_wasi_nn_func(wasi_nn_ctx->backend, init, res,
&wasi_nn_ctx->backend_ctx);
if (res != success)
goto fail;
return res;
call_wasi_nn_func(wasi_nn_ctx->backend, load_by_name, res,
wasi_nn_ctx->backend_ctx, name, name_len, g);
if (res != success)
goto fail;
wasi_nn_ctx->is_model_loaded = true;
res = success;
fail:
unlock_ctx(wasi_nn_ctx);
return res;
wasi_nn_ctx->backend = loaded_backend;
wasi_nn_ctx->is_model_loaded = true;
return success;
}
wasi_nn_error
@ -563,8 +498,6 @@ wasi_nn_load_by_name_with_config(wasm_exec_env_t exec_env, char *name,
int32_t name_len, char *config,
int32_t config_len, graph *g)
{
wasi_nn_error res;
wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env);
if (!instance) {
return runtime_error;
@ -593,28 +526,31 @@ wasi_nn_load_by_name_with_config(wasm_exec_env_t exec_env, char *name,
NN_DBG_PRINTF("[WASI NN] LOAD_BY_NAME_WITH_CONFIG %s %s...", name, config);
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
graph_encoding loaded_backend = autodetect;
if (!detect_and_load_backend(autodetect, lookup, &loaded_backend)) {
NN_ERR_PRINTF("load backend failed");
return invalid_encoding;
}
res = ensure_backend(instance, autodetect, wasi_nn_ctx);
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_ctx->backend = loaded_backend;
wasi_nn_error res;
/* init() the backend */
call_wasi_nn_func(wasi_nn_ctx->backend, init, res,
&wasi_nn_ctx->backend_ctx);
if (res != success)
goto fail;
;
return res;
call_wasi_nn_func(wasi_nn_ctx->backend, load_by_name_with_config, res,
wasi_nn_ctx->backend_ctx, name, name_len, config,
config_len, g);
if (res != success)
goto fail;
wasi_nn_ctx->is_model_loaded = true;
res = success;
fail:
unlock_ctx(wasi_nn_ctx);
return res;
wasi_nn_ctx->backend = loaded_backend;
wasi_nn_ctx->is_model_loaded = true;
return success;
}
wasi_nn_error
@ -628,27 +564,20 @@ wasi_nn_init_execution_context(wasm_exec_env_t exec_env, graph g,
return runtime_error;
}
wasi_nn_error res;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail;
return res;
if (!wasm_runtime_validate_native_addr(
instance, ctx, (uint64)sizeof(graph_execution_context))) {
NN_ERR_PRINTF("ctx is invalid");
res = invalid_argument;
goto fail;
return invalid_argument;
}
call_wasi_nn_func(wasi_nn_ctx->backend, init_execution_context, res,
wasi_nn_ctx->backend_ctx, g, ctx);
fail:
unlock_ctx(wasi_nn_ctx);
return res;
}
@ -663,21 +592,17 @@ wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx,
return runtime_error;
}
wasi_nn_error res;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail;
return res;
tensor input_tensor_native = { 0 };
if (success
!= (res = tensor_app_native(instance, input_tensor,
&input_tensor_native)))
goto fail;
return res;
call_wasi_nn_func(wasi_nn_ctx->backend, set_input, res,
wasi_nn_ctx->backend_ctx, ctx, index,
@ -685,8 +610,7 @@ wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx,
// XXX: Free intermediate structure pointers
if (input_tensor_native.dimensions)
wasm_runtime_free(input_tensor_native.dimensions);
fail:
unlock_ctx(wasi_nn_ctx);
return res;
}
@ -700,32 +624,26 @@ wasi_nn_compute(wasm_exec_env_t exec_env, graph_execution_context ctx)
return runtime_error;
}
wasi_nn_error res;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail;
return res;
call_wasi_nn_func(wasi_nn_ctx->backend, compute, res,
wasi_nn_ctx->backend_ctx, ctx);
fail:
unlock_ctx(wasi_nn_ctx);
return res;
}
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
wasi_nn_error
wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
uint32_t index, void *output_tensor,
uint32_t index, tensor_data output_tensor,
uint32_t output_tensor_len, uint32_t *output_tensor_size)
#else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */
wasi_nn_error
wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
uint32_t index, void *output_tensor,
uint32_t index, tensor_data output_tensor,
uint32_t *output_tensor_size)
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
{
@ -736,36 +654,28 @@ wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx,
return runtime_error;
}
wasi_nn_error res;
WASINNContext *wasi_nn_ctx = lock_ctx(instance);
if (wasi_nn_ctx == NULL) {
res = busy;
goto fail;
}
WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance);
wasi_nn_error res;
if (success != (res = is_model_initialized(wasi_nn_ctx)))
goto fail;
return res;
if (!wasm_runtime_validate_native_addr(instance, output_tensor_size,
(uint64)sizeof(uint32_t))) {
NN_ERR_PRINTF("output_tensor_size is invalid");
res = invalid_argument;
goto fail;
return invalid_argument;
}
tensor_data tensor = {
.buf = output_tensor,
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
.size = output_tensor_len,
#else
.size = *output_tensor_size,
#endif
};
call_wasi_nn_func(wasi_nn_ctx->backend, get_output, res,
wasi_nn_ctx->backend_ctx, ctx, index, &tensor,
wasi_nn_ctx->backend_ctx, ctx, index, output_tensor,
&output_tensor_len);
*output_tensor_size = output_tensor_len;
#else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */
call_wasi_nn_func(wasi_nn_ctx->backend, get_output, res,
wasi_nn_ctx->backend_ctx, ctx, index, output_tensor,
output_tensor_size);
fail:
unlock_ctx(wasi_nn_ctx);
#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */
return res;
}
@ -787,7 +697,6 @@ static NativeSymbol native_symbols_wasi_nn[] = {
REG_NATIVE_FUNC(get_output, "(ii*i*)i"),
#else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */
REG_NATIVE_FUNC(load, "(*ii*)i"),
REG_NATIVE_FUNC(load_by_name, "(*i*)i"),
REG_NATIVE_FUNC(init_execution_context, "(i*)i"),
REG_NATIVE_FUNC(set_input, "(ii*)i"),
REG_NATIVE_FUNC(compute, "(i)i"),

View File

@ -2,10 +2,7 @@
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stdlib.h>
#include "wasi_nn_backend.h"
#include "wasi_nn_types.h"
#include "utils/logger.h"
#include "llama.h"
#include "ggml.h"
@ -289,7 +286,7 @@ deinit_backend(void *ctx)
llama_backend_free();
free(backend_ctx);
os_free(backend_ctx);
return success;
}
@ -305,11 +302,6 @@ __load_by_name_with_configuration(void *ctx, const char *filename, graph *g)
{
struct LlamaContext *backend_ctx = (struct LlamaContext *)ctx;
if (backend_ctx->model != NULL) {
// we only implement a single graph
return unsupported_operation;
}
// make sure backend_ctx->config is initialized
struct llama_model_params model_params =
@ -328,7 +320,6 @@ __load_by_name_with_configuration(void *ctx, const char *filename, graph *g)
#endif
backend_ctx->model = model;
*g = 0;
return success;
}
@ -369,16 +360,6 @@ init_execution_context(void *ctx, graph g, graph_execution_context *exec_ctx)
{
struct LlamaContext *backend_ctx = (struct LlamaContext *)ctx;
if (g != 0 || backend_ctx->model == NULL) {
// we only implement a single graph
return runtime_error;
}
if (backend_ctx->ctx != NULL) {
// we only implement a single context
return unsupported_operation;
}
struct llama_context_params ctx_params =
llama_context_params_from_wasi_nn_llama_config(&backend_ctx->config);
struct llama_context *llama_ctx =
@ -389,7 +370,6 @@ init_execution_context(void *ctx, graph g, graph_execution_context *exec_ctx)
}
backend_ctx->ctx = llama_ctx;
*exec_ctx = 0;
NN_INFO_PRINTF("n_predict = %d, n_ctx = %d", backend_ctx->config.n_predict,
llama_n_ctx(backend_ctx->ctx));
@ -401,36 +381,18 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor *wasi_nn_tensor)
{
struct LlamaContext *backend_ctx = (struct LlamaContext *)ctx;
if (exec_ctx != 0 || backend_ctx->ctx == NULL) {
// we only implement a single context
return runtime_error;
}
// tensor->data is the prompt string.
char *prompt_text = (char *)wasi_nn_tensor->data.buf;
uint32_t prompt_text_len = wasi_nn_tensor->data.size;
// note: buf[0] == 1 is a workaround for
// https://github.com/second-state/WasmEdge-WASINN-examples/issues/196.
// we may remove it in future.
if (wasi_nn_tensor->type != u8 || wasi_nn_tensor->dimensions->size != 1
|| !(wasi_nn_tensor->dimensions->buf[0] == 1
|| wasi_nn_tensor->dimensions->buf[0] == prompt_text_len)) {
return invalid_argument;
}
if (wasi_nn_tensor->dimensions->buf[0] == 1 && prompt_text_len != 1) {
NN_WARN_PRINTF("Ignoring seemingly wrong input tensor dimensions.");
}
// tensor->data is the prompt string. ends with \0
char *prompt_text = (char *)wasi_nn_tensor->data;
#ifndef NDEBUG
NN_DBG_PRINTF("--------------------------------------------------");
NN_DBG_PRINTF("prompt_text: %.*s", (int)prompt_text_len, prompt_text);
NN_DBG_PRINTF("prompt_text: %s", prompt_text);
NN_DBG_PRINTF("--------------------------------------------------");
#endif
// tokenize the prompt
uint32_t n_token_max = llama_n_ctx(backend_ctx->ctx);
uint32_t prompt_text_len = strlen(prompt_text);
if (backend_ctx->prompt == NULL) {
backend_ctx->prompt = calloc(n_token_max, sizeof(llama_token));
@ -468,11 +430,6 @@ compute(void *ctx, graph_execution_context exec_ctx)
struct LlamaContext *backend_ctx = (struct LlamaContext *)ctx;
wasi_nn_error ret = runtime_error;
if (exec_ctx != 0 || backend_ctx->ctx == NULL) {
// we only implement a single context
return runtime_error;
}
// reset the generation buffer
if (backend_ctx->generation == NULL) {
backend_ctx->generation =
@ -520,6 +477,7 @@ compute(void *ctx, graph_execution_context exec_ctx)
// main loop
int32_t n_cur = batch.n_tokens;
int n_decode = 0;
int32_t n_vocab = llama_n_vocab(backend_ctx->model);
llama_token_data *candidates = NULL;
@ -570,6 +528,7 @@ compute(void *ctx, graph_execution_context exec_ctx)
// push this new token for next evaluation
llama_batch_add(&batch, new_token_id, n_cur, seq_ids,
sizeof(seq_ids) / sizeof(seq_ids[0]), true);
n_decode++;
n_cur++;
if (llama_decode(backend_ctx->ctx, batch) != 0) {
@ -590,15 +549,10 @@ fail:
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor_data *output_tensor, uint32_t *output_tensor_size)
tensor_data output_tensor, uint32_t *output_tensor_size)
{
struct LlamaContext *backend_ctx = (struct LlamaContext *)ctx;
if (exec_ctx != 0 || backend_ctx->ctx == NULL) {
// we only implement a single context
return runtime_error;
}
// Compatibility with WasmEdge
if (index > 1) {
NN_ERR_PRINTF("Invalid output index %d", index);
@ -614,7 +568,7 @@ get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
printf("%s\n", output_metadata);
}
memcpy(output_tensor->buf, output_metadata, strlen(output_metadata));
memcpy(output_tensor, output_metadata, strlen(output_metadata));
*output_tensor_size = strlen(output_metadata);
return success;
}
@ -634,7 +588,7 @@ get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
printf("%s", buf);
}
memcpy(output_tensor->buf + end_pos, buf, strlen(buf));
memcpy(output_tensor + end_pos, buf, strlen(buf));
end_pos += strlen(buf);
}

View File

@ -3,7 +3,8 @@
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "wasi_nn_backend.h"
#include "wasi_nn_types.h"
#include "wasi_nn_openvino.h"
#include "utils/logger.h"
#include "bh_platform.h"
@ -25,25 +26,17 @@
* from 4. to 6. is the Inference Loop
*/
/* these limits are arbitrary. */
#define MAX_GRAPHS 4
#define MAX_EXECUTION_CONTEXTS 4
typedef struct {
ov_core_t *core;
/* keep input model files */
struct OpenVINOGraph {
void *weight_data;
ov_tensor_t *weights_tensor;
ov_model_t *model;
/* add prepostprocess */
ov_model_t *new_model;
ov_compiled_model_t *compiled_model;
} graphs[MAX_GRAPHS];
struct OpenVINOExecutionContext {
struct OpenVINOGraph *graph;
ov_infer_request_t *infer_request;
} execution_contexts[MAX_EXECUTION_CONTEXTS];
unsigned int n_graphs;
unsigned int n_execution_contexts;
ov_tensor_t *input_tensor;
} OpenVINOContext;
/*
@ -65,7 +58,7 @@ dump_ov_shape_t(const ov_shape_t *shape, int32_t output_len, char *output)
{
int ret = 0;
ret = snprintf(output, output_len, "%" PRId64 ",[", shape->rank);
ret = snprintf(output, output_len, "%ld,[", shape->rank);
if (!ret)
return;
@ -73,7 +66,7 @@ dump_ov_shape_t(const ov_shape_t *shape, int32_t output_len, char *output)
output += ret;
for (unsigned i = 0; i < shape->rank && output_len; i++) {
ret = snprintf(output, output_len, " %" PRId64, shape->dims[i]);
ret = snprintf(output, output_len, " %ld", shape->dims[i]);
if (!ret)
return;
@ -143,7 +136,7 @@ print_model_input_output_info(ov_model_t *model)
output_port = NULL;
}
(void)ov_error;
ov_error = ov_error;
fail:
if (friendly_name)
ov_free(friendly_name);
@ -168,6 +161,8 @@ wasi_nn_tensor_type_to_openvino_element_type(tensor_type wasi_nn_type)
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
case fp64:
return F64;
case bf16:
return BF16;
case i64:
return I64;
case u8:
@ -188,29 +183,6 @@ wasi_nn_tensor_type_to_openvino_element_type(tensor_type wasi_nn_type)
return UNDEFINED;
}
static void
free_graph(struct OpenVINOGraph *graph)
{
if (graph->weight_data)
os_free(graph->weight_data);
if (graph->weights_tensor)
ov_tensor_free(graph->weights_tensor);
if (graph->model)
ov_model_free(graph->model);
if (graph->compiled_model)
ov_compiled_model_free(graph->compiled_model);
}
static void
free_execution_context(struct OpenVINOExecutionContext *c)
{
if (c->infer_request)
ov_infer_request_free(c->infer_request);
}
static wasi_nn_error
uint32_array_to_int64_array(uint32_t array_size, uint32_t *src, int64_t **dst)
{
@ -230,8 +202,6 @@ load(void *ctx, graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *g)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOGraph *graph;
unsigned int graph_idx;
wasi_nn_error ret = unsupported_operation;
if (encoding != openvino) {
@ -257,47 +227,39 @@ load(void *ctx, graph_builder_array *builder, graph_encoding encoding,
graph_builder xml = builder->buf[0];
graph_builder weight = builder->buf[1];
graph_idx = ov_ctx->n_graphs;
if (graph_idx >= MAX_GRAPHS) {
return runtime_error;
/* if xml is a String with a model in IR */
if (!(xml.buf[xml.size] == '\0' && xml.buf[xml.size - 1] != '\0')) {
NN_ERR_PRINTF("Invalid xml string.");
return invalid_argument;
}
graph = &ov_ctx->graphs[graph_idx];
memset(graph, 0, sizeof(*graph));
/* transfer weight to an ov tensor */
{
graph->weight_data = os_malloc(weight.size);
if (!graph->weight_data)
ov_ctx->weight_data = os_malloc(weight.size);
if (!ov_ctx->weight_data)
goto fail;
memcpy(graph->weight_data, weight.buf, weight.size);
memcpy(ov_ctx->weight_data, weight.buf, weight.size);
ov_element_type_e type = U8;
int64_t dims[1] = { weight.size };
ov_shape_t shape = { 1, dims };
CHECK_OV_STATUS(ov_tensor_create_from_host_ptr(type, shape,
graph->weight_data,
&graph->weights_tensor),
ov_ctx->weight_data,
&ov_ctx->weights_tensor),
ret);
}
/* load model from buffer */
CHECK_OV_STATUS(ov_core_read_model_from_memory_buffer(
ov_ctx->core, (char *)xml.buf, xml.size,
graph->weights_tensor, &graph->model),
ov_ctx->weights_tensor, &ov_ctx->model),
ret);
#ifndef NDEBUG
print_model_input_output_info(graph->model);
print_model_input_output_info(ov_ctx->model);
#endif
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, graph->model, "CPU", 0,
&graph->compiled_model),
ret);
*g = graph_idx;
ov_ctx->n_graphs++;
return success;
ret = success;
fail:
free_graph(graph);
return ret;
}
@ -305,62 +267,20 @@ __attribute__((visibility("default"))) wasi_nn_error
load_by_name(void *ctx, const char *filename, uint32_t filename_len, graph *g)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOGraph *graph;
unsigned int graph_idx;
wasi_nn_error ret = unsupported_operation;
graph_idx = ov_ctx->n_graphs;
if (graph_idx >= MAX_GRAPHS) {
return runtime_error;
}
graph = &ov_ctx->graphs[graph_idx];
memset(graph, 0, sizeof(*graph));
CHECK_OV_STATUS(
ov_core_read_model(ov_ctx->core, filename, NULL, &graph->model), ret);
ov_core_read_model(ov_ctx->core, filename, NULL, &ov_ctx->model), ret);
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, graph->model, "CPU", 0,
&graph->compiled_model),
ret);
*g = graph_idx;
ov_ctx->n_graphs++;
return success;
ret = success;
fail:
free_graph(graph);
return ret;
}
__attribute__((visibility("default"))) wasi_nn_error
init_execution_context(void *ctx, graph g, graph_execution_context *exec_ctx)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOGraph *graph;
struct OpenVINOExecutionContext *exec;
unsigned int exec_idx;
wasi_nn_error ret;
if (g >= ov_ctx->n_graphs)
return runtime_error;
graph = &ov_ctx->graphs[g];
exec_idx = ov_ctx->n_execution_contexts;
if (exec_idx >= MAX_EXECUTION_CONTEXTS)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_idx];
memset(exec, 0, sizeof(*exec));
exec->graph = graph;
CHECK_OV_STATUS(ov_compiled_model_create_infer_request(
graph->compiled_model, &exec->infer_request),
ret);
*exec_ctx = exec_idx;
ov_ctx->n_execution_contexts++;
return success;
fail:
return ret;
}
__attribute__((visibility("default"))) wasi_nn_error
@ -368,15 +288,19 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor *wasi_nn_tensor)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOExecutionContext *exec;
wasi_nn_error ret = unsupported_operation;
ov_shape_t input_shape = { 0 };
ov_tensor_t *input_tensor = NULL;
int64_t *ov_dims = NULL;
if (exec_ctx >= ov_ctx->n_execution_contexts)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_ctx];
ov_preprocess_prepostprocessor_t *ppp = NULL;
ov_preprocess_input_info_t *input_info = NULL;
ov_preprocess_input_tensor_info_t *input_tensor_info = NULL;
ov_layout_t *input_layout = NULL;
ov_preprocess_preprocess_steps_t *input_process = NULL;
ov_preprocess_input_model_info_t *p_input_model = NULL;
ov_layout_t *model_layout = NULL;
ov_preprocess_output_info_t *output_info = NULL;
ov_preprocess_output_tensor_info_t *output_tensor_info = NULL;
/* wasi_nn_tensor -> ov_tensor */
{
@ -386,6 +310,17 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
if (ret != success)
goto fail;
/* NCHW -> NHWC */
if (wasi_nn_tensor->dimensions->size == 4 || ov_dims[1] == 3) {
/* N */
/* H */
ov_dims[1] = ov_dims[2];
/* W */
ov_dims[2] = ov_dims[3];
/* C */
ov_dims[3] = 3;
}
CHECK_OV_STATUS(ov_shape_create(wasi_nn_tensor->dimensions->size,
ov_dims, &input_shape),
ret);
@ -401,22 +336,101 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index,
shape_info);
CHECK_OV_STATUS(ov_tensor_create_from_host_ptr(input_type, input_shape,
wasi_nn_tensor->data.buf,
&input_tensor),
wasi_nn_tensor->data,
&ov_ctx->input_tensor),
ret);
}
/* set preprocess based on wasi_nn_tensor */
{
CHECK_OV_STATUS(
ov_preprocess_prepostprocessor_create(ov_ctx->model, &ppp), ret);
/* reuse user' created tensor's info */
CHECK_OV_STATUS(ov_preprocess_prepostprocessor_get_input_info_by_index(
ppp, index, &input_info),
ret);
CHECK_OV_STATUS(ov_preprocess_input_info_get_tensor_info(
input_info, &input_tensor_info),
ret);
CHECK_OV_STATUS(ov_preprocess_input_tensor_info_set_from(
input_tensor_info, ov_ctx->input_tensor),
ret);
/* ! HAS TO BE NHWC. Match previous layout conversion */
CHECK_OV_STATUS(ov_layout_create("NHWC", &input_layout), ret);
CHECK_OV_STATUS(ov_preprocess_input_tensor_info_set_layout(
input_tensor_info, input_layout),
ret);
/* add RESIZE */
CHECK_OV_STATUS(ov_preprocess_input_info_get_preprocess_steps(
input_info, &input_process),
ret);
CHECK_OV_STATUS(
ov_preprocess_preprocess_steps_resize(input_process, RESIZE_LINEAR),
ret);
/* input model */
CHECK_OV_STATUS(
ov_preprocess_input_info_get_model_info(input_info, &p_input_model),
ret);
// TODO: what if not?
CHECK_OV_STATUS(ov_layout_create("NCHW", &model_layout), ret);
CHECK_OV_STATUS(ov_preprocess_input_model_info_set_layout(p_input_model,
model_layout),
ret);
/* output -> F32(possibility) */
CHECK_OV_STATUS(ov_preprocess_prepostprocessor_get_output_info_by_index(
ppp, index, &output_info),
ret);
CHECK_OV_STATUS(ov_preprocess_output_info_get_tensor_info(
output_info, &output_tensor_info),
ret);
CHECK_OV_STATUS(
ov_preprocess_output_set_element_type(output_tensor_info, F32),
ret);
CHECK_OV_STATUS(
ov_preprocess_prepostprocessor_build(ppp, &ov_ctx->new_model), ret);
}
CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, ov_ctx->new_model,
"CPU", 0, &ov_ctx->compiled_model),
ret);
CHECK_OV_STATUS(ov_compiled_model_create_infer_request(
ov_ctx->compiled_model, &ov_ctx->infer_request),
ret);
/* install ov_tensor -> infer_request */
CHECK_OV_STATUS(ov_infer_request_set_input_tensor_by_index(
exec->infer_request, index, input_tensor),
ov_ctx->infer_request, index, ov_ctx->input_tensor),
ret);
ret = success;
fail:
if (ov_dims)
os_free(ov_dims);
if (input_tensor)
ov_tensor_free(input_tensor);
ov_shape_free(&input_shape);
if (ppp)
ov_preprocess_prepostprocessor_free(ppp);
if (input_info)
ov_preprocess_input_info_free(input_info);
if (input_tensor_info)
ov_preprocess_input_tensor_info_free(input_tensor_info);
if (input_layout)
ov_layout_free(input_layout);
if (input_process)
ov_preprocess_preprocess_steps_free(input_process);
if (p_input_model)
ov_preprocess_input_model_info_free(p_input_model);
if (model_layout)
ov_layout_free(model_layout);
if (output_info)
ov_preprocess_output_info_free(output_info);
if (output_tensor_info)
ov_preprocess_output_tensor_info_free(output_tensor_info);
return ret;
}
@ -425,14 +439,9 @@ __attribute__((visibility("default"))) wasi_nn_error
compute(void *ctx, graph_execution_context exec_ctx)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOExecutionContext *exec;
wasi_nn_error ret = unsupported_operation;
if (exec_ctx >= ov_ctx->n_execution_contexts)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_ctx];
CHECK_OV_STATUS(ov_infer_request_infer(exec->infer_request), ret);
CHECK_OV_STATUS(ov_infer_request_infer(ov_ctx->infer_request), ret);
ret = success;
fail:
return ret;
@ -440,33 +449,23 @@ fail:
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor_data *output_tensor, uint32_t *output_tensor_size)
tensor_data output_tensor, uint32_t *output_tensor_size)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
struct OpenVINOExecutionContext *exec;
wasi_nn_error ret = unsupported_operation;
ov_tensor_t *ov_tensor = NULL;
void *data = NULL;
size_t byte_size = 0;
if (exec_ctx >= ov_ctx->n_execution_contexts)
return runtime_error;
exec = &ov_ctx->execution_contexts[exec_ctx];
CHECK_OV_STATUS(ov_infer_request_get_output_tensor_by_index(
exec->infer_request, index, &ov_tensor),
ov_ctx->infer_request, index, &ov_tensor),
ret);
CHECK_OV_STATUS(ov_tensor_get_byte_size(ov_tensor, &byte_size), ret);
if (byte_size > output_tensor->size) {
ret = too_large;
goto fail;
}
CHECK_OV_STATUS(ov_tensor_data(ov_tensor, &data), ret);
memcpy(output_tensor->buf, data, byte_size);
memcpy(output_tensor, data, byte_size);
*output_tensor_size = (uint32_t)byte_size;
@ -512,7 +511,7 @@ init_backend(void **ctx)
*ctx = (void *)ov_ctx;
return success;
fail:
os_free(ov_ctx);
openvino_destroy((void *)ov_ctx);
return ret;
}
@ -520,16 +519,27 @@ __attribute__((visibility("default"))) wasi_nn_error
deinit_backend(void *ctx)
{
OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx;
unsigned int i;
if (!ov_ctx)
return invalid_argument;
for (i = 0; i < ov_ctx->n_execution_contexts; i++)
free_execution_context(&ov_ctx->execution_contexts[i]);
if (ov_ctx->weight_data)
os_free(ov_ctx->weight_data);
for (i = 0; i < ov_ctx->n_graphs; i++)
free_graph(&ov_ctx->graphs[i]);
if (ov_ctx->weights_tensor)
ov_tensor_free(ov_ctx->weights_tensor);
if (ov_ctx->input_tensor)
ov_tensor_free(ov_ctx->input_tensor);
if (ov_ctx->infer_request)
ov_infer_request_free(ov_ctx->infer_request);
if (ov_ctx->compiled_model)
ov_compiled_model_free(ov_ctx->compiled_model);
if (ov_ctx->model)
ov_model_free(ov_ctx->model);
if (ov_ctx->core)
ov_core_free(ov_ctx->core);

View File

@ -3,26 +3,15 @@
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_BACKEND_H
#define WASI_NN_BACKEND_H
#ifndef WASI_NN_OPENVINO_HPP
#define WASI_NN_OPENVINO_HPP
#include "wasi_nn_types.h"
#ifdef __cplusplus
extern "C" {
#endif
__attribute__((visibility("default"))) wasi_nn_error
load(void *ctx, graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *g);
__attribute__((visibility("default"))) wasi_nn_error
load_by_name(void *tflite_ctx, const char *name, uint32_t namelen, graph *g);
__attribute__((visibility("default"))) wasi_nn_error
load_by_name_with_config(void *ctx, const char *name, uint32_t namelen,
const char *config, uint32_t config_len, graph *g);
__attribute__((visibility("default"))) wasi_nn_error
init_execution_context(void *ctx, graph g, graph_execution_context *exec_ctx);
@ -35,7 +24,7 @@ compute(void *ctx, graph_execution_context exec_ctx);
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index,
tensor_data *output_tensor, uint32_t *output_tensor_size);
tensor_data output_tensor, uint32_t *output_tensor_size);
__attribute__((visibility("default"))) wasi_nn_error
init_backend(void **ctx);
@ -43,8 +32,4 @@ init_backend(void **ctx);
__attribute__((visibility("default"))) wasi_nn_error
deinit_backend(void *ctx);
#ifdef __cplusplus
}
#endif
#endif /* WASI_NN_BACKEND_H */
#endif /* WASI_NN_OPENVINO_HPP */

View File

@ -9,44 +9,10 @@
#include "wasi_nn_types.h"
#include "wasm_export.h"
#include "bh_platform.h"
typedef struct {
korp_mutex lock;
bool busy;
bool is_backend_ctx_initialized;
bool is_model_loaded;
graph_encoding backend;
void *backend_ctx;
} WASINNContext;
typedef wasi_nn_error (*LOAD)(void *, graph_builder_array *, graph_encoding,
execution_target, graph *);
typedef wasi_nn_error (*LOAD_BY_NAME)(void *, const char *, uint32_t, graph *);
typedef wasi_nn_error (*LOAD_BY_NAME_WITH_CONFIG)(void *, const char *,
uint32_t, void *, uint32_t,
graph *);
typedef wasi_nn_error (*INIT_EXECUTION_CONTEXT)(void *, graph,
graph_execution_context *);
typedef wasi_nn_error (*SET_INPUT)(void *, graph_execution_context, uint32_t,
tensor *);
typedef wasi_nn_error (*COMPUTE)(void *, graph_execution_context);
typedef wasi_nn_error (*GET_OUTPUT)(void *, graph_execution_context, uint32_t,
tensor_data *, uint32_t *);
/* wasi-nn general APIs */
typedef wasi_nn_error (*BACKEND_INITIALIZE)(void **);
typedef wasi_nn_error (*BACKEND_DEINITIALIZE)(void *);
typedef struct {
LOAD load;
LOAD_BY_NAME load_by_name;
LOAD_BY_NAME_WITH_CONFIG load_by_name_with_config;
INIT_EXECUTION_CONTEXT init_execution_context;
SET_INPUT set_input;
COMPUTE compute;
GET_OUTPUT get_output;
BACKEND_INITIALIZE init;
BACKEND_DEINITIALIZE deinit;
} api_function;
#endif

View File

@ -3,10 +3,11 @@
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "wasi_nn_tensorflowlite.hpp"
#include "utils/logger.h"
#include "bh_platform.h"
#include "wasi_nn_backend.h"
#include "wasi_nn_types.h"
#include "wasm_export.h"
#include <tensorflow/lite/interpreter.h>
@ -84,10 +85,14 @@ is_valid_graph(TFLiteContext *tfl_ctx, graph g)
NN_ERR_PRINTF("Invalid graph: %d >= %d.", g, MAX_GRAPHS_PER_INST);
return runtime_error;
}
if (tfl_ctx->models[g].model == NULL) {
if (tfl_ctx->models[g].model_pointer == NULL) {
NN_ERR_PRINTF("Context (model) non-initialized.");
return runtime_error;
}
if (tfl_ctx->models[g].model == NULL) {
NN_ERR_PRINTF("Context (tflite model) non-initialized.");
return runtime_error;
}
return success;
}
@ -280,11 +285,6 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
{
TFLiteContext *tfl_ctx = (TFLiteContext *)tflite_ctx;
if (input_tensor->type != fp32) {
NN_ERR_PRINTF("unsupported input tensor type %u", input_tensor->type);
return runtime_error;
}
wasi_nn_error res;
if (success != (res = is_valid_graph_execution_context(tfl_ctx, ctx)))
return res;
@ -323,7 +323,7 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
index);
int size = model_tensor_size * sizeof(float);
bh_memcpy_s(it, size, input_tensor->data.buf, size);
bh_memcpy_s(it, size, input_tensor->data, size);
}
else { // TODO: Assuming uint8 quantized networks.
TfLiteAffineQuantization *quant_info =
@ -341,7 +341,7 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
NN_DBG_PRINTF("input tensor: (scale, offset) = (%f, %f)", scale,
zero_point);
float *input_tensor_f = (float *)input_tensor->data.buf;
float *input_tensor_f = (float *)input_tensor->data;
for (uint32_t i = 0; i < model_tensor_size; ++i) {
it[i] = (uint8_t)(input_tensor_f[i] / scale + zero_point);
}
@ -365,7 +365,7 @@ compute(void *tflite_ctx, graph_execution_context ctx)
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
tensor_data *output_tensor, uint32_t *output_tensor_size)
tensor_data output_tensor, uint32_t *output_tensor_size)
{
TFLiteContext *tfl_ctx = (TFLiteContext *)tflite_ctx;
@ -388,34 +388,23 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
return too_large;
}
uint32_t model_tensor_size = 1;
for (int i = 0; i < (int)tensor->dims->size; ++i)
model_tensor_size *= (uint32_t)tensor->dims->data[i];
if (*output_tensor_size < model_tensor_size) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
if (tensor->quantization.type == kTfLiteNoQuantization) {
NN_DBG_PRINTF("No quantization information");
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
if (output_tensor->size < tensor->bytes) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
if (output_tensor->size < tensor->bytes / sizeof(float)) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#endif
bh_memcpy_s(output_tensor->buf, output_tensor->size, tensor->data.data,
tensor->bytes);
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
*output_tensor_size = tensor->bytes;
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
*output_tensor_size = tensor->bytes / sizeof(float);
#endif
float *ot =
tfl_ctx->interpreters[ctx].interpreter->typed_output_tensor<float>(
index);
int size = model_tensor_size * sizeof(float);
bh_memcpy_s(output_tensor, size, ot, size);
}
else { // TODO: Assuming uint8 quantized networks.
TfLiteAffineQuantization *quant_info =
@ -424,27 +413,6 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
NN_ERR_PRINTF("Quantization per channel is not supported");
return runtime_error;
}
uint32_t model_tensor_size = 1;
for (int i = 0; i < (int)tensor->dims->size; ++i)
model_tensor_size *= (uint32_t)tensor->dims->data[i];
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
if (output_tensor->size / sizeof(float) < model_tensor_size) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
if (output_tensor->size < model_tensor_size) {
NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index);
return too_large;
}
#endif
uint8_t *ot = tfl_ctx->interpreters[ctx]
.interpreter->typed_output_tensor<uint8_t>(index);
@ -453,22 +421,13 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
NN_DBG_PRINTF("output tensor: (scale, offset) = (%f, %f)", scale,
zero_point);
float *output_tensor_f = (float *)output_tensor->buf;
float *output_tensor_f = (float *)output_tensor;
for (uint32_t i = 0; i < model_tensor_size; ++i) {
output_tensor_f[i] = (ot[i] - zero_point) * scale;
}
#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0
*output_tensor_size = model_tensor_size * sizeof(float);
#else
/*
* for now, maintain the bug-to-bug compatibility with the old abi,
* where the size here is the number of fp32, not bytes.
*/
*output_tensor_size = model_tensor_size;
#endif
}
*output_tensor_size = model_tensor_size;
return success;
}
@ -513,6 +472,7 @@ deinit_backend(void *tflite_ctx)
NN_DBG_PRINTF("Freeing memory.");
for (int i = 0; i < MAX_GRAPHS_PER_INST; ++i) {
tfl_ctx->models[i].model.reset();
if (tfl_ctx->models[i].model_pointer) {
if (tfl_ctx->delegate) {
switch (tfl_ctx->models[i].target) {
case gpu:
@ -529,7 +489,8 @@ deinit_backend(void *tflite_ctx)
#if WASM_ENABLE_WASI_NN_EXTERNAL_DELEGATE != 0
TfLiteExternalDelegateDelete(tfl_ctx->delegate);
#else
NN_ERR_PRINTF("External delegate delete but not enabled.");
NN_ERR_PRINTF(
"External delegate delete but not enabled.");
#endif
break;
}
@ -537,7 +498,6 @@ deinit_backend(void *tflite_ctx)
break;
}
}
if (tfl_ctx->models[i].model_pointer) {
wasm_runtime_free(tfl_ctx->models[i].model_pointer);
}
tfl_ctx->models[i].model_pointer = NULL;

View File

@ -0,0 +1,47 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef WASI_NN_TENSORFLOWLITE_HPP
#define WASI_NN_TENSORFLOWLITE_HPP
#include "wasi_nn_types.h"
#ifdef __cplusplus
extern "C" {
#endif
__attribute__((visibility("default"))) wasi_nn_error
load(void *tflite_ctx, graph_builder_array *builder, graph_encoding encoding,
execution_target target, graph *g);
__attribute__((visibility("default"))) wasi_nn_error
load_by_name(void *tflite_ctx, const char *filename, uint32_t filename_len,
graph *g);
__attribute__((visibility("default"))) wasi_nn_error
init_execution_context(void *tflite_ctx, graph g, graph_execution_context *ctx);
__attribute__((visibility("default"))) wasi_nn_error
set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
tensor *input_tensor);
__attribute__((visibility("default"))) wasi_nn_error
compute(void *tflite_ctx, graph_execution_context ctx);
__attribute__((visibility("default"))) wasi_nn_error
get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index,
tensor_data output_tensor, uint32_t *output_tensor_size);
__attribute__((visibility("default"))) wasi_nn_error
init_backend(void **tflite_ctx);
__attribute__((visibility("default"))) wasi_nn_error
deinit_backend(void *tflite_ctx);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -37,10 +37,7 @@ RUN wget https://github.com/Kitware/CMake/releases/download/v${CMAKE_VER}/cmake-
WORKDIR /tmp
RUN git clone https://github.com/VeriSilicon/TIM-VX.git tim-vx \
&& git clone https://github.com/VeriSilicon/tflite-vx-delegate.git \
&& git clone https://github.com/tensorflow/tensorflow.git --branch v2.12.0
WORKDIR /tmp/tensorflow
RUN git cherry-pick -n 5115fa96d7c5b41451674892317be43e30b7c389
&& git clone https://github.com/tensorflow/tensorflow.git
# Build TIM-VX
@ -102,24 +99,28 @@ RUN cp --parents \
ENV VIVANTE_SDK_DIR=/tmp/tim-vx/prebuilt-sdk/x86_64_linux/
ENV VSIMULATOR_CONFIG=czl
ENV LD_LIBRARY_PATH=/tmp/tim-vx/prebuilt-sdk/x86_64_linux/lib:/usr/local/lib:/lib/x86_64-linux-gnu/:/lib64/:/usr/lib:$LD_LIBRARY_PATH
# Build WASI-NN
WORKDIR /home/wamr
COPY . .
WORKDIR /home/wamr/product-mini/platforms/linux
WORKDIR /home/wamr/core/iwasm/libraries/wasi-nn/test/build
RUN rm -rf build \
&& cmake -S . -B build\
-DCMAKE_LIBRARY_PATH="/usr/local/lib/" \
-DCMAKE_INCLUDE_PATH="/usr/local/include/" \
# hadolint ignore=SC2086
RUN cmake \
-DCMAKE_LIBRARY_PATH=${CMAKE_LIBRARY_PATH}:/usr/local/lib/ \
-DCMAKE_INCLUDE_PATH=${CMAKE_INCLUDE_PATH}:/usr/local/include/ \
-DWAMR_BUILD_WASI_NN=1 \
-DWAMR_BUILD_WASI_NN_TFLITE=1\
-DWAMR_BUILD_WASI_NN_ENABLE_EXT=1 \
-DWASI_NN_EXT_DELEGATE_PATH="/usr/lib/libvx_delegate.so" \
&& cmake --build build -j "$(grep -c ^processor /proc/cpuinfo)"
..
RUN cp /home/wamr/product-mini/platforms/linux/build/iwasm /run/iwasm \
RUN make -j "$(grep -c ^processor /proc/cpuinfo)"
RUN cp /home/wamr/core/iwasm/libraries/wasi-nn/test/build/iwasm /run/iwasm \
&& cp /home/wamr/product-mini/platforms/linux/build/lib*.so /usr/lib
ENTRYPOINT [ "/run/iwasm" ]

View File

@ -3,17 +3,6 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# on intel mac, this ends up with a lot of the following error.
#
# AttributeError: 'Sequential' object has no attribute '_get_save_spec'.
#
# * "pip install tensorflow" installs tensorflow 2.16.2 on intel mac.
# (because it's the last version before tf deprecated the target.)
# * keras 3 support in the version seems incomplete (thus the error)
# * a workaround: use keras 2 as mentioned in:
# https://github.com/tensorflow/tensorflow/releases/tag/v2.16.1
# https://blog.tensorflow.org/2024/03/whats-new-in-tensorflow-216.html
CURR_PATH=$(cd $(dirname $0) && pwd -P)
# WASM application that uses WASI-NN

View File

@ -3,7 +3,7 @@
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import AveragePooling2D, Conv2D
from keras.layers import AveragePooling2D, Conv2D
from tensorflow.keras import Input, Model

View File

@ -58,7 +58,7 @@ wasm_load(char *model_name, graph *g, execution_target target)
wasi_nn_error
wasm_load_by_name(const char *model_name, graph *g)
{
wasi_nn_error res = load_by_name(model_name, strlen(model_name), g);
wasi_nn_error res = load_by_name(model_name, g);
return res;
}
@ -108,8 +108,7 @@ run_inference(execution_target target, float *input, uint32_t *input_size,
uint32_t num_output_tensors)
{
graph graph;
if (wasm_load_by_name(model_name, &graph) != success) {
if (wasm_load(model_name, &graph, target) != success) {
NN_ERR_PRINTF("Error when loading model.");
exit(1);
}

View File

@ -201,20 +201,10 @@ openat(int fd, const char *pathname, int flags, ...)
int ret;
char dir_path[DIR_PATH_LEN];
char *full_path;
mode_t mode = 0;
bool has_mode = false;
if (flags & O_CREAT) {
va_list ap;
va_start(ap, flags);
mode = (mode_t)va_arg(ap, int);
va_end(ap);
has_mode = true;
}
ret = fcntl(fd, F_GETPATH, dir_path);
if (ret != 0) {
errno = EINVAL;
errno = -EINVAL;
return -1;
}
@ -224,7 +214,7 @@ openat(int fd, const char *pathname, int flags, ...)
return -1;
}
new_fd = has_mode ? open(full_path, flags, mode) : open(full_path, flags);
new_fd = open(full_path, flags);
free(full_path);
return new_fd;

View File

@ -35,8 +35,8 @@ extend_vector(Vector *vector, size_t length)
if (length <= vector->max_elems)
return true;
if (length < vector->max_elems * 3 / 2)
length = vector->max_elems * 3 / 2;
if (length < vector->size_elem * 3 / 2)
length = vector->size_elem * 3 / 2;
if (!(data = alloc_vector_data(length, vector->size_elem))) {
return false;
@ -194,12 +194,12 @@ bh_vector_append(Vector *vector, const void *elem_buf)
goto just_return;
}
/* make sure one more slot is used by the thread who allocates it */
/* make sure one more slot is used by the thread who allocas it */
if (vector->lock)
os_mutex_lock(vector->lock);
if (!extend_vector(vector, vector->num_elems + 1)) {
LOG_ERROR("Append vector elem failed: extend vector failed.\n");
LOG_ERROR("Append ector elem failed: extend vector failed.\n");
goto unlock_return;
}

View File

@ -18,7 +18,7 @@
/* clang-format off */
#define WAMR_VERSION_MAJOR 2
#define WAMR_VERSION_MINOR 3
#define WAMR_VERSION_PATCH 1
#define WAMR_VERSION_PATCH 0
/* clang-format on */
#endif

View File

@ -102,7 +102,6 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM
### **Enable lib wasi-nn**
- **WAMR_BUILD_WASI_NN**=1/0, default to disable if not set
> Note: WAMR_BUILD_WASI_NN without WAMR_BUILD_WASI_EPHEMERAL_NN is deprecated and will likely be removed in future versions of WAMR. Please consider to enable WAMR_BUILD_WASI_EPHEMERAL_NN as well.
> Note: See [WASI-NN](../core/iwasm/libraries/wasi-nn) for more details.
### **Enable lib wasi-nn GPU mode**
@ -114,7 +113,7 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM
- **WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH**=Path to the external delegate shared library (e.g. `libedgetpu.so.1.0` for Coral USB)
### **Enable lib wasi-nn with `wasi_ephemeral_nn` module support**
- **WAMR_BUILD_WASI_EPHEMERAL_NN**=1/0, default to enable if not set
- **WAMR_BUILD_WASI_EPHEMERAL_NN**=1/0, default to disable if not set
### **Disable boundary check with hardware trap**
- **WAMR_DISABLE_HW_BOUND_CHECK**=1/0, default to enable if not set and supported by platform
@ -328,10 +327,6 @@ And the wasm app can calls below APIs to allocate/free memory from/to the shared
- **WAMR_BUILD_SHRUNK_MEMORY**=1/0, default to enable if not set
> Note: When enabled, this feature will reduce memory usage by decreasing the size of the linear memory, particularly when the `memory.grow` opcode is not used and memory usage is somewhat predictable.
## **Instruction metering**
- **WAMR_BUILD_INSTRUCTION_METERING**=1/0, default to disable if not set
> Note: Enabling this feature allows limiting the number of instructions a wasm module instance can execute. Use the `wasm_runtime_set_instruction_count_limit(...)` API before calling `wasm_runtime_call_*(...)` APIs to enforce this limit.
## **Combination of configurations:**
We can combine the configurations. For example, if we want to disable interpreter, enable AOT and WASI, we can run command:

View File

@ -22,12 +22,7 @@ set (WAMR_ROOT_DIR path/to/wamr/root)
include (${WAMR_ROOT_DIR}/build-scripts/runtime_lib.cmake)
add_library(vmlib ${WAMR_RUNTIME_LIB_SOURCE})
# include bh_read_file.h
include (${SHARED_DIR}/utils/uncommon/shared_uncommon.cmake)
add_executable (your_project main.c ${UNCOMMON_SHARED_SOURCE})
target_link_libraries (your_project vmlib -lm)
target_link_libraries (your_project vmlib)
```
Examples can be found in [CMakeLists.txt of linux platform](../product-mini/platforms/linux/CMakeLists.txt) and [other platforms](../product-mini/platforms). The available features to configure can be found in [Build WAMR vmcore](./build_wamr.md#wamr-vmcore-cmake-building-configurations).
@ -36,10 +31,6 @@ Developer can also use Makefile to embed WAMR, by defining macros and including
## The runtime initialization
``` C
#include "bh_platform.h"
#include "bh_read_file.h"
#include "wasm_export.h"
char *buffer, error_buf[128];
wasm_module_t module;
wasm_module_inst_t module_inst;
@ -51,7 +42,7 @@ Developer can also use Makefile to embed WAMR, by defining macros and including
wasm_runtime_init();
/* read WASM file into a memory buffer */
buffer = bh_read_file_to_buffer(…, &size);
buffer = read_wasm_binary_to_buffer(…, &size);
/* add line below if we want to export native functions to WASM app */
wasm_runtime_register_natives(...);

View File

@ -4,6 +4,6 @@ WebAssembly [reference-types](https://github.com/WebAssembly/reference-types) pr
WAMR has implemented the reference-types proposal. WAMR allows a native method to pass a host object to a WASM application as an `externref` parameter or receives a host object from a WASM application as an `externref` result. Internally, WAMR won't try to parse or dereference `externref`. It is an opaque type.
The restriction of using `externref` in a native method is the host object has to be the value of a `uintptr_t` variable. In other words, it takes **8 bytes** on 64-bit machine and **4 bytes** on 32-bit machines. Please keep that in mind especially when calling `wasm_runtime_call_wasm`.
The restriction of using `externref` in a native method is the host object has to be the value of a `unintptr_t` variable. In other words, it takes **8 bytes** on 64-bit machine and **4 bytes** on 32-bit machines. Please keep that in mind especially when calling `wasm_runtime_call_wasm`.
Please ref to the [sample](../samples/ref-types) for more details.

View File

@ -1,4 +1,4 @@
version: "2.2.0~3"
version: "2.0.0"
description: WebAssembly Micro Runtime - A lightweight standalone WebAssembly (Wasm) runtime with small footprint, high performance and highly configurable features
url: https://bytecodealliance.org/
repository: https://github.com/bytecodealliance/wasm-micro-runtime.git
@ -11,7 +11,5 @@ targets:
- esp32s3
- esp32c3
- esp32c6
- esp32p4
- esp32c5
examples:
- path: product-mini/platforms/esp-idf

View File

@ -112,12 +112,12 @@ def wasm_vec_to_list(vec):
wasm_frame_vec_t,
wasm_extern_vec_t,
]
known_vec_pointer_type = [POINTER(vec_type) for vec_type in known_vec_type]
known_vec_pointer_type = [POINTER(type) for type in known_vec_type]
if any([isinstance(vec, pointer_type) for pointer_type in known_vec_pointer_type]):
if any([isinstance(vec, type) for type in known_vec_pointer_type]):
vec = dereference(vec)
return [vec.data[i] for i in range(vec.num_elems)]
elif any([isinstance(vec, vec_type) for vec_type in known_vec_type]):
elif any([isinstance(vec, type) for type in known_vec_type]):
return [vec.data[i] for i in range(vec.num_elems)]
else:
raise RuntimeError("not a known vector type")
@ -405,7 +405,7 @@ def __compare_wasm_val_t(self, other):
elif WASM_F32 == self.kind:
return self.of.f32 == other.of.f32
elif WASM_F64 == self.kind:
return self.of.f64 == other.of.f64
return self.of.f64 == other.of.f63
elif WASM_EXTERNREF == self.kind:
raise RuntimeError("FIXME")
else:

View File

@ -6,7 +6,7 @@
### Pre-requisites
#### Install requirements
Before proceeding it is necessary to make sure your Python environment is correctly configured. To do this open a terminal session in this directory and perform the following:
Before proceeding it is necessary to make sure your Python environment is correctly configured. To do ths open a terminal session in this directory and perfom the following:
```shell

View File

@ -353,12 +353,12 @@ writable and needs to be copied into a ctype array.
#### variable arguments
A function with _variable arguments_ makes it hard to specify the required
A function with _variable arugments_ makes it hard to specify the required
argument types for the function prototype. It leaves us one way to call it
directly without any arguments type checking.
```python
libc.printf(b"Hello, an int %d, a float %f, a string %s\n", c_int(1), c_double(3.14), "World!")
libc.printf(b"Hello, an int %d, a float %f, a string %s\n", c_int(1), c_doulbe(3.14), "World!")
```
#### Use `c_bool` to represent `wasm_mutability_t `
@ -373,7 +373,7 @@ libc.printf(b"Hello, an int %d, a float %f, a string %s\n", c_int(1), c_double(3
### bindgen.py
`bindgen.py` is a tool to create WAMR python binding automatically. `binding.py`
`bindge.py` is a tool to create WAMR python binding automatically. `binding.py`
is generated. We should avoid modification on it. Additional helpers should go
to `ffi.py`.

View File

@ -111,7 +111,7 @@ The Fast JIT is a lightweight JIT engine with quick startup, small footprint and
(6) To enable the `Multi-tier JIT` mode:
``` Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_FAST_JIT=1 -DWAMR_BUILD_JIT=1
cmake .. -DWAMR_BUILD_FAST_JTI=1 -DWAMR_BUILD_JIT=1
make
```
The Multi-tier JIT is a two level JIT tier-up engine, which launches Fast JIT to run the wasm module as soon as possible and creates backend threads to compile the LLVM JIT functions at the same time, and when the LLVM JIT functions are compiled, the runtime will switch the extecution from the Fast JIT jitted code to LLVM JIT jitted code gradually, so as to gain the best performance.

View File

@ -114,12 +114,6 @@ set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS}")
set (CMAKE_MACOSX_RPATH True)
# if enable wasi-nn, both wasi-nn-backends and iwasm
# need to use same WAMR (dynamic) libraries
if (WAMR_BUILD_WASI_NN EQUAL 1)
set (BUILD_SHARED_LIBS ON)
endif ()
set (WAMR_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../..)
include (${WAMR_ROOT_DIR}/build-scripts/runtime_lib.cmake)
@ -138,7 +132,7 @@ add_library (vmlib ${WAMR_RUNTIME_LIB_SOURCE})
set_version_info (vmlib)
target_include_directories(vmlib INTERFACE
$<INSTALL_INTERFACE:include>
$<INSTALL_INTERFACE:include/iwasm>
)
set (WAMR_PUBLIC_HEADERS
@ -157,7 +151,7 @@ target_link_libraries (vmlib ${LLVM_AVAILABLE_LIBS} ${UV_A_LIBS} -lm -ldl -lpthr
install (TARGETS vmlib
EXPORT iwasmTargets
DESTINATION lib
PUBLIC_HEADER DESTINATION include
PUBLIC_HEADER DESTINATION include/iwasm
)
install_iwasm_package ()

View File

@ -1,91 +0,0 @@
# How to Use WAMR with ESP-IDF
ESP-IDF is the official development framework for Espressif SoCs, supporting Windows, Linux, and macOS. WAMR (WebAssembly Micro Runtime) can be integrated as a standard [ESP-IDF](https://github.com/espressif/esp-idf) component.
## 1. Setup the ESP-IDF Development Environment
This example demonstrates how to use WAMR with ESP-IDF. Before proceeding, ensure you have the ESP-IDF development environment installed. For the relevant process, please refer to ESP-IDF [documents](https://docs.espressif.com/projects/esp-idf/en/latest/esp32/get-started/index.html).
### Prerequisites
#### Software Requirements
* ESP-IDF v4.4.0 and above.
#### Hardware Requirements
* A development board with one of the following SoCs:
- ESP32
- ESP32-C3
- ESP32-S3
- ESP32-C6
- ESP32-P4
- ESP32-C5
* See [Development Boards](https://www.espressif.com/en/products/devkits) for more information about it.
> Note: Different chips require different ESP-IDF versions, please check [ESP-IDF Release and SoC Compatibility](https://github.com/espressif/esp-idf?tab=readme-ov-file#esp-idf-release-and-soc-compatibility) before proceeding.
### Installation Steps
1. Navigate to the ESP-IDF root directory.
2. Run the installation script based on your OS:
- Linux/MacOS
```
./install.sh
```
- Windows
```
./install.bat
```
3. If successful, you should see:
```
All done! You can now run:
. ./export.sh
```
## 2. Compiling and Running the Project
### Set the Target Chip
Switch to the project directory and specify the target chip:
```bash
idf.py set-target <chip_name>
```
### Configure the project
Open the configuration menu:
```bash
idf.py menuconfig
```
To modify WAMR settings, navigate to: `Component config -> WASM Micro Runtime`
### Build and Flash
Run the following command to compile, flash, and monitor the application:
```bash
idf.py -p PORT flash monitor
```
(To exit the serial monitor, type ``Ctrl-]``.)
See the [Getting Started Guide](https://idf.espressif.com/) for full steps to configure and use ESP-IDF to build projects.

View File

@ -7,20 +7,16 @@ ESP32_TARGET="esp32"
ESP32C3_TARGET="esp32c3"
ESP32S3_TARGET="esp32s3"
ESP32C6_TARGET="esp32c6"
ESP32P4_TARGET="esp32p4"
ESP32C5_TARGET="esp32c5"
usage ()
{
echo "USAGE:"
echo "$0 $ESP32_TARGET|$ESP32C3_TARGET|$ESP32S3_TARGET|$ESP32C6_TARGET|$ESP32P4_TARGET|$ESP32C5_TARGET"
echo "$0 $ESP32_TARGET|$ESP32C3_TARGET|$ESP32S3_TARGET"
echo "Example:"
echo " $0 $ESP32_TARGET"
echo " $0 $ESP32C3_TARGET"
echo " $0 $ESP32S3_TARGET"
echo " $0 $ESP32C6_TARGET"
echo " $0 $ESP32P4_TARGET"
echo " $0 $ESP32C5_TARGET"
exit 1
}
@ -30,18 +26,12 @@ fi
TARGET=$1
if [ "$TARGET" = "$ESP32C5_TARGET" ]; then
IDF_ST_CMD="idf.py --preview set-target $TARGET"
else
IDF_ST_CMD="idf.py set-target $TARGET"
fi
if [[ -z "${WAMR_PATH}" ]]; then
export WAMR_PATH=$PWD/../../..
fi
rm -rf build
$IDF_ST_CMD
idf.py set-target $TARGET
idf.py build
idf.py flash

View File

@ -79,7 +79,7 @@ struct wamr_pal_create_process_args {
// Untrusted environment variable array pass to new process.
//
// The untrusted env vars to the command. And the last element of the array
// must be NULL to indicate the end of the array.
// must be NULL to indicate the length of array.
//
// Optional field.
const char **env;

View File

@ -177,7 +177,7 @@ add_library (vmlib ${WAMR_RUNTIME_LIB_SOURCE})
set_version_info (vmlib)
target_include_directories(vmlib INTERFACE
$<INSTALL_INTERFACE:include>
$<INSTALL_INTERFACE:include/iwasm>
)
set (WAMR_PUBLIC_HEADERS
@ -197,7 +197,7 @@ target_link_libraries (vmlib ${LLVM_AVAILABLE_LIBS} ${UV_A_LIBS} -lm -ldl -lpthr
install (TARGETS vmlib
EXPORT iwasmTargets
DESTINATION lib
PUBLIC_HEADER DESTINATION include
PUBLIC_HEADER DESTINATION include/iwasm
)
install_iwasm_package ()

View File

@ -16,6 +16,8 @@ set (CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
set(CMAKE_CXX_STANDARD 17)
add_definitions(-DCOMPILING_WASM_RUNTIME_API=1)
# Set WAMR_BUILD_TARGET, currently values supported:
# "X86_64", "AMD_64", "X86_32", "AARCH64[sub]", "ARM[sub]", "THUMB[sub]", "MIPS", "XTENSA"
if (NOT DEFINED WAMR_BUILD_TARGET)
@ -145,7 +147,7 @@ add_library (vmlib ${WAMR_RUNTIME_LIB_SOURCE})
set_version_info (vmlib)
target_include_directories(vmlib INTERFACE
$<INSTALL_INTERFACE:include>
$<INSTALL_INTERFACE:include/iwasm>
)
set (WAMR_PUBLIC_HEADERS
@ -172,7 +174,7 @@ endif()
install (TARGETS vmlib
EXPORT iwasmTargets
DESTINATION lib
PUBLIC_HEADER DESTINATION include
PUBLIC_HEADER DESTINATION include/iwasm
)
install_iwasm_package ()

View File

@ -1,15 +1,9 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Refer to https://docs.zephyrproject.org/3.7.0/develop/getting_started/index.html
# for more information on how to set up the Zephyr development environment.
FROM ubuntu:22.04
ARG DEBIAN_FRONTEND=noninteractive
ENV TZ=Asian/Shanghai
ARG ZEPHYR_SDK_VERSION=0.16.9
# In west_lite.yml, the Zephyr version is set to v3.7.0
#ARG ZEPHYR_VERSION=3.7.0
# Install dependencies for Zephyr
# hadolint ignore=DL3008
@ -22,34 +16,28 @@ RUN apt-get update && apt-get install -y --no-install-recommends git cmake ninja
# Install the Zephyr Software Development Kit (SDK)
WORKDIR /opt
# hadolint ignore=DL4006
RUN wget --progress=dot:giga https://github.com/zephyrproject-rtos/sdk-ng/releases/download/v${ZEPHYR_SDK_VERSION}/zephyr-sdk-${ZEPHYR_SDK_VERSION}_linux-x86_64.tar.xz \
&& wget --progress=dot:giga -O - https://github.com/zephyrproject-rtos/sdk-ng/releases/download/v${ZEPHYR_SDK_VERSION}/sha256.sum | shasum --check --ignore-missing \
&& tar xf zephyr-sdk-${ZEPHYR_SDK_VERSION}_linux-x86_64.tar.xz && rm zephyr-sdk-${ZEPHYR_SDK_VERSION}_linux-x86_64.tar.xz
RUN wget --progress=dot:giga https://github.com/zephyrproject-rtos/sdk-ng/releases/download/v0.16.3/zephyr-sdk-0.16.3_linux-x86_64.tar.xz \
&& wget --progress=dot:giga -O - https://github.com/zephyrproject-rtos/sdk-ng/releases/download/v0.16.3/sha256.sum | shasum --check --ignore-missing \
&& tar xvf zephyr-sdk-0.16.3_linux-x86_64.tar.xz && rm zephyr-sdk-0.16.3_linux-x86_64.tar.xz
WORKDIR /opt/zephyr-sdk-${ZEPHYR_SDK_VERSION}
WORKDIR /opt/zephyr-sdk-0.16.3
# hadolint ignore=DL4006
# Install host tools and Register Zephyr SDK CMake package
RUN ./setup.sh -h -c
RUN yes | ./setup.sh
# Get Zephyr
WORKDIR /root/zephyrproject/smoke-test
# hadolint ignore=DL3013
RUN pip3 install --no-cache-dir west
COPY ./west_lite.yml ./west.yml
# init the west workspace with a minimal manifest
RUN west init -l
RUN pip3 install --no-cache-dir west && west init -m https://github.com/zephyrproject-rtos/zephyr --mr v3.5.0 /root/zephyrproject
WORKDIR /root/zephyrproject
RUN west update --stats
RUN west update
WORKDIR /root/zephyrproject/modules/zephyr
RUN west zephyr-export && pip install --no-cache-dir -r ./scripts/requirements.txt
ENV ZEPHYR_BASE="/root/zephyrproject/modules/zephyr"
WORKDIR /root/zephyrproject/zephyr
RUN west zephyr-export && pip install --no-cache-dir -r ~/zephyrproject/zephyr/scripts/requirements.txt
# Git clone wamr
WORKDIR /root/zephyrproject/modules/
RUN git clone https://github.com/bytecodealliance/wasm-micro-runtime.git wasm-micro-runtime
WORKDIR /root/zephyrproject/modules/wasm-micro-runtime/product-mini/platforms/zephyr
WORKDIR /root
RUN git clone https://github.com/bytecodealliance/wasm-micro-runtime.git
WORKDIR /root/wasm-micro-runtime/product-mini/platforms/zephyr/simple
ENV ZEPHYR_BASE="/root/zephyrproject/zephyr"

View File

@ -87,12 +87,6 @@ is a 64-bit ARM target for emulating the Cortex-A53 platform.
west build . -b qemu_cortex_a53 -p always -- -DWAMR_BUILD_TARGET=AARCH64
```
[ARC QEMU](https://docs.zephyrproject.org/latest/boards/qemu/arc/doc/index.html)
is a 32-bit ARC target for emulating the ARC platform.
```shell
west build . -b qemu_arc/qemu_arc_em -p always -- -DWAMR_BUILD_TARGET=ARC
```
## Flashing or Running Image

View File

@ -1,15 +0,0 @@
# The west manifest file for WAMR on Zephyr smoke test.
#
manifest:
#
# Please add items below based on alphabetical order
projects:
- name: zephyr
url: https://github.com/zephyrproject-rtos/zephyr
revision: v3.7.0
clone-depth: 1
path: modules/zephyr
west-commands: scripts/west-commands.yml
self:
path: smoke-test

View File

@ -5,7 +5,6 @@
#include "wasm_export.h"
#include "bh_read_file.h"
#include "bh_getopt.h"
void
my_log(uint32 log_level, const char *file, int line, const char *fmt, ...)

View File

@ -1,10 +0,0 @@
# Copyright (C) 2025 Midokura Japan KK. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
cmake_minimum_required(VERSION 3.16)
project(printversion LANGUAGES C)
add_executable(printversion printversion.c)
find_package(iwasm REQUIRED)
target_link_libraries(printversion iwasm::vmlib)

View File

@ -1,21 +0,0 @@
/*
* Copyright (C) 2025 Midokura Japan KK. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <wasm_export.h>
int
main(int argc, char **argv)
{
uint32_t major;
uint32_t minor;
uint32_t patch;
wasm_runtime_get_version(&major, &minor, &patch);
printf("wasm-micro-runtime %" PRIu32 ".%" PRIu32 ".%" PRIu32 "\n", major,
minor, patch);
}

View File

@ -1,24 +0,0 @@
#! /bin/sh
# Copyright (C) 2025 Midokura Japan KK. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

# Smoke test: install WAMR into a throwaway prefix, then build the
# printversion sample against that installation and run it.
set -e
DIST=$(mktemp -d)
# Build and install the runtime into the temporary prefix.
# WAMR_BUILD_SIMD=0 to avoid fetching simde, which is
# not relevant to this particular test.
cmake -B build-wamr \
-D CMAKE_INSTALL_PREFIX=${DIST} \
-D WAMR_BUILD_SIMD=0 \
../..
cmake --build build-wamr -t install
# Build the sample app, resolving the iwasm package from the prefix.
cmake -B build-app \
-D CMAKE_PREFIX_PATH=${DIST} \
-D CMAKE_INSTALL_PREFIX=${DIST} \
.
cmake --build build-app
# Run the resulting binary; a nonzero exit fails the test via `set -e`.
./build-app/printversion

View File

@ -121,6 +121,7 @@ def main():
print("\n================================")
print("Test address resolving")
cmd = "./iwasm --allow-resolve=*.com addr_resolve.wasm github.com"
cmd = "./multicast_server FF02:113D:6FDD:2C17:A643:FFE2:1BD1:3CD2"
run_cmd(cmd, args.working_directory)
# wait for a second

View File

@ -8,8 +8,6 @@ Refer to the `README.md` under each folder for how to build and run the benchmar
## Install `llvm-profdata`
> PS: the `llvm-profdata` version needs to be the same major version as the llvm libraries used to build wamrc.
The tool `llvm-profdata` is used when running the `test_pgo.sh` script under the benchmark folder. There are two ways to install it:
1. Refer to https://apt.llvm.org/, e.g. in Ubuntu 20.04, add lines below to /etc/apt/source.list
@ -20,22 +18,19 @@ deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal main
# 15
deb http://apt.llvm.org/focal/ llvm-toolchain-focal-15 main
deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-15 main
# 18
deb http://apt.llvm.org/focal/ llvm-toolchain-focal-18 main
deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-18 main
```
Then run `sudo apt update`, `sudo apt install llvm`. And after installing:
```bash
cd /usr/bin
sudo ln -s llvm-profdata-18 llvm-profdata
sudo ln -s llvm-profdata-15 llvm-profdata
```
2. Build manually
```bash
git clone --depth 1 --branch release/18.x https://github.com/llvm/llvm-project.git
git clone --depth 1 --branch release/15.x https://github.com/llvm/llvm-project.git
cd llvm-project
mkdir build && cd build
cmake ../llvm \

View File

@ -2,7 +2,6 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set -e
PLATFORM=$(uname -s | tr A-Z a-z)

View File

@ -2,7 +2,6 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set -e
PLATFORM=$(uname -s | tr A-Z a-z)

View File

@ -181,12 +181,7 @@ add_link_options(-fsanitize=fuzzer -fno-sanitize=vptr)
# Enable sanitizers if not in oss-fuzz environment
set(CFLAGS_ENV $ENV{CFLAGS})
string(FIND "${CFLAGS_ENV}" "-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION" FUZZ_POS)
if (FUZZ_POS GREATER -1)
set(IN_OSS_FUZZ 1)
else()
set(IN_OSS_FUZZ 0)
endif()
string(FIND "${CFLAGS_ENV}" "-DFUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION" IN_OSS_FUZZ)
add_subdirectory(aot-compiler)
add_subdirectory(wasm-mutator)

View File

@ -68,7 +68,7 @@ target_link_directories(aotclib PUBLIC ${LLVM_LIBRARY_DIR})
target_link_libraries(aotclib PUBLIC ${REQUIRED_LLVM_LIBS})
if(NOT IN_OSS_FUZZ)
message(STATUS "Enable ASan and UBSan in non-oss-fuzz environment for aotclib")
message(STATUS "Enable ASan and UBSan in non-oss-fuzz environment")
target_compile_options(aotclib PUBLIC
-fprofile-instr-generate -fcoverage-mapping
-fno-sanitize-recover=all

View File

@ -72,7 +72,7 @@ def to_json(inst, cls):
class Fuzzing(db.Model):
__tablename__ = 'fuzzing_task'
__tablename__ = 'fazzing_task'
id = db.Column(db.Integer, autoincrement=True,
primary_key=True, nullable=False)
repo = db.Column(db.String(200), nullable=False, default='')
@ -96,7 +96,7 @@ class TaskError(db.Model):
__tablename__ = 'task_error'
id = db.Column(db.Integer, autoincrement=True,
primary_key=True, nullable=False)
fuzzing_id = db.Column(db.Integer, db.ForeignKey("fuzzing_task.id"))
fazzing_id = db.Column(db.Integer, db.ForeignKey("fazzing_task.id"))
name = db.Column(db.String(200), nullable=False, default='')
std_out = db.Column(db.Text, default='')
data = db.Column(db.JSON)
@ -119,9 +119,9 @@ def to_data(data):
def error_count(data):
error = len(TaskError.query.filter(
TaskError.fuzzing_id == data.get('id'), TaskError.status.in_([1, 2])).all())
TaskError.fazzing_id == data.get('id'), TaskError.status.in_([1, 2])).all())
end_error = len(TaskError.query.filter(
TaskError.fuzzing_id == data.get('id'), TaskError.status == 0).all())
TaskError.fazzing_id == data.get('id'), TaskError.status == 0).all())
data['error'] = error
data['end_error'] = end_error
return data
@ -159,11 +159,11 @@ def show_fuzz_list():
id = data.get('id')
if id:
all_error = TaskError.query.filter(
TaskError.fuzzing_id == id).with_entities(TaskError.id, TaskError.fuzzing_id,
TaskError.fazzing_id == id).with_entities(TaskError.id, TaskError.fazzing_id,
TaskError.create_time, TaskError.data,
TaskError.name, TaskError.status,
TaskError.update_time, TaskError.comment).order_by(TaskError.status.desc(), TaskError.update_time.desc(), TaskError.id.desc()).all()
data_message = [{'id': error['id'], "fuzzing_id": error['fuzzing_id'],
data_message = [{'id': error['id'], "fuzzing_id": error['fazzing_id'],
"name": error['name'], "data": error['data'],
'create_time': error['create_time'].strftime('%Y-%m-%d %H:%M:%S'),
'update_time': error['update_time'].strftime('%Y-%m-%d %H:%M:%S'),
@ -204,7 +204,7 @@ def New_fuzzing():
# curd.set_error_status_to(list(map(lambda x: x.id, error_list)), db)
# Fuzzing.query.filter_by(id=fuzz.id).delete()
fuzz.data = {'error': "Clone repo Error"}
db.session.commit()
db.commit()
return jsonify({"status": 0, "result": "", "msg": "Clone repo Error"})
wamr_path_parent = fuzz_dir.parent.parent
@ -277,7 +277,7 @@ def scheduler_run_task():
for fuzz in fuzz_query:
all_error = TaskError.query.filter(
TaskError.fuzzing_id == fuzz.id).with_entities(TaskError.name).all()
TaskError.fazzing_id == fuzz.id).with_entities(TaskError.name).all()
fuzz_cmd = wasm_mutator_dir / \
'workspace' / f'build_{fuzz.id}'
dir_list = filter(lambda x: x.startswith(
@ -287,7 +287,7 @@ def scheduler_run_task():
for dir in dir_list:
cmd = f'cd {fuzz_cmd} && ./wasm_mutator_fuzz {dir}'
status, resp = getstatusoutput(cmd)
task_error = TaskError(name=dir, std_out=resp, fuzzing_id=fuzz.id,
task_error = TaskError(name=dir, std_out=resp, fazzing_id=fuzz.id,
create_time=datetime.utcnow() + timedelta(hours=8))
db.session.add(task_error)
db.session.commit()
@ -312,7 +312,7 @@ def get_error_txt():
return jsonify({"status": 0, "results": [], 'msg': "Error"})
error = TaskError.query.get(id)
fuzz_cmd = wasm_mutator_dir / \
'workspace' / f'build_{error.fuzzing_id}'
'workspace' / f'build_{error.fazzing_id}'
file_cmd = fuzz_cmd / error.name
response = send_file(file_cmd, as_attachment=True,
@ -351,7 +351,7 @@ def get_cases_zip():
with ZipFile(memory_file, "w", ZIP_DEFLATED) as zf:
for task_error in task_query:
fuzz_cmd = wasm_mutator_dir / \
'workspace' / f'build_{task_error.fuzzing_id}'
'workspace' / f'build_{task_error.fazzing_id}'
file_cmd = fuzz_cmd / task_error.name
zf.write(str(file_cmd), arcname=task_error.name)
memory_file.seek(0)
@ -399,7 +399,7 @@ def error_restart():
if run_status:
return jsonify({"status": 0, "results": [], 'msg': "There are already tasks in progress"})
task_query = TaskError.query.filter(TaskError.id.in_(id_list)).all()
fuzzing_id = task_query[0].fuzzing_id
fuzzing_id = task_query[0].fazzing_id
fuzz_cmd = wasm_mutator_dir / \
'workspace' / f'build_{fuzzing_id}'
restart_cmd = wasm_mutator_dir / \
@ -412,7 +412,7 @@ def error_restart():
if not Path(restart_cmd / 'wamr').exists():
print('------ error: clone repo not folder exists ------')
# fuzz.data = {'error': "Clone repo Error"}
db.session.commit()
db.commit()
return jsonify({"status": 0, "result": "", "msg": "Clone repo Error"})
wamr_path_parent = fuzz_dir.parent.parent
wamr_path = wamr_path_parent / 'wamr'

View File

@ -58,7 +58,7 @@ add_executable(wasm_mutator_fuzz wasm_mutator_fuzz.cc)
target_link_libraries(wasm_mutator_fuzz PRIVATE vmlib m)
if(NOT IN_OSS_FUZZ)
message(STATUS "Enable ASan and UBSan in non-oss-fuzz environment for vmlib")
message(STATUS "Enable ASan and UBSan in non-oss-fuzz environment")
target_compile_options(vmlib PUBLIC
-fprofile-instr-generate -fcoverage-mapping
-fno-sanitize-recover=all

View File

@ -218,57 +218,22 @@ simply run `run.py`
./run.py
```
Specify a specific issue with option `--issues`/`-i`
```shell
./run.py --issues 2833 # test 1 issue #2833
./run.py -i 2833,2834,2835 # test 3 issues #2833 #2834 #2835
```
If everything went well, you should see similar output in your command line output
```shell
==== Test results ====
Total: 22
Passed: 22
Failed: 0
Left issues in folder: no more
Cases in JSON but not found in folder: no more
Finish testing, 22/22 of test cases passed, no more issues should further test
```
If you add the test case under directory `issues` but forget to add the running config in json file, the output can be something like
```shell
==== Test results ====
Total: 21
Passed: 21
Failed: 0
missed: 0
Left issues in folder: #3022
Cases in JSON but not found in folder: no more
```
If you add the test case in `running_config.json` but used the wrong id or forgot to add the test case under directory `issues`, the output can be something like
```shell
==== Test results ====
Total: 21
Passed: 21
Failed: 0
missed: 0
Left issues in folder: #2855
Cases in JSON but not found in folder: #12345
Finish testing, 21/21 of test cases passed, {2945} issue(s) should further test
```
If some test case are failing, then it will be something like
```shell
==== Test results ====
Total: 22
Passed: 21
Failed: 1
Left issues in folder: no more
Cases in JSON but not found in folder: no more
Finish testing, 21/22 of test cases passed, no more issue(s) should further test
```
And a log file named `issues_tests.log` will be generated and inside it will display the details of the failing cases, for example:

View File

@ -10,9 +10,7 @@ import os
import subprocess
import glob
import re
import argparse
from typing import Dict, Optional, List
from typing import Dict
WORK_DIR = os.getcwd()
TEST_WASM_COMMAND = (
@ -47,12 +45,7 @@ def dump_error_log(failing_issue_id, command_lists, exit_code_cmp, stdout_cmp):
)
def get_issue_ids_should_test(selected_ids: Optional[List[int]] = None):
"""Find all issue IDs that should be tested in folder issues."""
# If specific issue IDs are provided, return them as a set
if selected_ids:
return set(selected_ids)
def get_issue_ids_should_test():
# Define the path pattern
path_pattern = "issues/issue-*"
@ -67,8 +60,8 @@ def get_issue_ids_should_test(selected_ids: Optional[List[int]] = None):
# Extract the issue number using regular expression
match = re.search(pattern, dir_path)
if match:
issue_number = int(match.group(1))
issue_numbers.add(issue_number)
issue_number = match.group(1)
issue_numbers.add(int(issue_number))
# Print the set of issue numbers
return issue_numbers
@ -84,10 +77,10 @@ def get_and_check(d, key, default=None, nullable=False):
def run_and_compare_results(
issue_id, cmd, description, ret_code, stdout_content
) -> bool:
passed_ids, failed_ids, issue_id, cmd, description, ret_code, stdout_content
):
print(f"####################################")
print(f"test BA issue #{issue_id} `{description}`...")
print(f"test BA issue #{issue_id} `{description}`: {cmd}")
command_list = cmd.split()
result = subprocess.run(
command_list,
@ -102,21 +95,19 @@ def run_and_compare_results(
exit_code_cmp = f"exit code (actual, expected) : {actual_exit_code, ret_code}"
stdout_cmp = f"stdout (actual, expected) : {actual_output, stdout_content}"
print(exit_code_cmp)
print(stdout_cmp)
if actual_exit_code == ret_code and (
actual_output == stdout_content
or (
stdout_content == "Compile success"
and actual_output.find(stdout_content) != -1
)
or (stdout_content == "Compile success"
and actual_output.find(stdout_content) != -1)
or (len(stdout_content) > 30 and actual_output.find(stdout_content) != -1)
):
passed_ids.add(issue_id)
print("== PASS ==")
return True
else:
print(cmd)
print(exit_code_cmp)
print(stdout_cmp)
failed_ids.add(issue_id)
print(f"== FAILED: {issue_id} ==")
dump_error_log(
issue_id,
@ -124,11 +115,15 @@ def run_and_compare_results(
exit_code_cmp,
stdout_cmp,
)
return False
print("")
def run_issue_test_wamrc(issue_id, compile_options):
def run_issue_test_wamrc(
passed_ids, failed_ids, issue_id, compile_options, stdout_only_cmp_last_line=False
):
compiler = get_and_check(compile_options, "compiler")
only_compile = get_and_check(compile_options, "only compile")
in_file = get_and_check(compile_options, "in file")
out_file = get_and_check(compile_options, "out file")
options = get_and_check(compile_options, "options")
@ -150,10 +145,14 @@ def run_issue_test_wamrc(issue_id, compile_options):
compiler=compiler, options=options, out_file=out_file_path, in_file=in_file_path
)
return run_and_compare_results(issue_id, cmd, description, ret_code, stdout_content)
run_and_compare_results(
passed_ids, failed_ids, issue_id, cmd, description, ret_code, stdout_content
)
return only_compile
def run_issue_test_iwasm(issue_id, test_case) -> bool:
def run_issue_test_iwasm(passed_ids, failed_ids, issue_id, test_case):
runtime = get_and_check(test_case, "runtime")
mode = get_and_check(test_case, "mode")
file = get_and_check(test_case, "file")
@ -195,19 +194,17 @@ def run_issue_test_iwasm(issue_id, test_case) -> bool:
argument=argument,
)
return run_and_compare_results(issue_id, cmd, description, ret_code, stdout_content)
run_and_compare_results(
passed_ids, failed_ids, issue_id, cmd, description, ret_code, stdout_content
)
def process_and_run_test_cases(
data: Dict[str, Dict], selected_ids: Optional[List[int]] = None
):
issue_ids_should_test = get_issue_ids_should_test(selected_ids)
def process_and_run_test_cases(data: Dict[str, Dict]):
issue_ids_should_test = get_issue_ids_should_test()
passed_ids = set()
failed_ids = set()
json_only_ids = set()
# Iterate through each test case in the json data
for test_case in data.get("test cases", []):
is_deprecated = get_and_check(test_case, "deprecated")
issue_ids = get_and_check(test_case, "ids", default=[])
@ -217,79 +214,33 @@ def process_and_run_test_cases(
continue
compile_options = get_and_check(test_case, "compile_options", nullable=True)
for issue_id in issue_ids:
if issue_id not in issue_ids_should_test:
json_only_ids.add(issue_id)
continue
only_compile = False
# if this issue needs to test wamrc to compile the test case first
if compile_options:
only_compile = compile_options["only compile"]
run_issue_test_wamrc(passed_ids, failed_ids, issue_id, compile_options)
# if this issue requires to test iwasm to run the test case
if not only_compile:
run_issue_test_iwasm(passed_ids, failed_ids, issue_id, test_case)
# cross out the this issue_id in the should test set
issue_ids_should_test.remove(issue_id)
only_compile = False
# if this issue needs to test wamrc to compile the test case first
if compile_options:
only_compile = compile_options["only compile"]
compile_res = run_issue_test_wamrc(issue_id, compile_options)
if only_compile:
if compile_res:
passed_ids.add(issue_id)
else:
failed_ids.add(issue_id)
continue
else:
# if compile success, then continue to test iwasm
if not compile_res:
failed_ids.add(issue_id)
continue
# if this issue requires to test iwasm to run the test case
if not only_compile:
if run_issue_test_iwasm(issue_id, test_case):
passed_ids.add(issue_id)
else:
failed_ids.add(issue_id)
total = len(passed_ids) + len(failed_ids)
passed = len(passed_ids)
failed = len(failed_ids)
format_issue_ids_should_test = (
" ".join(f"#{x}" for x in issue_ids_should_test)
if issue_ids_should_test
else "no more"
issue_ids_should_test = (
issue_ids_should_test if issue_ids_should_test else "no more"
)
format_json_only_ids = (
" ".join(f"#{x}" for x in json_only_ids) if json_only_ids else "no more"
)
print(f"####################################")
print(f"==== Test results ====")
print(f" Total: {total}")
print(f" Passed: {passed}")
print(f" Failed: {failed}")
if not selected_ids:
print(f" Left issues in folder: {format_issue_ids_should_test}")
print(f" Cases in JSON but not found in folder: {format_json_only_ids}")
else:
print(f" Issues not found in folder: {format_issue_ids_should_test}")
def main():
parser = argparse.ArgumentParser(description="Run BA issue tests.")
parser.add_argument(
"-i",
"--issues",
type=str,
help="Comma separated list of issue ids to run, e.g. 1,2,3. Default: all.",
)
args = parser.parse_args()
selected_ids = None
if args.issues:
selected_ids = [int(x) for x in args.issues.split(",") if x.strip().isdigit()]
# Path to the JSON file
file_path = "running_config.json"
@ -305,7 +256,7 @@ def main():
os.remove(LOG_FILE)
# Process the data
process_and_run_test_cases(data, selected_ids)
process_and_run_test_cases(data)
if __name__ == "__main__":

View File

@ -17,7 +17,7 @@ git apply ../../../wamr-test-suites/spec-test-script/gc_ignore_cases.patch
# Set OCaml compiler environment
eval $(opam config env)
echo "compile the reference interpreter"
echo "compile the reference intepreter"
pushd interpreter
make
popd

Some files were not shown because too many files have changed in this diff Show More