diff --git a/.github/actions/install-wasi-sdk-wabt/action.yml b/.github/actions/install-wasi-sdk-wabt/action.yml index c872e4252..6b79087c2 100644 --- a/.github/actions/install-wasi-sdk-wabt/action.yml +++ b/.github/actions/install-wasi-sdk-wabt/action.yml @@ -30,14 +30,23 @@ runs: if: ${{ startsWith(inputs.os, 'ubuntu') }} shell: bash run: | + echo "Downloading wasi-sdk for Ubuntu..." sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-linux.tar.gz + + echo "Extracting wasi-sdk..." sudo tar -xf wasi-sdk.tar.gz sudo ln -sf wasi-sdk-25.0-x86_64-linux/ wasi-sdk + + echo "Downloading wabt for Ubuntu..." sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-ubuntu-20.04.tar.gz + + echo "Extracting wabt..." sudo tar -xf wabt.tar.gz sudo ln -sf wabt-1.0.37 wabt + /opt/wasi-sdk/bin/clang --version /opt/wabt/bin/wasm-interp --version + echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on ubuntu" working-directory: /opt @@ -45,14 +54,23 @@ runs: if: ${{ inputs.os == 'macos-13' }} shell: bash run: | + echo "Downloading wasi-sdk for macOS-13..." sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-macos.tar.gz + + echo "Extracting wasi-sdk..." sudo tar -xf wasi-sdk.tar.gz sudo ln -sf wasi-sdk-25.0-x86_64-macos wasi-sdk + + echo "Downloading wabt for macOS-13..." sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.36/wabt-1.0.36-macos-12.tar.gz + + echo "Extracting wabt..." sudo tar -xf wabt.tar.gz sudo ln -sf wabt-1.0.36 wabt + /opt/wasi-sdk/bin/clang --version /opt/wabt/bin/wasm-interp --version + echo "::notice::wasi-sdk-25 and wabt-1.0.36 installed on macos-13" working-directory: /opt @@ -60,21 +78,48 @@ runs: if: ${{ inputs.os == 'macos-14' }} shell: bash run: | + echo "Downloading wasi-sdk for macOS-14..." sudo wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-arm64-macos.tar.gz + + echo "Extracting wasi-sdk..." sudo tar -xf wasi-sdk.tar.gz sudo ln -sf wasi-sdk-25.0-arm64-macos wasi-sdk + + echo "Downloading wabt for macOS-14..." sudo wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-macos-14.tar.gz + + echo "Extracting wabt..." sudo tar -xf wabt.tar.gz sudo ln -sf wabt-1.0.37 wabt + /opt/wasi-sdk/bin/clang --version /opt/wabt/bin/wasm-interp --version + echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on macos-14" working-directory: /opt - #TODO: Add support for Windows - name: Set up wasi-sdk and wabt on Windows if: ${{ startsWith(inputs.os, 'windows') }} - shell: powershell + shell: bash run: | - echo "::notice::Support for Windows is not implemented yet" - exit 1 + choco install -y wget + + mkdir -p /opt/wasi-sdk + mkdir -p /opt/wabt + + echo "Downloading wasi-sdk for Windows..." + wget -O wasi-sdk.tar.gz --progress=dot:giga https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-25/wasi-sdk-25.0-x86_64-windows.tar.gz + + echo "Extracting wasi-sdk..." + tar --strip-components=1 -xf wasi-sdk.tar.gz -C /opt/wasi-sdk + + echo "Downloading wabt for Windows..." + wget -O wabt.tar.gz --progress=dot:giga https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-windows.tar.gz + + echo "Extracting wabt..." 
+ tar --strip-components=1 -xf wabt.tar.gz -C /opt/wabt + + /opt/wasi-sdk/bin/clang --version + /opt/wabt/bin/wasm-interp --version + + echo "::notice::wasi-sdk-25 and wabt-1.0.37 installed on Windows" diff --git a/.github/workflows/build_iwasm_release.yml b/.github/workflows/build_iwasm_release.yml index a975d5807..6acd90fa6 100644 --- a/.github/workflows/build_iwasm_release.yml +++ b/.github/workflows/build_iwasm_release.yml @@ -23,7 +23,7 @@ on: type: string required: true upload_url: - description: a semantic version number. it is required when `release` is true. + description: upload binary assets to the URL of release type: string required: false ver_num: diff --git a/.github/workflows/build_wamrc.yml b/.github/workflows/build_wamrc.yml index 55d63f13b..07e3c7cdb 100644 --- a/.github/workflows/build_wamrc.yml +++ b/.github/workflows/build_wamrc.yml @@ -23,7 +23,7 @@ on: type: string required: true upload_url: - description: a semantic version number. it is required when `release` is true. + description: upload binary assets to the URL of release type: string required: false ver_num: diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 665c1588d..9c3309721 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -53,7 +53,7 @@ jobs: # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL - uses: github/codeql-action/init@v3.28.18 + uses: github/codeql-action/init@v3.29.0 with: languages: ${{ matrix.language }} @@ -70,7 +70,7 @@ jobs: - run: | ./.github/scripts/codeql_buildscript.sh - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3.28.18 + uses: github/codeql-action/analyze@v3.29.0 with: category: "/language:${{matrix.language}}" upload: false @@ -99,7 +99,7 @@ jobs: output: ${{ steps.step1.outputs.sarif-output }}/cpp.sarif - name: Upload CodeQL results to code scanning - uses: github/codeql-action/upload-sarif@v3.28.18 + uses: github/codeql-action/upload-sarif@v3.29.0 with: sarif_file: ${{ steps.step1.outputs.sarif-output }} category: "/language:${{matrix.language}}" diff --git a/.github/workflows/compilation_on_android_ubuntu.yml b/.github/workflows/compilation_on_android_ubuntu.yml index 65a540747..828773ae0 100644 --- a/.github/workflows/compilation_on_android_ubuntu.yml +++ b/.github/workflows/compilation_on_android_ubuntu.yml @@ -621,49 +621,6 @@ jobs: llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2204.outputs.cache_key }} running_mode: aot test_option: $WAMR_COMPILER_TEST_OPTIONS - exclude: - # incompatible modes and features - # classic-interp doesn't support simd - - running_mode: "classic-interp" - test_option: $SIMD_TEST_OPTIONS - # llvm jit doesn't support multi module - - running_mode: "jit" - test_option: $MULTI_MODULES_TEST_OPTIONS - # fast-jit doesn't support multi module, simd - - running_mode: "fast-jit" - test_option: $MULTI_MODULES_TEST_OPTIONS - - running_mode: "fast-jit" - test_option: $SIMD_TEST_OPTIONS - # multi-tier-jit doesn't support multi module, simd - - running_mode: "multi-tier-jit" - test_option: $MULTI_MODULES_TEST_OPTIONS - - running_mode: "multi-tier-jit" - test_option: $SIMD_TEST_OPTIONS - # fast-jit and multi-tier-jit don't support GC - - running_mode: "fast-jit" - test_option: $GC_TEST_OPTIONS - - running_mode: "multi-tier-jit" - test_option: $GC_TEST_OPTIONS - # fast-interp, fast-jit, llvm-jit, multi-tier-jit don't support Memory64 - - running_mode: "fast-interp" - test_option: $MEMORY64_TEST_OPTIONS - - running_mode: "fast-jit" - test_option: 
$MEMORY64_TEST_OPTIONS - - running_mode: "jit" - test_option: $MEMORY64_TEST_OPTIONS - - running_mode: "multi-tier-jit" - test_option: $MEMORY64_TEST_OPTIONS - # aot, fast-interp, fast-jit, llvm-jit, multi-tier-jit don't support Multi Memory - - running_mode: "aot" - test_option: $MULTI_MEMORY_TEST_OPTIONS - - running_mode: "fast-interp" - test_option: $MULTI_MEMORY_TEST_OPTIONS - - running_mode: "fast-jit" - test_option: $MULTI_MEMORY_TEST_OPTIONS - - running_mode: "jit" - test_option: $MULTI_MEMORY_TEST_OPTIONS - - running_mode: "multi-tier-jit" - test_option: $MULTI_MEMORY_TEST_OPTIONS steps: - name: checkout @@ -767,123 +724,3 @@ jobs: eval $(opam env) ./test_wamr.sh ${{ env.X86_32_TARGET_TEST_OPTIONS }} ${{ matrix.test_option }} -t ${{ matrix.running_mode }} working-directory: ./tests/wamr-test-suites - - test-wamr-ide: - needs: - [ - build_iwasm - ] - runs-on: ubuntu-22.04 - env: - PYTHON_VERSION: '3.10' - PYTHON_UBUNTU_STANDALONE_BUILD: https://github.com/indygreg/python-build-standalone/releases/download/20230507/cpython-3.10.11+20230507-x86_64-unknown-linux-gnu-install_only.tar.gz - - steps: - - name: checkout - uses: actions/checkout@v4 - - - name: install dependencies - run: | - rustup target add wasm32-wasip1 - sudo apt update && sudo apt-get install -y lld ninja-build - npm install - working-directory: test-tools/wamr-ide/VSCode-Extension - - - name: code style check - run: | - npm install --save-dev prettier - npm run prettier-format-check - working-directory: test-tools/wamr-ide/VSCode-Extension - - - name: build iwasm with source debugging feature - run: | - mkdir build - cd build - cmake .. -DWAMR_BUILD_DEBUG_INTERP=1 -DWAMR_BUILD_REF_TYPES=1 - make - working-directory: product-mini/platforms/linux - - - name: Cache LLDB - id: cache-lldb - uses: actions/cache@v4 - env: - cache-name: cache-lldb-vscode - with: - path: test-tools/wamr-ide/VSCode-Extension/resource/debug/linux - key: ${{ env.cache-name }}-${{ hashFiles('build-scripts/lldb_wasm.patch') }}-${{ env.PYTHON_UBUNTU_STANDALONE_BUILD }} - - - if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }} - name: get stand-alone python ubuntu - run: | - wget ${{ env.PYTHON_UBUNTU_STANDALONE_BUILD }} -O python.tar.gz - tar -xvf python.tar.gz - working-directory: core/deps - - - if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }} - name: download llvm - run: | - wget https://github.com/llvm/llvm-project/archive/1f27fe6128769f00197925c3b8f6abb9d0e5cd2e.zip - unzip -q 1f27fe6128769f00197925c3b8f6abb9d0e5cd2e.zip - mv llvm-project-1f27fe6128769f00197925c3b8f6abb9d0e5cd2e llvm-project - working-directory: core/deps - - - if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }} - name: apply wamr patch - run: | - git init - git config user.email "action@github.com" - git config user.name "github action" - git apply ../../../build-scripts/lldb_wasm.patch - working-directory: core/deps/llvm-project - - - if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }} - name: build lldb ubuntu - run: | - echo "start to build lldb..." 
- mkdir -p wamr-lldb - cmake -S ./llvm -B build \ - -G Ninja \ - -DCMAKE_INSTALL_PREFIX=../wamr-lldb \ - -DCMAKE_BUILD_TYPE:STRING="Release" \ - -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ - -DLLVM_ENABLE_PROJECTS="clang;lldb" \ - -DLLVM_TARGETS_TO_BUILD:STRING="X86;WebAssembly" \ - -DLLVM_BUILD_BENCHMARKS:BOOL=OFF \ - -DLLVM_BUILD_DOCS:BOOL=OFF \ - -DLLVM_BUILD_EXAMPLES:BOOL=OFF \ - -DLLVM_BUILD_LLVM_DYLIB:BOOL=OFF \ - -DLLVM_BUILD_TESTS:BOOL=OFF \ - -DLLVM_INCLUDE_BENCHMARKS:BOOL=OFF \ - -DLLVM_INCLUDE_DOCS:BOOL=OFF \ - -DLLVM_INCLUDE_EXAMPLES:BOOL=OFF \ - -DLLVM_INCLUDE_TESTS:BOOL=OFF \ - -DLLVM_ENABLE_BINDINGS:BOOL=OFF \ - -DLLVM_ENABLE_LIBXML2:BOOL=ON \ - -DLLVM_ENABLE_LLD:BOOL=ON \ - -DLLDB_ENABLE_PYTHON:BOOL=ON \ - -DLLDB_EMBED_PYTHON_HOME=ON \ - -DLLDB_PYTHON_HOME=.. \ - -DLLDB_PYTHON_RELATIVE_PATH=lib/lldb-python \ - -DPython3_EXECUTABLE="$(pwd)/../python/bin/python${{ env.PYTHON_VERSION }}" - cmake --build build --target lldb install --parallel $(nproc) - working-directory: core/deps/llvm-project - - - if: ${{ steps.cache-lldb.outputs.cache-hit != 'true' }} - name: copy lldb to extension folder - run: | - mkdir -p bin - mkdir -p lib - cp ../../../../../../core/deps/llvm-project/lldb/tools/lldb-vscode/package.json ./ - cp -r ../../../../../../core/deps/llvm-project/lldb/tools/lldb-vscode/syntaxes/ ./ - cp ../../../../../../core/deps/llvm-project/build/bin/lldb* bin - cp ../../../../../../core/deps/llvm-project/build/lib/liblldb*.so lib - cp ../../../../../../core/deps/llvm-project/build/lib/liblldb*.so.* lib - cp -R ../../../../../../core/deps/llvm-project/build/lib/lldb-python lib - cp -R ../../../../../../core/deps/python/lib/python* lib - cp ../../../../../../core/deps/python/lib/libpython${{ env.PYTHON_VERSION }}.so.1.0 lib - working-directory: test-tools/wamr-ide/VSCode-Extension/resource/debug/linux - - - name: run tests - timeout-minutes: 5 - run: xvfb-run npm run test - working-directory: test-tools/wamr-ide/VSCode-Extension diff --git a/.github/workflows/compilation_on_sgx.yml b/.github/workflows/compilation_on_sgx.yml index 7577b0bda..ec27fd8ba 100644 --- a/.github/workflows/compilation_on_sgx.yml +++ b/.github/workflows/compilation_on_sgx.yml @@ -291,28 +291,6 @@ jobs: - name: run spec tests run: | - set +e source /opt/intel/sgxsdk/environment - attempts=0 - max_attempts=3 - - while [ $attempts -lt $max_attempts ]; do - ./test_wamr.sh ${{ matrix.test_option }} -t ${{ matrix.running_mode }} - exitcode="$?" 
- - if [ $exitcode -eq 0 ]; then - echo "Spec test passed" - exit 0 - elif [ $exitcode -ne 143 ]; then - echo "Spec test failed with error code $exitcode" - exit 1 - fi - - echo "$exitcode is a known GitHub-hosted runner issue" - echo "::notice::Re-running the spec test due to error code 143" - attempts=$((attempts + 1)) - done - - echo "::notice::Report an error with code 143 in SGX CI after $max_attempts attempts" - exit 143 + ./test_wamr.sh ${{ matrix.test_option }} -t ${{ matrix.running_mode }} working-directory: ./tests/wamr-test-suites diff --git a/.github/workflows/compilation_on_windows.yml b/.github/workflows/compilation_on_windows.yml index 21f961cb9..7cee2aa40 100644 --- a/.github/workflows/compilation_on_windows.yml +++ b/.github/workflows/compilation_on_windows.yml @@ -172,6 +172,10 @@ jobs: run: ./build.sh working-directory: ./core/iwasm/libraries/lib-wasi-threads/test/ + - name: install wget + shell: bash + run: choco install wget + - name: run tests shell: bash timeout-minutes: 20 diff --git a/.github/workflows/nightly_run.yml b/.github/workflows/nightly_run.yml index f261c880b..51a80705b 100644 --- a/.github/workflows/nightly_run.yml +++ b/.github/workflows/nightly_run.yml @@ -36,15 +36,14 @@ env: LLVM_EAGER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=0 -DWAMR_BUILD_FAST_JIT=0 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=0" MULTI_TIER_JIT_BUILD_OPTIONS: "-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_FAST_INTERP=0 -DWAMR_BUILD_INTERP=1 -DWAMR_BUILD_FAST_JIT=1 -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=1" # For Spec Test - # FIXME: use binary release(adding -b) instead of building from source after upgrading to 22.04 - DEFAULT_TEST_OPTIONS: "-s spec -P" - MULTI_MODULES_TEST_OPTIONS: "-s spec -M -P" - SIMD_TEST_OPTIONS: "-s spec -S -P" - EXTENDED_CONST_EXPR_TEST_OPTIONS: "-s spec -N -P" - THREADS_TEST_OPTIONS: "-s spec -p -P" - X86_32_TARGET_TEST_OPTIONS: "-m x86_32 -P" + DEFAULT_TEST_OPTIONS: "-s spec -b -P" + MULTI_MODULES_TEST_OPTIONS: "-s spec -b -P -M" + SIMD_TEST_OPTIONS: "-s spec -b -P -S" + THREADS_TEST_OPTIONS: "-s spec -b -P -p" + X86_32_TARGET_TEST_OPTIONS: "-m x86_32" WASI_TEST_OPTIONS: "-s wasi_certification -w" - + EXTENDED_CONST_EXPR_TEST_OPTIONS: "-s spec -N -P" + permissions: contents: read @@ -620,24 +619,11 @@ jobs: sanitizer: tsan - running_mode: "multi-tier-jit" sanitizer: tsan - # classic-interp and fast-interp don't support simd - - running_mode: "classic-interp" - test_option: $SIMD_TEST_OPTIONS + # simd128.h brings ubsan errors + # like: negation of XXXcannot be represented in type 'long int'; + # cast to an unsigned type to negate this value to itself - running_mode: "fast-interp" - test_option: $SIMD_TEST_OPTIONS - # llvm jit doesn't support multi module - - running_mode: "jit" - test_option: $MULTI_MODULES_TEST_OPTIONS - # fast-jit doesn't support multi module, simd - - running_mode: "fast-jit" - test_option: $MULTI_MODULES_TEST_OPTIONS - - running_mode: "fast-jit" - test_option: $SIMD_TEST_OPTIONS - # multi-tier-jit doesn't support multi module, simd - - running_mode: "multi-tier-jit" - test_option: $MULTI_MODULES_TEST_OPTIONS - - running_mode: "multi-tier-jit" - test_option: $SIMD_TEST_OPTIONS + sanitizer: ubsan steps: - name: checkout uses: actions/checkout@v4 diff --git a/.github/workflows/supply_chain.yml b/.github/workflows/supply_chain.yml index d9e10a71c..827b300f1 100644 --- a/.github/workflows/supply_chain.yml +++ b/.github/workflows/supply_chain.yml @@ -60,6 +60,6 @@ jobs: # Upload the results to GitHub's code 
scanning dashboard. - name: "Upload to code-scanning" - uses: github/codeql-action/upload-sarif@57eebf61a2246ab60a0c2f5a85766db783ad3553 + uses: github/codeql-action/upload-sarif@2847b7f7ab9f48fc49eca90a53fff6007285f399 with: sarif_file: results.sarif diff --git a/.github/workflows/wamr_wasi_extensions.yml b/.github/workflows/wamr_wasi_extensions.yml new file mode 100644 index 000000000..e9d10fe93 --- /dev/null +++ b/.github/workflows/wamr_wasi_extensions.yml @@ -0,0 +1,57 @@ +# Copyright (C) 2019 Intel Corporation. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +name: wamr_wasi_extensions + +on: + pull_request: + types: + - opened + - synchronize + paths: + - ".github/workflows/wamr_wasi_extensions.yml" + - "wamr_wasi_extensios/**" + - "core/iwasm/libraries/wasi-nn/include/**" + - "core/iwasm/libraries/lib-socket/**" + # allow to be triggered manually + workflow_dispatch: + +# Cancel any in-flight jobs for the same PR/branch so there's only one active +# at a time +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + build_wamr_wasi_extensions: + runs-on: ${{ matrix.os }} + strategy: + matrix: + os: [ubuntu-22.04, macos-13, macos-14] + steps: + - name: checkout + uses: actions/checkout@v4 + + - name: install-wasi-sdk-wabt + uses: ./.github/actions/install-wasi-sdk-wabt + with: + os: ${{ matrix.os }} + + - name: Build wamr-wasi-extensions + run: | + mkdir dist + ./build_libs.sh $(pwd)/dist/wamr-wasi-extensions + working-directory: wamr-wasi-extensions + + - name: Build wamr-wasi-extensions samples + run: | + ./build_samples.sh $(pwd)/dist/wamr-wasi-extensions + working-directory: wamr-wasi-extensions + + - name: Upload artifacts + if: matrix.os == 'macos-14' + uses: actions/upload-artifact@v4 + with: + name: wamr-wasi-extensions + path: wamr-wasi-extensions/dist + retention-days: 10 diff --git a/CMakeLists.txt b/CMakeLists.txt index 88a1642b8..4b28fa89c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -99,9 +99,9 @@ if (NOT DEFINED WAMR_BUILD_LIB_WASI_THREADS) set (WAMR_BUILD_LIB_WASI_THREADS 0) endif () -if (NOT DEFINED WAMR_ENABLE_COPY_CALLSTACK) +if (NOT DEFINED WAMR_BUILD_COPY_CALL_STACK) # Disable copy callstack by default - set (WAMR_ENABLE_COPY_CALLSTACK 0) + set (WAMR_BUILD_COPY_CALL_STACK 0) endif() if (NOT DEFINED WAMR_BUILD_MINI_LOADER) diff --git a/RELEASE_NOTES.md b/RELEASE_NOTES.md index 8b3cfec28..167da4703 100644 --- a/RELEASE_NOTES.md +++ b/RELEASE_NOTES.md @@ -497,7 +497,7 @@ - wasm loader: Fix handling if block without op else (#3404) - ref-types: Correct default value for function local variables (#3397) - aot compiler: Fix the length type passed to aot_memmove/aot_memset (#3378) -- Fix loader and mini-loader select potiential error (#3374) +- Fix loader and mini-loader select potential error (#3374) - Fix aot debugger compilation error on windows (#3370) - A few native stack detection fixes for macOS/arm64 (#3368) - Fix ESP32-S3 compiling error (#3359) diff --git a/build-scripts/config_common.cmake b/build-scripts/config_common.cmake index 916499355..d7a211a13 100644 --- a/build-scripts/config_common.cmake +++ b/build-scripts/config_common.cmake @@ -338,15 +338,10 @@ if (WAMR_BUILD_SHARED_HEAP EQUAL 1) add_definitions (-DWASM_ENABLE_SHARED_HEAP=1) message (" Shared heap enabled") endif() - -if (WAMR_ENABLE_COPY_CALLSTACK EQUAL 1) - add_definitions (-DWAMR_ENABLE_COPY_CALLSTACK=1) +if (WAMR_BUILD_COPY_CALL_STACK EQUAL 1) + add_definitions (-DWASM_ENABLE_COPY_CALL_STACK=1) message(" 
Copy callstack enabled") -else () - add_definitions (-DWAMR_ENABLE_COPY_CALLSTACK=0) - message(" Copy callstack disabled") endif() - if (WAMR_BUILD_MEMORY64 EQUAL 1) # if native is 32-bit or cross-compiled to 32-bit if (NOT WAMR_BUILD_TARGET MATCHES ".*64.*") @@ -543,6 +538,9 @@ if (WAMR_BUILD_WASI_NN EQUAL 1) if (DEFINED WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH) add_definitions (-DWASM_WASI_NN_EXTERNAL_DELEGATE_PATH="${WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH}") endif () + if (NOT DEFINED WAMR_BUILD_WASI_EPHEMERAL_NN) + set(WAMR_BUILD_WASI_EPHEMERAL_NN 1) + endif() if (WAMR_BUILD_WASI_EPHEMERAL_NN EQUAL 1) message (" WASI-NN: use 'wasi_ephemeral_nn' instead of 'wasi-nn'") add_definitions (-DWASM_ENABLE_WASI_EPHEMERAL_NN=1) @@ -682,10 +680,8 @@ endif () if (WAMR_BUILD_EXTENDED_CONST_EXPR EQUAL 1) message (" Extended constant expression enabled") add_definitions(-DWASM_ENABLE_EXTENDED_CONST_EXPR=1) -else() - message (" Extended constant expression disabled") - add_definitions(-DWASM_ENABLE_EXTENDED_CONST_EXPR=0) endif () + ######################################## # Show Phase4 Wasm proposals status. ######################################## diff --git a/build-scripts/requirements.txt b/build-scripts/requirements.txt index ef487e06e..480d0c4bb 100644 --- a/build-scripts/requirements.txt +++ b/build-scripts/requirements.txt @@ -1 +1 @@ -requests==2.32.3 \ No newline at end of file +requests==2.32.4 \ No newline at end of file diff --git a/build-scripts/runtime_lib.cmake b/build-scripts/runtime_lib.cmake index e538b5d91..f7639f6a6 100644 --- a/build-scripts/runtime_lib.cmake +++ b/build-scripts/runtime_lib.cmake @@ -106,6 +106,7 @@ endif () if (WAMR_BUILD_WASI_NN EQUAL 1) include (${IWASM_DIR}/libraries/wasi-nn/cmake/wasi_nn.cmake) + set (WAMR_BUILD_MODULE_INST_CONTEXT 1) endif () if (WAMR_BUILD_LIB_PTHREAD EQUAL 1) diff --git a/ci/coding_guidelines_check.py b/ci/coding_guidelines_check.py index 43c366259..131bca5b6 100644 --- a/ci/coding_guidelines_check.py +++ b/ci/coding_guidelines_check.py @@ -4,7 +4,6 @@ # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception # import argparse -import re from pathlib import Path import re import shlex @@ -39,7 +38,7 @@ INVALID_FILE_NAME_SEGMENT = r"([a-zA-Z0-9]+\-[a-zA-Z0-9]+)" def locate_command(command: str) -> bool: if not shutil.which(command): - print(f"Command '{command}'' not found") + print(f"Command '{command}' not found") return False return True diff --git a/core/config.h b/core/config.h index cf8acae65..38af3b029 100644 --- a/core/config.h +++ b/core/config.h @@ -193,8 +193,8 @@ #error "Heap aux stack allocation must be enabled for WASI threads" #endif -#ifndef WAMR_ENABLE_COPY_CALLSTACK -#define WAMR_ENABLE_COPY_CALLSTACK 0 +#ifndef WASM_ENABLE_COPY_CALL_STACK +#define WASM_ENABLE_COPY_CALL_STACK 0 #endif #ifndef WASM_ENABLE_BASE_LIB diff --git a/core/iwasm/aot/aot_loader.c b/core/iwasm/aot/aot_loader.c index 9f387034a..358ec5d1d 100644 --- a/core/iwasm/aot/aot_loader.c +++ b/core/iwasm/aot/aot_loader.c @@ -1332,6 +1332,13 @@ load_init_expr(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module, read_uint32(buf, buf_end, type_idx); read_uint32(buf, buf_end, length); + if (type_idx >= module->type_count + || !wasm_type_is_array_type(module->types[type_idx])) { + set_error_buf(error_buf, error_buf_size, + "invalid or non-array type index."); + goto fail; + } + if (init_expr_type == INIT_EXPR_TYPE_ARRAY_NEW_DEFAULT) { expr->u.unary.v.array_new_default.type_index = type_idx; expr->u.unary.v.array_new_default.length = length; @@ -1778,6 
+1785,12 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module, (void)u8; read_uint32(buf, buf_end, j); +#if WASM_ENABLE_AOT_VALIDATOR != 0 + if (j >= module->type_count) { + set_error_buf(error_buf, error_buf_size, "invalid type index"); + goto fail; + } +#endif if (module->types[j]->ref_count == UINT16_MAX) { set_error_buf(error_buf, error_buf_size, "wasm type's ref count too large"); @@ -2041,6 +2054,13 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module, AOTType *cur_type = module->types[j]; parent_type_idx = cur_type->parent_type_idx; if (parent_type_idx != (uint32)-1) { /* has parent */ +#if WASM_ENABLE_AOT_VALIDATOR != 0 + if (parent_type_idx >= module->type_count) { + set_error_buf(error_buf, error_buf_size, + "invalid parent type index"); + goto fail; + } +#endif AOTType *parent_type = module->types[parent_type_idx]; module->types[j]->parent_type = parent_type; @@ -2064,6 +2084,13 @@ load_types(const uint8 **p_buf, const uint8 *buf_end, AOTModule *module, AOTType *cur_type = module->types[j]; parent_type_idx = cur_type->parent_type_idx; if (parent_type_idx != (uint32)-1) { /* has parent */ +#if WASM_ENABLE_AOT_VALIDATOR != 0 + if (parent_type_idx >= module->type_count) { + set_error_buf(error_buf, error_buf_size, + "invalid parent type index"); + goto fail; + } +#endif AOTType *parent_type = module->types[parent_type_idx]; /* subtyping has been checked during compilation */ bh_assert(wasm_type_is_subtype_of( @@ -3378,7 +3405,7 @@ do_data_relocation(AOTModule *module, AOTRelocationGroup *group, uint8 *data_addr; uint32 data_size = 0, i; AOTRelocation *relocation = group->relocations; - void *symbol_addr; + void *symbol_addr = NULL; char *symbol, *data_section_name; if (!strncmp(group->section_name, ".rela.", 6)) { diff --git a/core/iwasm/aot/aot_runtime.c b/core/iwasm/aot/aot_runtime.c index a4268dd1a..4b94791ed 100644 --- a/core/iwasm/aot/aot_runtime.c +++ b/core/iwasm/aot/aot_runtime.c @@ -4243,9 +4243,9 @@ aot_frame_update_profile_info(WASMExecEnv *exec_env, bool alloc_frame) } #endif /* end of WASM_ENABLE_AOT_STACK_FRAME != 0 */ -#if WAMR_ENABLE_COPY_CALLSTACK != 0 +#if WASM_ENABLE_COPY_CALL_STACK != 0 uint32 -aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, wasm_frame_t *buffer, +aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer, const uint32 length, const uint32 skip_n, char *error_buf, uint32 error_buf_size) { @@ -4299,7 +4299,7 @@ aot_copy_callstack_tiny_frame(WASMExecEnv *exec_env, wasm_frame_t *buffer, } uint32 -aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, wasm_frame_t *buffer, +aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, WASMCApiFrame *buffer, const uint32 length, const uint32 skip_n, char *error_buf, uint32_t error_buf_size) { @@ -4349,7 +4349,7 @@ aot_copy_callstack_standard_frame(WASMExecEnv *exec_env, wasm_frame_t *buffer, } uint32 -aot_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer, +aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer, const uint32 length, const uint32 skip_n, char *error_buf, uint32_t error_buf_size) { @@ -4371,7 +4371,7 @@ aot_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer, error_buf, error_buf_size); } } -#endif // WAMR_ENABLE_COPY_CALLSTACK +#endif // WASM_ENABLE_COPY_CALL_STACK #if WASM_ENABLE_DUMP_CALL_STACK != 0 bool @@ -4983,8 +4983,8 @@ aot_dump_pgo_prof_data_to_buf(AOTModuleInstance *module_inst, char *buf, } prof_header.magic = 0xFF6C70726F667281LL; - /* Version 8 */ - prof_header.version = 
0x0000000000000008LL; + /* Version 9 */ + prof_header.version = 0x0000000000000009LL; /* with VARIANT_MASK_IR_PROF (IR Instrumentation) */ prof_header.version |= 0x1ULL << 56; /* with VARIANT_MASK_MEMPROF (Memory Profile) */ @@ -4993,14 +4993,19 @@ aot_dump_pgo_prof_data_to_buf(AOTModuleInstance *module_inst, char *buf, prof_header.num_prof_counters = num_prof_counters; prof_header.names_size = prof_names_size; prof_header.value_kind_last = 1; + /* __llvm_prf_bits won't be used in PGO, set dummy value here */ + prof_header.num_prof_bitmaps = 0; + prof_header.bitmap_delta = 0; if (!is_little_endian()) { aot_exchange_uint64((uint8 *)&prof_header.magic); aot_exchange_uint64((uint8 *)&prof_header.version); aot_exchange_uint64((uint8 *)&prof_header.num_prof_data); aot_exchange_uint64((uint8 *)&prof_header.num_prof_counters); + aot_exchange_uint64((uint8 *)&prof_header.num_prof_bitmaps); aot_exchange_uint64((uint8 *)&prof_header.names_size); aot_exchange_uint64((uint8 *)&prof_header.counters_delta); + aot_exchange_uint64((uint8 *)&prof_header.bitmap_delta); aot_exchange_uint64((uint8 *)&prof_header.value_kind_last); } @@ -5018,19 +5023,23 @@ aot_dump_pgo_prof_data_to_buf(AOTModuleInstance *module_inst, char *buf, prof_data_64->func_md5 = prof_data->func_md5; prof_data_64->func_hash = prof_data->func_hash; prof_data_64->offset_counters = prof_data->offset_counters; + prof_data_64->offset_bitmaps = prof_data->offset_bitmaps; prof_data_64->func_ptr = prof_data->func_ptr; prof_data_64->values = (uint64)(uintptr_t)prof_data->values; prof_data_64->num_counters = prof_data->num_counters; + /* __llvm_prf_bits won't be used in PGO, set dummy value here */ + prof_data_64->num_bitmaps = 0; prof_data_64->num_value_sites[0] = prof_data->num_value_sites[0]; prof_data_64->num_value_sites[1] = prof_data->num_value_sites[1]; if (!is_little_endian()) { aot_exchange_uint64((uint8 *)&prof_data_64->func_hash); aot_exchange_uint64((uint8 *)&prof_data_64->offset_counters); - aot_exchange_uint64((uint8 *)&prof_data_64->offset_counters); + aot_exchange_uint64((uint8 *)&prof_data_64->offset_bitmaps); aot_exchange_uint64((uint8 *)&prof_data_64->func_ptr); aot_exchange_uint64((uint8 *)&prof_data_64->values); aot_exchange_uint32((uint8 *)&prof_data_64->num_counters); + aot_exchange_uint32((uint8 *)&prof_data_64->num_bitmaps); aot_exchange_uint16((uint8 *)&prof_data_64->num_value_sites[0]); aot_exchange_uint16((uint8 *)&prof_data_64->num_value_sites[1]); } diff --git a/core/iwasm/aot/aot_runtime.h b/core/iwasm/aot/aot_runtime.h index 5be51c05a..e957f3959 100644 --- a/core/iwasm/aot/aot_runtime.h +++ b/core/iwasm/aot/aot_runtime.h @@ -437,6 +437,9 @@ typedef struct AOTFrame { } AOTFrame; #if WASM_ENABLE_STATIC_PGO != 0 +/* The bitmaps fields in LLVMProfileRawHeader, LLVMProfileData, + * LLVMProfileData_64 all dummy fields, it's used in MC/DC code coverage + * instead of PGO. 
See https://llvm.org/docs/InstrProfileFormat.html#bitmap */ typedef struct LLVMProfileRawHeader { uint64 magic; uint64 version; @@ -445,8 +448,11 @@ typedef struct LLVMProfileRawHeader { uint64 padding_bytes_before_counters; uint64 num_prof_counters; uint64 padding_bytes_after_counters; + uint64 num_prof_bitmaps; + uint64 padding_bytes_after_bitmaps; uint64 names_size; uint64 counters_delta; + uint64 bitmap_delta; uint64 names_delta; uint64 value_kind_last; } LLVMProfileRawHeader; @@ -464,10 +470,12 @@ typedef struct LLVMProfileData { uint64 func_md5; uint64 func_hash; uint64 offset_counters; + uint64 offset_bitmaps; uintptr_t func_ptr; ValueProfNode **values; uint32 num_counters; uint16 num_value_sites[2]; + uint32 num_bitmaps; } LLVMProfileData; /* The profiling data for writing to the output file, the width of @@ -477,10 +485,12 @@ typedef struct LLVMProfileData_64 { uint64 func_md5; uint64 func_hash; uint64 offset_counters; + uint64 offset_bitmaps; uint64 func_ptr; uint64 values; uint32 num_counters; uint16 num_value_sites[2]; + uint32 num_bitmaps; } LLVMProfileData_64; #endif /* end of WASM_ENABLE_STATIC_PGO != 0 */ @@ -777,12 +787,12 @@ aot_frame_update_profile_info(WASMExecEnv *exec_env, bool alloc_frame); bool aot_create_call_stack(struct WASMExecEnv *exec_env); -#if WAMR_ENABLE_COPY_CALLSTACK != 0 +#if WASM_ENABLE_COPY_CALL_STACK != 0 uint32 -aot_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer, +aot_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer, const uint32 length, const uint32 skip_n, char *error_buf, uint32_t error_buf_size); -#endif // WAMR_ENABLE_COPY_CALLSTACK +#endif // WASM_ENABLE_COPY_CALL_STACK /** * @brief Dump wasm call stack or get the size diff --git a/core/iwasm/common/gc/gc_type.c b/core/iwasm/common/gc/gc_type.c index bafa3c86c..8ae12f642 100644 --- a/core/iwasm/common/gc/gc_type.c +++ b/core/iwasm/common/gc/gc_type.c @@ -1145,7 +1145,7 @@ wasm_reftype_is_subtype_of(uint8 type1, const WASMRefType *ref_type1, return true; else { int32 heap_type = ref_type1->ref_ht_common.heap_type; - // We dont care whether type2 is nullable or not. So + // We don't care whether type2 is nullable or not. So // we normalize it into its related one-byte type. 
if (type2 == REF_TYPE_HT_NULLABLE || type2 == REF_TYPE_HT_NON_NULLABLE) { diff --git a/core/iwasm/common/wasm_runtime_common.c b/core/iwasm/common/wasm_runtime_common.c index dcee0aeaf..e1d3542b9 100644 --- a/core/iwasm/common/wasm_runtime_common.c +++ b/core/iwasm/common/wasm_runtime_common.c @@ -1743,9 +1743,9 @@ wasm_runtime_destroy_exec_env(WASMExecEnv *exec_env) wasm_exec_env_destroy(exec_env); } -#if WAMR_ENABLE_COPY_CALLSTACK != 0 +#if WASM_ENABLE_COPY_CALL_STACK != 0 uint32 -wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer, +wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer, const uint32 length, const uint32 skip_n, char *error_buf, uint32_t error_buf_size) { @@ -1780,7 +1780,7 @@ wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer, strncpy(error_buf, err_msg, error_buf_size); return 0; } -#endif // WAMR_ENABLE_COPY_CALLSTACK +#endif // WASM_ENABLE_COPY_CALL_STACK bool wasm_runtime_init_thread_env(void) diff --git a/core/iwasm/common/wasm_runtime_common.h b/core/iwasm/common/wasm_runtime_common.h index 64a6cd793..ad22ea10b 100644 --- a/core/iwasm/common/wasm_runtime_common.h +++ b/core/iwasm/common/wasm_runtime_common.h @@ -758,12 +758,12 @@ wasm_runtime_create_exec_env(WASMModuleInstanceCommon *module_inst, WASM_RUNTIME_API_EXTERN void wasm_runtime_destroy_exec_env(WASMExecEnv *exec_env); -#if WAMR_ENABLE_COPY_CALLSTACK != 0 +#if WASM_ENABLE_COPY_CALL_STACK != 0 WASM_RUNTIME_API_EXTERN uint32_t -wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer, +wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer, const uint32 length, const uint32 skip_n, char *error_buf, uint32 error_buf_size); -#endif // WAMR_ENABLE_COPY_CALLSTACK +#endif // WASM_ENABLE_COPY_CALL_STACK /* See wasm_export.h for description */ WASM_RUNTIME_API_EXTERN WASMModuleInstanceCommon * diff --git a/core/iwasm/compilation/aot_emit_aot_file.c b/core/iwasm/compilation/aot_emit_aot_file.c index 208498fcb..fec1c34cb 100644 --- a/core/iwasm/compilation/aot_emit_aot_file.c +++ b/core/iwasm/compilation/aot_emit_aot_file.c @@ -3419,6 +3419,12 @@ aot_resolve_object_data_sections(AOTObjectData *obj_data) bh_memcpy_s(data_section->name, size, buf, size); data_section->is_name_allocated = true; } + else if (obj_data->comp_ctx->enable_llvm_pgo + && !strcmp(name, "__llvm_prf_bits")) { + LOG_WARNING("__llvm_prf_bits section is not supported and " + "shouldn't be used in PGO."); + return false; + } if (obj_data->comp_ctx->enable_llvm_pgo && !strcmp(name, "__llvm_prf_names")) { diff --git a/core/iwasm/compilation/aot_llvm.c b/core/iwasm/compilation/aot_llvm.c index 4f6878148..a36ffb7ee 100644 --- a/core/iwasm/compilation/aot_llvm.c +++ b/core/iwasm/compilation/aot_llvm.c @@ -3207,6 +3207,21 @@ aot_create_comp_context(const AOTCompData *comp_data, aot_comp_option_t option) #if WASM_ENABLE_WAMR_COMPILER != 0 WASMModule *wasm_module = (WASMModule *)comp_data->wasm_module; + bool is_memory64 = false; + + /* TODO: multi-memories for now assuming the memory64 flag of a memory is + * consistent across multi-memories */ + if (wasm_module->import_memory_count > 0) + is_memory64 = !!(wasm_module->import_memories[0].u.memory.mem_type.flags + & MEMORY64_FLAG); + else if (wasm_module->memory_count > 0) + is_memory64 = !!(wasm_module->memories[0].flags & MEMORY64_FLAG); + + if (!(option->bounds_checks == 1 || option->bounds_checks == 0) + && is_memory64) { + /* For memory64, the boundary check default value is true */ + 
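(A hedged aside on the default being applied here: on 64-bit hosts WAMR can normally skip per-access checks for 32-bit linear memories by reserving guard pages around the memory, but a 64-bit index space cannot be covered that way, so enabling the software bounds check looks like the intended safe default whenever the user did not pass an explicit bounds-checks setting.)

A minimal restatement of that defaulting rule, under the assumption that any option->bounds_checks value other than 0 or 1 means "not specified"; the helper name is mine and does not exist in the source:

    /* illustrative sketch only, not part of the patch */
    static bool
    apply_memory64_bounds_check_default(bool current_enable,
                                        int bounds_checks_opt, bool is_memory64)
    {
        bool explicitly_set =
            (bounds_checks_opt == 0 || bounds_checks_opt == 1);
        if (!explicitly_set && is_memory64)
            /* a 64-bit address space cannot rely on the guard-page scheme */
            return true;
        /* otherwise keep whatever was decided earlier (user choice or the
           platform default) */
        return current_enable;
    }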
comp_ctx->enable_bound_check = true; + } /* Return error if SIMD is disabled by command line but SIMD instructions * are used */ @@ -3987,7 +4002,7 @@ aot_get_func_from_table(const AOTCompContext *comp_ctx, LLVMValueRef base, if (!(func = LLVMBuildBitCast(comp_ctx->builder, func, func_type, "func"))) { - aot_set_last_error("cast function fialed."); + aot_set_last_error("cast function failed."); goto fail; } @@ -4056,7 +4071,7 @@ aot_load_const_from_table(AOTCompContext *comp_ctx, LLVMValueRef base, if (!(const_addr = LLVMBuildBitCast(comp_ctx->builder, const_addr, const_ptr_type, "const_addr"))) { - aot_set_last_error("cast const fialed."); + aot_set_last_error("cast const failed."); return NULL; } diff --git a/core/iwasm/include/wasm_export.h b/core/iwasm/include/wasm_export.h index b4ab34bea..ca96f5824 100644 --- a/core/iwasm/include/wasm_export.h +++ b/core/iwasm/include/wasm_export.h @@ -139,8 +139,6 @@ typedef struct wasm_frame_t { uint32_t *lp; } WASMCApiFrame; -typedef WASMCApiFrame wasm_frame_t; - /* WASM section */ typedef struct wasm_section_t { struct wasm_section_t *next; @@ -904,7 +902,7 @@ wasm_runtime_destroy_exec_env(wasm_exec_env_t exec_env); * @return number of copied frames */ WASM_RUNTIME_API_EXTERN uint32_t -wasm_copy_callstack(const wasm_exec_env_t exec_env, wasm_frame_t *buffer, +wasm_copy_callstack(const wasm_exec_env_t exec_env, WASMCApiFrame *buffer, const uint32_t length, const uint32_t skip_n, char *error_buf, uint32_t error_buf_size); diff --git a/core/iwasm/interpreter/wasm_interp_classic.c b/core/iwasm/interpreter/wasm_interp_classic.c index 17e1085c2..f7de04806 100644 --- a/core/iwasm/interpreter/wasm_interp_classic.c +++ b/core/iwasm/interpreter/wasm_interp_classic.c @@ -4088,7 +4088,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module, case WASM_OP_STRING_ENCODE_LOSSY_UTF8_ARRAY: case WASM_OP_STRING_ENCODE_WTF8_ARRAY: { - uint32 start, array_len, count; + uint32 start, array_len; int32 bytes_written; EncodingFlag flag = WTF8; WASMArrayType *array_type; diff --git a/core/iwasm/interpreter/wasm_loader.c b/core/iwasm/interpreter/wasm_loader.c index 2c6234d89..12e68c06e 100644 --- a/core/iwasm/interpreter/wasm_loader.c +++ b/core/iwasm/interpreter/wasm_loader.c @@ -1001,10 +1001,10 @@ load_init_expr(WASMModule *module, const uint8 **p_buf, const uint8 *buf_end, { uint8 type1; +#if WASM_ENABLE_GC == 0 CHECK_BUF(p, p_end, 1); type1 = read_uint8(p); -#if WASM_ENABLE_GC == 0 cur_value.ref_index = NULL_REF; if (!push_const_expr_stack(&const_expr_ctx, flag, type1, &cur_value, @@ -1014,9 +1014,14 @@ load_init_expr(WASMModule *module, const uint8 **p_buf, const uint8 *buf_end, error_buf, error_buf_size)) goto fail; #else + int32 heap_type; + read_leb_int32(p, p_end, heap_type); + type1 = (uint8)((int32)0x80 + heap_type); + cur_value.gc_obj = NULL_REF; if (!is_byte_a_type(type1) + || !wasm_is_valid_heap_type(heap_type) || wasm_is_type_multi_byte_type(type1)) { p--; read_leb_uint32(p, p_end, type_idx); @@ -2267,9 +2272,9 @@ load_type_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module, "recursive type count too large"); return false; } - module->type_count += rec_count - 1; new_total_size = - sizeof(WASMFuncType *) * (uint64)module->type_count; + sizeof(WASMFuncType *) + * (uint64)(module->type_count + rec_count - 1); if (new_total_size > UINT32_MAX) { set_error_buf(error_buf, error_buf_size, "allocate memory failed"); @@ -2277,6 +2282,7 @@ load_type_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module, } MEM_REALLOC(module->types, 
(uint32)total_size, (uint32)new_total_size); + module->type_count += rec_count - 1; total_size = new_total_size; } @@ -2813,7 +2819,8 @@ load_table_import(const uint8 **p_buf, const uint8 *buf_end, error_buf_size)) { return false; } - if (wasm_is_reftype_htref_non_nullable(ref_type.ref_type)) { + if (!wasm_is_type_reftype(ref_type.ref_type) + || wasm_is_reftype_htref_non_nullable(ref_type.ref_type)) { set_error_buf(error_buf, error_buf_size, "type mismatch"); return false; } @@ -3339,6 +3346,15 @@ load_table(const uint8 **p_buf, const uint8 *buf_end, WASMModule *module, error_buf_size)) { return false; } + /* + * TODO: add this validator + * `wasm_is_reftype_htref_non_nullable(ref_type.ref_type)` + * after sync up with the latest GC spec + */ + if (!wasm_is_type_reftype(ref_type.ref_type)) { + set_error_buf(error_buf, error_buf_size, "type mismatch"); + return false; + } table->table_type.elem_type = ref_type.ref_type; if (need_ref_type_map) { if (!(table->table_type.elem_ref_type = @@ -3566,7 +3582,8 @@ load_import_section(const uint8 *buf, const uint8 *buf_end, WASMModule *module, /* valtype */ CHECK_BUF(p, p_end, 1); global_type = read_uint8(p); - if (wasm_is_reftype_htref_nullable(global_type)) { + if (wasm_is_reftype_htref_nullable(global_type) + || wasm_is_reftype_htref_non_nullable(global_type)) { int32 heap_type; read_leb_int32(p, p_end, heap_type); (void)heap_type; @@ -3927,7 +3944,7 @@ load_function_section(const uint8 *buf, const uint8 *buf_end, * we shall make a copy of code body [p_code, p_code + code_size] * when we are worrying about inappropriate releasing behaviour. * all code bodies are actually in a buffer which user allocates in - * his embedding environment and we don't have power on them. + * their embedding environment and we don't have power over them. 
* it will be like: * code_body_cp = malloc(code_size); * memcpy(code_body_cp, p_code, code_size); @@ -15253,8 +15270,6 @@ re_scan: case WASM_OP_STRING_NEW_LOSSY_UTF8: case WASM_OP_STRING_NEW_WTF8: { - uint32 memidx; - #if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0 func->has_memory_operations = true; #endif @@ -15266,7 +15281,6 @@ re_scan: POP_I32(); POP_I32(); PUSH_REF(REF_TYPE_STRINGREF); - (void)memidx; break; } case WASM_OP_STRING_CONST: @@ -15294,8 +15308,6 @@ re_scan: case WASM_OP_STRING_ENCODE_LOSSY_UTF8: case WASM_OP_STRING_ENCODE_WTF8: { - uint32 memidx; - #if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0 func->has_memory_operations = true; #endif @@ -15307,7 +15319,6 @@ re_scan: POP_I32(); POP_STRINGREF(); PUSH_I32(); - (void)memidx; break; } case WASM_OP_STRING_CONCAT: @@ -15348,8 +15359,6 @@ re_scan: case WASM_OP_STRINGVIEW_WTF8_ENCODE_LOSSY_UTF8: case WASM_OP_STRINGVIEW_WTF8_ENCODE_WTF8: { - uint32 memidx; - #if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0 func->has_memory_operations = true; #endif @@ -15364,7 +15373,6 @@ re_scan: POP_REF(REF_TYPE_STRINGVIEWWTF8); PUSH_I32(); PUSH_I32(); - (void)memidx; break; } case WASM_OP_STRINGVIEW_WTF8_SLICE: @@ -15396,8 +15404,6 @@ re_scan: } case WASM_OP_STRINGVIEW_WTF16_ENCODE: { - uint32 memidx; - #if WASM_ENABLE_JIT != 0 || WASM_ENABLE_WAMR_COMPILER != 0 func->has_memory_operations = true; #endif @@ -15411,7 +15417,6 @@ re_scan: POP_I32(); POP_REF(REF_TYPE_STRINGVIEWWTF16); PUSH_I32(); - (void)memidx; break; } case WASM_OP_STRINGVIEW_WTF16_SLICE: diff --git a/core/iwasm/interpreter/wasm_mini_loader.c b/core/iwasm/interpreter/wasm_mini_loader.c index 094ba4d1f..771538a14 100644 --- a/core/iwasm/interpreter/wasm_mini_loader.c +++ b/core/iwasm/interpreter/wasm_mini_loader.c @@ -1395,7 +1395,7 @@ load_function_section(const uint8 *buf, const uint8 *buf_end, * we shall make a copy of code body [p_code, p_code + code_size] * when we are worrying about inappropriate releasing behaviour. * all code bodies are actually in a buffer which user allocates in - * his embedding environment and we don't have power on them. + * their embedding environment and we don't have power over them. * it will be like: * code_body_cp = malloc(code_size); * memcpy(code_body_cp, p_code, code_size); diff --git a/core/iwasm/interpreter/wasm_runtime.c b/core/iwasm/interpreter/wasm_runtime.c index 8045d35e3..2b7b11f90 100644 --- a/core/iwasm/interpreter/wasm_runtime.c +++ b/core/iwasm/interpreter/wasm_runtime.c @@ -2751,7 +2751,7 @@ wasm_instantiate(WASMModule *module, WASMModuleInstance *parent, } STORE_PTR((void **)global_data, func_obj); global_data += sizeof(void *); - /* Also update the inital_value since other globals may + /* Also update the initial_value since other globals may * refer to this */ global->initial_value.gc_obj = (wasm_obj_t)func_obj; break; @@ -4256,9 +4256,9 @@ wasm_get_module_inst_mem_consumption(const WASMModuleInstance *module_inst, #endif /* end of (WASM_ENABLE_MEMORY_PROFILING != 0) \ || (WASM_ENABLE_MEMORY_TRACING != 0) */ -#if WAMR_ENABLE_COPY_CALLSTACK != 0 +#if WASM_ENABLE_COPY_CALL_STACK != 0 uint32 -wasm_interp_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer, +wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer, uint32 length, uint32 skip_n, char *error_buf, uint32_t error_buf_size) { @@ -4303,7 +4303,7 @@ wasm_interp_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer, } return count >= skip_n ? 
count - skip_n : 0; } -#endif // WAMR_ENABLE_COPY_CALLSTACK +#endif // WASM_ENABLE_COPY_CALL_STACK #if WASM_ENABLE_DUMP_CALL_STACK != 0 bool diff --git a/core/iwasm/interpreter/wasm_runtime.h b/core/iwasm/interpreter/wasm_runtime.h index 8d38c8831..3d91d8b60 100644 --- a/core/iwasm/interpreter/wasm_runtime.h +++ b/core/iwasm/interpreter/wasm_runtime.h @@ -731,12 +731,12 @@ wasm_get_table_inst(const WASMModuleInstance *module_inst, uint32 tbl_idx) #if WASM_ENABLE_DUMP_CALL_STACK != 0 -#if WAMR_ENABLE_COPY_CALLSTACK != 0 +#if WASM_ENABLE_COPY_CALL_STACK != 0 uint32 -wasm_interp_copy_callstack(WASMExecEnv *exec_env, wasm_frame_t *buffer, +wasm_interp_copy_callstack(WASMExecEnv *exec_env, WASMCApiFrame *buffer, uint32 length, uint32 skip_n, char *error_buf, uint32_t error_buf_size); -#endif // WAMR_ENABLE_COPY_CALLSTACK +#endif // WASM_ENABLE_COPY_CALL_STACK bool wasm_interp_create_call_stack(struct WASMExecEnv *exec_env); diff --git a/core/iwasm/libraries/debug-engine/debug_engine.c b/core/iwasm/libraries/debug-engine/debug_engine.c index 340e657e8..24d57d706 100644 --- a/core/iwasm/libraries/debug-engine/debug_engine.c +++ b/core/iwasm/libraries/debug-engine/debug_engine.c @@ -743,7 +743,7 @@ wasm_debug_instance_get_obj_mem(WASMDebugInstance *instance, uint64 offset, module_inst = (WASMModuleInstance *)exec_env->module_inst; if (offset + *size > module_inst->module->load_size) { - LOG_VERBOSE("wasm_debug_instance_get_data_mem size over flow!\n"); + LOG_VERBOSE("wasm_debug_instance_get_data_mem size overflow!\n"); *size = module_inst->module->load_size >= offset ? module_inst->module->load_size - offset : 0; @@ -797,7 +797,7 @@ wasm_debug_instance_get_linear_mem(WASMDebugInstance *instance, uint64 offset, num_bytes_per_page = memory->num_bytes_per_page; linear_mem_size = num_bytes_per_page * memory->cur_page_count; if (offset + *size > linear_mem_size) { - LOG_VERBOSE("wasm_debug_instance_get_linear_mem size over flow!\n"); + LOG_VERBOSE("wasm_debug_instance_get_linear_mem size overflow!\n"); *size = linear_mem_size >= offset ? linear_mem_size - offset : 0; } bh_memcpy_s(buf, (uint32)*size, memory->memory_data + offset, @@ -830,7 +830,7 @@ wasm_debug_instance_set_linear_mem(WASMDebugInstance *instance, uint64 offset, num_bytes_per_page = memory->num_bytes_per_page; linear_mem_size = num_bytes_per_page * memory->cur_page_count; if (offset + *size > linear_mem_size) { - LOG_VERBOSE("wasm_debug_instance_get_linear_mem size over flow!\n"); + LOG_VERBOSE("wasm_debug_instance_get_linear_mem size overflow!\n"); *size = linear_mem_size >= offset ? linear_mem_size - offset : 0; } bh_memcpy_s(memory->memory_data + offset, (uint32)*size, buf, diff --git a/core/iwasm/libraries/debug-engine/handler.c b/core/iwasm/libraries/debug-engine/handler.c index 743165dd9..14c7fae6e 100644 --- a/core/iwasm/libraries/debug-engine/handler.c +++ b/core/iwasm/libraries/debug-engine/handler.c @@ -175,6 +175,19 @@ process_wasm_global(WASMGDBServer *server, char *args) os_mutex_unlock(&tmpbuf_lock); } +/* TODO: let server send an empty/error reply. + Original issue: 4265 + Not tested yet, but it should work. 
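(A hedged aside on the reply strings used with this helper: in the GDB Remote Serial Protocol an empty reply conventionally tells the client that a command is not supported, while an "ENN" packet such as "E01" reports an error, which appears to be why the call sites below pass either "" or "E01".)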
+ */ +static void +send_reply(WASMGDBServer *server, const char *err) +{ + if (!err || !*err) + write_packet(server, ""); + else + write_packet(server, err); +} + void handle_general_query(WASMGDBServer *server, char *payload) { @@ -214,6 +227,7 @@ handle_general_query(WASMGDBServer *server, char *payload) if (!args) { LOG_ERROR("payload parse error during handle_general_query"); + send_reply(server, ""); return; } @@ -384,7 +398,7 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid) if (status == 0) { os_mutex_lock(&tmpbuf_lock); (void)snprintf(tmpbuf, MAX_PACKET_SIZE, "W%02" PRIx32, status); - write_packet(server, tmpbuf); + send_reply(server, tmpbuf); os_mutex_unlock(&tmpbuf_lock); return; } @@ -403,6 +417,7 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid) "T%02" PRIx32 "thread:%" PRIx64 ";name:%s;", gdb_status, (uint64)(uintptr_t)tid, "nobody"); if (len < 0 || len >= MAX_PACKET_SIZE) { + send_reply(server, "E01"); os_mutex_unlock(&tmpbuf_lock); return; } @@ -410,6 +425,7 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid) if (tids_count > 0) { int n = snprintf(tmpbuf + len, MAX_PACKET_SIZE - len, "threads:"); if (n < 0 || n >= MAX_PACKET_SIZE - len) { + send_reply(server, "E01"); os_mutex_unlock(&tmpbuf_lock); return; } @@ -426,6 +442,7 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid) } if (n < 0 || n >= MAX_PACKET_SIZE - len) { + send_reply(server, "E01"); os_mutex_unlock(&tmpbuf_lock); return; } @@ -452,6 +469,7 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid) "thread-pcs:%" PRIx64 ";00:%s;reason:%s;description:", pc, pc_string, "exception"); if (n < 0 || n >= MAX_PACKET_SIZE - len) { + send_reply(server, "E01"); os_mutex_unlock(&tmpbuf_lock); return; } @@ -462,6 +480,7 @@ send_thread_stop_status(WASMGDBServer *server, uint32 status, korp_tid tid) n = snprintf(tmpbuf + len, MAX_PACKET_SIZE - len, "%02x", exception[i]); if (n < 0 || n >= MAX_PACKET_SIZE - len) { + send_reply(server, "E01"); os_mutex_unlock(&tmpbuf_lock); return; } @@ -592,7 +611,7 @@ handle_get_register(WASMGDBServer *server, char *payload) int32 i = strtol(payload, NULL, 16); if (i != 0) { - write_packet(server, "E01"); + send_reply(server, "E01"); return; } regdata = wasm_debug_instance_get_pc( @@ -748,7 +767,7 @@ handle_add_break(WASMGDBServer *server, char *payload) if ((arg_c = sscanf(payload, "%zx,%" SCNx64 ",%zx", &type, &addr, &length)) != 3) { LOG_ERROR("Unsupported number of add break arguments %d", arg_c); - write_packet(server, ""); + send_reply(server, ""); return; } @@ -783,7 +802,7 @@ handle_remove_break(WASMGDBServer *server, char *payload) if ((arg_c = sscanf(payload, "%zx,%" SCNx64 ",%zx", &type, &addr, &length)) != 3) { LOG_ERROR("Unsupported number of remove break arguments %d", arg_c); - write_packet(server, ""); + send_reply(server, ""); return; } @@ -835,6 +854,7 @@ handle_malloc(WASMGDBServer *server, char *payload) } else { LOG_ERROR("Payload parse error during handle malloc"); + send_reply(server, ""); return; } diff --git a/core/iwasm/libraries/lib-socket/src/wasi/wasi_socket_ext.c b/core/iwasm/libraries/lib-socket/src/wasi/wasi_socket_ext.c index 1172d0a77..f573d35b8 100644 --- a/core/iwasm/libraries/lib-socket/src/wasi/wasi_socket_ext.c +++ b/core/iwasm/libraries/lib-socket/src/wasi/wasi_socket_ext.c @@ -12,6 +12,26 @@ #include #include +/* + * Avoid direct TLS access to allow a single library to be + * linked to both of threaded and non-threaded 
applications. + * + * wasi-libc's errno is a TLS variable, exposed directly via + * errno.h. if we use it here, LLVM may lower it differently, + * depending on enabled features like atomcs and bulk-memory. + * we tweak the way to access errno here in order to make us + * compatible with both of threaded and non-threaded applications. + * __errno_location() should be reasonably stable because + * it was introduced as an alternative ABI for non-C software. + * https://github.com/WebAssembly/wasi-libc/pull/347 + */ +#if defined(errno) +#undef errno +#endif +int * +__errno_location(void); +#define errno (*__errno_location()) + #define HANDLE_ERROR(error) \ if (error != __WASI_ERRNO_SUCCESS) { \ errno = error; \ diff --git a/core/iwasm/libraries/wasi-nn/include/wasi_ephemeral_nn.h b/core/iwasm/libraries/wasi-nn/include/wasi_ephemeral_nn.h new file mode 100644 index 000000000..f76295a1e --- /dev/null +++ b/core/iwasm/libraries/wasi-nn/include/wasi_ephemeral_nn.h @@ -0,0 +1,12 @@ +/* + * Copyright (C) 2025 Midokura Japan KK. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#define WASM_ENABLE_WASI_EPHEMERAL_NN 1 +#define WASI_NN_NAME(name) wasi_ephemeral_nn_##name + +#include "wasi_nn.h" + +#undef WASM_ENABLE_WASI_EPHEMERAL_NN +#undef WASI_NN_NAME diff --git a/core/iwasm/libraries/wasi-nn/include/wasi_nn.h b/core/iwasm/libraries/wasi-nn/include/wasi_nn.h index 35b2d9bf0..cda26324e 100644 --- a/core/iwasm/libraries/wasi-nn/include/wasi_nn.h +++ b/core/iwasm/libraries/wasi-nn/include/wasi_nn.h @@ -21,6 +21,7 @@ #else #define WASI_NN_IMPORT(name) \ __attribute__((import_module("wasi_nn"), import_name(name))) +#warning You are using "wasi_nn", which is a legacy WAMR-specific ABI. It's deperecated and will likely be removed in future versions of WAMR. Please use "wasi_ephemeral_nn" instead. (For a WASM module, use the wasi_ephemeral_nn.h header instead. For the runtime configurations, enable WASM_ENABLE_WASI_EPHEMERAL_NN/WAMR_BUILD_WASI_EPHEMERAL_NN.) #endif /** @@ -34,17 +35,22 @@ * @return wasi_nn_error Execution status. */ #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 -wasi_nn_error -load(graph_builder *builder, uint32_t builder_len, graph_encoding encoding, - execution_target target, graph *g) WASI_NN_IMPORT("load"); +WASI_NN_ERROR_TYPE +WASI_NN_NAME(load) +(WASI_NN_NAME(graph_builder) * builder, uint32_t builder_len, + WASI_NN_NAME(graph_encoding) encoding, WASI_NN_NAME(execution_target) target, + WASI_NN_NAME(graph) * g) WASI_NN_IMPORT("load"); #else -wasi_nn_error -load(graph_builder_array *builder, graph_encoding encoding, - execution_target target, graph *g) WASI_NN_IMPORT("load"); +WASI_NN_ERROR_TYPE +WASI_NN_NAME(load) +(WASI_NN_NAME(graph_builder_array) * builder, + WASI_NN_NAME(graph_encoding) encoding, WASI_NN_NAME(execution_target) target, + WASI_NN_NAME(graph) * g) WASI_NN_IMPORT("load"); #endif -wasi_nn_error -load_by_name(const char *name, uint32_t name_len, graph *g) +WASI_NN_ERROR_TYPE +WASI_NN_NAME(load_by_name) +(const char *name, uint32_t name_len, WASI_NN_NAME(graph) * g) WASI_NN_IMPORT("load_by_name"); /** @@ -59,8 +65,9 @@ load_by_name(const char *name, uint32_t name_len, graph *g) * @param ctx Execution context. * @return wasi_nn_error Execution status. 
*/ -wasi_nn_error -init_execution_context(graph g, graph_execution_context *ctx) +WASI_NN_ERROR_TYPE +WASI_NN_NAME(init_execution_context) +(WASI_NN_NAME(graph) g, WASI_NN_NAME(graph_execution_context) * ctx) WASI_NN_IMPORT("init_execution_context"); /** @@ -71,9 +78,10 @@ init_execution_context(graph g, graph_execution_context *ctx) * @param tensor Input tensor. * @return wasi_nn_error Execution status. */ -wasi_nn_error -set_input(graph_execution_context ctx, uint32_t index, tensor *tensor) - WASI_NN_IMPORT("set_input"); +WASI_NN_ERROR_TYPE +WASI_NN_NAME(set_input) +(WASI_NN_NAME(graph_execution_context) ctx, uint32_t index, + WASI_NN_NAME(tensor) * tensor) WASI_NN_IMPORT("set_input"); /** * @brief Compute the inference on the given inputs. @@ -81,8 +89,9 @@ set_input(graph_execution_context ctx, uint32_t index, tensor *tensor) * @param ctx Execution context. * @return wasi_nn_error Execution status. */ -wasi_nn_error -compute(graph_execution_context ctx) WASI_NN_IMPORT("compute"); +WASI_NN_ERROR_TYPE +WASI_NN_NAME(compute) +(WASI_NN_NAME(graph_execution_context) ctx) WASI_NN_IMPORT("compute"); /** * @brief Extract the outputs after inference. @@ -97,15 +106,16 @@ compute(graph_execution_context ctx) WASI_NN_IMPORT("compute"); * @return wasi_nn_error Execution status. */ #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 -wasi_nn_error -get_output(graph_execution_context ctx, uint32_t index, - tensor_data output_tensor, uint32_t output_tensor_max_size, - uint32_t *output_tensor_size) WASI_NN_IMPORT("get_output"); +WASI_NN_ERROR_TYPE +WASI_NN_NAME(get_output) +(WASI_NN_NAME(graph_execution_context) ctx, uint32_t index, + uint8_t *output_tensor, uint32_t output_tensor_max_size, + uint32_t *output_tensor_size) WASI_NN_IMPORT("get_output"); #else -wasi_nn_error -get_output(graph_execution_context ctx, uint32_t index, - tensor_data output_tensor, uint32_t *output_tensor_size) - WASI_NN_IMPORT("get_output"); +WASI_NN_ERROR_TYPE +WASI_NN_NAME(get_output) +(graph_execution_context ctx, uint32_t index, uint8_t *output_tensor, + uint32_t *output_tensor_size) WASI_NN_IMPORT("get_output"); #endif #endif diff --git a/core/iwasm/libraries/wasi-nn/include/wasi_nn_types.h b/core/iwasm/libraries/wasi-nn/include/wasi_nn_types.h index dd6b8f14a..952fb65e2 100644 --- a/core/iwasm/libraries/wasi-nn/include/wasi_nn_types.h +++ b/core/iwasm/libraries/wasi-nn/include/wasi_nn_types.h @@ -13,6 +13,23 @@ extern "C" { #endif +/* our host logic doesn't use any prefix. neither legacy wasi_nn.h does. */ + +#if !defined(__wasm__) || !defined(WASI_NN_NAME) +#define WASI_NN_NAME(name) name +#define WASI_NN_ERROR_NAME(name) name +#define WASI_NN_TYPE_NAME(name) name +#define WASI_NN_ENCODING_NAME(name) name +#define WASI_NN_TARGET_NAME(name) name +#define WASI_NN_ERROR_TYPE wasi_nn_error +#else +#define WASI_NN_ERROR_NAME(name) WASI_NN_NAME(error_##name) +#define WASI_NN_TYPE_NAME(name) WASI_NN_NAME(type_##name) +#define WASI_NN_ENCODING_NAME(name) WASI_NN_NAME(encoding_##name) +#define WASI_NN_TARGET_NAME(name) WASI_NN_NAME(target_##name) +#define WASI_NN_ERROR_TYPE WASI_NN_NAME(error); +#endif + /** * ERRORS * @@ -22,22 +39,22 @@ extern "C" { // https://github.com/WebAssembly/wasi-nn/blob/71320d95b8c6d43f9af7f44e18b1839db85d89b4/wasi-nn.witx#L5-L17 // Error codes returned by functions in this API. 
typedef enum { - success = 0, - invalid_argument, - invalid_encoding, - missing_memory, - busy, - runtime_error, - unsupported_operation, - too_large, - not_found, + WASI_NN_ERROR_NAME(success) = 0, + WASI_NN_ERROR_NAME(invalid_argument), + WASI_NN_ERROR_NAME(invalid_encoding), + WASI_NN_ERROR_NAME(missing_memory), + WASI_NN_ERROR_NAME(busy), + WASI_NN_ERROR_NAME(runtime_error), + WASI_NN_ERROR_NAME(unsupported_operation), + WASI_NN_ERROR_NAME(too_large), + WASI_NN_ERROR_NAME(not_found), // for WasmEdge-wasi-nn - end_of_sequence = 100, // End of Sequence Found. - context_full = 101, // Context Full. - prompt_tool_long = 102, // Prompt Too Long. - model_not_found = 103, // Model Not Found. -} wasi_nn_error; + WASI_NN_ERROR_NAME(end_of_sequence) = 100, // End of Sequence Found. + WASI_NN_ERROR_NAME(context_full) = 101, // Context Full. + WASI_NN_ERROR_NAME(prompt_tool_long) = 102, // Prompt Too Long. + WASI_NN_ERROR_NAME(model_not_found) = 103, // Model Not Found. +} WASI_NN_ERROR_TYPE; /** * TENSOR @@ -51,15 +68,27 @@ typedef enum { typedef struct { uint32_t *buf; uint32_t size; -} tensor_dimensions; +} WASI_NN_NAME(tensor_dimensions); #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 // sync up with // https://github.com/WebAssembly/wasi-nn/blob/71320d95b8c6d43f9af7f44e18b1839db85d89b4/wasi-nn.witx#L19-L28 // The type of the elements in a tensor. -typedef enum { fp16 = 0, fp32, fp64, u8, i32, i64 } tensor_type; +typedef enum { + WASI_NN_TYPE_NAME(fp16) = 0, + WASI_NN_TYPE_NAME(fp32), + WASI_NN_TYPE_NAME(fp64), + WASI_NN_TYPE_NAME(u8), + WASI_NN_TYPE_NAME(i32), + WASI_NN_TYPE_NAME(i64), +} WASI_NN_NAME(tensor_type); #else -typedef enum { fp16 = 0, fp32, up8, ip32 } tensor_type; +typedef enum { + WASI_NN_TYPE_NAME(fp16) = 0, + WASI_NN_TYPE_NAME(fp32), + WASI_NN_TYPE_NAME(up8), + WASI_NN_TYPE_NAME(ip32), +} WASI_NN_NAME(tensor_type); #endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */ // The tensor data. @@ -70,7 +99,14 @@ typedef enum { fp16 = 0, fp32, up8, ip32 } tensor_type; // 4-byte f32 elements would have a data array of length 16). Naturally, this // representation requires some knowledge of how to lay out data in // memory--e.g., using row-major ordering--and could perhaps be improved. -typedef uint8_t *tensor_data; +#if !defined(__wasm__) || WASM_ENABLE_WASI_EPHEMERAL_NN != 0 +typedef struct { + uint8_t *buf; + uint32_t size; +} WASI_NN_NAME(tensor_data); +#else +typedef uint8_t *WASI_NN_NAME(tensor_data); +#endif // A tensor. typedef struct { @@ -78,16 +114,16 @@ typedef struct { // represent a tensor containing a single value, use `[1]` for the tensor // dimensions. #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 && defined(__wasm__) - tensor_dimensions dimensions; + WASI_NN_NAME(tensor_dimensions) dimensions; #else - tensor_dimensions *dimensions; + WASI_NN_NAME(tensor_dimensions) * dimensions; #endif // Describe the type of element in the tensor (e.g., f32). uint8_t type; uint8_t _pad[3]; // Contains the tensor data. - tensor_data data; -} tensor; + WASI_NN_NAME(tensor_data) data; +} WASI_NN_NAME(tensor); /** * GRAPH @@ -102,15 +138,15 @@ typedef struct { typedef struct { uint8_t *buf; uint32_t size; -} graph_builder; +} WASI_NN_NAME(graph_builder); typedef struct { - graph_builder *buf; + WASI_NN_NAME(graph_builder) * buf; uint32_t size; -} graph_builder_array; +} WASI_NN_NAME(graph_builder_array); // An execution graph for performing inference (i.e., a model). 
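/*
 * Editorial sketch, not from the patch: with the host-side tensor_data now
 * carrying an explicit size, a backend can bounds-check its copies instead of
 * trusting the caller. A hypothetical helper mirroring the pattern the
 * OpenVINO backend's get_output() uses later in this patch:
 */
#include <string.h>

static wasi_nn_error
copy_to_output(tensor_data *out, const void *src, uint32_t nbytes)
{
    if (nbytes > out->size)
        return too_large; /* caller-provided buffer is too small */
    memcpy(out->buf, src, nbytes);
    return success;
}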
-typedef uint32_t graph; +typedef uint32_t WASI_NN_NAME(graph); // sync up with // https://github.com/WebAssembly/wasi-nn/blob/main/wit/wasi-nn.wit#L75 @@ -118,23 +154,25 @@ typedef uint32_t graph; // various backends that encode (i.e., serialize) their graph IR with different // formats. typedef enum { - openvino = 0, - onnx, - tensorflow, - pytorch, - tensorflowlite, - ggml, - autodetect, - unknown_backend, -} graph_encoding; + WASI_NN_ENCODING_NAME(openvino) = 0, + WASI_NN_ENCODING_NAME(onnx), + WASI_NN_ENCODING_NAME(tensorflow), + WASI_NN_ENCODING_NAME(pytorch), + WASI_NN_ENCODING_NAME(tensorflowlite), + WASI_NN_ENCODING_NAME(ggml), + WASI_NN_ENCODING_NAME(autodetect), + WASI_NN_ENCODING_NAME(unknown_backend), +} WASI_NN_NAME(graph_encoding); // Define where the graph should be executed. -typedef enum execution_target { cpu = 0, gpu, tpu } execution_target; +typedef enum WASI_NN_NAME(execution_target) { + WASI_NN_TARGET_NAME(cpu) = 0, + WASI_NN_TARGET_NAME(gpu), + WASI_NN_TARGET_NAME(tpu), +} WASI_NN_NAME(execution_target); // Bind a `graph` to the input and output tensors for an inference. -typedef uint32_t graph_execution_context; - -/* Definition of 'wasi_nn.h' structs in WASM app format (using offset) */ +typedef uint32_t WASI_NN_NAME(graph_execution_context); #ifdef __cplusplus } diff --git a/core/iwasm/libraries/wasi-nn/src/utils/wasi_nn_app_native.c b/core/iwasm/libraries/wasi-nn/src/utils/wasi_nn_app_native.c index 6e91c949b..4d56fed93 100644 --- a/core/iwasm/libraries/wasi-nn/src/utils/wasi_nn_app_native.c +++ b/core/iwasm/libraries/wasi-nn/src/utils/wasi_nn_app_native.c @@ -99,7 +99,8 @@ graph_builder_array_app_native(wasm_module_inst_t instance, static wasi_nn_error tensor_data_app_native(wasm_module_inst_t instance, uint32_t total_elements, - tensor_wasm *input_tensor_wasm, tensor_data *data) + tensor_wasm *input_tensor_wasm, void **data, + uint32_t *size) { #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 #define data_size input_tensor_wasm->data_size @@ -113,8 +114,9 @@ tensor_data_app_native(wasm_module_inst_t instance, uint32_t total_elements, NN_ERR_PRINTF("input_tensor_wasm->data_offset is invalid"); return invalid_argument; } - *data = (tensor_data)wasm_runtime_addr_app_to_native( + *data = wasm_runtime_addr_app_to_native( instance, (uint64)input_tensor_wasm->data_offset); + *size = data_size; return success; #undef data_size } @@ -188,16 +190,19 @@ tensor_app_native(wasm_module_inst_t instance, tensor_wasm *input_tensor_wasm, NN_DBG_PRINTF("Tensor type: %d", input_tensor_wasm->type); NN_DBG_PRINTF("Total number of elements: %d", total_elements); - tensor_data data = NULL; + void *data = NULL; + uint32_t datasize; if (success - != (res = tensor_data_app_native(instance, total_elements, - input_tensor_wasm, &data))) { + != (res = + tensor_data_app_native(instance, total_elements, + input_tensor_wasm, &data, &datasize))) { wasm_runtime_free(dimensions); return res; } input_tensor->type = input_tensor_wasm->type; input_tensor->dimensions = dimensions; - input_tensor->data = data; + input_tensor->data.buf = data; + input_tensor->data.size = datasize; return success; } diff --git a/core/iwasm/libraries/wasi-nn/src/wasi_nn.c b/core/iwasm/libraries/wasi-nn/src/wasi_nn.c index 1a8ad03c6..5c865c5be 100644 --- a/core/iwasm/libraries/wasi-nn/src/wasi_nn.c +++ b/core/iwasm/libraries/wasi-nn/src/wasi_nn.c @@ -20,6 +20,10 @@ #include "wasi_nn_types.h" #include "wasm_export.h" +#if WASM_ENABLE_WASI_EPHEMERAL_NN == 0 +#warning You are using "wasi_nn", which is a legacy WAMR-specific ABI. 
It's deprecated and will likely be removed in future versions of WAMR. Please use "wasi_ephemeral_nn" instead. (For a WASM module, use the wasi_ephemeral_nn.h header instead. For the runtime configurations, enable WASM_ENABLE_WASI_EPHEMERAL_NN/WAMR_BUILD_WASI_EPHEMERAL_NN.) +#endif + #define HASHMAP_INITIAL_SIZE 20 #if defined(__APPLE__) #define LIB_EXTENTION ".dylib" @@ -51,65 +55,36 @@ struct backends_api_functions { NN_ERR_PRINTF("Error %s() -> %d", #func, wasi_error); \ } while (0) -/* HashMap utils */ -static HashMap *hashmap; - -static uint32 -hash_func(const void *key) -{ - // fnv1a_hash - const uint32 FNV_PRIME = 16777619; - const uint32 FNV_OFFSET_BASIS = 2166136261U; - - uint32 hash = FNV_OFFSET_BASIS; - const unsigned char *bytes = (const unsigned char *)key; - - for (size_t i = 0; i < sizeof(uintptr_t); ++i) { - hash ^= bytes[i]; - hash *= FNV_PRIME; - } - - return hash; -} - -static bool -key_equal_func(void *key1, void *key2) -{ - return key1 == key2; -} - -static void -key_destroy_func(void *key1) -{ - /* key type is wasm_module_inst_t*. do nothing */ -} +static void *wasi_nn_key; static void wasi_nn_ctx_destroy(WASINNContext *wasi_nn_ctx) { - NN_DBG_PRINTF("[WASI NN] DEINIT..."); - if (wasi_nn_ctx == NULL) { - NN_ERR_PRINTF( - "Error when deallocating memory. WASI-NN context is NULL"); return; } + NN_DBG_PRINTF("[WASI NN] DEINIT..."); NN_DBG_PRINTF("Freeing wasi-nn"); NN_DBG_PRINTF("-> is_model_loaded: %d", wasi_nn_ctx->is_model_loaded); NN_DBG_PRINTF("-> current_encoding: %d", wasi_nn_ctx->backend); - /* deinit() the backend */ - wasi_nn_error res; - call_wasi_nn_func(wasi_nn_ctx->backend, deinit, res, - wasi_nn_ctx->backend_ctx); + bh_assert(!wasi_nn_ctx->busy); + /* deinit() the backend */ + if (wasi_nn_ctx->is_backend_ctx_initialized) { + wasi_nn_error res; + call_wasi_nn_func(wasi_nn_ctx->backend, deinit, res, + wasi_nn_ctx->backend_ctx); + } + + os_mutex_destroy(&wasi_nn_ctx->lock); wasm_runtime_free(wasi_nn_ctx); } static void -value_destroy_func(void *value) +dtor(wasm_module_inst_t inst, void *ctx) { - wasi_nn_ctx_destroy((WASINNContext *)value); + wasi_nn_ctx_destroy(ctx); } bool @@ -122,12 +97,9 @@ wasi_nn_initialize() return false; } - // hashmap { instance: wasi_nn_ctx } - hashmap = bh_hash_map_create(HASHMAP_INITIAL_SIZE, true, hash_func, - key_equal_func, key_destroy_func, - value_destroy_func); - if (hashmap == NULL) { - NN_ERR_PRINTF("Error while initializing hashmap"); + wasi_nn_key = wasm_runtime_create_context_key(dtor); + if (wasi_nn_key == NULL) { + NN_ERR_PRINTF("Failed to create context key"); os_mutex_destroy(&wasi_nn_lock); return false; } @@ -148,6 +120,11 @@ wasi_nn_initialize_context() } memset(wasi_nn_ctx, 0, sizeof(WASINNContext)); + if (os_mutex_init(&wasi_nn_ctx->lock)) { + NN_ERR_PRINTF("Error when initializing a lock for WASI-NN context"); + wasm_runtime_free(wasi_nn_ctx); + return NULL; + } return wasi_nn_ctx; } @@ -156,29 +133,59 @@ static WASINNContext * wasm_runtime_get_wasi_nn_ctx(wasm_module_inst_t instance) { WASINNContext *wasi_nn_ctx = - (WASINNContext *)bh_hash_map_find(hashmap, (void *)instance); + wasm_runtime_get_context(instance, wasi_nn_key); if (wasi_nn_ctx == NULL) { - wasi_nn_ctx = wasi_nn_initialize_context(); - if (wasi_nn_ctx == NULL) - return NULL; - - bool ok = - bh_hash_map_insert(hashmap, (void *)instance, (void *)wasi_nn_ctx); - if (!ok) { - NN_ERR_PRINTF("Error while storing context"); - wasi_nn_ctx_destroy(wasi_nn_ctx); + WASINNContext *newctx = wasi_nn_initialize_context(); + if (newctx == NULL) return NULL; +
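        /*
         * Editorial note on the block below: it re-checks under wasi_nn_lock
         * whether another thread attached a context to this module instance
         * while newctx was being allocated; if so, the freshly allocated
         * context loses the race and is destroyed right after the unlock.
         */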
os_mutex_lock(&wasi_nn_lock); + wasi_nn_ctx = wasm_runtime_get_context(instance, wasi_nn_key); + if (wasi_nn_ctx == NULL) { + wasm_runtime_set_context_spread(instance, wasi_nn_key, newctx); + wasi_nn_ctx = newctx; + newctx = NULL; + } + os_mutex_unlock(&wasi_nn_lock); + if (newctx != NULL) { + wasi_nn_ctx_destroy(newctx); } } - return wasi_nn_ctx; } +static WASINNContext * +lock_ctx(wasm_module_inst_t instance) +{ + WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance); + if (wasi_nn_ctx == NULL) { + return NULL; + } + os_mutex_lock(&wasi_nn_ctx->lock); + if (wasi_nn_ctx->busy) { + os_mutex_unlock(&wasi_nn_ctx->lock); + return NULL; + } + wasi_nn_ctx->busy = true; + os_mutex_unlock(&wasi_nn_ctx->lock); + return wasi_nn_ctx; +} + +static void +unlock_ctx(WASINNContext *wasi_nn_ctx) +{ + if (wasi_nn_ctx == NULL) { + return; + } + os_mutex_lock(&wasi_nn_ctx->lock); + bh_assert(wasi_nn_ctx->busy); + wasi_nn_ctx->busy = false; + os_mutex_unlock(&wasi_nn_ctx->lock); +} + void wasi_nn_destroy() { - // destroy hashmap will destroy keys and values - bh_hash_map_destroy(hashmap); + wasm_runtime_destroy_context_key(wasi_nn_key); // close backends' libraries and registered functions for (unsigned i = 0; i < sizeof(lookup) / sizeof(lookup[0]); i++) { @@ -397,6 +404,41 @@ detect_and_load_backend(graph_encoding backend_hint, return ret; } +static wasi_nn_error +ensure_backend(wasm_module_inst_t instance, graph_encoding encoding, + WASINNContext *wasi_nn_ctx) +{ + wasi_nn_error res; + + graph_encoding loaded_backend = autodetect; + if (!detect_and_load_backend(encoding, &loaded_backend)) { + res = invalid_encoding; + NN_ERR_PRINTF("load backend failed"); + goto fail; + } + + if (wasi_nn_ctx->is_backend_ctx_initialized) { + if (wasi_nn_ctx->backend != loaded_backend) { + res = unsupported_operation; + goto fail; + } + } + else { + wasi_nn_ctx->backend = loaded_backend; + + /* init() the backend */ + call_wasi_nn_func(wasi_nn_ctx->backend, init, res, + &wasi_nn_ctx->backend_ctx); + if (res != success) + goto fail; + + wasi_nn_ctx->is_backend_ctx_initialized = true; + } + return success; +fail: + return res; +} + /* WASI-NN implementation */ #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 @@ -410,6 +452,8 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder, graph_encoding encoding, execution_target target, graph *g) #endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */ { + wasi_nn_error res; + NN_DBG_PRINTF("[WASI NN] LOAD [encoding=%d, target=%d]...", encoding, target); @@ -417,18 +461,23 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder, if (!instance) return runtime_error; - wasi_nn_error res; + WASINNContext *wasi_nn_ctx = lock_ctx(instance); + if (wasi_nn_ctx == NULL) { + res = busy; + goto fail; + } + graph_builder_array builder_native = { 0 }; #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 if (success != (res = graph_builder_array_app_native( instance, builder, builder_wasm_size, &builder_native))) - return res; + goto fail; #else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */ if (success != (res = graph_builder_array_app_native(instance, builder, &builder_native))) - return res; + goto fail; #endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */ if (!wasm_runtime_validate_native_addr(instance, g, @@ -438,19 +487,7 @@ wasi_nn_load(wasm_exec_env_t exec_env, graph_builder_array_wasm *builder, goto fail; } - graph_encoding loaded_backend = autodetect; - if (!detect_and_load_backend(encoding, &loaded_backend)) { - res = invalid_encoding; - NN_ERR_PRINTF("load backend failed"); 
- goto fail; - } - - WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance); - wasi_nn_ctx->backend = loaded_backend; - - /* init() the backend */ - call_wasi_nn_func(wasi_nn_ctx->backend, init, res, - &wasi_nn_ctx->backend_ctx); + res = ensure_backend(instance, encoding, wasi_nn_ctx); if (res != success) goto fail; @@ -465,6 +502,7 @@ fail: // XXX: Free intermediate structure pointers if (builder_native.buf) wasm_runtime_free(builder_native.buf); + unlock_ctx(wasi_nn_ctx); return res; } @@ -473,6 +511,8 @@ wasi_nn_error wasi_nn_load_by_name(wasm_exec_env_t exec_env, char *name, uint32_t name_len, graph *g) { + wasi_nn_error res; + wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env); if (!instance) { return runtime_error; @@ -496,29 +536,26 @@ wasi_nn_load_by_name(wasm_exec_env_t exec_env, char *name, uint32_t name_len, NN_DBG_PRINTF("[WASI NN] LOAD_BY_NAME %s...", name); - graph_encoding loaded_backend = autodetect; - if (!detect_and_load_backend(autodetect, &loaded_backend)) { - NN_ERR_PRINTF("load backend failed"); - return invalid_encoding; + WASINNContext *wasi_nn_ctx = lock_ctx(instance); + if (wasi_nn_ctx == NULL) { + res = busy; + goto fail; } - WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance); - wasi_nn_ctx->backend = loaded_backend; - - wasi_nn_error res; - /* init() the backend */ - call_wasi_nn_func(wasi_nn_ctx->backend, init, res, - &wasi_nn_ctx->backend_ctx); + res = ensure_backend(instance, autodetect, wasi_nn_ctx); if (res != success) - return res; + goto fail; call_wasi_nn_func(wasi_nn_ctx->backend, load_by_name, res, wasi_nn_ctx->backend_ctx, name, name_len, g); if (res != success) - return res; + goto fail; wasi_nn_ctx->is_model_loaded = true; - return success; + res = success; +fail: + unlock_ctx(wasi_nn_ctx); + return res; } wasi_nn_error @@ -526,6 +563,8 @@ wasi_nn_load_by_name_with_config(wasm_exec_env_t exec_env, char *name, int32_t name_len, char *config, int32_t config_len, graph *g) { + wasi_nn_error res; + wasm_module_inst_t instance = wasm_runtime_get_module_inst(exec_env); if (!instance) { return runtime_error; @@ -554,30 +593,28 @@ wasi_nn_load_by_name_with_config(wasm_exec_env_t exec_env, char *name, NN_DBG_PRINTF("[WASI NN] LOAD_BY_NAME_WITH_CONFIG %s %s...", name, config); - graph_encoding loaded_backend = autodetect; - if (!detect_and_load_backend(autodetect, &loaded_backend)) { - NN_ERR_PRINTF("load backend failed"); - return invalid_encoding; + WASINNContext *wasi_nn_ctx = lock_ctx(instance); + if (wasi_nn_ctx == NULL) { + res = busy; + goto fail; } - WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance); - wasi_nn_ctx->backend = loaded_backend; - - wasi_nn_error res; - /* init() the backend */ - call_wasi_nn_func(wasi_nn_ctx->backend, init, res, - &wasi_nn_ctx->backend_ctx); + res = ensure_backend(instance, autodetect, wasi_nn_ctx); if (res != success) - return res; + goto fail; + ; call_wasi_nn_func(wasi_nn_ctx->backend, load_by_name_with_config, res, wasi_nn_ctx->backend_ctx, name, name_len, config, config_len, g); if (res != success) - return res; + goto fail; wasi_nn_ctx->is_model_loaded = true; - return success; + res = success; +fail: + unlock_ctx(wasi_nn_ctx); + return res; } wasi_nn_error @@ -591,20 +628,27 @@ wasi_nn_init_execution_context(wasm_exec_env_t exec_env, graph g, return runtime_error; } - WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance); - wasi_nn_error res; + WASINNContext *wasi_nn_ctx = lock_ctx(instance); + if (wasi_nn_ctx == NULL) { + res = busy; + 
goto fail; + } + if (success != (res = is_model_initialized(wasi_nn_ctx))) - return res; + goto fail; if (!wasm_runtime_validate_native_addr( instance, ctx, (uint64)sizeof(graph_execution_context))) { NN_ERR_PRINTF("ctx is invalid"); - return invalid_argument; + res = invalid_argument; + goto fail; } call_wasi_nn_func(wasi_nn_ctx->backend, init_execution_context, res, wasi_nn_ctx->backend_ctx, g, ctx); +fail: + unlock_ctx(wasi_nn_ctx); return res; } @@ -619,17 +663,21 @@ wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx, return runtime_error; } - WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance); - wasi_nn_error res; + WASINNContext *wasi_nn_ctx = lock_ctx(instance); + if (wasi_nn_ctx == NULL) { + res = busy; + goto fail; + } + if (success != (res = is_model_initialized(wasi_nn_ctx))) - return res; + goto fail; tensor input_tensor_native = { 0 }; if (success != (res = tensor_app_native(instance, input_tensor, &input_tensor_native))) - return res; + goto fail; call_wasi_nn_func(wasi_nn_ctx->backend, set_input, res, wasi_nn_ctx->backend_ctx, ctx, index, @@ -637,7 +685,8 @@ wasi_nn_set_input(wasm_exec_env_t exec_env, graph_execution_context ctx, // XXX: Free intermediate structure pointers if (input_tensor_native.dimensions) wasm_runtime_free(input_tensor_native.dimensions); - +fail: + unlock_ctx(wasi_nn_ctx); return res; } @@ -651,26 +700,32 @@ wasi_nn_compute(wasm_exec_env_t exec_env, graph_execution_context ctx) return runtime_error; } - WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance); - wasi_nn_error res; + WASINNContext *wasi_nn_ctx = lock_ctx(instance); + if (wasi_nn_ctx == NULL) { + res = busy; + goto fail; + } + if (success != (res = is_model_initialized(wasi_nn_ctx))) - return res; + goto fail; call_wasi_nn_func(wasi_nn_ctx->backend, compute, res, wasi_nn_ctx->backend_ctx, ctx); +fail: + unlock_ctx(wasi_nn_ctx); return res; } #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 wasi_nn_error wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx, - uint32_t index, tensor_data output_tensor, + uint32_t index, void *output_tensor, uint32_t output_tensor_len, uint32_t *output_tensor_size) #else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */ wasi_nn_error wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx, - uint32_t index, tensor_data output_tensor, + uint32_t index, void *output_tensor, uint32_t *output_tensor_size) #endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */ { @@ -681,28 +736,36 @@ wasi_nn_get_output(wasm_exec_env_t exec_env, graph_execution_context ctx, return runtime_error; } - WASINNContext *wasi_nn_ctx = wasm_runtime_get_wasi_nn_ctx(instance); - wasi_nn_error res; + WASINNContext *wasi_nn_ctx = lock_ctx(instance); + if (wasi_nn_ctx == NULL) { + res = busy; + goto fail; + } + if (success != (res = is_model_initialized(wasi_nn_ctx))) - return res; + goto fail; if (!wasm_runtime_validate_native_addr(instance, output_tensor_size, (uint64)sizeof(uint32_t))) { NN_ERR_PRINTF("output_tensor_size is invalid"); - return invalid_argument; + res = invalid_argument; + goto fail; } + tensor_data tensor = { + .buf = output_tensor, #if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 + .size = output_tensor_len, +#else + .size = *output_tensor_size, +#endif + }; call_wasi_nn_func(wasi_nn_ctx->backend, get_output, res, - wasi_nn_ctx->backend_ctx, ctx, index, output_tensor, - &output_tensor_len); - *output_tensor_size = output_tensor_len; -#else /* WASM_ENABLE_WASI_EPHEMERAL_NN == 0 */ - call_wasi_nn_func(wasi_nn_ctx->backend, 
get_output, res, - wasi_nn_ctx->backend_ctx, ctx, index, output_tensor, + wasi_nn_ctx->backend_ctx, ctx, index, &tensor, output_tensor_size); -#endif /* WASM_ENABLE_WASI_EPHEMERAL_NN != 0 */ +fail: + unlock_ctx(wasi_nn_ctx); return res; } diff --git a/core/iwasm/libraries/wasi-nn/src/wasi_nn_llamacpp.c b/core/iwasm/libraries/wasi-nn/src/wasi_nn_llamacpp.c index 23c867b0a..572a5bf33 100644 --- a/core/iwasm/libraries/wasi-nn/src/wasi_nn_llamacpp.c +++ b/core/iwasm/libraries/wasi-nn/src/wasi_nn_llamacpp.c @@ -2,6 +2,9 @@ * Copyright (C) 2019 Intel Corporation. All rights reserved. * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception */ + +#include + #include "wasi_nn_types.h" #include "utils/logger.h" #include "llama.h" @@ -286,7 +289,7 @@ deinit_backend(void *ctx) llama_backend_free(); - os_free(backend_ctx); + free(backend_ctx); return success; } @@ -381,18 +384,18 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index, tensor *wasi_nn_tensor) { struct LlamaContext *backend_ctx = (struct LlamaContext *)ctx; - // tensor->data is the prompt string. ends with \0 - char *prompt_text = (char *)wasi_nn_tensor->data; + // tensor->data is the prompt string. + char *prompt_text = (char *)wasi_nn_tensor->data.buf; + uint32_t prompt_text_len = wasi_nn_tensor->data.size; #ifndef NDEBUG NN_DBG_PRINTF("--------------------------------------------------"); - NN_DBG_PRINTF("prompt_text: %s", prompt_text); + NN_DBG_PRINTF("prompt_text: %.*s", (int)prompt_text_len, prompt_text); NN_DBG_PRINTF("--------------------------------------------------"); #endif // tokenize the prompt uint32_t n_token_max = llama_n_ctx(backend_ctx->ctx); - uint32_t prompt_text_len = strlen(prompt_text); if (backend_ctx->prompt == NULL) { backend_ctx->prompt = calloc(n_token_max, sizeof(llama_token)); @@ -477,7 +480,6 @@ compute(void *ctx, graph_execution_context exec_ctx) // main loop int32_t n_cur = batch.n_tokens; - int n_decode = 0; int32_t n_vocab = llama_n_vocab(backend_ctx->model); llama_token_data *candidates = NULL; @@ -528,7 +530,6 @@ compute(void *ctx, graph_execution_context exec_ctx) // push this new token for next evaluation llama_batch_add(&batch, new_token_id, n_cur, seq_ids, sizeof(seq_ids) / sizeof(seq_ids[0]), true); - n_decode++; n_cur++; if (llama_decode(backend_ctx->ctx, batch) != 0) { @@ -549,7 +550,7 @@ fail: __attribute__((visibility("default"))) wasi_nn_error get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index, - tensor_data output_tensor, uint32_t *output_tensor_size) + tensor_data *output_tensor, uint32_t *output_tensor_size) { struct LlamaContext *backend_ctx = (struct LlamaContext *)ctx; @@ -568,7 +569,7 @@ get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index, printf("%s\n", output_metadata); } - memcpy(output_tensor, output_metadata, strlen(output_metadata)); + memcpy(output_tensor->buf, output_metadata, strlen(output_metadata)); *output_tensor_size = strlen(output_metadata); return success; } @@ -588,7 +589,7 @@ get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index, printf("%s", buf); } - memcpy(output_tensor + end_pos, buf, strlen(buf)); + memcpy(output_tensor->buf + end_pos, buf, strlen(buf)); end_pos += strlen(buf); } diff --git a/core/iwasm/libraries/wasi-nn/src/wasi_nn_openvino.c b/core/iwasm/libraries/wasi-nn/src/wasi_nn_openvino.c index dcfec1ccb..ae3a8572c 100644 --- a/core/iwasm/libraries/wasi-nn/src/wasi_nn_openvino.c +++ b/core/iwasm/libraries/wasi-nn/src/wasi_nn_openvino.c @@ -26,17 +26,25 @@ * from 4. to 6. 
is the Inference Loop */ +/* these limits are arbitrary. */ +#define MAX_GRAPHS 4 +#define MAX_EXECUTION_CONTEXTS 4 + typedef struct { ov_core_t *core; /* keep input model files */ - void *weight_data; - ov_tensor_t *weights_tensor; - ov_model_t *model; - /* add prepostprocess */ - ov_model_t *new_model; - ov_compiled_model_t *compiled_model; - ov_infer_request_t *infer_request; - ov_tensor_t *input_tensor; + struct OpenVINOGraph { + void *weight_data; + ov_tensor_t *weights_tensor; + ov_model_t *model; + ov_compiled_model_t *compiled_model; + } graphs[MAX_GRAPHS]; + struct OpenVINOExecutionContext { + struct OpenVINOGraph *graph; + ov_infer_request_t *infer_request; + } execution_contexts[MAX_EXECUTION_CONTEXTS]; + unsigned int n_graphs; + unsigned int n_execution_contexts; } OpenVINOContext; /* @@ -181,6 +189,29 @@ wasi_nn_tensor_type_to_openvino_element_type(tensor_type wasi_nn_type) return UNDEFINED; } +static void +free_graph(struct OpenVINOGraph *graph) +{ + if (graph->weight_data) + os_free(graph->weight_data); + + if (graph->weights_tensor) + ov_tensor_free(graph->weights_tensor); + + if (graph->model) + ov_model_free(graph->model); + + if (graph->compiled_model) + ov_compiled_model_free(graph->compiled_model); +} + +static void +free_execution_context(struct OpenVINOExecutionContext *c) +{ + if (c->infer_request) + ov_infer_request_free(c->infer_request); +} + static wasi_nn_error uint32_array_to_int64_array(uint32_t array_size, uint32_t *src, int64_t **dst) { @@ -200,6 +231,8 @@ load(void *ctx, graph_builder_array *builder, graph_encoding encoding, execution_target target, graph *g) { OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx; + struct OpenVINOGraph *graph; + unsigned int graph_idx; wasi_nn_error ret = unsupported_operation; if (encoding != openvino) { @@ -225,39 +258,47 @@ load(void *ctx, graph_builder_array *builder, graph_encoding encoding, graph_builder xml = builder->buf[0]; graph_builder weight = builder->buf[1]; - /* if xml is a String with a model in IR */ - if (!(xml.buf[xml.size] == '\0' && xml.buf[xml.size - 1] != '\0')) { - NN_ERR_PRINTF("Invalid xml string."); - return invalid_argument; + graph_idx = ov_ctx->n_graphs; + if (graph_idx >= MAX_GRAPHS) { + return runtime_error; } + graph = &ov_ctx->graphs[graph_idx]; + memset(graph, 0, sizeof(*graph)); /* transfer weight to an ov tensor */ { - ov_ctx->weight_data = os_malloc(weight.size); - if (!ov_ctx->weight_data) + graph->weight_data = os_malloc(weight.size); + if (!graph->weight_data) goto fail; - memcpy(ov_ctx->weight_data, weight.buf, weight.size); + memcpy(graph->weight_data, weight.buf, weight.size); ov_element_type_e type = U8; int64_t dims[1] = { weight.size }; ov_shape_t shape = { 1, dims }; CHECK_OV_STATUS(ov_tensor_create_from_host_ptr(type, shape, - ov_ctx->weight_data, - &ov_ctx->weights_tensor), + graph->weight_data, + &graph->weights_tensor), ret); } /* load model from buffer */ CHECK_OV_STATUS(ov_core_read_model_from_memory_buffer( ov_ctx->core, (char *)xml.buf, xml.size, - ov_ctx->weights_tensor, &ov_ctx->model), + graph->weights_tensor, &graph->model), ret); #ifndef NDEBUG - print_model_input_output_info(ov_ctx->model); + print_model_input_output_info(graph->model); #endif - ret = success; + CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, graph->model, "CPU", 0, + &graph->compiled_model), + ret); + + *g = graph_idx; + ov_ctx->n_graphs++; + return success; fail: + free_graph(graph); return ret; } @@ -265,20 +306,62 @@ __attribute__((visibility("default"))) wasi_nn_error load_by_name(void 
*ctx, const char *filename, uint32_t filename_len, graph *g) { OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx; + struct OpenVINOGraph *graph; + unsigned int graph_idx; wasi_nn_error ret = unsupported_operation; - CHECK_OV_STATUS( - ov_core_read_model(ov_ctx->core, filename, NULL, &ov_ctx->model), ret); + graph_idx = ov_ctx->n_graphs; + if (graph_idx >= MAX_GRAPHS) { + return runtime_error; + } + graph = &ov_ctx->graphs[graph_idx]; - ret = success; + memset(graph, 0, sizeof(*graph)); + CHECK_OV_STATUS( + ov_core_read_model(ov_ctx->core, filename, NULL, &graph->model), ret); + + CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, graph->model, "CPU", 0, + &graph->compiled_model), + ret); + + *g = graph_idx; + ov_ctx->n_graphs++; + return success; fail: + free_graph(graph); return ret; } __attribute__((visibility("default"))) wasi_nn_error init_execution_context(void *ctx, graph g, graph_execution_context *exec_ctx) { + OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx; + struct OpenVINOGraph *graph; + struct OpenVINOExecutionContext *exec; + unsigned int exec_idx; + wasi_nn_error ret; + + if (g >= ov_ctx->n_graphs) + return runtime_error; + graph = &ov_ctx->graphs[g]; + + exec_idx = ov_ctx->n_execution_contexts; + if (exec_idx >= MAX_EXECUTION_CONTEXTS) + return runtime_error; + exec = &ov_ctx->execution_contexts[exec_idx]; + + memset(exec, 0, sizeof(*exec)); + exec->graph = graph; + + CHECK_OV_STATUS(ov_compiled_model_create_infer_request( + graph->compiled_model, &exec->infer_request), + ret); + + *exec_ctx = exec_idx; + ov_ctx->n_execution_contexts++; return success; +fail: + return ret; } __attribute__((visibility("default"))) wasi_nn_error @@ -286,19 +369,15 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index, tensor *wasi_nn_tensor) { OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx; + struct OpenVINOExecutionContext *exec; wasi_nn_error ret = unsupported_operation; ov_shape_t input_shape = { 0 }; + ov_tensor_t *input_tensor = NULL; int64_t *ov_dims = NULL; - ov_preprocess_prepostprocessor_t *ppp = NULL; - ov_preprocess_input_info_t *input_info = NULL; - ov_preprocess_input_tensor_info_t *input_tensor_info = NULL; - ov_layout_t *input_layout = NULL; - ov_preprocess_preprocess_steps_t *input_process = NULL; - ov_preprocess_input_model_info_t *p_input_model = NULL; - ov_layout_t *model_layout = NULL; - ov_preprocess_output_info_t *output_info = NULL; - ov_preprocess_output_tensor_info_t *output_tensor_info = NULL; + if (exec_ctx >= ov_ctx->n_execution_contexts) + return runtime_error; + exec = &ov_ctx->execution_contexts[exec_ctx]; /* wasi_nn_tensor -> ov_tensor */ { @@ -308,17 +387,6 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index, if (ret != success) goto fail; - /* NCHW -> NHWC */ - if (wasi_nn_tensor->dimensions->size == 4 || ov_dims[1] == 3) { - /* N */ - /* H */ - ov_dims[1] = ov_dims[2]; - /* W */ - ov_dims[2] = ov_dims[3]; - /* C */ - ov_dims[3] = 3; - } - CHECK_OV_STATUS(ov_shape_create(wasi_nn_tensor->dimensions->size, ov_dims, &input_shape), ret); @@ -334,101 +402,22 @@ set_input(void *ctx, graph_execution_context exec_ctx, uint32_t index, shape_info); CHECK_OV_STATUS(ov_tensor_create_from_host_ptr(input_type, input_shape, - wasi_nn_tensor->data, - &ov_ctx->input_tensor), + wasi_nn_tensor->data.buf, + &input_tensor), ret); } - /* set preprocess based on wasi_nn_tensor */ - { - CHECK_OV_STATUS( - ov_preprocess_prepostprocessor_create(ov_ctx->model, &ppp), ret); - - /* reuse user' created tensor's info */ - 
CHECK_OV_STATUS(ov_preprocess_prepostprocessor_get_input_info_by_index( - ppp, index, &input_info), - ret); - CHECK_OV_STATUS(ov_preprocess_input_info_get_tensor_info( - input_info, &input_tensor_info), - ret); - CHECK_OV_STATUS(ov_preprocess_input_tensor_info_set_from( - input_tensor_info, ov_ctx->input_tensor), - ret); - /* ! HAS TO BE NHWC. Match previous layout conversion */ - CHECK_OV_STATUS(ov_layout_create("NHWC", &input_layout), ret); - CHECK_OV_STATUS(ov_preprocess_input_tensor_info_set_layout( - input_tensor_info, input_layout), - ret); - - /* add RESIZE */ - CHECK_OV_STATUS(ov_preprocess_input_info_get_preprocess_steps( - input_info, &input_process), - ret); - CHECK_OV_STATUS( - ov_preprocess_preprocess_steps_resize(input_process, RESIZE_LINEAR), - ret); - - /* input model */ - CHECK_OV_STATUS( - ov_preprocess_input_info_get_model_info(input_info, &p_input_model), - ret); - // TODO: what if not? - CHECK_OV_STATUS(ov_layout_create("NCHW", &model_layout), ret); - CHECK_OV_STATUS(ov_preprocess_input_model_info_set_layout(p_input_model, - model_layout), - ret); - - /* output -> F32(possibility) */ - CHECK_OV_STATUS(ov_preprocess_prepostprocessor_get_output_info_by_index( - ppp, index, &output_info), - ret); - CHECK_OV_STATUS(ov_preprocess_output_info_get_tensor_info( - output_info, &output_tensor_info), - ret); - CHECK_OV_STATUS( - ov_preprocess_output_set_element_type(output_tensor_info, F32), - ret); - - CHECK_OV_STATUS( - ov_preprocess_prepostprocessor_build(ppp, &ov_ctx->new_model), ret); - } - - CHECK_OV_STATUS(ov_core_compile_model(ov_ctx->core, ov_ctx->new_model, - "CPU", 0, &ov_ctx->compiled_model), - ret); - - CHECK_OV_STATUS(ov_compiled_model_create_infer_request( - ov_ctx->compiled_model, &ov_ctx->infer_request), - ret); - /* install ov_tensor -> infer_request */ CHECK_OV_STATUS(ov_infer_request_set_input_tensor_by_index( - ov_ctx->infer_request, index, ov_ctx->input_tensor), + exec->infer_request, index, input_tensor), ret); ret = success; - fail: if (ov_dims) os_free(ov_dims); + if (input_tensor) + ov_tensor_free(input_tensor); ov_shape_free(&input_shape); - if (ppp) - ov_preprocess_prepostprocessor_free(ppp); - if (input_info) - ov_preprocess_input_info_free(input_info); - if (input_tensor_info) - ov_preprocess_input_tensor_info_free(input_tensor_info); - if (input_layout) - ov_layout_free(input_layout); - if (input_process) - ov_preprocess_preprocess_steps_free(input_process); - if (p_input_model) - ov_preprocess_input_model_info_free(p_input_model); - if (model_layout) - ov_layout_free(model_layout); - if (output_info) - ov_preprocess_output_info_free(output_info); - if (output_tensor_info) - ov_preprocess_output_tensor_info_free(output_tensor_info); return ret; } @@ -437,9 +426,14 @@ __attribute__((visibility("default"))) wasi_nn_error compute(void *ctx, graph_execution_context exec_ctx) { OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx; + struct OpenVINOExecutionContext *exec; wasi_nn_error ret = unsupported_operation; - CHECK_OV_STATUS(ov_infer_request_infer(ov_ctx->infer_request), ret); + if (exec_ctx >= ov_ctx->n_execution_contexts) + return runtime_error; + exec = &ov_ctx->execution_contexts[exec_ctx]; + + CHECK_OV_STATUS(ov_infer_request_infer(exec->infer_request), ret); ret = success; fail: return ret; @@ -447,23 +441,33 @@ fail: __attribute__((visibility("default"))) wasi_nn_error get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index, - tensor_data output_tensor, uint32_t *output_tensor_size) + tensor_data *output_tensor, uint32_t 
*output_tensor_size) { OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx; + struct OpenVINOExecutionContext *exec; wasi_nn_error ret = unsupported_operation; ov_tensor_t *ov_tensor = NULL; void *data = NULL; size_t byte_size = 0; + if (exec_ctx >= ov_ctx->n_execution_contexts) + return runtime_error; + exec = &ov_ctx->execution_contexts[exec_ctx]; + CHECK_OV_STATUS(ov_infer_request_get_output_tensor_by_index( - ov_ctx->infer_request, index, &ov_tensor), + exec->infer_request, index, &ov_tensor), ret); CHECK_OV_STATUS(ov_tensor_get_byte_size(ov_tensor, &byte_size), ret); + if (byte_size > output_tensor->size) { + ret = too_large; + goto fail; + } + CHECK_OV_STATUS(ov_tensor_data(ov_tensor, &data), ret); - memcpy(output_tensor, data, byte_size); + memcpy(output_tensor->buf, data, byte_size); *output_tensor_size = (uint32_t)byte_size; @@ -517,27 +521,16 @@ __attribute__((visibility("default"))) wasi_nn_error deinit_backend(void *ctx) { OpenVINOContext *ov_ctx = (OpenVINOContext *)ctx; + unsigned int i; if (!ov_ctx) return invalid_argument; - if (ov_ctx->weight_data) - os_free(ov_ctx->weight_data); + for (i = 0; i < ov_ctx->n_execution_contexts; i++) + free_execution_context(&ov_ctx->execution_contexts[i]); - if (ov_ctx->weights_tensor) - ov_tensor_free(ov_ctx->weights_tensor); - - if (ov_ctx->input_tensor) - ov_tensor_free(ov_ctx->input_tensor); - - if (ov_ctx->infer_request) - ov_infer_request_free(ov_ctx->infer_request); - - if (ov_ctx->compiled_model) - ov_compiled_model_free(ov_ctx->compiled_model); - - if (ov_ctx->model) - ov_model_free(ov_ctx->model); + for (i = 0; i < ov_ctx->n_graphs; i++) + free_graph(&ov_ctx->graphs[i]); if (ov_ctx->core) ov_core_free(ov_ctx->core); diff --git a/core/iwasm/libraries/wasi-nn/src/wasi_nn_openvino.h b/core/iwasm/libraries/wasi-nn/src/wasi_nn_openvino.h index ea03a226f..0233568c0 100644 --- a/core/iwasm/libraries/wasi-nn/src/wasi_nn_openvino.h +++ b/core/iwasm/libraries/wasi-nn/src/wasi_nn_openvino.h @@ -24,7 +24,7 @@ compute(void *ctx, graph_execution_context exec_ctx); __attribute__((visibility("default"))) wasi_nn_error get_output(void *ctx, graph_execution_context exec_ctx, uint32_t index, - tensor_data output_tensor, uint32_t *output_tensor_size); + tensor_data *output_tensor, uint32_t *output_tensor_size); __attribute__((visibility("default"))) wasi_nn_error init_backend(void **ctx); @@ -32,4 +32,4 @@ init_backend(void **ctx); __attribute__((visibility("default"))) wasi_nn_error deinit_backend(void *ctx); -#endif /* WASI_NN_OPENVINO_HPP */ \ No newline at end of file +#endif /* WASI_NN_OPENVINO_HPP */ diff --git a/core/iwasm/libraries/wasi-nn/src/wasi_nn_private.h b/core/iwasm/libraries/wasi-nn/src/wasi_nn_private.h index bb56f72fb..466f2cef4 100644 --- a/core/iwasm/libraries/wasi-nn/src/wasi_nn_private.h +++ b/core/iwasm/libraries/wasi-nn/src/wasi_nn_private.h @@ -9,7 +9,12 @@ #include "wasi_nn_types.h" #include "wasm_export.h" +#include "bh_platform.h" + typedef struct { + korp_mutex lock; + bool busy; + bool is_backend_ctx_initialized; bool is_model_loaded; graph_encoding backend; void *backend_ctx; @@ -27,7 +32,7 @@ typedef wasi_nn_error (*SET_INPUT)(void *, graph_execution_context, uint32_t, tensor *); typedef wasi_nn_error (*COMPUTE)(void *, graph_execution_context); typedef wasi_nn_error (*GET_OUTPUT)(void *, graph_execution_context, uint32_t, - tensor_data, uint32_t *); + tensor_data *, uint32_t *); /* wasi-nn general APIs */ typedef wasi_nn_error (*BACKEND_INITIALIZE)(void **); typedef wasi_nn_error (*BACKEND_DEINITIALIZE)(void *); diff 
--git a/core/iwasm/libraries/wasi-nn/src/wasi_nn_tensorflowlite.cpp b/core/iwasm/libraries/wasi-nn/src/wasi_nn_tensorflowlite.cpp index 09e12f0d2..0ca323b70 100644 --- a/core/iwasm/libraries/wasi-nn/src/wasi_nn_tensorflowlite.cpp +++ b/core/iwasm/libraries/wasi-nn/src/wasi_nn_tensorflowlite.cpp @@ -281,6 +281,11 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index, { TFLiteContext *tfl_ctx = (TFLiteContext *)tflite_ctx; + if (input_tensor->type != fp32) { + NN_ERR_PRINTF("unsupported input tensor type %u", input_tensor->type); + return runtime_error; + } + wasi_nn_error res; if (success != (res = is_valid_graph_execution_context(tfl_ctx, ctx))) return res; @@ -319,7 +324,7 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index, index); int size = model_tensor_size * sizeof(float); - bh_memcpy_s(it, size, input_tensor->data, size); + bh_memcpy_s(it, size, input_tensor->data.buf, size); } else { // TODO: Assuming uint8 quantized networks. TfLiteAffineQuantization *quant_info = @@ -337,7 +342,7 @@ set_input(void *tflite_ctx, graph_execution_context ctx, uint32_t index, NN_DBG_PRINTF("input tensor: (scale, offset) = (%f, %f)", scale, zero_point); - float *input_tensor_f = (float *)input_tensor->data; + float *input_tensor_f = (float *)input_tensor->data.buf; for (uint32_t i = 0; i < model_tensor_size; ++i) { it[i] = (uint8_t)(input_tensor_f[i] / scale + zero_point); } @@ -361,7 +366,7 @@ compute(void *tflite_ctx, graph_execution_context ctx) __attribute__((visibility("default"))) wasi_nn_error get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index, - tensor_data output_tensor, uint32_t *output_tensor_size) + tensor_data *output_tensor, uint32_t *output_tensor_size) { TFLiteContext *tfl_ctx = (TFLiteContext *)tflite_ctx; @@ -384,23 +389,34 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index, return too_large; } - uint32_t model_tensor_size = 1; - for (int i = 0; i < (int)tensor->dims->size; ++i) - model_tensor_size *= (uint32_t)tensor->dims->data[i]; - - if (*output_tensor_size < model_tensor_size) { - NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index); - return too_large; - } - if (tensor->quantization.type == kTfLiteNoQuantization) { NN_DBG_PRINTF("No quantization information"); - float *ot = - tfl_ctx->interpreters[ctx].interpreter->typed_output_tensor( - index); - - int size = model_tensor_size * sizeof(float); - bh_memcpy_s(output_tensor, size, ot, size); +#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 + if (output_tensor->size < tensor->bytes) { + NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index); + return too_large; + } +#else + /* + * for now, maintain the bug-to-bug compatibility with the old abi, + * where the size here is the number of fp32, not bytes. + */ + if (output_tensor->size < tensor->bytes / sizeof(float)) { + NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index); + return too_large; + } +#endif + bh_memcpy_s(output_tensor->buf, output_tensor->size, tensor->data.data, + tensor->bytes); +#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 + *output_tensor_size = tensor->bytes; +#else + /* + * for now, maintain the bug-to-bug compatibility with the old abi, + * where the size here is the number of fp32, not bytes. + */ + *output_tensor_size = tensor->bytes / sizeof(float); +#endif } else { // TODO: Assuming uint8 quantized networks. 
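            // Editorial note on the branch below: it dequantizes per-tensor
            // affine uint8 data to fp32 as out[i] = (in[i] - zero_point) * scale,
            // e.g. with scale = 0.5 and zero_point = 128 a raw value of 130
            // becomes (130 - 128) * 0.5 = 1.0f. The reported size is in bytes
            // under the ephemeral ABI and in fp32 element count under the
            // legacy ABI (bug-to-bug compatibility, as noted above).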
TfLiteAffineQuantization *quant_info = @@ -409,6 +425,27 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index, NN_ERR_PRINTF("Quantization per channel is not supported"); return runtime_error; } + + uint32_t model_tensor_size = 1; + for (int i = 0; i < (int)tensor->dims->size; ++i) + model_tensor_size *= (uint32_t)tensor->dims->data[i]; + +#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 + if (output_tensor->size / sizeof(float) < model_tensor_size) { + NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index); + return too_large; + } +#else + /* + * for now, maintain the bug-to-bug compatibility with the old abi, + * where the size here is the number of fp32, not bytes. + */ + if (output_tensor->size < model_tensor_size) { + NN_ERR_PRINTF("Insufficient memory to copy tensor %d", index); + return too_large; + } +#endif + uint8_t *ot = tfl_ctx->interpreters[ctx] .interpreter->typed_output_tensor(index); @@ -417,13 +454,22 @@ get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index, NN_DBG_PRINTF("output tensor: (scale, offset) = (%f, %f)", scale, zero_point); - float *output_tensor_f = (float *)output_tensor; + float *output_tensor_f = (float *)output_tensor->buf; for (uint32_t i = 0; i < model_tensor_size; ++i) { output_tensor_f[i] = (ot[i] - zero_point) * scale; } + +#if WASM_ENABLE_WASI_EPHEMERAL_NN != 0 + *output_tensor_size = model_tensor_size * sizeof(float); +#else + /* + * for now, maintain the bug-to-bug compatibility with the old abi, + * where the size here is the number of fp32, not bytes. + */ + *output_tensor_size = model_tensor_size; +#endif } - *output_tensor_size = model_tensor_size; return success; } diff --git a/core/iwasm/libraries/wasi-nn/src/wasi_nn_tensorflowlite.hpp b/core/iwasm/libraries/wasi-nn/src/wasi_nn_tensorflowlite.hpp index d6e04ab0e..4ded6e407 100644 --- a/core/iwasm/libraries/wasi-nn/src/wasi_nn_tensorflowlite.hpp +++ b/core/iwasm/libraries/wasi-nn/src/wasi_nn_tensorflowlite.hpp @@ -32,7 +32,7 @@ compute(void *tflite_ctx, graph_execution_context ctx); __attribute__((visibility("default"))) wasi_nn_error get_output(void *tflite_ctx, graph_execution_context ctx, uint32_t index, - tensor_data output_tensor, uint32_t *output_tensor_size); + tensor_data *output_tensor, uint32_t *output_tensor_size); __attribute__((visibility("default"))) wasi_nn_error init_backend(void **tflite_ctx); diff --git a/core/iwasm/libraries/wasi-nn/test/build.sh b/core/iwasm/libraries/wasi-nn/test/build.sh index dda400f16..14f6b9e09 100755 --- a/core/iwasm/libraries/wasi-nn/test/build.sh +++ b/core/iwasm/libraries/wasi-nn/test/build.sh @@ -3,6 +3,17 @@ # Copyright (C) 2019 Intel Corporation. All rights reserved. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +# on intel mac, this ends up with a lot of the following error. +# +# AttributeError: 'Sequential' object has no attribute '_get_save_spec'. +# +# * "pip install tensorflow" installs tensorflow 2.16.2 on intel mac. +# (because it's the last version before tf deprecated the target.) 
+# * keras 3 support in the version seems incomplete (thus the error) +# * a workaround: use keras 2 as mentioned in: +# https://github.com/tensorflow/tensorflow/releases/tag/v2.16.1 +# https://blog.tensorflow.org/2024/03/whats-new-in-tensorflow-216.html + CURR_PATH=$(cd $(dirname $0) && pwd -P) # WASM application that uses WASI-NN diff --git a/core/iwasm/libraries/wasi-nn/test/models/mult_outputs.py b/core/iwasm/libraries/wasi-nn/test/models/mult_outputs.py index 98a50129c..8506e1806 100755 --- a/core/iwasm/libraries/wasi-nn/test/models/mult_outputs.py +++ b/core/iwasm/libraries/wasi-nn/test/models/mult_outputs.py @@ -3,7 +3,7 @@ import tensorflow as tf import numpy as np -from keras.layers import AveragePooling2D, Conv2D +from tensorflow.keras.layers import AveragePooling2D, Conv2D from tensorflow.keras import Input, Model diff --git a/core/shared/platform/esp-idf/espidf_platform.c b/core/shared/platform/esp-idf/espidf_platform.c index d5f821d07..045c3a5f6 100644 --- a/core/shared/platform/esp-idf/espidf_platform.c +++ b/core/shared/platform/esp-idf/espidf_platform.c @@ -201,10 +201,20 @@ openat(int fd, const char *pathname, int flags, ...) int ret; char dir_path[DIR_PATH_LEN]; char *full_path; + mode_t mode = 0; + bool has_mode = false; + + if (flags & O_CREAT) { + va_list ap; + va_start(ap, flags); + mode = (mode_t)va_arg(ap, int); + va_end(ap); + has_mode = true; + } ret = fcntl(fd, F_GETPATH, dir_path); if (ret != 0) { - errno = -EINVAL; + errno = EINVAL; return -1; } @@ -214,7 +224,7 @@ openat(int fd, const char *pathname, int flags, ...) return -1; } - new_fd = open(full_path, flags); + new_fd = has_mode ? open(full_path, flags, mode) : open(full_path, flags); free(full_path); return new_fd; diff --git a/core/shared/utils/bh_vector.c b/core/shared/utils/bh_vector.c index 352ce7192..7f0c74b9b 100644 --- a/core/shared/utils/bh_vector.c +++ b/core/shared/utils/bh_vector.c @@ -35,8 +35,8 @@ extend_vector(Vector *vector, size_t length) if (length <= vector->max_elems) return true; - if (length < vector->size_elem * 3 / 2) - length = vector->size_elem * 3 / 2; + if (length < vector->max_elems * 3 / 2) + length = vector->max_elems * 3 / 2; if (!(data = alloc_vector_data(length, vector->size_elem))) { return false; @@ -194,12 +194,12 @@ bh_vector_append(Vector *vector, const void *elem_buf) goto just_return; } - /* make sure one more slot is used by the thread who allocas it */ + /* make sure one more slot is used by the thread who allocates it */ if (vector->lock) os_mutex_lock(vector->lock); if (!extend_vector(vector, vector->num_elems + 1)) { - LOG_ERROR("Append ector elem failed: extend vector failed.\n"); + LOG_ERROR("Append vector elem failed: extend vector failed.\n"); goto unlock_return; } diff --git a/doc/build_wamr.md b/doc/build_wamr.md index d8c64eb50..78e0711ec 100644 --- a/doc/build_wamr.md +++ b/doc/build_wamr.md @@ -102,6 +102,7 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM ### **Enable lib wasi-nn** - **WAMR_BUILD_WASI_NN**=1/0, default to disable if not set +> Note: WAMR_BUILD_WASI_NN without WAMR_BUILD_WASI_EPHEMERAL_NN is deprecated and will likely be removed in future versions of WAMR. Please consider to enable WAMR_BUILD_WASI_EPHEMERAL_NN as well. > Note: See [WASI-NN](../core/iwasm/libraries/wasi-nn) for more details. ### **Enable lib wasi-nn GPU mode** @@ -113,7 +114,7 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM - **WAMR_BUILD_WASI_NN_EXTERNAL_DELEGATE_PATH**=Path to the external delegate shared library (e.g. 
`libedgetpu.so.1.0` for Coral USB) ### **Enable lib wasi-nn with `wasi_ephemeral_nn` module support** -- **WAMR_BUILD_WASI_EPHEMERAL_NN**=1/0, default to disable if not set +- **WAMR_BUILD_WASI_EPHEMERAL_NN**=1/0, default to enable if not set ### **Disable boundary check with hardware trap** - **WAMR_DISABLE_HW_BOUND_CHECK**=1/0, default to enable if not set and supported by platform @@ -364,4 +365,4 @@ For Valgrind, begin with the following configurations and add additional ones as -DWAMR_DISABLE_HW_BOUND_CHECK=0 \ -DWAMR_DISABLE_WRITE_GS_BASE=0 #... -``` \ No newline at end of file +``` diff --git a/language-bindings/python/wamr-api/README.md b/language-bindings/python/wamr-api/README.md index b2ef1e105..58229da42 100644 --- a/language-bindings/python/wamr-api/README.md +++ b/language-bindings/python/wamr-api/README.md @@ -6,7 +6,7 @@ ### Pre-requisites #### Install requirements -Before proceeding it is necessary to make sure your Python environment is correctly configured. To do ths open a terminal session in this directory and perfom the following: +Before proceeding it is necessary to make sure your Python environment is correctly configured. To do this open a terminal session in this directory and perform the following: ```shell diff --git a/language-bindings/python/wasm-c-api/docs/design.md b/language-bindings/python/wasm-c-api/docs/design.md index a952731d2..3478ad021 100644 --- a/language-bindings/python/wasm-c-api/docs/design.md +++ b/language-bindings/python/wasm-c-api/docs/design.md @@ -353,12 +353,12 @@ writable and needs to be copied into a ctype array. #### variable arguments -A function with _variable arugments_ makes it hard to specify the required +A function with _variable arguments_ makes it hard to specify the required argument types for the function prototype. It leaves us one way to call it directly without any arguments type checking. ```python -libc.printf(b"Hello, an int %d, a float %f, a string %s\n", c_int(1), c_doulbe(3.14), "World!") +libc.printf(b"Hello, an int %d, a float %f, a string %s\n", c_int(1), c_double(3.14), "World!") ``` #### Use `c_bool` to represent `wasm_mutability_t ` @@ -373,7 +373,7 @@ libc.printf(b"Hello, an int %d, a float %f, a string %s\n", c_int(1), c_doulbe(3 ### bindgen.py -`bindge.py` is a tool to create WAMR python binding automatically. `binding.py` +`bindgen.py` is a tool to create WAMR python binding automatically. `binding.py` is generated. We should avoid modification on it. Additional helpers should go to `ffi.py`. diff --git a/product-mini/platforms/darwin/CMakeLists.txt b/product-mini/platforms/darwin/CMakeLists.txt index cd7c8bc88..8d1994281 100644 --- a/product-mini/platforms/darwin/CMakeLists.txt +++ b/product-mini/platforms/darwin/CMakeLists.txt @@ -114,6 +114,12 @@ set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS}") set (CMAKE_MACOSX_RPATH True) +# if enable wasi-nn, both wasi-nn-backends and iwasm +# need to use same WAMR (dynamic) libraries +if (WAMR_BUILD_WASI_NN EQUAL 1) + set (BUILD_SHARED_LIBS ON) +endif () + set (WAMR_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR}/../../..) 
include (${WAMR_ROOT_DIR}/build-scripts/runtime_lib.cmake) diff --git a/product-mini/platforms/linux-sgx/enclave-sample/App/pal_api.h b/product-mini/platforms/linux-sgx/enclave-sample/App/pal_api.h index 2db1fbb25..9b8077c04 100644 --- a/product-mini/platforms/linux-sgx/enclave-sample/App/pal_api.h +++ b/product-mini/platforms/linux-sgx/enclave-sample/App/pal_api.h @@ -79,7 +79,7 @@ struct wamr_pal_create_process_args { // Untrusted environment variable array pass to new process. // // The untrusted env vars to the command. And the last element of the array - // must be NULL to indicate the length of array. + // must be NULL to indicate the end of the array. // // Optional field. const char **env; diff --git a/tests/benchmarks/README.md b/tests/benchmarks/README.md index 2112829e0..95d85b1c0 100644 --- a/tests/benchmarks/README.md +++ b/tests/benchmarks/README.md @@ -8,6 +8,8 @@ Refer to the `README.md` under each folder for how to build and run the benchmar ## Install `llvm-profdata` +> PS: the `llvm-profdata` version needs to be the same major version as the llvm libraries used to build wamrc. + The tool `llvm-profdata` is used when running the `test_pgo.sh` script under the benchmark folder. There are two ways to install it: 1. Refer to https://apt.llvm.org/, e.g. in Ubuntu 20.04, add lines below to /etc/apt/source.list @@ -18,19 +20,22 @@ deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal main # 15 deb http://apt.llvm.org/focal/ llvm-toolchain-focal-15 main deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-15 main +# 18 +deb http://apt.llvm.org/focal/ llvm-toolchain-focal-18 main +deb-src http://apt.llvm.org/focal/ llvm-toolchain-focal-18 main ``` Then run `sudo apt update`, `sudo apt install llvm`. And after installing: ```bash cd /usr/bin -sudo ln -s llvm-profdata-15 llvm-profdata +sudo ln -s llvm-profdata-18 llvm-profdata ``` 2. Build manually ```bash -git clone --depth 1 --branch release/15.x https://github.com/llvm/llvm-project.git +git clone --depth 1 --branch release/18.x https://github.com/llvm/llvm-project.git cd llvm-project mkdir build && cd build cmake ../llvm \ diff --git a/tests/benchmarks/coremark/run.sh b/tests/benchmarks/coremark/run.sh index 0d308bb68..b24486346 100755 --- a/tests/benchmarks/coremark/run.sh +++ b/tests/benchmarks/coremark/run.sh @@ -2,6 +2,7 @@ # Copyright (C) 2019 Intel Corporation. All rights reserved. # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +set -e PLATFORM=$(uname -s | tr A-Z a-z) diff --git a/tests/benchmarks/coremark/test_pgo.sh b/tests/benchmarks/coremark/test_pgo.sh index 1c631312e..25bed2c45 100755 --- a/tests/benchmarks/coremark/test_pgo.sh +++ b/tests/benchmarks/coremark/test_pgo.sh @@ -2,6 +2,7 @@ # Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +set -e PLATFORM=$(uname -s | tr A-Z a-z) diff --git a/tests/fuzz/wasm-mutator-fuzz/server/app/main.py b/tests/fuzz/wasm-mutator-fuzz/server/app/main.py index 620625dd3..c1ee3bfc4 100644 --- a/tests/fuzz/wasm-mutator-fuzz/server/app/main.py +++ b/tests/fuzz/wasm-mutator-fuzz/server/app/main.py @@ -72,7 +72,7 @@ def to_json(inst, cls): class Fuzzing(db.Model): - __tablename__ = 'fazzing_task' + __tablename__ = 'fuzzing_task' id = db.Column(db.Integer, autoincrement=True, primary_key=True, nullable=False) repo = db.Column(db.String(200), nullable=False, default='') @@ -96,7 +96,7 @@ class TaskError(db.Model): __tablename__ = 'task_error' id = db.Column(db.Integer, autoincrement=True, primary_key=True, nullable=False) - fazzing_id = db.Column(db.Integer, db.ForeignKey("fazzing_task.id")) + fuzzing_id = db.Column(db.Integer, db.ForeignKey("fuzzing_task.id")) name = db.Column(db.String(200), nullable=False, default='') std_out = db.Column(db.Text, default='') data = db.Column(db.JSON) @@ -119,9 +119,9 @@ def to_data(data): def error_count(data): error = len(TaskError.query.filter( - TaskError.fazzing_id == data.get('id'), TaskError.status.in_([1, 2])).all()) + TaskError.fuzzing_id == data.get('id'), TaskError.status.in_([1, 2])).all()) end_error = len(TaskError.query.filter( - TaskError.fazzing_id == data.get('id'), TaskError.status == 0).all()) + TaskError.fuzzing_id == data.get('id'), TaskError.status == 0).all()) data['error'] = error data['end_error'] = end_error return data @@ -159,11 +159,11 @@ def show_fuzz_list(): id = data.get('id') if id: all_error = TaskError.query.filter( - TaskError.fazzing_id == id).with_entities(TaskError.id, TaskError.fazzing_id, + TaskError.fuzzing_id == id).with_entities(TaskError.id, TaskError.fuzzing_id, TaskError.create_time, TaskError.data, TaskError.name, TaskError.status, TaskError.update_time, TaskError.comment).order_by(TaskError.status.desc(), TaskError.update_time.desc(), TaskError.id.desc()).all() - data_message = [{'id': error['id'], "fuzzing_id": error['fazzing_id'], + data_message = [{'id': error['id'], "fuzzing_id": error['fuzzing_id'], "name": error['name'], "data": error['data'], 'create_time': error['create_time'].strftime('%Y-%m-%d %H:%M:%S'), 'update_time': error['update_time'].strftime('%Y-%m-%d %H:%M:%S'), @@ -204,7 +204,7 @@ def New_fuzzing(): # curd.set_error_status_to(list(map(lambda x: x.id, error_list)), db) # Fuzzing.query.filter_by(id=fuzz.id).delete() fuzz.data = {'error': "Clone repo Error"} - db.commit() + db.session.commit() return jsonify({"status": 0, "result": "", "msg": "Clone repo Error"}) wamr_path_parent = fuzz_dir.parent.parent @@ -277,7 +277,7 @@ def scheduler_run_task(): for fuzz in fuzz_query: all_error = TaskError.query.filter( - TaskError.fazzing_id == fuzz.id).with_entities(TaskError.name).all() + TaskError.fuzzing_id == fuzz.id).with_entities(TaskError.name).all() fuzz_cmd = wasm_mutator_dir / \ 'workspace' / f'build_{fuzz.id}' dir_list = filter(lambda x: x.startswith( @@ -287,7 +287,7 @@ def scheduler_run_task(): for dir in dir_list: cmd = f'cd {fuzz_cmd} && ./wasm_mutator_fuzz {dir}' status, resp = getstatusoutput(cmd) - task_error = TaskError(name=dir, std_out=resp, fazzing_id=fuzz.id, + task_error = TaskError(name=dir, std_out=resp, fuzzing_id=fuzz.id, create_time=datetime.utcnow() + timedelta(hours=8)) db.session.add(task_error) db.session.commit() @@ -312,7 +312,7 @@ def get_error_txt(): return jsonify({"status": 0, "results": [], 'msg': "Error"}) 
error = TaskError.query.get(id) fuzz_cmd = wasm_mutator_dir / \ - 'workspace' / f'build_{error.fazzing_id}' + 'workspace' / f'build_{error.fuzzing_id}' file_cmd = fuzz_cmd / error.name response = send_file(file_cmd, as_attachment=True, @@ -351,7 +351,7 @@ def get_cases_zip(): with ZipFile(memory_file, "w", ZIP_DEFLATED) as zf: for task_error in task_query: fuzz_cmd = wasm_mutator_dir / \ - 'workspace' / f'build_{task_error.fazzing_id}' + 'workspace' / f'build_{task_error.fuzzing_id}' file_cmd = fuzz_cmd / task_error.name zf.write(str(file_cmd), arcname=task_error.name) memory_file.seek(0) @@ -399,7 +399,7 @@ def error_restart(): if run_status: return jsonify({"status": 0, "results": [], 'msg': "There are already tasks in progress"}) task_query = TaskError.query.filter(TaskError.id.in_(id_list)).all() - fuzzing_id = task_query[0].fazzing_id + fuzzing_id = task_query[0].fuzzing_id fuzz_cmd = wasm_mutator_dir / \ 'workspace' / f'build_{fuzzing_id}' restart_cmd = wasm_mutator_dir / \ @@ -412,7 +412,7 @@ def error_restart(): if not Path(restart_cmd / 'wamr').exists(): print('------ error: clone repo not folder exists ------') # fuzz.data = {'error': "Clone repo Error"} - db.commit() + db.session.commit() return jsonify({"status": 0, "result": "", "msg": "Clone repo Error"}) wamr_path_parent = fuzz_dir.parent.parent wamr_path = wamr_path_parent / 'wamr' diff --git a/tests/requirement-engineering/gc-aot/build_spec_interpreter.sh b/tests/requirement-engineering/gc-aot/build_spec_interpreter.sh index 48d6343b1..0ecfe93f4 100755 --- a/tests/requirement-engineering/gc-aot/build_spec_interpreter.sh +++ b/tests/requirement-engineering/gc-aot/build_spec_interpreter.sh @@ -17,7 +17,7 @@ git apply ../../../wamr-test-suites/spec-test-script/gc_ignore_cases.patch # Set OCaml compiler environment eval $(opam config env) -echo "compile the reference intepreter" +echo "compile the reference interpreter" pushd interpreter make -popd \ No newline at end of file +popd diff --git a/tests/standalone/test-running-modes/test_c_embed_api_thoroughly.py b/tests/standalone/test-running-modes/test_c_embed_api_thoroughly.py index 63e871e4a..8dcde7e74 100755 --- a/tests/standalone/test-running-modes/test_c_embed_api_thoroughly.py +++ b/tests/standalone/test-running-modes/test_c_embed_api_thoroughly.py @@ -9,7 +9,7 @@ import os from collections import OrderedDict -def CLI_ARGS_GENREATOR(running_modes_supported: list[str]) -> list[str]: +def CLI_ARGS_GENERATOR(running_modes_supported: list[str]) -> list[str]: res = [] list_2d = [["--default-running-mode={} --module-running-mode={}".format(i, j) for i in running_modes_supported] for j in running_modes_supported] @@ -35,16 +35,16 @@ def main(): ] # Python 3.7+: Dictionary iteration order is guaranteed to be in order of insertion. 
- # just to be safe, using orderreddict + # just to be safe, using OrderedDict # key: value -> compile mode, {"compile_flag": CMake compile flag, "iwasm_cli_args": array of CLI args tested} test_options = OrderedDict({ - "INTERP": {"compile_flag": COMPILE_FLAGS[0], "cli_args": CLI_ARGS_GENREATOR(RUNNING_MODES[:1])}, - "FAST_JIT": {"compile_flag": COMPILE_FLAGS[1], "cli_args": CLI_ARGS_GENREATOR(RUNNING_MODES[:2])}, + "INTERP": {"compile_flag": COMPILE_FLAGS[0], "cli_args": CLI_ARGS_GENERATOR(RUNNING_MODES[:1])}, + "FAST_JIT": {"compile_flag": COMPILE_FLAGS[1], "cli_args": CLI_ARGS_GENERATOR(RUNNING_MODES[:2])}, "LLVM_JIT": {"compile_flag": COMPILE_FLAGS[2], - "cli_args": CLI_ARGS_GENREATOR([RUNNING_MODES[0], RUNNING_MODES[2]])}, - "MULTI_TIER_JIT": {"compile_flag": COMPILE_FLAGS[3], "cli_args": CLI_ARGS_GENREATOR(RUNNING_MODES)}, + "cli_args": CLI_ARGS_GENERATOR([RUNNING_MODES[0], RUNNING_MODES[2]])}, + "MULTI_TIER_JIT": {"compile_flag": COMPILE_FLAGS[3], "cli_args": CLI_ARGS_GENERATOR(RUNNING_MODES)}, "EAGER_JIT_WITH_BOTH_JIT": {"compile_flag": COMPILE_FLAGS[4], - "cli_args": CLI_ARGS_GENREATOR(RUNNING_MODES[:3])} + "cli_args": CLI_ARGS_GENERATOR(RUNNING_MODES[:3])} }) build_cmd = "./build_c_embed.sh \"{build_flag}\"" diff --git a/tests/standalone/test-running-modes/test_iwasm_thoroughly.py b/tests/standalone/test-running-modes/test_iwasm_thoroughly.py index a5af29101..3c631a6d5 100755 --- a/tests/standalone/test-running-modes/test_iwasm_thoroughly.py +++ b/tests/standalone/test-running-modes/test_iwasm_thoroughly.py @@ -29,7 +29,7 @@ def main(): ] # Python 3.7+: Dictionary iteration order is guaranteed to be in order of insertion. - # just to be safe, using orderreddict + # just to be safe, using OrderedDict # key: value -> compile mode, {"compile_flag": CMake compile flag, "iwasm_cli_args": array of CLI args tested} test_options = OrderedDict({ "INTERP": {"compile_flag": COMPILE_FLAGS[0], "iwasm_cli_args": IWASM_CLI_ARGS[:1]}, diff --git a/tests/unit/memory64/memory64_atomic_test.cc b/tests/unit/memory64/memory64_atomic_test.cc index 2f9703890..3b2f679b5 100644 --- a/tests/unit/memory64/memory64_atomic_test.cc +++ b/tests/unit/memory64/memory64_atomic_test.cc @@ -31,7 +31,7 @@ class memory64_atomic_test_suite : public testing::TestWithParam return true; fail: - if (!module) + if (module) wasm_runtime_unload(module); return false; @@ -56,11 +56,13 @@ class memory64_atomic_test_suite : public testing::TestWithParam if (exec_env) wasm_runtime_destroy_exec_env(exec_env); if (module_inst) + wasm_runtime_deinstantiate(module_inst); + if (module) wasm_runtime_unload(module); return false; } - void destory_exec_env() + void destroy_exec_env() { wasm_runtime_destroy_exec_env(exec_env); wasm_runtime_deinstantiate(module_inst); @@ -109,7 +111,7 @@ class memory64_atomic_test_suite : public testing::TestWithParam virtual void TearDown() { if (cleanup) { - destory_exec_env(); + destroy_exec_env(); wasm_runtime_destroy(); cleanup = false; } @@ -339,8 +341,8 @@ TEST_P(memory64_atomic_test_suite, atomic_opcodes_i64_rmw_cmpxchg) PUT_I64_TO_ADDR(wasm_argv + 2, 0x100F0E0D0C0B0A09); // new PUT_I64_TO_ADDR(wasm_argv + 4, 0xdeadcafebeefdead); - ASSERT_TRUE(wasm_runtime_call_wasm(exec_env, func_map["i64_atomic_rmw_cmpxchg"], - 6, wasm_argv)); + ASSERT_TRUE(wasm_runtime_call_wasm( + exec_env, func_map["i64_atomic_rmw_cmpxchg"], 6, wasm_argv)); i64 = 0x100F0E0D0C0B0A09; ASSERT_EQ(i64, GET_U64_FROM_ADDR(wasm_argv)); diff --git a/tests/unit/memory64/memory64_test.cc b/tests/unit/memory64/memory64_test.cc index 
af36f308c..2418d70ad 100644 --- a/tests/unit/memory64/memory64_test.cc +++ b/tests/unit/memory64/memory64_test.cc @@ -31,7 +31,7 @@ class memory64_test_suite : public testing::TestWithParam return true; fail: - if (!module) + if (module) wasm_runtime_unload(module); return false; @@ -56,11 +56,13 @@ class memory64_test_suite : public testing::TestWithParam if (exec_env) wasm_runtime_destroy_exec_env(exec_env); if (module_inst) + wasm_runtime_deinstantiate(module_inst); + if (module) wasm_runtime_unload(module); return false; } - void destory_exec_env() + void destroy_exec_env() { wasm_runtime_destroy_exec_env(exec_env); wasm_runtime_deinstantiate(module_inst); @@ -201,7 +203,7 @@ TEST_P(memory64_test_suite, memory_8GB) i64 = 0xbeefdead; ASSERT_EQ(i64, GET_U64_FROM_ADDR(wasm_argv)); - destory_exec_env(); + destroy_exec_env(); } TEST_P(memory64_test_suite, mem64_from_clang) @@ -228,7 +230,7 @@ TEST_P(memory64_test_suite, mem64_from_clang) i32 = 0x109; ASSERT_EQ(i32, wasm_argv[0]); - destory_exec_env(); + destroy_exec_env(); } INSTANTIATE_TEST_CASE_P(RunningMode, memory64_test_suite, diff --git a/tests/unit/running-modes/wasm_running_modes_test.cc b/tests/unit/running-modes/wasm_running_modes_test.cc index e18e64fb1..5f370dd64 100644 --- a/tests/unit/running-modes/wasm_running_modes_test.cc +++ b/tests/unit/running-modes/wasm_running_modes_test.cc @@ -21,7 +21,7 @@ std::string TEST_WASM1 = "/hello.wasm"; std::string TEST_WASM2 = "/mytest.wasm"; char *WASM_FILE_1; char *WASM_FILE_2; -std::vector running_mode_supportted = { Mode_Interp, +std::vector running_mode_supported = { Mode_Interp, #if WASM_ENABLE_FAST_JIT != 0 Mode_Fast_JIT, #endif @@ -76,7 +76,7 @@ class wasm_running_modes_test_suite : public testing::TestWithParam return true; fail: - if (!module) + if (module) wasm_runtime_unload(module); return false; @@ -101,11 +101,13 @@ class wasm_running_modes_test_suite : public testing::TestWithParam if (exec_env) wasm_runtime_destroy_exec_env(exec_env); if (module_inst) + wasm_runtime_deinstantiate(module_inst); + if (module) wasm_runtime_unload(module); return false; } - void destory_exec_env() + void destroy_exec_env() { wasm_runtime_destroy_exec_env(exec_env); wasm_runtime_deinstantiate(module_inst); @@ -139,7 +141,7 @@ class wasm_running_modes_test_suite : public testing::TestWithParam ASSERT_TRUE(ret); ASSERT_EQ(10, wasm_argv[0]); - destory_exec_env(); + destroy_exec_env(); } void run_wasm_complex(char *filename1, char *filename2, @@ -168,7 +170,7 @@ class wasm_running_modes_test_suite : public testing::TestWithParam ASSERT_TRUE(ret); ASSERT_EQ(10, wasm_argv[0]); - destory_exec_env(); + destroy_exec_env(); /* run wasm file 2 in running_mode */ ret = load_wasm_file(filename2); @@ -184,7 +186,7 @@ class wasm_running_modes_test_suite : public testing::TestWithParam ret = wasm_runtime_call_wasm(exec_env, main, 2, wasm_argv); ASSERT_TRUE(ret); - destory_exec_env(); + destroy_exec_env(); } public: @@ -246,7 +248,7 @@ TEST_F(wasm_running_modes_test_suite, wasm_runtime_is_running_mode_supported) // normal situation ASSERT_EQ(true, wasm_runtime_is_running_mode_supported( static_cast(Mode_Default))); - for (auto running_mode : running_mode_supportted) { + for (auto running_mode : running_mode_supported) { ASSERT_EQ(true, wasm_runtime_is_running_mode_supported(running_mode)); } @@ -264,7 +266,7 @@ TEST_F(wasm_running_modes_test_suite, wasm_runtime_set_default_running_mode) // normal situation: only set up ASSERT_EQ(true, wasm_runtime_set_default_running_mode( static_cast(Mode_Default))); - for 
(auto running_mode : running_mode_supportted) { + for (auto running_mode : running_mode_supported) { ASSERT_EQ(true, wasm_runtime_set_default_running_mode(running_mode)); } @@ -296,13 +298,13 @@ TEST_P(wasm_running_modes_test_suite, wasm_runtime_set_and_get_running_mode_complex) { RunningMode default_running_mode = GetParam(); - for (auto running_mode : running_mode_supportted) { + for (auto running_mode : running_mode_supported) { run_wasm_complex(WASM_FILE_1, WASM_FILE_2, default_running_mode, running_mode); } } INSTANTIATE_TEST_CASE_P(RunningMode, wasm_running_modes_test_suite, - testing::ValuesIn(running_mode_supportted)); + testing::ValuesIn(running_mode_supported)); -} \ No newline at end of file +} diff --git a/tests/wamr-test-suites/test_wamr.sh b/tests/wamr-test-suites/test_wamr.sh index 695a25332..6f498653f 100755 --- a/tests/wamr-test-suites/test_wamr.sh +++ b/tests/wamr-test-suites/test_wamr.sh @@ -367,39 +367,39 @@ function sightglass_test() function setup_wabt() { - WABT_VERSION=1.0.37 + # please sync with .github/actions/install-wasi-sdk-wabt/action.yml + case ${PLATFORM} in + cosmopolitan) + ;; + linux) + WABT_URL=https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-ubuntu-20.04.tar.gz + WABT_VERSION=1.0.37 + ;; + darwin) + WABT_URL=https://github.com/WebAssembly/wabt/releases/download/1.0.36/wabt-1.0.36-macos-12.tar.gz + WABT_VERSION=1.0.36 + ;; + windows) + WABT_URL=https://github.com/WebAssembly/wabt/releases/download/1.0.37/wabt-1.0.37-windows.tar.gz + WABT_VERSION=1.0.37 + ;; + *) + echo "wabt platform for ${PLATFORM} in unknown" + exit 1 + ;; + esac + if [ ${WABT_BINARY_RELEASE} == "YES" ]; then echo "download a binary release and install" local WAT2WASM=${WORK_DIR}/wabt/out/gcc/Release/wat2wasm if [ ! -f ${WAT2WASM} ]; then - case ${PLATFORM} in - cosmopolitan) - ;; - linux) - WABT_PLATFORM=ubuntu-20.04 - ;; - darwin) - WABT_PLATFORM=macos-12 - ;; - windows) - WABT_PLATFORM=windows - ;; - *) - echo "wabt platform for ${PLATFORM} in unknown" - exit 1 - ;; - esac - if [ ! -f /tmp/wabt-${WABT_VERSION}-${WABT_PLATFORM}.tar.gz ]; then - curl -L \ - https://github.com/WebAssembly/wabt/releases/download/${WABT_VERSION}/wabt-${WABT_VERSION}-${WABT_PLATFORM}.tar.gz \ - -o /tmp/wabt-${WABT_VERSION}-${WABT_PLATFORM}.tar.gz - fi + pushd /tmp + wget -O wabt-tar.gz --progress=dot:giga ${WABT_URL} + tar xf wabt-tar.gz + popd - cd /tmp \ - && tar zxf wabt-${WABT_VERSION}-${WABT_PLATFORM}.tar.gz \ - && mkdir -p ${WORK_DIR}/wabt/out/gcc/Release/ \ - && install wabt-${WABT_VERSION}/bin/* ${WORK_DIR}/wabt/out/gcc/Release/ \ - && cd - + mkdir -p ${WORK_DIR}/wabt/out/gcc/Release/ + cp /tmp/wabt-${WABT_VERSION}/bin/* ${WORK_DIR}/wabt/out/gcc/Release/ fi else echo "download source code and compile and install" @@ -420,7 +420,7 @@ function setup_wabt() function compile_reference_interpreter() { - echo "compile the reference intepreter" + echo "compile the reference interpreter" pushd interpreter make if [ $? -ne 0 ] @@ -484,9 +484,9 @@ function spec_test() fi # As of version 1.0.36, wabt is still unable to correctly handle the GC proposal. - # + # # $ $ /opt/wabt-1.0.36/bin/wast2json --enable-all ../spec/test/core/br_if.wast - # + # # ../spec/test/core/br_if.wast:670:26: error: unexpected token "null", expected a numeric index or a name (e.g. 12 or $foo). 
# (func $f (param (ref null $t)) (result funcref) (local.get 0)) # @@ -553,6 +553,9 @@ function spec_test() popd echo $(pwd) + #TODO: remove it when we can assume wabt is installed + # especially for CI Or there is installation script in the project + # that we can rely on setup_wabt ln -sf ${WORK_DIR}/../spec-test-script/all.py . @@ -643,8 +646,8 @@ function spec_test() function wamr_compiler_test() { if [[ $1 != "aot" ]]; then - echo "WAMR compiler tests only support AOT mode" - exit 1 + echo "WAMR compiler tests only support AOT mode, skip $1" + return 0 fi echo "Now start WAMR compiler tests" @@ -899,51 +902,12 @@ function do_execute_in_running_mode() { local RUNNING_MODE="$1" - if [[ ${ENABLE_MULTI_MEMORY} -eq 1 ]]; then - if [[ "${RUNNING_MODE}" != "classic-interp" \ - && "${RUNNING_MODE}" != "aot" ]]; then - echo "support multi-memory in classic-interp mode and aot mode" - return 0 - fi - fi + # filter out uncompatible running mode based on targeting proposal features + # keep alpha order - if [[ ${ENABLE_MEMORY64} -eq 1 ]]; then - if [[ "${RUNNING_MODE}" != "classic-interp" \ - && "${RUNNING_MODE}" != "aot" ]]; then - echo "support memory64(wasm64) in classic-interp mode and aot mode" - return 0 - fi - fi - - if [[ ${ENABLE_MULTI_MODULE} -eq 1 ]]; then - if [[ "${RUNNING_MODE}" != "classic-interp" \ - && "${RUNNING_MODE}" != "fast-interp" \ - && "${RUNNING_MODE}" != "aot" ]]; then - echo "support multi-module in both interp modes" - return 0 - fi - fi - - if [[ ${SGX_OPT} == "--sgx" ]]; then - if [[ "${RUNNING_MODE}" != "classic-interp" \ - && "${RUNNING_MODE}" != "fast-interp" \ - && "${RUNNING_MODE}" != "aot" \ - && "${RUNNING_MODE}" != "fast-jit" ]]; then - echo "support sgx in both interp modes, fast-jit mode and aot mode" - return 0 - fi - fi - - if [[ ${ENABLE_SIMD} -eq 1 ]]; then - if [[ "${RUNNING_MODE}" != "jit" && "${RUNNING_MODE}" != "aot" && "${RUNNING_MODE}" != "fast-interp" ]]; then - echo "support simd in llvm-jit, aot and fast-interp mode" - return 0; - fi - fi - - if [[ ${TARGET} == "X86_32" ]]; then - if [[ "${RUNNING_MODE}" == "jit" || "${RUNNING_MODE}" == "fast-jit" ]]; then - echo "both llvm-jit mode and fast-jit mode do not support X86_32 target" + if [[ ${ENABLE_EH} -eq 1 ]]; then + if [[ "${RUNNING_MODE}" != "classic-interp" ]]; then + echo "support exception handling in classic-interp" return 0; fi fi @@ -958,9 +922,67 @@ function do_execute_in_running_mode() fi fi - if [[ ${ENABLE_EH} -eq 1 ]]; then + if [[ ${ENABLE_MEMORY64} -eq 1 ]]; then + if [[ "${RUNNING_MODE}" != "classic-interp" \ + && "${RUNNING_MODE}" != "aot" ]]; then + echo "support memory64(wasm64) in classic-interp mode and aot mode" + return 0 + fi + fi + + if [[ ${ENABLE_MULTI_MEMORY} -eq 1 ]]; then if [[ "${RUNNING_MODE}" != "classic-interp" ]]; then - echo "support exception handling in classic-interp" + echo "support multi-memory in classic-interp mode mode" + return 0 + fi + fi + + if [[ ${ENABLE_MULTI_MODULE} -eq 1 ]]; then + if [[ "${RUNNING_MODE}" != "classic-interp" \ + && "${RUNNING_MODE}" != "fast-interp" \ + && "${RUNNING_MODE}" != "aot" ]]; then + echo "support multi-module in both interp modes" + return 0 + fi + fi + + if [[ ${ENABLE_SIMD} -eq 1 ]]; then + if [[ "${RUNNING_MODE}" != "jit" && "${RUNNING_MODE}" != "aot" && "${RUNNING_MODE}" != "fast-interp" ]]; then + echo "support simd in llvm-jit, aot and fast-interp mode" + return 0; + fi + fi + + # filter out uncompatible running mode based on SGX support + if [[ ${SGX_OPT} == "--sgx" ]]; then + if [[ "${RUNNING_MODE}" != 
"classic-interp" \ + && "${RUNNING_MODE}" != "fast-interp" \ + && "${RUNNING_MODE}" != "aot" \ + && "${RUNNING_MODE}" != "fast-jit" ]]; then + echo "support sgx in both interp modes, fast-jit mode and aot mode" + return 0 + fi + fi + + # filter out uncompatible running mode based on architecture + if [[ ${TARGET} == "X86_32" ]]; then + if [[ "${RUNNING_MODE}" == "jit" || "${RUNNING_MODE}" == "fast-jit" || "${RUNNING_MODE}" == "multi-tier-jit" ]]; then + echo "both llvm-jit, fast-jit and multi-tier-jit mode do not support X86_32 target" + return 0; + fi + + if [[ ${ENABLE_MEMORY64} -eq 1 ]]; then + echo "memory64 does not support X86_32 target" + return 0; + fi + + if [[ ${ENABLE_MULTI_MEMORY} -eq 1 ]]; then + echo "multi-memory does not support X86_32 target" + return 0; + fi + + if [[ ${ENABLE_SIMD} -eq 1 ]]; then + echo "simd does not support X86_32 target" return 0; fi fi diff --git a/wamr-compiler/main.c b/wamr-compiler/main.c index 3c1c44ff7..410f4f668 100644 --- a/wamr-compiler/main.c +++ b/wamr-compiler/main.c @@ -137,9 +137,12 @@ print_help() printf(" 3 - Small code model\n"); printf(" -sgx Generate code for SGX platform (Intel Software Guard Extensions)\n"); printf(" --bounds-checks=1/0 Enable or disable the bounds checks for memory access:\n"); - printf(" by default it is disabled in all 64-bit platforms except SGX and\n"); - printf(" in these platforms runtime does bounds checks with hardware trap,\n"); - printf(" and by default it is enabled in all 32-bit platforms\n"); + printf(" This flag controls bounds checking with a software check. \n"); + printf(" On 64-bit platforms, it is disabled by default, using a hardware \n"); + printf(" trap if supported, except when SGX or memory64 is enabled,\n"); + printf(" which defaults to a software check.\n"); + printf(" On 32-bit platforms, the flag is enabled by default, using a software check\n"); + printf(" due to the lack of hardware support.\n"); printf(" CAVEAT: --bounds-checks=0 enables some optimizations\n"); printf(" which make the compiled AOT module incompatible\n"); printf(" with a runtime without the hardware bounds checks.\n"); diff --git a/wamr-wasi-extensions/CMakeLists.txt b/wamr-wasi-extensions/CMakeLists.txt new file mode 100644 index 000000000..bed9f6780 --- /dev/null +++ b/wamr-wasi-extensions/CMakeLists.txt @@ -0,0 +1,9 @@ +# Copyright (C) 2025 Midokura Japan KK. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +cmake_minimum_required (VERSION 3.14) + +project(wamr-wasi-extensions LANGUAGES C) + +add_subdirectory(nn) +add_subdirectory(socket) diff --git a/wamr-wasi-extensions/build_libs.sh b/wamr-wasi-extensions/build_libs.sh new file mode 100755 index 000000000..97f2fe104 --- /dev/null +++ b/wamr-wasi-extensions/build_libs.sh @@ -0,0 +1,15 @@ +#! /bin/sh + +# Copyright (C) 2025 Midokura Japan KK. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +set -e + +PREFIX=${1:-/tmp/wamr} +WASI_SDK=${WASI_SDK:-/opt/wasi-sdk} + +cmake -B build-lib \ +-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk.cmake \ +-DCMAKE_INSTALL_PREFIX=${PREFIX} \ +. +cmake --build build-lib -t install diff --git a/wamr-wasi-extensions/build_samples.sh b/wamr-wasi-extensions/build_samples.sh new file mode 100755 index 000000000..5dfeeabf2 --- /dev/null +++ b/wamr-wasi-extensions/build_samples.sh @@ -0,0 +1,33 @@ +#! /bin/sh + +# Copyright (C) 2025 Midokura Japan KK. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +set -e + +PREFIX=${1:-/tmp/wamr} +WASI_SDK=${WASI_SDK:-/opt/wasi-sdk} + +cmake -B build-app-nn \ +-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk.cmake \ +-DCMAKE_PREFIX_PATH=${PREFIX} \ +samples/nn +cmake --build build-app-nn + +cmake -B build-app-nn-cli \ +-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk.cmake \ +-DCMAKE_PREFIX_PATH=${PREFIX} \ +samples/nn-cli +cmake --build build-app-nn-cli + +cmake -B build-app-socket-nslookup \ +-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk-pthread.cmake \ +-DCMAKE_PREFIX_PATH=${PREFIX} \ +samples/socket-nslookup +cmake --build build-app-socket-nslookup + +cmake -B build-app-socket-tcp-udp \ +-DCMAKE_TOOLCHAIN_FILE=${WASI_SDK}/share/cmake/wasi-sdk-pthread.cmake \ +-DCMAKE_PREFIX_PATH=${PREFIX} \ +samples/socket-tcp-udp +cmake --build build-app-socket-tcp-udp diff --git a/wamr-wasi-extensions/nn/CMakeLists.txt b/wamr-wasi-extensions/nn/CMakeLists.txt new file mode 100644 index 000000000..27c22c4b8 --- /dev/null +++ b/wamr-wasi-extensions/nn/CMakeLists.txt @@ -0,0 +1,25 @@ +# Copyright (C) 2025 Midokura Japan KK. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +add_library(wamr-wasi-nn INTERFACE) + +set(wasi_nn_header_dir ${CMAKE_CURRENT_SOURCE_DIR}/../../core/iwasm/libraries/wasi-nn/include) + +set(headers + ${wasi_nn_header_dir}/wasi_ephemeral_nn.h + ${wasi_nn_header_dir}/wasi_nn.h + ${wasi_nn_header_dir}/wasi_nn_types.h +) + +set_property(TARGET wamr-wasi-nn PROPERTY PUBLIC_HEADER ${headers}) + +target_include_directories(wamr-wasi-nn + INTERFACE + $ + $) + +install(TARGETS wamr-wasi-nn + EXPORT wamr-wasi-nn-config + PUBLIC_HEADER DESTINATION include/wamr) +install(EXPORT wamr-wasi-nn-config + DESTINATION lib/cmake/wamr-wasi-nn) diff --git a/wamr-wasi-extensions/samples/nn-cli/CMakeLists.txt b/wamr-wasi-extensions/samples/nn-cli/CMakeLists.txt new file mode 100644 index 000000000..df90b5ba4 --- /dev/null +++ b/wamr-wasi-extensions/samples/nn-cli/CMakeLists.txt @@ -0,0 +1,12 @@ +# Copyright (C) 2025 Midokura Japan KK. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +cmake_minimum_required(VERSION 3.14) + +set(CMAKE_C_STANDARD 99) +set(CMAKE_C_STANDARD_REQUIRED YES) + +project(nn-cli LANGUAGES C) +add_executable(nn-cli main.c fileio.c map.c) +find_package(wamr-wasi-nn REQUIRED) +target_link_libraries(nn-cli wamr-wasi-nn) diff --git a/wamr-wasi-extensions/samples/nn-cli/fileio.c b/wamr-wasi-extensions/samples/nn-cli/fileio.c new file mode 100644 index 000000000..5d8163ba4 --- /dev/null +++ b/wamr-wasi-extensions/samples/nn-cli/fileio.c @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2025 Midokura Japan KK. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +/* + * modified copy-and-paste from: + * https://github.com/yamt/toywasm/blob/0eaad8cacd0cc7692946ff19b25994f106113be8/lib/fileio.c + */ + +#include + +#include +#include +#include +#include +#include + +#include "fileio.h" + +int +map_file(const char *path, void **pp, size_t *sizep) +{ + void *p; + size_t size; + ssize_t ssz; + int fd; + int ret; + + fd = open(path, O_RDONLY); + if (fd == -1) { + ret = errno; + assert(ret != 0); + return ret; + } + struct stat st; + ret = fstat(fd, &st); + if (ret == -1) { + ret = errno; + assert(ret != 0); + close(fd); + return ret; + } + size = st.st_size; + if (size > 0) { + p = malloc(size); + } + else { + /* Avoid a confusing error */ + p = malloc(1); + } + if (p == NULL) { + close(fd); + return ENOMEM; + } + ssz = read(fd, p, size); + if (ssz != size) { + ret = errno; + assert(ret != 0); + close(fd); + return ret; + } + close(fd); + *pp = p; + *sizep = size; + return 0; +} + +void +unmap_file(void *p, size_t sz) +{ + free(p); +} diff --git a/wamr-wasi-extensions/samples/nn-cli/fileio.h b/wamr-wasi-extensions/samples/nn-cli/fileio.h new file mode 100644 index 000000000..e268222bc --- /dev/null +++ b/wamr-wasi-extensions/samples/nn-cli/fileio.h @@ -0,0 +1,14 @@ +/* + * Copyright (C) 2025 Midokura Japan KK. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +/* + * modified copy-and-paste from: + * https://github.com/yamt/toywasm/blob/0eaad8cacd0cc7692946ff19b25994f106113be8/lib/fileio.h + */ + +int +map_file(const char *filename, void **pp, size_t *szp); +void +unmap_file(void *p, size_t sz); diff --git a/wamr-wasi-extensions/samples/nn-cli/main.c b/wamr-wasi-extensions/samples/nn-cli/main.c new file mode 100644 index 000000000..0358158f3 --- /dev/null +++ b/wamr-wasi-extensions/samples/nn-cli/main.c @@ -0,0 +1,497 @@ +/* + * Copyright (C) 2025 Midokura Japan KK. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "fileio.h" +#include "map.h" + +static struct map graphs; +static struct map contexts; + +static void +load_graph(char *options) +{ + int target = wasi_ephemeral_nn_target_cpu; + int encoding = wasi_ephemeral_nn_encoding_openvino; + const char *id = "default"; + wasi_ephemeral_nn_graph_builder *builders = NULL; + size_t nbuilders = 0; + enum { + opt_id, + opt_file, + opt_encoding, + opt_target, + }; + static char *const keylistp[] = { + [opt_id] = "id", + [opt_file] = "file", + [opt_encoding] = "encoding", + [opt_target] = "target", + NULL, + }; + while (*options) { + extern char *suboptarg; + char *value; + const char *saved = options; + switch (getsubopt(&options, keylistp, &value)) { + case opt_id: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + id = value; + break; + case opt_file: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + builders = + realloc(builders, (nbuilders + 1) * sizeof(*builders)); + if (builders == NULL) { + exit(1); + } + wasi_ephemeral_nn_graph_builder *b = &builders[nbuilders++]; + int ret = map_file(value, (void *)&b->buf, (void *)&b->size); + if (ret != 0) { + fprintf(stderr, "map_file \"%s\" failed: %s\n", value, + strerror(ret)); + exit(1); + } + break; + case opt_encoding: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + encoding = atoi(value); + break; + case opt_target: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + target = atoi(value); + break; + case -1: + fprintf(stderr, "unknown subopt %s\n", saved); + exit(2); + } + } + + wasi_ephemeral_nn_error nnret; + wasi_ephemeral_nn_graph g; + nnret = wasi_ephemeral_nn_load(builders, nbuilders, encoding, target, &g); + size_t i; + for (i = 0; i < nbuilders; i++) { + wasi_ephemeral_nn_graph_builder *b = &builders[i]; + unmap_file(b->buf, b->size); + } + if (nnret != wasi_ephemeral_nn_error_success) { + fprintf(stderr, "load failed with %d\n", (int)nnret); + exit(1); + } + map_set(&graphs, id, g); +} + +static void +init_execution_context(char *options) +{ + const char *id = "default"; + const char *graph_id = "default"; + enum { + opt_id, + opt_graph_id, + }; + static char *const keylistp[] = { + [opt_id] = "id", + [opt_graph_id] = "graph-id", + NULL, + }; + while (*options) { + extern char *suboptarg; + char *value; + const char *saved = options; + switch (getsubopt(&options, keylistp, &value)) { + case opt_id: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + id = value; + break; + case opt_graph_id: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + graph_id = value; + break; + case -1: + fprintf(stderr, "unknown subopt %s\n", saved); + exit(2); + } + } + + wasi_ephemeral_nn_graph g = map_get(&graphs, graph_id); + wasi_ephemeral_nn_graph_execution_context c; + wasi_ephemeral_nn_error nnret; + nnret = wasi_ephemeral_nn_init_execution_context(g, &c); + if (nnret != wasi_ephemeral_nn_error_success) { + fprintf(stderr, "init_execution_context failed with %d\n", (int)nnret); + exit(1); + } + map_set(&contexts, id, c); +} + +static void +set_input(char *options) +{ + int ret; + const char *context_id = "default"; + uint32_t idx = 0; + wasi_ephemeral_nn_tensor tensor = { + .dimensions = { .buf = NULL, 
.size = 0, }, + .type = wasi_ephemeral_nn_type_fp32, + .data = NULL, + }; + void *buf = NULL; + size_t sz = 0; + enum { + opt_context_id, + opt_dim, + opt_type, + opt_idx, + opt_file, + }; + static char *const keylistp[] = { + [opt_context_id] = "context-id", + [opt_dim] = "dim", + [opt_type] = "type", + [opt_idx] = "idx", + [opt_file] = "file", + NULL, + }; + while (*options) { + extern char *suboptarg; + char *value; + const char *saved = options; + switch (getsubopt(&options, keylistp, &value)) { + case opt_context_id: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + context_id = value; + break; + case opt_dim: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + wasi_ephemeral_nn_tensor_dimensions *dims = &tensor.dimensions; + + dims->buf = + realloc(dims->buf, (dims->size + 1) * sizeof(*dims->buf)); + if (dims->buf == NULL) { + exit(1); + } + dims->buf[dims->size++] = atoi(value); + break; + case opt_type: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + tensor.type = atoi(value); + break; + case opt_file: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + if (buf != NULL) { + fprintf(stderr, "duplicated tensor data\n"); + exit(2); + } + ret = map_file(value, &buf, &sz); + if (ret != 0) { + fprintf(stderr, "map_file \"%s\" failed: %s\n", value, + strerror(ret)); + exit(1); + } + break; + case opt_idx: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + idx = atoi(value); + break; + case -1: + fprintf(stderr, "unknown subopt %s\n", saved); + exit(2); + } + } + + if (tensor.dimensions.size == 0) { + fprintf(stderr, "no dimension is given\n"); + exit(2); + } + if (buf == NULL) { + fprintf(stderr, "no tensor is given\n"); + exit(2); + } + + /* + * REVISIT: we can check the tensor size against type/dimensions + * and warn the user if unexpected. 
+ */ + + wasi_ephemeral_nn_error nnret; + wasi_ephemeral_nn_graph_execution_context c = + map_get(&contexts, context_id); + tensor.data.buf = buf; + tensor.data.size = sz; + nnret = wasi_ephemeral_nn_set_input(c, idx, &tensor); + unmap_file(buf, sz); + if (nnret != wasi_ephemeral_nn_error_success) { + fprintf(stderr, "set_input failed with %d\n", (int)nnret); + exit(1); + } +} + +static void +compute(char *options) +{ + const char *context_id = "default"; + enum { + opt_context_id, + }; + static char *const keylistp[] = { + [opt_context_id] = "context-id", + NULL, + }; + while (*options) { + extern char *suboptarg; + char *value; + const char *saved = options; + switch (getsubopt(&options, keylistp, &value)) { + case opt_context_id: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + context_id = value; + break; + case -1: + fprintf(stderr, "unknown subopt %s\n", saved); + exit(2); + } + } + + wasi_ephemeral_nn_graph_execution_context c = + map_get(&contexts, context_id); + wasi_ephemeral_nn_error nnret; + nnret = wasi_ephemeral_nn_compute(c); + if (nnret != wasi_ephemeral_nn_error_success) { + fprintf(stderr, "compute failed with %d\n", (int)nnret); + exit(1); + } +} + +static void +get_output(char *options) +{ + int ret; + const char *outfile = NULL; + const char *context_id = "default"; + uint32_t idx = 0; + enum { + opt_context_id, + opt_idx, + opt_file, + }; + static char *const keylistp[] = { + [opt_context_id] = "context-id", + [opt_idx] = "idx", + [opt_file] = "file", + NULL, + }; + while (*options) { + extern char *suboptarg; + char *value; + const char *saved = options; + switch (getsubopt(&options, keylistp, &value)) { + case opt_context_id: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + context_id = value; + break; + case opt_file: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + outfile = value; + break; + case opt_idx: + if (value == NULL) { + fprintf(stderr, "no value for %s\n", saved); + exit(2); + } + idx = atoi(value); + break; + case -1: + fprintf(stderr, "unknown subopt %s\n", saved); + exit(2); + } + } + + int outfd = -1; + if (outfile != NULL) { + outfd = open(outfile, O_CREAT | O_TRUNC | O_WRONLY); + if (outfd == -1) { + fprintf(stderr, "failed to open output file \"%s\": %s\n", outfile, + strerror(errno)); + exit(1); + } + } + + wasi_ephemeral_nn_error nnret; + wasi_ephemeral_nn_graph_execution_context c = + map_get(&contexts, context_id); + void *resultbuf = NULL; + size_t resultbufsz = 256; + uint32_t resultsz; +retry: + resultbuf = realloc(resultbuf, resultbufsz); + if (resultbuf == NULL) { + exit(1); + } + nnret = + wasi_ephemeral_nn_get_output(c, idx, resultbuf, resultbufsz, &resultsz); + if (nnret == wasi_ephemeral_nn_error_too_large) { + resultbufsz *= 2; + goto retry; + } + if (nnret != wasi_ephemeral_nn_error_success) { + fprintf(stderr, "get_output failed with %d\n", (int)nnret); + exit(1); + } + if (outfd != -1) { + ssize_t written = write(outfd, resultbuf, resultsz); + if (written == -1) { + fprintf(stderr, "failed to write: %s\n", strerror(errno)); + exit(1); + } + if ((size_t)written != resultsz) { + fprintf(stderr, "unexpected write length %zu (expected %zu)\n", + (size_t)written, (size_t)resultsz); + exit(1); + } + ret = close(outfd); + if (ret != 0) { + fprintf(stderr, "failed to close: %s\n", strerror(errno)); + exit(1); + } + } + else { + fprintf(stderr, "WARNING: discarding %zu bytes output\n", + (size_t)resultsz); + } +} + +enum longopt { + opt_load_graph =
0x100, + opt_init_execution_context, + opt_set_input, + opt_compute, + opt_get_output, +}; + +static const struct option longopts[] = { + { + "load-graph", + required_argument, + NULL, + opt_load_graph, + }, + { + "init-execution-context", + optional_argument, + NULL, + opt_init_execution_context, + }, + { + "set-input", + required_argument, + NULL, + opt_set_input, + }, + { + "compute", + optional_argument, + NULL, + opt_compute, + }, + { + "get-output", + optional_argument, + NULL, + opt_get_output, + }, + { + NULL, + 0, + NULL, + 0, + }, +}; + +int +main(int argc, char **argv) +{ + extern char *optarg; + int ch; + int longidx; + while ((ch = getopt_long(argc, argv, "", longopts, &longidx)) != -1) { + switch (ch) { + case opt_load_graph: + load_graph(optarg); + break; + case opt_init_execution_context: + init_execution_context(optarg ? optarg : ""); + break; + case opt_set_input: + set_input(optarg); + break; + case opt_compute: + compute(optarg ? optarg : ""); + break; + case opt_get_output: + get_output(optarg ? optarg : ""); + break; + default: + exit(2); + } + } + exit(0); +} diff --git a/wamr-wasi-extensions/samples/nn-cli/map.c b/wamr-wasi-extensions/samples/nn-cli/map.c new file mode 100644 index 000000000..3ed817242 --- /dev/null +++ b/wamr-wasi-extensions/samples/nn-cli/map.c @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2025 Midokura Japan KK. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include +#include +#include +#include +#include + +#include "map.h" + +static uintmax_t * +map_find_slot(struct map *m, const char *name) +{ + size_t i; + for (i = 0; i < m->nentries; i++) { + if (!strcmp(m->entries[i].k, name)) { + return &m->entries[i].v; + } + } + return NULL; +} + +static void +map_append(struct map *m, const char *k, uintmax_t v) +{ + m->entries = realloc(m->entries, (m->nentries + 1) * sizeof(*m->entries)); + if (m->entries == NULL) { + exit(1); + } + struct map_entry *e = &m->entries[m->nentries++]; + e->k = k; + e->v = v; +} + +void +map_set(struct map *m, const char *k, uintmax_t v) +{ + uintmax_t *p = map_find_slot(m, k); + if (p != NULL) { + fprintf(stderr, "duplicated id \"%s\"\n", k); + exit(1); + } + map_append(m, k, v); +} + +uintmax_t +map_get(struct map *m, const char *k) +{ + uintmax_t *p = map_find_slot(m, k); + if (p == NULL) { + fprintf(stderr, "id \"%s\" not found\n", k); + exit(1); + } + return *p; +} diff --git a/wamr-wasi-extensions/samples/nn-cli/map.h b/wamr-wasi-extensions/samples/nn-cli/map.h new file mode 100644 index 000000000..0059293c8 --- /dev/null +++ b/wamr-wasi-extensions/samples/nn-cli/map.h @@ -0,0 +1,19 @@ +/* + * Copyright (C) 2025 Midokura Japan KK. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include + +struct map { + struct map_entry { + const char *k; + uintmax_t v; + } * entries; + size_t nentries; +}; + +void +map_set(struct map *m, const char *k, uintmax_t v); +uintmax_t +map_get(struct map *m, const char *k); diff --git a/wamr-wasi-extensions/samples/nn/CMakeLists.txt b/wamr-wasi-extensions/samples/nn/CMakeLists.txt new file mode 100644 index 000000000..59d607f64 --- /dev/null +++ b/wamr-wasi-extensions/samples/nn/CMakeLists.txt @@ -0,0 +1,13 @@ +# Copyright (C) 2025 Midokura Japan KK. All rights reserved. 
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +cmake_minimum_required(VERSION 3.14) + +set(CMAKE_C_STANDARD 99) +set(CMAKE_C_STANDARD_REQUIRED YES) +set(CMAKE_C_EXTENSIONS NO) + +project(nn-classification-openvino LANGUAGES C) +add_executable(nn-classification-openvino "app.c") +find_package(wamr-wasi-nn REQUIRED) +target_link_libraries(nn-classification-openvino wamr-wasi-nn) diff --git a/wamr-wasi-extensions/samples/nn/app.c b/wamr-wasi-extensions/samples/nn/app.c new file mode 100644 index 000000000..15f637732 --- /dev/null +++ b/wamr-wasi-extensions/samples/nn/app.c @@ -0,0 +1,173 @@ +/* + * Copyright (C) 2025 Midokura Japan KK. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +#include + +/* + * what this application does is basically same as: + * https://github.com/bytecodealliance/wasmtime/tree/efa236e58d09570baaf27865da33fb852fcf40a5/crates/wasi-nn/examples/classification-example + * + * map_file/unmap_file are copy-and-pasted from: + * https://github.com/yamt/toywasm/blob/0eaad8cacd0cc7692946ff19b25994f106113be8/lib/fileio.c + */ + +int +map_file(const char *path, void **pp, size_t *sizep) +{ + void *p; + size_t size; + ssize_t ssz; + int fd; + int ret; + + fd = open(path, O_RDONLY); + if (fd == -1) { + ret = errno; + assert(ret != 0); + return ret; + } + struct stat st; + ret = fstat(fd, &st); + if (ret == -1) { + ret = errno; + assert(ret != 0); + close(fd); + return ret; + } + size = st.st_size; + if (size > 0) { + p = malloc(size); + } + else { + /* Avoid a confusing error */ + p = malloc(1); + } + if (p == NULL) { + close(fd); + return ENOMEM; + } + ssz = read(fd, p, size); + if (ssz != size) { + ret = errno; + assert(ret != 0); + close(fd); + return ret; + } + close(fd); + *pp = p; + *sizep = size; + return 0; +} + +void +unmap_file(void *p, size_t sz) +{ + free(p); +} + +static void +print_result(const float *result, size_t sz) +{ + /* + * just dump the raw result. + * you can postprocess the output with eg. 
"sort -k2nr | head" + */ + int i; + for (i = 0; i < sz / sizeof(float); i++) { + printf("%d %f\n", i, result[i]); + } +} + +int +main(int argc, char **argv) +{ + wasi_ephemeral_nn_error nnret; + int ret; + void *xml; + size_t xmlsz; + ret = map_file("fixture/model.xml", &xml, &xmlsz); + if (ret != 0) { + fprintf(stderr, "failed to load fixture/model.xml: %s\n", + strerror(ret)); + exit(1); + } + void *weights; + size_t weightssz; + ret = map_file("fixture/model.bin", &weights, &weightssz); + if (ret != 0) { + fprintf(stderr, "failed to load fixture/model.bin: %s\n", + strerror(ret)); + exit(1); + } + /* note: openvino takes two buffers, namely IR and weights */ + wasi_ephemeral_nn_graph_builder builders[2] = { { + .buf = xml, + .size = xmlsz, + }, + { + .buf = weights, + .size = weightssz, + } }; + wasi_ephemeral_nn_graph g; + nnret = + wasi_ephemeral_nn_load(builders, 2, wasi_ephemeral_nn_encoding_openvino, + wasi_ephemeral_nn_target_cpu, &g); + unmap_file(xml, xmlsz); + unmap_file(weights, weightssz); + if (nnret != wasi_ephemeral_nn_error_success) { + fprintf(stderr, "load failed with %d\n", (int)nnret); + exit(1); + } + wasi_ephemeral_nn_graph_execution_context ctx; + nnret = wasi_ephemeral_nn_init_execution_context(g, &ctx); + if (nnret != wasi_ephemeral_nn_error_success) { + fprintf(stderr, "init_execution_context failed with %d\n", (int)nnret); + exit(1); + } + void *tensordata; + size_t tensordatasz; + ret = map_file("fixture/tensor.bgr", &tensordata, &tensordatasz); + if (ret != 0) { + fprintf(stderr, "failed to load fixture/tensor.bgr: %s\n", + strerror(ret)); + exit(1); + } + wasi_ephemeral_nn_tensor tensor = { + .dimensions = { .buf = (uint32_t[]){1, 3, 224, 224,}, .size = 4, }, + .type = wasi_ephemeral_nn_type_fp32, + .data.buf = tensordata, + .data.size = tensordatasz, + }; + nnret = wasi_ephemeral_nn_set_input(ctx, 0, &tensor); + unmap_file(tensordata, tensordatasz); + if (nnret != wasi_ephemeral_nn_error_success) { + fprintf(stderr, "set_input failed with %d\n", (int)nnret); + exit(1); + } + nnret = wasi_ephemeral_nn_compute(ctx); + if (nnret != wasi_ephemeral_nn_error_success) { + fprintf(stderr, "compute failed with %d\n", (int)nnret); + exit(1); + } + float result[1001]; + uint32_t resultsz; + nnret = wasi_ephemeral_nn_get_output(ctx, 0, (void *)result, sizeof(result), + &resultsz); + if (nnret != wasi_ephemeral_nn_error_success) { + fprintf(stderr, "get_output failed with %d\n", (int)nnret); + exit(1); + } + print_result(result, resultsz); +} diff --git a/wamr-wasi-extensions/samples/socket-nslookup/CMakeLists.txt b/wamr-wasi-extensions/samples/socket-nslookup/CMakeLists.txt new file mode 100644 index 000000000..3c437524a --- /dev/null +++ b/wamr-wasi-extensions/samples/socket-nslookup/CMakeLists.txt @@ -0,0 +1,11 @@ +cmake_minimum_required(VERSION 3.14) + +set(CMAKE_C_STANDARD 99) +set(CMAKE_C_STANDARD_REQUIRED YES) +set(CMAKE_C_EXTENSIONS NO) + +project(socket-nslookup LANGUAGES C) +add_executable(socket-nslookup ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/iwasm/libraries/lib-socket/test/nslookup.c) +find_package(wamr-wasi-socket REQUIRED) +target_link_libraries(socket-nslookup wamr-wasi-socket) +target_link_options(socket-nslookup PRIVATE -Wl,--max-memory=262144) diff --git a/wamr-wasi-extensions/samples/socket-tcp-udp/CMakeLists.txt b/wamr-wasi-extensions/samples/socket-tcp-udp/CMakeLists.txt new file mode 100644 index 000000000..d166094a7 --- /dev/null +++ b/wamr-wasi-extensions/samples/socket-tcp-udp/CMakeLists.txt @@ -0,0 +1,10 @@ +cmake_minimum_required(VERSION 
3.14) + +set(CMAKE_C_STANDARD 99) +set(CMAKE_C_STANDARD_REQUIRED YES) + +project(socket-tcp-udp LANGUAGES C) +add_executable(socket-tcp-udp ${CMAKE_CURRENT_SOURCE_DIR}/../../../core/iwasm/libraries/lib-socket/test/tcp_udp.c) +find_package(wamr-wasi-socket REQUIRED) +target_link_libraries(socket-tcp-udp wamr-wasi-socket) +target_link_options(socket-tcp-udp PRIVATE -Wl,--max-memory=262144) diff --git a/wamr-wasi-extensions/socket/CMakeLists.txt b/wamr-wasi-extensions/socket/CMakeLists.txt new file mode 100644 index 000000000..74bbf31c4 --- /dev/null +++ b/wamr-wasi-extensions/socket/CMakeLists.txt @@ -0,0 +1,26 @@ +set(wasi_socket_dir ${CMAKE_CURRENT_SOURCE_DIR}/../../core/iwasm/libraries/lib-socket) +set(wasi_socket_header_dir ${wasi_socket_dir}/inc) + +set(srcs ${wasi_socket_dir}/src/wasi/wasi_socket_ext.c) +set(headers + ${wasi_socket_header_dir}/wasi_socket_ext.h +) + +add_library(wamr-wasi-socket STATIC ${srcs}) +set_property(TARGET wamr-wasi-socket PROPERTY PUBLIC_HEADER ${headers}) +target_include_directories(wamr-wasi-socket + PUBLIC + $ + $) + +# as this is a library, be extra conservative about wasm features +# to improve compatibilities. as this particular library is just a +# simple static stub, extra wasm features won't benefit us much anyway. +# note that LLVM-19 enables reference-types by default. +target_compile_options(wamr-wasi-socket PRIVATE -mno-reference-types) + +install(TARGETS wamr-wasi-socket + EXPORT wamr-wasi-socket-config + PUBLIC_HEADER DESTINATION include) +install(EXPORT wamr-wasi-socket-config + DESTINATION lib/cmake/wamr-wasi-socket) diff --git a/wamr-wasi-extensions/test.sh b/wamr-wasi-extensions/test.sh new file mode 100755 index 000000000..1d366dfe9 --- /dev/null +++ b/wamr-wasi-extensions/test.sh @@ -0,0 +1,11 @@ +#! /bin/sh + +# Copyright (C) 2025 Midokura Japan KK. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +set -e + +PREFIX=${1:-/tmp/wamr} + +./build_libs.sh ${PREFIX} +./build_samples.sh ${PREFIX}
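# --- Usage note (not part of the patch above) ---
# A minimal sketch of how the new wamr-wasi-extensions scripts are meant to be
# driven, based on build_libs.sh, build_samples.sh and test.sh added in this
# change. The /tmp/wamr prefix and the /opt/wasi-sdk location are only the
# scripts' defaults; adjust both for your environment.
cd wamr-wasi-extensions
# install the wamr-wasi-nn / wamr-wasi-socket headers, libraries and cmake
# config files under the given prefix
WASI_SDK=/opt/wasi-sdk ./build_libs.sh /tmp/wamr
# build the nn, nn-cli, socket-nslookup and socket-tcp-udp samples against it
WASI_SDK=/opt/wasi-sdk ./build_samples.sh /tmp/wamr
# or run both steps with the default prefix
./test.sh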
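# --- Illustrative nn-cli invocation (assumptions, not part of the patch) ---
# samples/nn-cli/main.c handles its long options in command-line order: load a
# graph, create an execution context, set the input tensor, compute, then write
# the first output to a file. The model and tensor file names below are
# placeholders, and "$IWASM" stands for whatever wasi-nn-enabled runtime
# command is used to execute the module. encoding, target and type are omitted
# so the defaults from the source apply (openvino, cpu, fp32); idx defaults to 0.
$IWASM nn-cli \
    --load-graph=file=model.xml,file=model.bin \
    --init-execution-context \
    --set-input=file=tensor.bgr,dim=1,dim=3,dim=224,dim=224 \
    --compute \
    --get-output=file=output.bin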