Merge pull request #2146 from bytecodealliance/main

Merge branch main into dev/exce_handling
This commit is contained in:
Wenyong Huang 2023-04-23 15:18:46 +08:00 committed by GitHub
commit dfee9c3ff3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
133 changed files with 23428 additions and 1925 deletions

View File

@ -12,7 +12,7 @@ import sys
def fetch_version_from_code():
"""
search the semantic version definition in build-scripts/config_common.cmake
search the semantic version definition in core/version.h
"""
major, minor, patch = "", "", ""
with open("core/version.h", encoding="utf-8") as f:

View File

@ -14,6 +14,10 @@ on:
description: workfing directory
type: string
required: true
llvm_cache_key:
description: the cache key of llvm libraries
type: string
required: true
runner:
description: OS of compilation
type: string
@ -33,6 +37,19 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: get cached LLVM libraries
id: retrieve_llvm_libs
uses: actions/cache@v3
with:
path: |
./core/deps/llvm/build/bin
./core/deps/llvm/build/include
./core/deps/llvm/build/lib
./core/deps/llvm/build/libexec
./core/deps/llvm/build/share
key: ${{ inputs.llvm_cache_key }}
fail-on-cache-miss: true
- name: generate iwasm binary release
run: |
cmake -S . -B build \
@ -53,6 +70,7 @@ jobs:
-DWAMR_BUILD_BULK_MEMORY=1 \
-DWAMR_BUILD_LIB_PTHREAD=1 \
-DWAMR_BUILD_LIB_PTHREAD_SEMAPHORE=1 \
-DWAMR_BUILD_LIB_WASI_THREADS=1 \
-DWAMR_BUILD_LIBC_BUILTIN=1 \
-DWAMR_BUILD_LIBC_WASI=1 \
-DWAMR_BUILD_REF_TYPES=1 \

View File

@ -96,14 +96,23 @@ jobs:
cmake -S ./llvm -B build \
-G Ninja \
-DCMAKE_INSTALL_PREFIX=../wamr-lldb \
-DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="clang;lldb" \
-DLLVM_TARGETS_TO_BUILD=X86 \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DLLVM_BUILD_BENCHMARKS:BOOL=OFF \
-DLLVM_BUILD_DOCS:BOOL=OFF -DLLVM_BUILD_EXAMPLES:BOOL=OFF \
-DLLVM_BUILD_LLVM_DYLIB:BOOL=OFF -DLLVM_BUILD_TESTS:BOOL=OFF \
-DLLVM_ENABLE_BINDINGS:BOOL=OFF -DLLVM_INCLUDE_BENCHMARKS:BOOL=OFF \
-DLLVM_INCLUDE_DOCS:BOOL=OFF -DLLVM_INCLUDE_EXAMPLES:BOOL=OFF \
-DLLVM_INCLUDE_TESTS:BOOL=OFF -DLLVM_ENABLE_LLD:BOOL=ON
-DCMAKE_BUILD_TYPE:STRING="Release" \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
-DLLVM_ENABLE_PROJECTS="clang;lldb" \
-DLLVM_TARGETS_TO_BUILD:STRING="X86;WebAssembly" \
-DLLVM_BUILD_BENCHMARKS:BOOL=OFF \
-DLLVM_BUILD_DOCS:BOOL=OFF \
-DLLVM_BUILD_EXAMPLES:BOOL=OFF \
-DLLVM_BUILD_LLVM_DYLIB:BOOL=OFF \
-DLLVM_BUILD_TESTS:BOOL=OFF \
-DLLVM_INCLUDE_BENCHMARKS:BOOL=OFF \
-DLLVM_INCLUDE_DOCS:BOOL=OFF \
-DLLVM_INCLUDE_EXAMPLES:BOOL=OFF \
-DLLVM_INCLUDE_TESTS:BOOL=OFF \
-DLLVM_ENABLE_BINDINGS:BOOL=OFF \
-DLLVM_ENABLE_LIBXML2:BOOL=ON \
-DLLDB_ENABLE_PYTHON:BOOL=OFF \
-DLLVM_ENABLE_LLD:BOOL=ON
cmake --build build --target lldb install --parallel $(nproc)
working-directory: core/deps/llvm-project
@ -118,13 +127,20 @@ jobs:
-DCMAKE_BUILD_TYPE:STRING="Release" \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON \
-DLLVM_ENABLE_PROJECTS="clang;lldb" \
-DLLVM_TARGETS_TO_BUILD:STRING="X86;WebAssembly" \
-DLLVM_BUILD_BENCHMARKS:BOOL=OFF \
-DLLVM_BUILD_DOCS:BOOL=OFF \
-DLLVM_BUILD_EXAMPLES:BOOL=OFF \
-DLLVM_BUILD_LLVM_DYLIB:BOOL=OFF \
-DLLVM_BUILD_TESTS:BOOL=OFF \
-DLLVM_INCLUDE_BENCHMARKS:BOOL=OFF \
-DLLVM_INCLUDE_DOCS:BOOL=OFF \
-DLLVM_INCLUDE_EXAMPLES:BOOL=OFF \
-DLLVM_INCLUDE_TESTS:BOOL=OFF \
-DLLVM_INCLUDE_EXAMPLES:BOOL=OFF \
-DLLVM_BUILD_BENCHMARKS:BOOL=OFF \
-DLLVM_BUILD_DOCS:BOOL=OFF \
-DLLVM_BUILD_LLVM_DYLIB:BOOL=OFF \
-DLLVM_ENABLE_BINDINGS:BOOL=OFF \
-DLLVM_TARGETS_TO_BUILD:STRING="X86;WebAssembly" \
-DLLVM_ENABLE_LIBXML2:BOOL=ON \
-DLLDB_ENABLE_PYTHON:BOOL=OFF \
-DLLDB_BUILD_FRAMEWORK:BOOL=OFF

View File

@ -24,21 +24,24 @@ jobs:
uses: actions/setup-node@v3
with:
node-version: 14.x
- name: set vscode extension to correct version
- name: set vscode extension to correct version
run: |
npm install -g json
json -I -f package.json -e "this.version=\"${{ inputs.ver_num }}\""
working-directory: test-tools/wamr-ide/VSCode-Extension
- name: generate wamr ide vscode extension
env:
credentials: ${{ secrets.TOKEN }}
run: |
npm install -g vsce
rm -rf node_modules
npm install
vsce package
working-directory: test-tools/wamr-ide/VSCode-Extension
- name: publish wamr ide vscode extension to the vsce marketplace
if: ${{ github.repository == 'bytecodealliance/wasm-micro-runtime' }}
run: |
vsce publish -p ${{ secrets.TOKEN }}
working-directory: test-tools/wamr-ide/VSCode-Extension

View File

@ -38,7 +38,7 @@ jobs:
- uses: actions/checkout@v3
- name: get cached LLVM libraries
id: cache_llvm
id: retrieve_llvm_libs
uses: actions/cache@v3
with:
path: |
@ -48,11 +48,7 @@ jobs:
./core/deps/llvm/build/libexec
./core/deps/llvm/build/share
key: ${{ inputs.llvm_cache_key }}
- name: Build llvm and clang from source
if: steps.cache_llvm.outputs.cache-hit != 'true'
run: /usr/bin/env python3 ./build_llvm.py --arch X86
working-directory: build-scripts
fail-on-cache-miss: true
- name: generate wamrc binary release
run: |

View File

@ -115,6 +115,84 @@ jobs:
cmake --build . --config Release --parallel 4
working-directory: wamr-compiler
build_iwasm_linux_gcc4_8:
runs-on: ubuntu-latest
container:
image: ubuntu:14.04
strategy:
matrix:
make_options_run_mode: [
# Running mode
$CLASSIC_INTERP_BUILD_OPTIONS,
$FAST_INTERP_BUILD_OPTIONS,
$FAST_JIT_BUILD_OPTIONS,
]
make_options_feature: [
# Features
"-DWAMR_BUILD_CUSTOM_NAME_SECTION=1",
"-DWAMR_BUILD_DEBUG_AOT=1",
"-DWAMR_BUILD_DEBUG_INTERP=1",
"-DWAMR_BUILD_DUMP_CALL_STACK=1",
"-DWAMR_BUILD_LIB_PTHREAD=1",
"-DWAMR_BUILD_LIB_WASI_THREADS=1",
"-DWAMR_BUILD_LOAD_CUSTOM_SECTION=1",
"-DWAMR_BUILD_MINI_LOADER=1",
"-DWAMR_BUILD_MEMORY_PROFILING=1",
"-DWAMR_BUILD_MULTI_MODULE=1",
"-DWAMR_BUILD_PERF_PROFILING=1",
"-DWAMR_BUILD_REF_TYPES=1",
"-DWAMR_BUILD_SIMD=1",
"-DWAMR_BUILD_TAIL_CALL=1",
"-DWAMR_DISABLE_HW_BOUND_CHECK=1",
]
exclude:
# uncompatiable feature and platform
# uncompatiable mode and feature
# MULTI_MODULE only on INTERP mode
- make_options_run_mode: $FAST_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_MULTI_MODULE=1"
# SIMD only on JIT/AOT mode
- make_options_run_mode: $CLASSIC_INTERP_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_SIMD=1"
- make_options_run_mode: $FAST_INTERP_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_SIMD=1"
# DEBUG_INTERP only on CLASSIC INTERP mode
- make_options_run_mode: $FAST_INTERP_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_DEBUG_INTERP=1"
- make_options_run_mode: $FAST_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_DEBUG_INTERP=1"
# DEBUG_AOT only on JIT/AOT mode
- make_options_run_mode: $CLASSIC_INTERP_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_DEBUG_AOT=1"
- make_options_run_mode: $FAST_INTERP_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_DEBUG_AOT=1"
# TODO: DEBUG_AOT on JIT
- make_options_run_mode: $FAST_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_DEBUG_AOT=1"
# MINI_LOADER only on INTERP mode
- make_options_run_mode: $FAST_JIT_BUILD_OPTIONS
make_options_feature: "-DWAMR_BUILD_MINI_LOADER=1"
steps:
- name: checkout
uses: actions/checkout@v3
- name: Install dependencies
run: apt update && apt install -y make g++-4.8 gcc-4.8 wget git
- name: Install cmake
run: |
wget https://github.com/Kitware/CMake/releases/download/v3.26.1/cmake-3.26.1-linux-x86_64.tar.gz -O cmake.tar.gz
tar xzf cmake.tar.gz
cp cmake-3.26.1-linux-x86_64/bin/cmake /usr/local/bin
cp -r cmake-3.26.1-linux-x86_64/share/cmake-3.26/ /usr/local/share/
- name: Build iwasm
run: |
mkdir build && cd build
cmake .. ${{ matrix.make_options_run_mode }} ${{ matrix.make_options_feature }} -DCMAKE_C_COMPILER=gcc-4.8 -DCMAKE_CXX_COMPILER=g++-4.8
cmake --build . --config Release --parallel 4
working-directory: product-mini/platforms/linux
build_iwasm:
needs:
[build_llvm_libraries_on_ubuntu_2004, build_llvm_libraries_on_ubuntu_2204]
@ -138,6 +216,7 @@ jobs:
"-DWAMR_BUILD_DEBUG_INTERP=1",
"-DWAMR_BUILD_DUMP_CALL_STACK=1",
"-DWAMR_BUILD_LIB_PTHREAD=1",
"-DWAMR_BUILD_LIB_WASI_THREADS=1",
"-DWAMR_BUILD_LOAD_CUSTOM_SECTION=1",
"-DWAMR_BUILD_MINI_LOADER=1",
"-DWAMR_BUILD_MEMORY_PROFILING=1",
@ -273,7 +352,7 @@ jobs:
os: [ubuntu-20.04, ubuntu-22.04]
wasi_sdk_release:
[
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-linux.tar.gz",
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-20/wasi-sdk-20.0-linux.tar.gz",
]
wabt_release:
[
@ -333,13 +412,14 @@ jobs:
strategy:
matrix:
os: [ubuntu-20.04, ubuntu-22.04]
include:
- os: ubuntu-20.04
wasi_sdk_release: "https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-linux.tar.gz"
wabt_release: "https://github.com/WebAssembly/wabt/releases/download/1.0.31/wabt-1.0.31-ubuntu.tar.gz"
- os: ubuntu-22.04
wasi_sdk_release: "https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-linux.tar.gz"
wabt_release: "https://github.com/WebAssembly/wabt/releases/download/1.0.31/wabt-1.0.31-ubuntu.tar.gz"
wasi_sdk_release:
[
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-20/wasi-sdk-20.0-linux.tar.gz",
]
wabt_release:
[
"https://github.com/WebAssembly/wabt/releases/download/1.0.31/wabt-1.0.31-ubuntu.tar.gz",
]
steps:
- name: checkout
uses: actions/checkout@v3
@ -349,7 +429,7 @@ jobs:
cd /opt
sudo wget ${{ matrix.wasi_sdk_release }}
sudo tar -xzf wasi-sdk-*.tar.gz
sudo mv wasi-sdk-19.0 wasi-sdk
sudo mv wasi-sdk-20.0 wasi-sdk
- name: download and install wabt
run: |
@ -358,22 +438,6 @@ jobs:
sudo tar -xzf wabt-1.0.31-*.tar.gz
sudo mv wabt-1.0.31 wabt
- name: build wasi-libc (needed for wasi-threads)
run: |
mkdir wasi-libc
cd wasi-libc
git init
# "Rename thread_spawn import" commit on main branch
git fetch https://github.com/WebAssembly/wasi-libc \
8f5275796a82f8ecfd0833a4f3f444fa37ed4546
git checkout FETCH_HEAD
make -j \
AR=/opt/wasi-sdk/bin/llvm-ar \
NM=/opt/wasi-sdk/bin/llvm-nm \
CC=/opt/wasi-sdk/bin/clang \
THREAD_MODEL=posix
working-directory: core/deps
- name: Build Sample [basic]
run: |
cd samples/basic
@ -431,15 +495,22 @@ jobs:
run: |
cd samples/wasi-threads
mkdir build && cd build
cmake -DWASI_SYSROOT=`pwd`/../../../core/deps/wasi-libc/sysroot ..
cmake ..
cmake --build . --config Release --parallel 4
./iwasm wasm-apps/no_pthread.wasm
test:
needs: [build_iwasm, build_llvm_libraries_on_ubuntu_2004, build_wamrc]
runs-on: ubuntu-20.04
needs:
[
build_iwasm,
build_llvm_libraries_on_ubuntu_2004,
build_llvm_libraries_on_ubuntu_2204,
build_wamrc,
]
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-20.04, ubuntu-22.04]
running_mode:
[
"classic-interp",
@ -459,10 +530,15 @@ jobs:
]
wasi_sdk_release:
[
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-linux.tar.gz",
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-20/wasi-sdk-20.0-linux.tar.gz",
]
llvm_cache_key:
["${{ needs.build_llvm_libraries_on_ubuntu_2004.outputs.cache_key }}"]
include:
- os: ubuntu-20.04
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2004.outputs.cache_key }}
ubuntu_version: "20.04"
- os: ubuntu-22.04
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2204.outputs.cache_key }}
ubuntu_version: "22.04"
exclude:
# uncompatiable modes and features
# classic-interp and fast-interp don't support simd
@ -475,24 +551,16 @@ jobs:
test_option: $MULTI_MODULES_TEST_OPTIONS
- running_mode: "jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
# fast-jit doesn't support multi module, simd, and threads
# fast-jit doesn't support multi module, simd
- running_mode: "fast-jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
- running_mode: "fast-jit"
test_option: $SIMD_TEST_OPTIONS
- running_mode: "fast-jit"
test_option: $THREADS_TEST_OPTIONS
- running_mode: "fast-jit"
test_option: $WASI_TEST_OPTIONS
# multi-tier-jit doesn't support multi module, simd, and threads
# multi-tier-jit doesn't support multi module, simd
- running_mode: "multi-tier-jit"
test_option: $MULTI_MODULES_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $SIMD_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $THREADS_TEST_OPTIONS
- running_mode: "multi-tier-jit"
test_option: $WASI_TEST_OPTIONS
steps:
- name: checkout
uses: actions/checkout@v3
@ -503,24 +571,7 @@ jobs:
cd /opt
sudo wget ${{ matrix.wasi_sdk_release }}
sudo tar -xzf wasi-sdk-*.tar.gz
sudo mv wasi-sdk-19.0 wasi-sdk
- name: build wasi-libc (needed for wasi-threads)
if: matrix.test_option == '$WASI_TEST_OPTIONS'
run: |
mkdir wasi-libc
cd wasi-libc
git init
# "Rename thread_spawn import" commit on main branch
git fetch https://github.com/WebAssembly/wasi-libc \
8f5275796a82f8ecfd0833a4f3f444fa37ed4546
git checkout FETCH_HEAD
make \
AR=/opt/wasi-sdk/bin/llvm-ar \
NM=/opt/wasi-sdk/bin/llvm-nm \
CC=/opt/wasi-sdk/bin/clang \
THREAD_MODEL=posix
working-directory: core/deps
sudo mv wasi-sdk-20.0 wasi-sdk
- name: set env variable(if llvm are used)
if: matrix.running_mode == 'aot' || matrix.running_mode == 'jit' || matrix.running_mode == 'multi-tier-jit'
@ -557,12 +608,12 @@ jobs:
- name: Build WASI thread tests
if: matrix.test_option == '$WASI_TEST_OPTIONS'
run: WASI_SYSROOT=../../../../../core/deps/wasi-libc/sysroot bash build.sh
run: bash build.sh
working-directory: ./core/iwasm/libraries/lib-wasi-threads/test/
- name: build socket api tests
if: matrix.test_option == '$WASI_TEST_OPTIONS'
run: WASI_SYSROOT=../../../../../core/deps/wasi-libc/sysroot bash build.sh
run: bash build.sh
working-directory: ./core/iwasm/libraries/lib-socket/test/
- name: run tests
@ -577,7 +628,7 @@ jobs:
# Add another apt repository as some packages cannot
# be downloaded with the github default repository
sudo curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo tee /etc/apt/trusted.gpg.d/microsoft.asc &&
sudo apt-add-repository https://packages.microsoft.com/ubuntu/20.04/prod &&
sudo apt-add-repository https://packages.microsoft.com/ubuntu/${{ matrix.ubuntu_version }}/prod &&
sudo apt-get update &&
sudo apt install -y g++-multilib lib32gcc-9-dev

View File

@ -115,6 +115,7 @@ jobs:
"-DWAMR_BUILD_DEBUG_INTERP=1",
"-DWAMR_BUILD_DUMP_CALL_STACK=1",
"-DWAMR_BUILD_LIB_PTHREAD=1",
"-DWAMR_BUILD_LIB_WASI_THREADS=1",
"-DWAMR_BUILD_LOAD_CUSTOM_SECTION=1",
"-DWAMR_BUILD_MINI_LOADER=1",
"-DWAMR_BUILD_MEMORY_PROFILING=1",
@ -217,7 +218,7 @@ jobs:
os: [macos-latest]
wasi_sdk_release:
[
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-macos.tar.gz",
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-20/wasi-sdk-20.0-macos.tar.gz",
]
wabt_release:
[
@ -249,7 +250,7 @@ jobs:
os: [macos-latest]
wasi_sdk_release:
[
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-19/wasi-sdk-19.0-macos.tar.gz",
"https://github.com/WebAssembly/wasi-sdk/releases/download/wasi-sdk-20/wasi-sdk-20.0-macos.tar.gz",
]
wabt_release:
[
@ -264,7 +265,7 @@ jobs:
cd /opt
sudo wget ${{ matrix.wasi_sdk_release }}
sudo tar -xzf wasi-sdk-*.tar.gz
sudo mv wasi-sdk-19.0 wasi-sdk
sudo mv wasi-sdk-20.0 wasi-sdk
- name: download and install wabt
run: |
@ -273,22 +274,6 @@ jobs:
sudo tar -xzf wabt-1.0.31-*.tar.gz
sudo mv wabt-1.0.31 wabt
- name: build wasi-libc (needed for wasi-threads)
run: |
mkdir wasi-libc
cd wasi-libc
git init
# "Rename thread_spawn import" commit on main branch
git fetch https://github.com/WebAssembly/wasi-libc \
8f5275796a82f8ecfd0833a4f3f444fa37ed4546
git checkout FETCH_HEAD
make \
AR=/opt/wasi-sdk/bin/llvm-ar \
NM=/opt/wasi-sdk/bin/llvm-nm \
CC=/opt/wasi-sdk/bin/clang \
THREAD_MODEL=posix
working-directory: core/deps
- name: Build Sample [basic]
run: |
cd samples/basic
@ -339,6 +324,6 @@ jobs:
run: |
cd samples/wasi-threads
mkdir build && cd build
cmake -DWASI_SYSROOT=`pwd`/../../../core/deps/wasi-libc/sysroot ..
cmake ..
cmake --build . --config Release --parallel 4
./iwasm wasm-apps/no_pthread.wasm

View File

@ -80,6 +80,7 @@ jobs:
# "-DWAMR_BUILD_DEBUG_INTERP=1",
"-DWAMR_BUILD_DUMP_CALL_STACK=1",
"-DWAMR_BUILD_LIB_PTHREAD=1",
"-DWAMR_BUILD_LIB_WASI_THREADS=1",
"-DWAMR_BUILD_LOAD_CUSTOM_SECTION=1",
"-DWAMR_BUILD_MINI_LOADER=1",
"-DWAMR_BUILD_MEMORY_PROFILING=1",
@ -265,9 +266,9 @@ jobs:
mkdir wasi-libc
cd wasi-libc
git init
# "Rename thread_spawn import" commit on main branch
# "Fix a_store operation in atomic.h" commit on main branch
git fetch https://github.com/WebAssembly/wasi-libc \
8f5275796a82f8ecfd0833a4f3f444fa37ed4546
1dfe5c302d1c5ab621f7abf04620fae92700fd22
git checkout FETCH_HEAD
make \
AR=/opt/wasi-sdk/bin/llvm-ar \

View File

@ -48,6 +48,20 @@ concurrency:
jobs:
build:
runs-on: windows-latest
strategy:
matrix:
build_options: [
"-DWAMR_BUILD_AOT=1 -DWAMR_BUILD_INTERP=0",
"-DWAMR_BUILD_AOT=0",
"-DWAMR_BUILD_TAIL_CALL=1",
"-DWAMR_BUILD_CUSTOM_NAME_SECTION=1",
"-DWAMR_DISABLE_HW_BOUND_CHECK=1",
"-DWAMR_BUILD_REF_TYPES=1",
"-DWAMR_BUILD_SIMD=1",
"-DWAMR_BUILD_DEBUG_INTERP=1",
"-DWAMR_BUILD_LIB_PTHREAD=1",
"-DWAMR_BUILD_LIB_WASI_THREADS=1"
]
steps:
- uses: actions/checkout@v3
@ -55,80 +69,9 @@ jobs:
run: |
cd core/deps
git clone https://github.com/nodejs/uvwasi.git
- name: Build iwasm [default]
- name: Build iwasm
run: |
cd product-mini/platforms/windows
mkdir build && cd build
cmake ..
cmake .. ${{ matrix.build_options }}
cmake --build . --config Release --parallel 4
cd .. && rm -force -r build
- name: Build iwasm [aot only]
run: |
cd product-mini/platforms/windows
mkdir build && cd build
cmake .. -DWAMR_BUILD_AOT=1 -DWAMR_BUILD_INTERP=0
cmake --build . --config Release --parallel 4
cd .. && rm -force -r build
- name: Build iwasm [interp only]
run: |
cd product-mini/platforms/windows
mkdir build && cd build
cmake .. -DWAMR_BUILD_AOT=0
cmake --build . --config Release --parallel 4
cd .. && rm -force -r build
- name: Build iwasm [tail call]
run: |
cd product-mini/platforms/windows
mkdir build && cd build
cmake .. -DWAMR_BUILD_TAIL_CALL=1
cmake --build . --config Release --parallel 4
cd .. && rm -force -r build
- name: Build iwasm [custom name section]
run: |
cd product-mini/platforms/windows
mkdir build && cd build
cmake .. -DWAMR_BUILD_CUSTOM_NAME_SECTION=1
cmake --build . --config Release --parallel 4
cd .. && rm -force -r build
- name: Build iwasm [disable hardware boundary check]
run: |
cd product-mini/platforms/windows
mkdir build && cd build
cmake .. -DWAMR_DISABLE_HW_BOUND_CHECK=1
cmake --build . --config Release --parallel 4
cd .. && rm -force -r build
- name: Build iwasm [reference types]
run: |
cd product-mini/platforms/windows
mkdir build && cd build
cmake .. -DWAMR_BUILD_REF_TYPES=1
cmake --build . --config Release --parallel 4
cd .. && rm -force -r build
- name: Build iwasm [128-bit SIMD]
run: |
cd product-mini/platforms/windows
mkdir build && cd build
cmake .. -DWAMR_BUILD_SIMD=1
cmake --build . --config Release --parallel 4
cd .. && rm -force -r build
- name: Build iwasm [source debugger]
run: |
cd product-mini/platforms/windows
mkdir build && cd build
cmake .. -DWAMR_BUILD_DEBUG_INTERP=1
cmake --build . --config Release --parallel 4
cd .. && rm -force -r build
- name: Build iwasm [lib pthread]
run: |
cd product-mini/platforms/windows
mkdir build && cd build
cmake .. -DWAMR_BUILD_LIB_PTHREAD=1
cmake --build . --config Release --parallel 4
cd .. && rm -force -r build
- name: Build iwasm [lib wasi-thread]
run: |
cd product-mini/platforms/windows
mkdir build && cd build
cmake .. -DWAMR_BUILD_LIB_WASI_THREADS=1
cmake --build . --config Release --parallel 4
cd .. && rm -force -r build

View File

@ -52,7 +52,7 @@ jobs:
#
#
if [[ -z ${new_ver} ]]; then
echo "::error::please indicate the right semantic version in build-scripts/config_common.cmake"
echo "::error::please indicate the right semantic version in core/version.h"
echo "new_ver=''" >> "$GITHUB_OUTPUT"
echo "new_tag=''" >> "$GITHUB_OUTPUT"
exit 1

View File

@ -49,36 +49,56 @@ jobs:
draft: false
body: ${{ env.RELEASE_NOTE }}
#
# LLVM_LIBRARIES
build_llvm_libraries_on_ubuntu_2004:
needs: [create_tag, create_release]
uses: ./.github/workflows/build_llvm_libraries.yml
with:
os: "ubuntu-20.04"
arch: "X86"
build_llvm_libraries_on_ubuntu_2204:
needs: [create_tag, create_release]
uses: ./.github/workflows/build_llvm_libraries.yml
with:
os: "ubuntu-22.04"
arch: "X86"
build_llvm_libraries_on_macos:
needs: [create_tag, create_release]
uses: ./.github/workflows/build_llvm_libraries.yml
with:
os: "macos-latest"
arch: "X86"
#
# WAMRC
release_wamrc_on_ubuntu_2004:
needs: [create_tag, create_release]
needs: [create_tag, create_release, build_llvm_libraries_on_ubuntu_2004]
uses: ./.github/workflows/build_wamrc.yml
with:
# can't take an env variable here
llvm_cache_key: ubuntu-20.04-build-llvm_libraries_ex
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2004.outputs.cache_key }}
release: true
runner: ubuntu-20.04
upload_url: ${{ needs.create_release.outputs.upload_url }}
ver_num: ${{ needs.create_tag.outputs.new_ver}}
release_wamrc_on_ubuntu_2204:
needs: [create_tag, create_release]
needs: [create_tag, create_release, build_llvm_libraries_on_ubuntu_2204 ]
uses: ./.github/workflows/build_wamrc.yml
with:
# can't take an env variable here
llvm_cache_key: ubuntu-22.04-build-llvm_libraries_ex
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2204.outputs.cache_key }}
release: true
runner: ubuntu-22.04
upload_url: ${{ needs.create_release.outputs.upload_url }}
ver_num: ${{ needs.create_tag.outputs.new_ver }}
release_wamrc_on_ubuntu_macos:
needs: [create_tag, create_release]
needs: [create_tag, create_release, build_llvm_libraries_on_macos]
uses: ./.github/workflows/build_wamrc.yml
with:
# can't take an env variable here
llvm_cache_key: macos-latest-build-llvm_libraries_ex
llvm_cache_key: ${{ needs.build_llvm_libraries_on_macos.outputs.cache_key }}
release: true
runner: macos-latest
upload_url: ${{ needs.create_release.outputs.upload_url }}
@ -87,28 +107,31 @@ jobs:
#
# IWASM
release_iwasm_on_ubuntu_2004:
needs: [create_tag, create_release]
needs: [create_tag, create_release, build_llvm_libraries_on_ubuntu_2004]
uses: ./.github/workflows/build_iwasm_release.yml
with:
cwd: product-mini/platforms/linux
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2004.outputs.cache_key }}
runner: ubuntu-20.04
upload_url: ${{ needs.create_release.outputs.upload_url }}
ver_num: ${{ needs.create_tag.outputs.new_ver}}
release_iwasm_on_ubuntu_2204:
needs: [create_tag, create_release]
needs: [create_tag, create_release, build_llvm_libraries_on_ubuntu_2204]
uses: ./.github/workflows/build_iwasm_release.yml
with:
cwd: product-mini/platforms/linux
llvm_cache_key: ${{ needs.build_llvm_libraries_on_ubuntu_2204.outputs.cache_key }}
runner: ubuntu-22.04
upload_url: ${{ needs.create_release.outputs.upload_url }}
ver_num: ${{ needs.create_tag.outputs.new_ver}}
release_iwasm_on_macos:
needs: [create_tag, create_release]
needs: [create_tag, create_release, build_llvm_libraries_on_macos]
uses: ./.github/workflows/build_iwasm_release.yml
with:
cwd: product-mini/platforms/darwin
llvm_cache_key: ${{ needs.build_llvm_libraries_on_macos.outputs.cache_key }}
runner: macos-latest
upload_url: ${{ needs.create_release.outputs.upload_url }}
ver_num: ${{ needs.create_tag.outputs.new_ver}}

View File

@ -101,7 +101,7 @@ jobs:
- name: Enable WAMR for NuttX
run: |
find nuttx/boards -name defconfig | xargs sed -i '$a\CONFIG_INTERPRETERS_WAMR=y\nCONFIG_INTERPRETERS_WAMR_AOT=y\nCONFIG_INTERPRETERS_WAMR_FAST=y\nCONFIG_INTERPRETERS_WAMR_LOG=y\nCONFIG_INTERPRETERS_WAMR_LIBC_BUILTIN=y\nCONFIG_INTERPRETERS_WAMR_REF_TYPES=y\nCONFIG_INTERPRETERS_WAMR_ENABLE_SPEC_TEST=y\nCONFIG_INTERPRETERS_WAMR_SHARED_MEMORY=y\nCONFIG_INTERPRETERS_WAMR_BULK_MEMORY=y\n'
find nuttx/boards -name defconfig | xargs sed -i '$a\CONFIG_INTERPRETERS_WAMR=y\nCONFIG_INTERPRETERS_WAMR_STACKSIZE=32768\nCONFIG_INTERPRETERS_WAMR_AOT=y\nCONFIG_INTERPRETERS_WAMR_FAST=y\nCONFIG_INTERPRETERS_WAMR_LOG=y\nCONFIG_INTERPRETERS_WAMR_LIBC_BUILTIN=y\nCONFIG_INTERPRETERS_WAMR_REF_TYPES=y\nCONFIG_INTERPRETERS_WAMR_ENABLE_SPEC_TEST=y\nCONFIG_INTERPRETERS_WAMR_SHARED_MEMORY=y\nCONFIG_INTERPRETERS_WAMR_BULK_MEMORY=y\n'
find nuttx/boards -name defconfig | xargs sed -i '$a\CONFIG_EOL_IS_LF=y\nCONFIG_ARM_SEMIHOSTING_HOSTFS=y\nCONFIG_ARM_SEMIHOSTING_HOSTFS_CACHE_COHERENCE=y\nCONFIG_RISCV_SEMIHOSTING_HOSTFS=y\nCONFIG_FS_HOSTFS=y\nCONFIG_LIBC_FLOATINGPOINT=y\n'
- name: Build wamrc

172
README.md
View File

@ -1,5 +1,5 @@
WebAssembly Micro Runtime
=========================
# WebAssembly Micro Runtime
**A [Bytecode Alliance][BA] project**
@ -7,51 +7,37 @@ WebAssembly Micro Runtime
**[Guide](https://wamr.gitbook.io/)**  **[Website](https://bytecodealliance.github.io/wamr.dev)**  **[Chat](https://bytecodealliance.zulipchat.com/#narrow/stream/290350-wamr)**
[Build WAMR](./doc/build_wamr.md) | [Build AOT Compiler](./README.md#build-wamrc-aot-compiler) | [Embed WAMR](./doc/embed_wamr.md) | [Export Native API](./doc/export_native_api.md) | [Build WASM Apps](./doc/build_wasm_app.md) | [Samples](./README.md#samples)
[Build WAMR](./doc/build_wamr.md) | [Build AOT Compiler](./wamr-compiler/README.md) | [Embed WAMR](./doc/embed_wamr.md) | [Export Native API](./doc/export_native_api.md) | [Build Wasm Apps](./doc/build_wasm_app.md) | [Samples](./README.md#samples)
WebAssembly Micro Runtime (WAMR) is a lightweight standalone WebAssembly (WASM) runtime with small footprint, high performance and highly configurable features for applications cross from embedded, IoT, edge to Trusted Execution Environment (TEE), smart contract, cloud native and so on. It includes a few parts as below:
- The [**"iwasm" VM core**](./README.md#iwasm-vm-core) to run WASM applications, supporting interpreter mode, AOT mode (Ahead-of-Time compilation) and JIT modes (Just-in-Time compilation, LLVM JIT and Fast JIT are supported)
WebAssembly Micro Runtime (WAMR) is a lightweight standalone WebAssembly (Wasm) runtime with small footprint, high performance and highly configurable features for applications cross from embedded, IoT, edge to Trusted Execution Environment (TEE), smart contract, cloud native and so on. It includes a few parts as below:
- [**VMcore**](./core/iwasm/): A set of runtime libraries for loading and running Wasm modules. It supports several execution modes including interpreter, Ahead-of-Time compilation(AoT) and Just-in-Time compilation (JIT). The WAMR supports two JIT tiers - Fast JIT, LLVM JIT, and dynamic tier-up from Fast JIT to LLVM JIT.
- [**iwasm**](./product-mini/): The executable binary built with WAMR VMcore supports WASI and command line interface.
- [**wamrc**](./wamr-compiler/): The AOT compiler to compile Wasm file into AOT file
- Useful components and tools for building real solutions with WAMR vmcore:
- [App-framework](./core/app-framework/README.md): A framework for supporting APIs for the Wasm applications
- [App-manager](./core/app-mgr/README.md): a framework for dynamical loading the Wasm module remotely
- [WAMR-IDE](./test-tools/wamr-ide): An experimental VSCode extension for developping WebAssembly applications with C/C++
- The [**"wamrc" AOT compiler**](./README.md#build-wamrc-aot-compiler) to compile WASM file into AOT file for best performance and smaller runtime footprint, which is run by "iwasm" VM Core
- The [**application framework**](./README.md#application-framework) and the supporting APIs for the WASM applications
- The [**dynamic management**](./README.md#remote-application-management) of the WASM applications
Getting started
==================
- [Build iwasm VM core](./doc/build_wamr.md) on [Linux](./doc/build_wamr.md#linux), [SGX](./doc/linux_sgx.md), [MacOS](./doc/build_wamr.md#macos) and [Windows](./doc/build_wamr.md#windows), and [Build wamrc AOT compiler](./README.md#build-wamrc-aot-compiler)
- [Embed WAMR into host applications](./doc/embed_wamr.md)
- [Embed into C/C++](./doc/embed_wamr.md), [Embed into Python](./language-bindings/python), [Embed into Go](./language-bindings/go)
- [Register native APIs for WASM applications](./doc/export_native_api.md)
- [Build WASM applications](./doc/build_wasm_app.md)
- [Port WAMR to a new platform](./doc/port_wamr.md)
- [Benchmarks](./tests/benchmarks) and [Samples](./samples)
- [VS Code development container](./doc/devcontainer.md)
iwasm VM core
=========================
### Key features
- Full compliant to the W3C WASM MVP
- Full compliant to the W3C Wasm MVP
- Small runtime binary size (~85K for interpreter and ~50K for AOT) and low memory usage
- Near to native speed by AOT and JIT
- Self-implemented AOT module loader to enable AOT working on Linux, Windows, MacOS, Android, SGX and MCU systems
- Choices of WASM application libc support: the built-in libc subset for the embedded environment or [WASI](https://github.com/WebAssembly/WASI) for the standard libc
- Choices of Wasm application libc support: the built-in libc subset for the embedded environment or [WASI](https://github.com/WebAssembly/WASI) for the standard libc
- [The simple C APIs to embed WAMR into host environment](./doc/embed_wamr.md), see [how to integrate WAMR](./doc/embed_wamr.md) and the [API list](./core/iwasm/include/wasm_export.h)
- [The mechanism to export native APIs to WASM applications](./doc/export_native_api.md), see [how to register native APIs](./doc/export_native_api.md)
- [The mechanism to export native APIs to Wasm applications](./doc/export_native_api.md), see [how to register native APIs](./doc/export_native_api.md)
- [Multiple modules as dependencies](./doc/multi_module.md), ref to [document](./doc/multi_module.md) and [sample](samples/multi-module)
- [Multi-thread, pthread APIs and thread management](./doc/pthread_library.md), ref to [document](./doc/pthread_library.md) and [sample](samples/multi-thread)
- [wasi-threads](./doc/pthread_impls.md#wasi-threads-new), ref to [document](./doc/pthread_impls.md#wasi-threads-new) and [sample](samples/wasi-threads)
- [Linux SGX (Intel Software Guard Extension) support](./doc/linux_sgx.md), ref to [document](./doc/linux_sgx.md)
- [Source debugging support](./doc/source_debugging.md), ref to [document](./doc/source_debugging.md)
- [WAMR-IDE (Experimental)](./test-tools/wamr-ide) to develop WebAssembly applications with build, run and debug support, ref to [document](./test-tools/wamr-ide)
- [XIP (Execution In Place) support](./doc/xip.md), ref to [document](./doc/xip.md)
- [Berkeley/Posix Socket support](./doc/socket_api.md), ref to [document](./doc/socket_api.md) and [sample](./samples/socket-api)
- [Multi-tier JIT](./product-mini#linux) and [Running mode control](https://bytecodealliance.github.io/wamr.dev/blog/introduction-to-wamr-running-modes/)
- Language bindings: [Go](./language-bindings/go/README.md), [Python](./language-bindings/python/README.md)
### WASM post-MVP features
### Wasm post-MVP features
- [wasm-c-api](https://github.com/WebAssembly/wasm-c-api), ref to [document](doc/wasm_c_api.md) and [sample](samples/wasm-c-api)
- [128-bit SIMD](https://github.com/WebAssembly/simd), ref to [samples/workload](samples/workload)
- [Reference Types](https://github.com/WebAssembly/reference-types), ref to [document](doc/ref_types.md) and [sample](samples/ref-types)
@ -60,111 +46,40 @@ iwasm VM core
- [Multi-value](https://github.com/WebAssembly/multi-value), [Tail-call](https://github.com/WebAssembly/tail-call), [Shared memory](https://github.com/WebAssembly/threads/blob/main/proposals/threads/Overview.md#shared-linear-memory)
### Supported architectures and platforms
The iwasm supports the following architectures:
The WAMR VMcore supports the following architectures:
- X86-64, X86-32
- ARM, THUMB (ARMV7 Cortex-M7 and Cortex-A15 are tested)
- AArch64 (Cortex-A57 and Cortex-A53 are tested)
- RISCV64, RISCV32 (RISC-V LP64 and RISC-V LP64D are tested)
- XTENSA, MIPS, ARC
The following platforms are supported, click each link below for how to build iwasm on that platform. Refer to [WAMR porting guide](./doc/port_wamr.md) for how to port WAMR to a new platform.
The following platforms are supported, click each link below for how to build iwasm on that platform. Refer to [WAMR porting guide](./doc/port_wamr.md) for how to port WAMR to a new platform.
- [Linux](./product-mini/README.md#linux), [Linux SGX (Intel Software Guard Extension)](./doc/linux_sgx.md), [MacOS](./product-mini/README.md#macos), [Android](./product-mini/README.md#android), [Windows](./product-mini/README.md#windows), [Windows (MinGW)](./product-mini/README.md#mingw)
- [Zephyr](./product-mini/README.md#zephyr), [AliOS-Things](./product-mini/README.md#alios-things), [VxWorks](./product-mini/README.md#vxworks), [NuttX](./product-mini/README.md#nuttx), [RT-Thread](./product-mini/README.md#RT-Thread), [ESP-IDF](./product-mini/README.md#esp-idf)
- [Linux](./doc/build_wamr.md#linux), [Linux SGX (Intel Software Guard Extension)](./doc/linux_sgx.md), [MacOS](./doc/build_wamr.md#macos), [Android](./doc/build_wamr.md#android), [Windows](./doc/build_wamr.md#windows), [Windows (MinGW)](./doc/build_wamr.md#mingw)
- [Zephyr](./doc/build_wamr.md#zephyr), [AliOS-Things](./doc/build_wamr.md#alios-things), [VxWorks](./doc/build_wamr.md#vxworks), [NuttX](./doc/build_wamr.md#nuttx), [RT-Thread](./doc/build_wamr.md#RT-Thread), [ESP-IDF](./doc/build_wamr.md#esp-idf)
### Build iwasm VM core (mini product)
## Getting started
- [Build VM core](./doc/build_wamr.md) and [Build wamrc AOT compiler](./wamr-compiler/README.md)
- [Build iwasm (mini product)](./product-mini/README.md): [Linux](./product-mini/README.md#linux), [SGX](./doc/linux_sgx.md), [MacOS](./product-mini/README.md#macos) and [Windows](./product-mini/README.md#windows)
- [Embed into C/C++](./doc/embed_wamr.md), [Embed into Python](./language-bindings/python), [Embed into Go](./language-bindings/go)
- [Register native APIs for Wasm applications](./doc/export_native_api.md)
- [Build wamrc AOT compiler](./wamr-compiler/README.md)
- [Build Wasm applications](./doc/build_wasm_app.md)
- [Port WAMR to a new platform](./doc/port_wamr.md)
- [VS Code development container](./doc/devcontainer.md)
- [Samples](./samples) and [Benchmarks](./tests/benchmarks)
WAMR supports building the iwasm VM core only (no app framework) to the mini product. The WAMR mini product takes the WASM application file name or AOT file name as input and then executes it. For the detailed procedure, please see **[build WAMR VM core](./doc/build_wamr.md)** and **[build and run WASM application](./doc/build_wasm_app.md)**. Also we can click the link of each platform above to see how to build iwasm on it.
### Build wamrc AOT compiler
Both wasm binary file and AOT file are supported by iwasm. The wamrc AOT compiler is to compile wasm binary file to AOT file which can also be run by iwasm. Execute following commands to build **wamrc** compiler for Linux:
```shell
cd wamr-compiler
./build_llvm.sh (or "./build_llvm_xtensa.sh" to support xtensa target)
mkdir build && cd build
cmake .. (or "cmake .. -DWAMR_BUILD_PLATFORM=darwin" for MacOS)
make
# wamrc is generated under current directory
```
For **Windows**
```shell
cd wamr-compiler
python build_llvm.py
mkdir build && cd build
cmake ..
cmake --build . --config Release
# wamrc.exe is generated under .\Release directory
```
### Performance and Footprint
- [Performance and footprint data](https://github.com/bytecodealliance/wasm-micro-runtime/wiki/Performance): checkout [here](https://github.com/bytecodealliance/wasm-micro-runtime/wiki/Performance) for the performance and footprint data
- [Memory usage tunning](./doc/memory_tune.md): checkout [here](./doc/memory_tune.md) for the memory model and how to tune the memory usage
- [Memory usage profiling](./doc/build_wamr.md#enable-memory-profiling-experiment): checkout [here](./doc/build_wamr.md#enable-memory-profiling-experiment) for how to profile the memory usage
### Performance and memory
- [Blog: The WAMR memory model](https://bytecodealliance.github.io/wamr.dev/blog/the-wamr-memory-model/)
- [Blog: Understand WAMR heaps](https://bytecodealliance.github.io/wamr.dev/blog/understand-the-wamr-heaps/) and [stacks](https://bytecodealliance.github.io/wamr.dev/blog/understand-the-wamr-stacks/)
- [Blog: Introduction to WAMR running modes](https://bytecodealliance.github.io/wamr.dev/blog/introduction-to-wamr-running-modes/)
- [Memory usage tunning](./doc/memory_tune.md): the memory model and how to tune the memory usage
- [Memory usage profiling](./doc/build_wamr.md#enable-memory-profiling-experiment): how to profile the memory usage
- [Benchmarks](./tests/benchmarks): checkout these links for how to run the benchmarks: [PolyBench](./tests/benchmarks/polybench), [CoreMark](./tests/benchmarks/coremark), [Sightglass](./tests/benchmarks/sightglass), [JetStream2](./tests/benchmarks/jetstream)
- [Performance and footprint data](https://github.com/bytecodealliance/wasm-micro-runtime/wiki/Performance): the performance and footprint data
### User cases
WAMR is widely used in a lot areas, here are some cases:
- [Hyperledger Private Data Objects](https://github.com/hyperledger-labs/private-data-objects/blob/main/common/interpreter/wawaka_wasm/README.md)
- [Inclavare Containers](https://github.com/alibaba/inclavare-containers)
- [Fassm](https://github.com/faasm/faasm)
- [Waft](https://developer.aliyun.com/article/787582)
- [Envoy Proxy](https://github.com/envoyproxy/envoy)
- [Apache Teaclave](https://teaclave.apache.org/docs/executing-wasm)
Application framework
===================================
By using the iwasm VM core, we are flexible to build different application frameworks for the specific domains, although it would take quite some effort.
The WAMR has offered a comprehensive framework for programming WASM applications for device and IoT usages. The framework supports running multiple applications, that are based on the event driven programming model. Here are the supporting API sets by the [WAMR application framework library](./doc/wamr_api.md) :
- Timer, Inter-app communication (request/response and pub/sub), Sensor, Connectivity and data transmission, 2D graphic UI
Browse the folder [core/app-framework](./core/app-framework) for how to extend the application framework.
# Remote application management
The WAMR application manager supports [remote application management](./core/app-mgr) from the host environment or the cloud through any physical communications such as TCP, UPD, UART, BLE, etc. Its modular design makes it able to support application management for different managed runtimes.
The tool [host_tool](./test-tools/host-tool) communicates to the WAMR app manager for installing/uninstalling the WASM applications on companion chip from the host system. And the [IoT App Store Demo](./test-tools/IoT-APP-Store-Demo/) shows the conception of remotely managing the device applications from the cloud.
WAMR SDK
==========
Usually there are two tasks for integrating the WAMR into a particular project:
- Select what WAMR components (vmcore, libc, app-mgr, app-framework components) to be integrated, and get the associated source files added into the project building configuration
- Generate the APP SDK for developing the WASM apps on the selected libc and framework components
The **[WAMR SDK](./wamr-sdk)** tools is helpful to finish the two tasks quickly. It supports menu configuration for selecting WAMR components and builds the WAMR to a SDK package that includes **runtime SDK** and **APP SDK**. The runtime SDK is used for building the native application and the APP SDK should be shipped to WASM application developers.
Samples
=================
The WAMR [samples](./samples) integrate the iwasm VM core, application manager and selected application framework components.
- [**basic**](./samples/basic): Demonstrating how to use runtime exposed API's to call WASM functions, how to register native functions and call them, and how to call WASM function from native function.
- **[simple](./samples/simple/README.md)**: The runtime is integrated with most of the WAMR APP libraries, and a few WASM applications are provided for testing the WAMR APP API set. It uses **built-in libc** and executes apps in **interpreter** mode by default.
- **[file](./samples/file/README.md)**: Demonstrating the supported file interaction API of WASI. This sample can also demonstrate the SGX IPFS (Intel Protected File System), enabling an enclave to seal and unseal data at rest.
- **[littlevgl](./samples/littlevgl/README.md)**: Demonstrating the graphic user interface application usage on WAMR. The whole [LVGL](https://github.com/lvgl/lvgl) 2D user graphic library and the UI application are built into WASM application. It uses **WASI libc** and executes apps in **AOT mode** by default.
- **[gui](./samples/gui/README.md)**: Move the [LVGL](https://github.com/lvgl/lvgl) library into the runtime and define a WASM application interface by wrapping the littlevgl API. It uses **WASI libc** and executes apps in **interpreter** mode by default.
- **[multi-thread](./samples/multi-thread/)**: Demonstrating how to run wasm application which creates multiple threads to execute wasm functions concurrently, and uses mutex/cond by calling pthread related API's.
- **[spawn-thread](./samples/spawn-thread)**: Demonstrating how to execute wasm functions of the same wasm application concurrently, in threads created by host embedder or runtime, but not the wasm application itself.
- **[multi-module](./samples/multi-module)**: Demonstrating the [multiple modules as dependencies](./doc/multi_module.md) feature which implements the [load-time dynamic linking](https://webassembly.org/docs/dynamic-linking/).
- **[ref-types](./samples/ref-types)**: Demonstrating how to call wasm functions with argument of externref type introduced by [reference types proposal](https://github.com/WebAssembly/reference-types).
- **[wasm-c-api](./samples/wasm-c-api/README.md)**: Demonstrating how to run some samples from [wasm-c-api proposal](https://github.com/WebAssembly/wasm-c-api) and showing the supported API's.
- **[socket-api](./samples/socket-api/README.md)**: Demonstrating how to run wasm tcp server and tcp client applications, and how they communicate with each other.
- **[workload](./samples/workload/README.md)**: Demonstrating how to build and run some complex workloads, e.g. tensorflow-lite, XNNPACK, wasm-av1, meshoptimizer and bwa.
- **[sgx-ra](./samples/sgx-ra/README.md)**: Demonstrating how to execute Remote Attestation on SGX with [librats](https://github.com/inclavare-containers/librats), which enables mutual attestation with other runtimes or other entities that support librats to ensure that each is running within the TEE.
Project Technical Steering Committee
@ -191,11 +106,8 @@ use, modify, distribute and sell your own products based on WAMR.
Any contributions you make will be under the same license.
# More resources
Check out the [Wiki documents ](https://github.com/bytecodealliance/wasm-micro-runtime/wiki) for more resources:
- [Community news and events](https://github.com/bytecodealliance/wasm-micro-runtime/wiki/Events)
- [Roadmap](https://github.com/bytecodealliance/wasm-micro-runtime/wiki/Roadmap)
- [WAMR TSC meetings](https://github.com/bytecodealliance/wasm-micro-runtime/wiki/TSC-meeting)
- Technical documents
- [Who use WAMR?](https://github.com/bytecodealliance/wasm-micro-runtime/wiki)
- [WAMR Blogs](https://bytecodealliance.github.io/wamr.dev/blog/)
- [Community news and events](https://bytecodealliance.github.io/wamr.dev/events/)
- [WAMR TSC meetings](https://github.com/bytecodealliance/wasm-micro-runtime/wiki/TSC-meeting-notes)

View File

@ -1,88 +1,219 @@
## WAMR-1.2.1
### Breaking Changes
### New Features
### Bug Fixes
- libc-wasi/posix.c: Fix POLL{RD,WR}NORM in uClibc (#2069)
- Fix bh_assert for 64-bit platforms (#2071)
- wamr-ide: Modify Dockerfile to update base image version and fix build issue (#2068)
- Fix module_malloc/module_free issues (#2072)
- Fix use after free when dumping call stack (#2084)
- Fix compilation errors of workload xnnpack and meshoptimizer (#2081)
- Fix typo in Fast JIT's BUILD_COND_BR Macro (#2092)
- Fix sanitizer pointer overflow warning when perform pointer arithmetic (#2098)
- Update sample workload tensorflow (#2101)
- Fix ref.func forward-declared function check (#2099)
- Fix interpreter read linear memory size for multi-threading (#2088)
### Enhancements
- Limit the minimal size of bh_hashmap (#2073)
- Bump tensorflow to 2.11.1 in /core/iwasm/libraries/wasi-nn/test (#2061)
- Bump tensorflow to 2.11.1 in install_tensorflow.sh (#2076)
- Add support for universal binaries on OSX (#2060)
- Update documents (#2100)
### Others
- spectest/nuttx: Increase stack size of iwasm task (#2082)
- ci: Refactor windows build definition (#2087)
- ci: Enable WASI threads in CI (#2086)
- Use wasi-sdk-20 to build wasi-threads cases in CI (#2095)
---
## WAMR-1.2.0
### Breaking Changes
### New Features
- Implement two-level Multi-tier JIT engine: tier-up from Fast JIT to LLVM JIT to get quick cold startup and better performance
- Enable running mode control for runtime, wasm module instance and iwasm
- Implement wasi-threads feature
- Upgrade toolkits: upgrade to llvm-15.0, wasi-sdk-19.0, emsdk-3.1.28 and so on
- Port WAMR to the FreeBSD platform
- Refactor wasi-nn to simplify the support for multiple frameworks
- wasi-nn: Enable GPU support
- wasi-nn: Support multiple TFLite models
- Add WAMR API bindings in Python
- Add libsodium benchmark
### Bug Fixes
- Fix wasm-c-api import func link issue in wasm_instance_new
- Fix watchpoint segfault when using debug interp without server
- libc-wasi: Fix spurious poll timeout
- Fix typo verify_module in aot_compiler.c
- Fix failure about preopen of reactor modules
- Fix equal check in AOT XIP float cmp intrinsic
- Fix issue of resolving func name in custom name section
- Fix go language binding build on macos arm64
- Prevent undefined behavior from c_api_func_imports == NULL
- Fix potential block issue in source debugger
- SGX IPFS: Fix a segfault and support seeking beyond the end of files while using SEEK_CUR/SEEK_END
- Fix undef error about WAMR_BUILD_MEMORY_PROFILING
- Fix jit memory overwritten after instance deinstantiate
- Fix stack alignment issue on ia32
- Fix explicit casts and types in espidf_socket.c
- Fix potential integer overflow issue in wasm-c-api
- Fix libc-wasi build failure when using clang
- Fix wamrapi python binding for darwin
- Fix getting port issue in posix os_socket_bind
- Fix key error in build_llvm.py
- nuttx: Add missing pthread.h header
- Fix os_socket_addr_resolve() for IPv6
- Enhance/Fix sample socket-api and workload
- Fix fast-jit build error
- Fix dead lock in source debugger
- fix debugger: Set termination flags also when in debug mode
### Enhancements
- Add WAMR-IDE vscode extension to the Visual Studio Marketplace
- Refine Windows thread waiting list operations
- Improve wasm-c-api instantiation-time linking
- Enable platform support for esp-idf v5.0.1
- Readme refactoring
- Add architecture diagram for wasm function
- Add architecture document for wasm export
- Add architecture diagram for wasm globals and classic-interp stack frame
- Use boringssl instead of openssl to implement wasm cache loading
- Implement i32.rem_s and i32.rem_u intrinsic
- Perfect the codebase for wamr-ide
- Remove unnecessary ret value control when spec test is enabled
- Use float version library routine for XIP aot_intrinsic_xxx APIs
- Register missing symbols for f32 to 64 bit integer conversion
- Report error in instantiation when meeting unlinked import globals
- Add more types and APIs for attr_container
- Simplify fcmp intrinsic logic for AOT/XIP
- Add some missing macros for int literals in wamr-sdk libc-builtin-sysroot stdint.h
- nuttx: Mock socket APIs if NET is disabled
- Main thread spread exception when thread-mgr is enabled
- Implement opcode atomic.wait and atomic.notify for Fast JIT
- Add docker images auto check and setup support for WAMR-IDE
- Make memory profiling show native stack usage
- Enable gcc-4.8 compilation
- Enable specifying out-of-source platform configuration cmake file
- Add gh api call for fetching llvm version (#1942) Fixes
- Don't terminate other threads when create thread failed
- Modify poll_oneoff in libc-wasi to make it interruptible
- Expose wasm_runtime_call_indirect
- Make a workaround for EGO when fstat returns NOT_SUPPORT
- Re-org calling post instantiation functions
- Enable custom llvm build flags
- support SSH for git clone llvm
- Support dump call stack on exception and dump call stack on nuttx
- Update document for source debugging
- Document some info about estimating memory usage
- Document the summary of two pthread implementations
- Refine aot compiler check suspend_flags and fix issue of multi-tier jit
### Others
- Enable XIP in CI daily test
- Integrate wasi test suite to wamr-test-suites and CI
- Add CI for wasi-threads tests
- Update CIs and documents to make naming of generated binaries consist
- Enable CI wasi test suite for x86-32 classic/fast interpreter
- CI: Enable libc-wasi compilation test on NuttX
- CI: Enable Multi-tier JIT by default for released iwasm binary
- Enable CI build for gcc 4.8 on linux
---
## WAMR-1.1.2
### Breaking Changes
Remove the LLVM MCJIT mode, replace it with LLVM ORC JIT eager mode
Add option to pass user data to the allocator functions of RuntimeInitArgs
Change how iwasm returns:
return 1 if an exception was thrown, else
return the wasi exit code if the wasm app is a wasi app, else
keep the same behavior as before
Enable bulk memory by default
- Remove the LLVM MCJIT mode, replace it with LLVM ORC JIT eager mode
- Add option to pass user data to the allocator functions of RuntimeInitArgs
- Change how iwasm returns:
- return 1 if an exception was thrown, else
- return the wasi exit code if the wasm app is a wasi app, else
- keep the same behavior as before
- Enable bulk memory by default
### New Features
Add control for the native stack check with hardware trap
Add memory watchpoint support to debugger
Add wasm_module_obtain() to clone wasm_module_t
Implement Fast JIT dump call stack and perf profiling
esp-idf: Add socket support for esp-idf platform
- Add control for the native stack check with hardware trap
- Add memory watchpoint support to debugger
- Add wasm_module_obtain() to clone wasm_module_t
- Implement Fast JIT dump call stack and perf profiling
- esp-idf: Add socket support for esp-idf platform
### Bug Fixes
Fix XIP issue caused by rem_s on RISC-V
Fix XIP issues of fp to int cast and int rem/div
Fix missing float cmp for XIP
Correct the arch name for armv7a on NuttX
Fix issue of restoring wasm operand stack
Fix issue of thumb relocation R_ARM_THM_MOVT_ABS
Fix fast jit issue of translating opcode i32.rem_s/i64.rem_s
Fix interp/fast-jit float min/max issues
Fix missing intrinsics for risc-v which were reported by spec test
wasm-c-api: Fix init/destroy thread env multiple times issue
Fix wasm-c-api import func link issue in wasm_instance_new
Fix sample ref-types/wasm-c-api build error with wat2wasm low version
Fix zephyr sample build errors
Fix source debugger error handling: continue executing when detached
Fix scenario where the timeout for atomic wait is set to negative number
Fix link cxx object file error when building wamrc for docker image
Fix XIP issue of handling 64-bit const in 32-bit target
- Fix XIP issue caused by rem_s on RISC-V
- Fix XIP issues of fp to int cast and int rem/div
- Fix missing float cmp for XIP
- Correct the arch name for armv7a on NuttX
- Fix issue of restoring wasm operand stack
- Fix issue of thumb relocation R_ARM_THM_MOVT_ABS
- Fix fast jit issue of translating opcode i32.rem_s/i64.rem_s
- Fix interp/fast-jit float min/max issues
- Fix missing intrinsics for risc-v which were reported by spec test
- wasm-c-api: Fix init/destroy thread env multiple times issue
- Fix wasm-c-api import func link issue in wasm_instance_new
- Fix sample ref-types/wasm-c-api build error with wat2wasm low version
- Fix zephyr sample build errors
- Fix source debugger error handling: continue executing when detached
- Fix scenario where the timeout for atomic wait is set to negative number
- Fix link cxx object file error when building wamrc for docker image
- Fix XIP issue of handling 64-bit const in 32-bit target
### Enhancements
Refactor the layout of interpreter and AOT module instance
Refactor LLVM JIT: remove mcjit and legacy pass manager, upgrade to ORCv2 JIT
Refine Fast JIT call indirect and call native process
Refine Fast JIT accessing memory/table instance and global data
Refine AOT exception check when function return
Enable source debugger reconnection
Add wasm_runtime_get_wasi_exit_code
linux-sgx: Use non-destructive modes for opening files using SGX IPFS
Add wasm_runtime_unregister_natives
Implement invokeNative asm code for MinGW
Add wamr Blog link and Gitbook link to readme
Remove unnecessary app heap memory clean operations to reduce process RSS
Normalize how the global heap pool is configured across iwasm apps
Refine the stack frame size check in interpreter
Enlarge the default wasm operand stack size to 64KB
Use cmake POSITION_INDEPENDENT_CODE instead of hardcoding -pie -fPIE
Implement R_ARM_THM_MOVT_[ABS|REPL] for thumb
Suppress the warnings when building with GCC11
samples/native-lib: Add a bit more complicated example
Add mutex initializer for wasm-c-api engine operations
XIP adaptation for xtensa platform
Update libuv version number
Remove an improper assumption when creating wasm_trap
Avoid initialize LLVM repeatedly
linux-sgx: Improve the remote attestation
linux-sgx: Improve the documentation of SGX-RA sample
linux-sgx: Allow to open files with arbitrary paths in the sandbox using IPFS
Avoid raising exception when debugging with VSCode
wamr-test-suites: Update runtest.py to support python3
Enable Nuttx spec test option and register aot symbols
Use wabt binary instead of building from source in spec test
nuttx: Enable ref types by Kconfig
Update xtensa LLVM version to 15.x
Add bh_print_proc_mem() to dump memory info of current process
Create trap for error message when wasm_instance_new fails
wamr-test-suites: Add support for ARM/RISCV by QEMU
Enable to compile WAMR on platforms that don't support IPV6
Fix warnings in the posix socket implementation
Update document for MacOS compilation
Install patched LLDB on vscode extension activation
Add ARM aeabi memcpy/memmove/memset symbols for AOT bulk memory ops
Enable wasm cache loading in wasm-c-api
- Refactor the layout of interpreter and AOT module instance
- Refactor LLVM JIT: remove mcjit and legacy pass manager, upgrade to ORCv2 JIT
- Refine Fast JIT call indirect and call native process
- Refine Fast JIT accessing memory/table instance and global data
- Refine AOT exception check when function return
- Enable source debugger reconnection
- Add wasm_runtime_get_wasi_exit_code
- linux-sgx: Use non-destructive modes for opening files using SGX IPFS
- Add wasm_runtime_unregister_natives
- Implement invokeNative asm code for MinGW
- Add wamr Blog link and Gitbook link to readme
- Remove unnecessary app heap memory clean operations to reduce process RSS
- Normalize how the global heap pool is configured across iwasm apps
- Refine the stack frame size check in interpreter
- Enlarge the default wasm operand stack size to 64KB
- Use cmake POSITION_INDEPENDENT_CODE instead of hardcoding -pie -fPIE
- Implement R_ARM_THM_MOVT_[ABS|REPL] for thumb
- Suppress the warnings when building with GCC11
- samples/native-lib: Add a bit more complicated example
- Add mutex initializer for wasm-c-api engine operations
- XIP adaptation for xtensa platform
- Update libuv version number
- Remove an improper assumption when creating wasm_trap
- Avoid initialize LLVM repeatedly
- linux-sgx: Improve the remote attestation
- linux-sgx: Improve the documentation of SGX-RA sample
- linux-sgx: Allow to open files with arbitrary paths in the sandbox using IPFS
- Avoid raising exception when debugging with VSCode
- wamr-test-suites: Update runtest.py to support python3
- Enable Nuttx spec test option and register aot symbols
- Use wabt binary instead of building from source in spec test
- nuttx: Enable ref types by Kconfig
- Update xtensa LLVM version to 15.x
- Add bh_print_proc_mem() to dump memory info of current process
- Create trap for error message when wasm_instance_new fails
- wamr-test-suites: Add support for ARM/RISCV by QEMU
- Enable to compile WAMR on platforms that don't support IPV6
- Fix warnings in the posix socket implementation
- Update document for MacOS compilation
- Install patched LLDB on vscode extension activation
- Add ARM aeabi memcpy/memmove/memset symbols for AOT bulk memory ops
- Enable wasm cache loading in wasm-c-api
### Others
Add CIs to release new version and publish binary files
Add more compilation groups of fast jit into CI
Enable spec test on nuttx and daily run it
- Add CIs to release new version and publish binary files
- Add more compilation groups of fast jit into CI
- Enable spec test on nuttx and daily run it
---
@ -228,3 +359,5 @@ Enable spec test on nuttx and daily run it
### Others
---

View File

@ -7,7 +7,7 @@ import * as console from './console'
import * as timer from './timer'
@external("env", "wasm_response_send")
declare function wasm_response_send(buffer: ArrayBuffer, size: i32): void;
declare function wasm_response_send(buffer: ArrayBuffer, size: i32): bool;
@external("env", "wasm_register_resource")
declare function wasm_register_resource(url: ArrayBuffer): void;
@ -492,4 +492,4 @@ export function on_response(buffer_offset: i32, size: i32): void {
trans.cb(resp);
}
}
}

View File

@ -223,6 +223,9 @@ endif ()
if (WAMR_BUILD_LIB_PTHREAD_SEMAPHORE EQUAL 1)
message (" Lib pthread semaphore enabled")
endif ()
if (WAMR_BUILD_LIB_WASI_THREADS EQUAL 1)
message (" Lib wasi-threads enabled")
endif ()
if (WAMR_BUILD_LIBC_EMCC EQUAL 1)
message (" Libc emcc enabled")
endif ()

View File

@ -51,7 +51,7 @@ endif()
idf_component_register(SRCS ${WAMR_RUNTIME_LIB_SOURCE} ${PLATFORM_SHARED_SOURCE}
INCLUDE_DIRS ${IWASM_DIR}/include ${UTILS_SHARED_DIR} ${PLATFORM_SHARED_DIR} ${PLATFORM_SHARED_DIR}/../include
REQUIRES pthread
REQUIRES pthread lwip esp_timer
)

View File

@ -1,28 +1,27 @@
# Application framework
By using the WAMR VM core, it is possible to build different application frameworks for specific domains, although doing so takes quite some effort.
WAMR offers a comprehensive framework for programming WASM applications for device and IoT usages. The framework supports running multiple applications based on the event-driven programming model. Here are the API sets supported by the [WAMR application framework library](../doc/wamr_api.md):
- Timer, Inter-app communication (request/response and pub/sub), Sensor, Connectivity and data transmission, 2D graphic UI
Browse the folder [core/app-framework](./app-framework) for how to extend the application framework.
## Directory structure
This folder "app-native-shared" is for the source files shared by both WASM APP and native runtime
This folder "app-native-shared" is for the source files shared by both WASM APP and native runtime
- The c files in this directory are compiled into both the WASM APP and runtime.
- The header files for distributing to SDK are placed in the "bi-inc" folder.
This folder "template" contains a pre-defined directory structure for a framework component. The developers can copy the template folder to create new components to the application framework.
This folder "template" contains a pre-defined directory structure for a framework component. The developers can copy the template folder to create new components to the application framework.
Every other subfolder is a framework component. Each component contains two library parts: **app and native**.
- The "base" component provides the timer API and inter-app communication support. It must be enabled if other components are selected.
- Under the "app" folder of a component, the subfolder "wa_inc" holds all header files that should be included by the WASM applications
## Application framework basic model
The app framework is built on top of two fundamental operations:
@ -116,10 +115,6 @@ Generally you should follow following steps to create a new component:
```
## Sensor component working flow
![](../../doc/pics/sensor_callflow.PNG)

View File

@ -1,10 +1,8 @@
# Remote application management
The WAMR application manager supports [remote application management](../core/app-mgr) from the host environment or the cloud through any physical communication channel such as TCP, UDP, UART, BLE, etc. Its modular design makes it able to support application management for different managed runtimes.
The tool [host_tool](../test-tools/host-tool) communicates with the WAMR app manager to install/uninstall WASM applications on the companion chip from the host system. The [IoT App Store Demo](../test-tools/IoT-APP-Store-Demo/) shows the concept of remotely managing device applications from the cloud.
<img src="../../doc/pics/wamr-arch.JPG" width="80%">

View File

@ -6,6 +6,6 @@ cd ${DEPS_ROOT}
echo "Downloading tensorflow in ${PWD}..."
git clone https://github.com/tensorflow/tensorflow.git tensorflow-src \
--branch v2.9.2
--branch v2.11.1
exit 0

View File

@ -0,0 +1,14 @@
# vmcore architecture
- [WAMR memory model overview](https://bytecodealliance.github.io/wamr.dev/blog/the-wamr-memory-model/)
## Wasm function
- [Wasm function architecture](./doc/wasm_function.MD)
## Exports
- [Wasm export architecture](./doc/wasm_exports.MD)
## globals
- [Wasm globals architecture](./doc/wasm_globals.MD)
## classic interpreter
- [classic interpreter](./doc/classic_interpreter.MD)

View File

@ -922,17 +922,17 @@ lookup_post_instantiate_func(AOTModuleInstance *module_inst,
static bool
execute_post_instantiate_functions(AOTModuleInstance *module_inst,
bool is_sub_inst)
bool is_sub_inst, WASMExecEnv *exec_env_main)
{
AOTModule *module = (AOTModule *)module_inst->module;
AOTFunctionInstance *initialize_func = NULL;
AOTFunctionInstance *post_inst_func = NULL;
AOTFunctionInstance *call_ctors_func = NULL;
#ifdef OS_ENABLE_HW_BOUND_CHECK
WASMModuleInstanceCommon *module_inst_main = NULL;
WASMExecEnv *exec_env_tls = NULL;
#ifdef OS_ENABLE_HW_BOUND_CHECK
WASMExecEnv *exec_env_tls = wasm_runtime_get_exec_env_tls();
#endif
WASMExecEnv *exec_env = NULL;
WASMExecEnv *exec_env = NULL, *exec_env_created = NULL;
bool ret = false;
#if WASM_ENABLE_LIBC_WASI != 0
@ -973,25 +973,46 @@ execute_post_instantiate_functions(AOTModuleInstance *module_inst,
return true;
}
#ifdef OS_ENABLE_HW_BOUND_CHECK
if (is_sub_inst) {
exec_env = exec_env_tls = wasm_runtime_get_exec_env_tls();
if (exec_env_tls) {
/* Temporarily replace exec_env_tls's module inst to current
module inst to avoid checking failure when calling the
wasm functions, and ensure that the exec_env's module inst
is the correct one. */
module_inst_main = exec_env_tls->module_inst;
exec_env_tls->module_inst = (WASMModuleInstanceCommon *)module_inst;
}
}
bh_assert(exec_env_main);
#ifdef OS_ENABLE_HW_BOUND_CHECK
bh_assert(exec_env_tls == exec_env_main);
(void)exec_env_tls;
#endif
if (!exec_env
&& !(exec_env =
wasm_exec_env_create((WASMModuleInstanceCommon *)module_inst,
module_inst->default_wasm_stack_size))) {
aot_set_exception(module_inst, "allocate memory failed");
return false;
exec_env = exec_env_main;
/* Temporarily replace parent exec_env's module inst to current
module inst to avoid checking failure when calling the
wasm functions, and ensure that the exec_env's module inst
is the correct one. */
module_inst_main = exec_env_main->module_inst;
exec_env->module_inst = (WASMModuleInstanceCommon *)module_inst;
}
else {
/* Try using the existing exec_env */
#ifdef OS_ENABLE_HW_BOUND_CHECK
exec_env = exec_env_tls;
#endif
#if WASM_ENABLE_THREAD_MGR != 0
if (!exec_env)
exec_env = wasm_clusters_search_exec_env(
(WASMModuleInstanceCommon *)module_inst);
#endif
if (!exec_env) {
if (!(exec_env = exec_env_created = wasm_exec_env_create(
(WASMModuleInstanceCommon *)module_inst,
module_inst->default_wasm_stack_size))) {
aot_set_exception(module_inst, "allocate memory failed");
return false;
}
}
else {
/* Temporarily replace exec_env's module inst with current
module inst to ensure that the exec_env's module inst
is the correct one. */
module_inst_main = exec_env->module_inst;
exec_env->module_inst = (WASMModuleInstanceCommon *)module_inst;
}
}
/* Execute start function for both main instance and sub instance */
@ -1029,17 +1050,17 @@ execute_post_instantiate_functions(AOTModuleInstance *module_inst,
ret = true;
fail:
#ifdef OS_ENABLE_HW_BOUND_CHECK
if (is_sub_inst && exec_env_tls) {
bh_assert(exec_env == exec_env_tls);
/* Restore the exec_env_tls's module inst */
exec_env_tls->module_inst = module_inst_main;
if (is_sub_inst) {
/* Restore the parent exec_env's module inst */
exec_env_main->module_inst = module_inst_main;
}
else {
if (module_inst_main)
/* Restore the existing exec_env's module inst */
exec_env->module_inst = module_inst_main;
if (exec_env_created)
wasm_exec_env_destroy(exec_env_created);
}
else
wasm_exec_env_destroy(exec_env);
#else
wasm_exec_env_destroy(exec_env);
#endif
return ret;
}
@ -1065,8 +1086,9 @@ check_linked_symbol(AOTModule *module, char *error_buf, uint32 error_buf_size)
}
AOTModuleInstance *
aot_instantiate(AOTModule *module, bool is_sub_inst, uint32 stack_size,
uint32 heap_size, char *error_buf, uint32 error_buf_size)
aot_instantiate(AOTModule *module, bool is_sub_inst, WASMExecEnv *exec_env_main,
uint32 stack_size, uint32 heap_size, char *error_buf,
uint32 error_buf_size)
{
AOTModuleInstance *module_inst;
const uint32 module_inst_struct_size =
@ -1206,7 +1228,8 @@ aot_instantiate(AOTModule *module, bool is_sub_inst, uint32 stack_size,
}
#endif
if (!execute_post_instantiate_functions(module_inst, is_sub_inst)) {
if (!execute_post_instantiate_functions(module_inst, is_sub_inst,
exec_env_main)) {
set_error_buf(error_buf, error_buf_size, module_inst->cur_exception);
goto fail;
}
@ -1321,8 +1344,9 @@ invoke_native_with_hw_bound_check(WASMExecEnv *exec_env, void *func_ptr,
uint16 result_count = func_type->result_count;
const uint8 *types = func_type->types;
#ifdef BH_PLATFORM_WINDOWS
const char *exce;
int result;
bool has_exception;
char exception[EXCEPTION_BUF_LEN];
#endif
bool ret;
@ -1356,14 +1380,14 @@ invoke_native_with_hw_bound_check(WASMExecEnv *exec_env, void *func_ptr,
void (*NativeFunc)(WASMExecEnv *, uint32) =
(void (*)(WASMExecEnv *, uint32))func_ptr;
NativeFunc(exec_env, argv[0]);
ret = aot_get_exception(module_inst) ? false : true;
ret = aot_copy_exception(module_inst, NULL) ? false : true;
}
else if (result_count == 1
&& types[param_count] == VALUE_TYPE_I32) {
uint32 (*NativeFunc)(WASMExecEnv *, uint32) =
(uint32(*)(WASMExecEnv *, uint32))func_ptr;
argv_ret[0] = NativeFunc(exec_env, argv[0]);
ret = aot_get_exception(module_inst) ? false : true;
ret = aot_copy_exception(module_inst, NULL) ? false : true;
}
else {
ret = wasm_runtime_invoke_native(exec_env, func_ptr, func_type,
@ -1377,8 +1401,8 @@ invoke_native_with_hw_bound_check(WASMExecEnv *exec_env, void *func_ptr,
argv_ret);
}
#ifdef BH_PLATFORM_WINDOWS
if ((exce = aot_get_exception(module_inst))
&& strstr(exce, "native stack overflow")) {
has_exception = aot_copy_exception(module_inst, exception);
if (has_exception && strstr(exception, "native stack overflow")) {
/* After a stack overflow, the stack was left
in a damaged state, let the CRT repair it */
result = _resetstkoflw();
@ -1541,7 +1565,7 @@ aot_call_function(WASMExecEnv *exec_env, AOTFunctionInstance *function,
func_type, NULL, NULL, argv, argc, argv);
#if WASM_ENABLE_DUMP_CALL_STACK != 0
if (aot_get_exception(module_inst)) {
if (aot_copy_exception(module_inst, NULL)) {
if (aot_create_call_stack(exec_env)) {
aot_dump_call_stack(exec_env, true, NULL, 0);
}
@ -1552,43 +1576,10 @@ aot_call_function(WASMExecEnv *exec_env, AOTFunctionInstance *function,
aot_free_frame(exec_env);
#endif
return ret && !aot_get_exception(module_inst) ? true : false;
return ret && !aot_copy_exception(module_inst, NULL) ? true : false;
}
}
bool
aot_create_exec_env_and_call_function(AOTModuleInstance *module_inst,
AOTFunctionInstance *func, unsigned argc,
uint32 argv[])
{
WASMExecEnv *exec_env = NULL, *existing_exec_env = NULL;
bool ret;
#if defined(OS_ENABLE_HW_BOUND_CHECK)
existing_exec_env = exec_env = wasm_runtime_get_exec_env_tls();
#elif WASM_ENABLE_THREAD_MGR != 0
existing_exec_env = exec_env =
wasm_clusters_search_exec_env((WASMModuleInstanceCommon *)module_inst);
#endif
if (!existing_exec_env) {
if (!(exec_env =
wasm_exec_env_create((WASMModuleInstanceCommon *)module_inst,
module_inst->default_wasm_stack_size))) {
aot_set_exception(module_inst, "allocate memory failed");
return false;
}
}
ret = wasm_runtime_call_wasm(exec_env, func, argc, argv);
/* don't destroy the exec_env if it isn't created in this function */
if (!existing_exec_env)
wasm_exec_env_destroy(exec_env);
return ret;
}
void
aot_set_exception(AOTModuleInstance *module_inst, const char *exception)
{
@ -1611,8 +1602,16 @@ aot_get_exception(AOTModuleInstance *module_inst)
return wasm_get_exception(module_inst);
}
bool
aot_copy_exception(AOTModuleInstance *module_inst, char *exception_buf)
{
/* The field offsets of cur_exception in AOTModuleInstance and
WASMModuleInstance are the same */
return wasm_copy_exception(module_inst, exception_buf);
}
static bool
execute_malloc_function(AOTModuleInstance *module_inst,
execute_malloc_function(AOTModuleInstance *module_inst, WASMExecEnv *exec_env,
AOTFunctionInstance *malloc_func,
AOTFunctionInstance *retain_func, uint32 size,
uint32 *p_result)
@ -1620,6 +1619,8 @@ execute_malloc_function(AOTModuleInstance *module_inst,
#ifdef OS_ENABLE_HW_BOUND_CHECK
WASMExecEnv *exec_env_tls = wasm_runtime_get_exec_env_tls();
#endif
WASMExecEnv *exec_env_created = NULL;
WASMModuleInstanceCommon *module_inst_old = NULL;
uint32 argv[2], argc;
bool ret;
@ -1630,60 +1631,125 @@ execute_malloc_function(AOTModuleInstance *module_inst,
argc = 2;
}
if (exec_env) {
#ifdef OS_ENABLE_HW_BOUND_CHECK
if (exec_env_tls != NULL) {
bh_assert(exec_env_tls->module_inst
== (WASMModuleInstanceCommon *)module_inst);
ret = aot_call_function(exec_env_tls, malloc_func, argc, argv);
if (retain_func && ret) {
ret = aot_call_function(exec_env_tls, retain_func, 1, argv);
if (exec_env_tls) {
bh_assert(exec_env_tls == exec_env);
}
}
else
#endif
{
ret = aot_create_exec_env_and_call_function(module_inst, malloc_func,
argc, argv);
if (retain_func && ret) {
ret = aot_create_exec_env_and_call_function(module_inst,
retain_func, 1, argv);
bh_assert(exec_env->module_inst
== (WASMModuleInstanceCommon *)module_inst);
}
else {
/* Try using the existing exec_env */
#ifdef OS_ENABLE_HW_BOUND_CHECK
exec_env = exec_env_tls;
#endif
#if WASM_ENABLE_THREAD_MGR != 0
if (!exec_env)
exec_env = wasm_clusters_search_exec_env(
(WASMModuleInstanceCommon *)module_inst);
#endif
if (!exec_env) {
if (!(exec_env = exec_env_created = wasm_exec_env_create(
(WASMModuleInstanceCommon *)module_inst,
module_inst->default_wasm_stack_size))) {
wasm_set_exception(module_inst, "allocate memory failed");
return false;
}
}
else {
/* Temporarily replace exec_env's module inst with current
module inst to ensure that the exec_env's module inst
is the correct one. */
module_inst_old = exec_env->module_inst;
exec_env->module_inst = (WASMModuleInstanceCommon *)module_inst;
}
}
ret = aot_call_function(exec_env, malloc_func, argc, argv);
if (retain_func && ret)
ret = aot_call_function(exec_env, retain_func, 1, argv);
if (module_inst_old)
/* Restore the existing exec_env's module inst */
exec_env->module_inst = module_inst_old;
if (exec_env_created)
wasm_exec_env_destroy(exec_env_created);
if (ret)
*p_result = argv[0];
return ret;
}
static bool
execute_free_function(AOTModuleInstance *module_inst,
execute_free_function(AOTModuleInstance *module_inst, WASMExecEnv *exec_env,
AOTFunctionInstance *free_func, uint32 offset)
{
#ifdef OS_ENABLE_HW_BOUND_CHECK
WASMExecEnv *exec_env_tls = wasm_runtime_get_exec_env_tls();
#endif
WASMExecEnv *exec_env_created = NULL;
WASMModuleInstanceCommon *module_inst_old = NULL;
uint32 argv[2];
bool ret;
argv[0] = offset;
if (exec_env) {
#ifdef OS_ENABLE_HW_BOUND_CHECK
if (exec_env_tls != NULL) {
bh_assert(exec_env_tls->module_inst
== (WASMModuleInstanceCommon *)module_inst);
return aot_call_function(exec_env_tls, free_func, 1, argv);
}
else
if (exec_env_tls) {
bh_assert(exec_env_tls == exec_env);
}
#endif
{
return aot_create_exec_env_and_call_function(module_inst, free_func, 1,
argv);
bh_assert(exec_env->module_inst
== (WASMModuleInstanceCommon *)module_inst);
}
else {
/* Try using the existing exec_env */
#ifdef OS_ENABLE_HW_BOUND_CHECK
exec_env = exec_env_tls;
#endif
#if WASM_ENABLE_THREAD_MGR != 0
if (!exec_env)
exec_env = wasm_clusters_search_exec_env(
(WASMModuleInstanceCommon *)module_inst);
#endif
if (!exec_env) {
if (!(exec_env = exec_env_created = wasm_exec_env_create(
(WASMModuleInstanceCommon *)module_inst,
module_inst->default_wasm_stack_size))) {
wasm_set_exception(module_inst, "allocate memory failed");
return false;
}
}
else {
/* Temporarily replace exec_env's module inst with current
module inst to ensure that the exec_env's module inst
is the correct one. */
module_inst_old = exec_env->module_inst;
exec_env->module_inst = (WASMModuleInstanceCommon *)module_inst;
}
}
ret = aot_call_function(exec_env, free_func, 1, argv);
if (module_inst_old)
/* Restore the existing exec_env's module inst */
exec_env->module_inst = module_inst_old;
if (exec_env_created)
wasm_exec_env_destroy(exec_env_created);
return ret;
}
uint32
aot_module_malloc(AOTModuleInstance *module_inst, uint32 size,
void **p_native_addr)
aot_module_malloc_internal(AOTModuleInstance *module_inst,
WASMExecEnv *exec_env, uint32 size,
void **p_native_addr)
{
AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
AOTModule *module = (AOTModule *)module_inst->module;
@ -1720,8 +1786,8 @@ aot_module_malloc(AOTModuleInstance *module_inst, uint32 size,
aot_lookup_function(module_inst, malloc_func_name, malloc_func_sig);
if (!malloc_func
|| !execute_malloc_function(module_inst, malloc_func, retain_func,
size, &offset)) {
|| !execute_malloc_function(module_inst, exec_env, malloc_func,
retain_func, size, &offset)) {
return 0;
}
addr = offset ? (uint8 *)memory_inst->memory_data + offset : NULL;
@ -1744,8 +1810,9 @@ aot_module_malloc(AOTModuleInstance *module_inst, uint32 size,
}
uint32
aot_module_realloc(AOTModuleInstance *module_inst, uint32 ptr, uint32 size,
void **p_native_addr)
aot_module_realloc_internal(AOTModuleInstance *module_inst,
WASMExecEnv *exec_env, uint32 ptr, uint32 size,
void **p_native_addr)
{
AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
uint8 *addr = NULL;
@ -1762,6 +1829,7 @@ aot_module_realloc(AOTModuleInstance *module_inst, uint32 ptr, uint32 size,
}
/* Only support realloc in WAMR's app heap */
(void)exec_env;
if (!addr) {
if (memory_inst->heap_handle
@ -1780,7 +1848,8 @@ aot_module_realloc(AOTModuleInstance *module_inst, uint32 ptr, uint32 size,
}
void
aot_module_free(AOTModuleInstance *module_inst, uint32 ptr)
aot_module_free_internal(AOTModuleInstance *module_inst, WASMExecEnv *exec_env,
uint32 ptr)
{
AOTMemoryInstance *memory_inst = aot_get_default_memory(module_inst);
AOTModule *module = (AOTModule *)module_inst->module;
@ -1814,11 +1883,32 @@ aot_module_free(AOTModuleInstance *module_inst, uint32 ptr)
free_func = aot_lookup_function(module_inst, "__unpin", "(i)i");
if (free_func)
execute_free_function(module_inst, free_func, ptr);
execute_free_function(module_inst, exec_env, free_func, ptr);
}
}
}
uint32
aot_module_malloc(AOTModuleInstance *module_inst, uint32 size,
void **p_native_addr)
{
return aot_module_malloc_internal(module_inst, NULL, size, p_native_addr);
}
uint32
aot_module_realloc(AOTModuleInstance *module_inst, uint32 ptr, uint32 size,
void **p_native_addr)
{
return aot_module_realloc_internal(module_inst, NULL, ptr, size,
p_native_addr);
}
void
aot_module_free(AOTModuleInstance *module_inst, uint32 ptr)
{
aot_module_free_internal(module_inst, NULL, ptr);
}
uint32
aot_module_dup_data(AOTModuleInstance *module_inst, const char *src,
uint32 size)

View File

@ -343,8 +343,9 @@ aot_unload(AOTModule *module);
* @return the instantiated AOT module instance, NULL if failed
*/
AOTModuleInstance *
aot_instantiate(AOTModule *module, bool is_sub_inst, uint32 stack_size,
uint32 heap_size, char *error_buf, uint32 error_buf_size);
aot_instantiate(AOTModule *module, bool is_sub_inst, WASMExecEnv *exec_env_main,
uint32 stack_size, uint32 heap_size, char *error_buf,
uint32 error_buf_size);
/**
* Deinstantiate a AOT module instance, destroy the resources.
@ -387,11 +388,6 @@ bool
aot_call_function(WASMExecEnv *exec_env, AOTFunctionInstance *function,
unsigned argc, uint32 argv[]);
bool
aot_create_exec_env_and_call_function(AOTModuleInstance *module_inst,
AOTFunctionInstance *function,
unsigned argc, uint32 argv[]);
/**
* Set AOT module instance exception with exception string
*
@ -415,6 +411,27 @@ aot_set_exception_with_id(AOTModuleInstance *module_inst, uint32 id);
const char *
aot_get_exception(AOTModuleInstance *module_inst);
/**
* @brief Copy the exception into the buffer passed as parameter. Thread-safe
* version of `aot_get_exception()`
* @note Buffer size must be no smaller than EXCEPTION_BUF_LEN
* @return true if exception found, false otherwise
*/
bool
aot_copy_exception(AOTModuleInstance *module_inst, char *exception_buf);
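A minimal usage sketch for the thread-safe copy (the calling context and the logging call are illustrative; `EXCEPTION_BUF_LEN` and the NULL-buffer check come from this change):

```c
#include "aot_runtime.h" /* assumed to provide aot_copy_exception, EXCEPTION_BUF_LEN */
#include "bh_log.h"

/* Sketch: read an exception without holding a pointer into the instance,
   so another thread clearing the exception cannot invalidate it. */
static bool
log_wasm_exception(AOTModuleInstance *module_inst)
{
    char buf[EXCEPTION_BUF_LEN];

    /* NULL buffer: only check whether an exception is set */
    if (!aot_copy_exception(module_inst, NULL))
        return false;

    /* Non-NULL buffer: copy the string into caller-owned memory */
    if (aot_copy_exception(module_inst, buf))
        LOG_ERROR("wasm exception: %s", buf);
    return true;
}
```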
uint32
aot_module_malloc_internal(AOTModuleInstance *module_inst, WASMExecEnv *env,
uint32 size, void **p_native_addr);
uint32
aot_module_realloc_internal(AOTModuleInstance *module_inst, WASMExecEnv *env,
uint32 ptr, uint32 size, void **p_native_addr);
void
aot_module_free_internal(AOTModuleInstance *module_inst, WASMExecEnv *env,
uint32 ptr);
uint32
aot_module_malloc(AOTModuleInstance *module_inst, uint32 size,
void **p_native_addr);

View File

@ -294,7 +294,7 @@ apply_relocation(AOTModule *module, uint8 *target_section_addr,
snprintf(error_buf, error_buf_size,
"Load relocation section failed: "
"invalid relocation type %d.",
reloc_type);
(int)reloc_type);
return false;
}

View File

@ -0,0 +1,18 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#if defined(__aarch64__)
#if WASM_ENABLE_SIMD == 0
#include "invokeNative_aarch64.s"
#else
#include "invokeNative_aarch64_simd.s"
#endif
#else
#if WASM_ENABLE_SIMD == 0
#include "invokeNative_em64.s"
#else
#include "invokeNative_em64_simd.s"
#endif
#endif

View File

@ -14,6 +14,17 @@ if (WAMR_DISABLE_APP_ENTRY EQUAL 1)
list(REMOVE_ITEM c_source_all "${IWASM_COMMON_DIR}/wasm_application.c")
endif ()
if (CMAKE_OSX_ARCHITECTURES)
string(TOLOWER "${CMAKE_OSX_ARCHITECTURES}" OSX_ARCHS)
list(FIND OSX_ARCHS arm64 OSX_AARCH64)
list(FIND OSX_ARCHS x86_64 OSX_X86_64)
if (NOT "${OSX_AARCH64}" STREQUAL "-1" AND NOT "${OSX_X86_64}" STREQUAL "-1")
set(OSX_UNIVERSAL_BUILD 1)
endif()
endif()
if (WAMR_BUILD_INVOKE_NATIVE_GENERAL EQUAL 1)
# Use invokeNative C version instead of asm code version
# if WAMR_BUILD_INVOKE_NATIVE_GENERAL is explicitly set.
@ -24,6 +35,8 @@ if (WAMR_BUILD_INVOKE_NATIVE_GENERAL EQUAL 1)
# in arm and mips need to be 8-bytes aligned, and some arguments
# of x86_64 are passed by registers but not stack
set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_general.c)
elseif (OSX_UNIVERSAL_BUILD EQUAL 1)
set (source_all ${c_source_all} ${IWASM_COMMON_DIR}/arch/invokeNative_osx_universal.s)
elseif (WAMR_BUILD_TARGET STREQUAL "X86_64" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
if (NOT WAMR_BUILD_SIMD EQUAL 1)
if (WAMR_BUILD_PLATFORM STREQUAL "windows")

View File

@ -195,6 +195,7 @@ execute_main(WASMModuleInstanceCommon *module_inst, int32 argc, char *argv[])
if (argv_buf_offset)
wasm_runtime_module_free(module_inst, argv_buf_offset);
return ret;
}
@ -203,7 +204,7 @@ wasm_application_execute_main(WASMModuleInstanceCommon *module_inst, int32 argc,
char *argv[])
{
bool ret;
#if WASM_ENABLE_MEMORY_PROFILING != 0
#if (WASM_ENABLE_MEMORY_PROFILING != 0) || (WASM_ENABLE_DUMP_CALL_STACK != 0)
WASMExecEnv *exec_env;
#endif
@ -220,7 +221,18 @@ wasm_application_execute_main(WASMModuleInstanceCommon *module_inst, int32 argc,
wasm_runtime_dump_perf_profiling(module_inst);
#endif
return (ret && !wasm_runtime_get_exception(module_inst)) ? true : false;
if (ret)
ret = wasm_runtime_get_exception(module_inst) == NULL;
#if WASM_ENABLE_DUMP_CALL_STACK != 0
if (!ret) {
exec_env = wasm_runtime_get_exec_env_singleton(module_inst);
if (exec_env)
wasm_runtime_dump_call_stack(exec_env);
}
#endif
return ret;
}
/**

View File

@ -687,8 +687,8 @@ wasm_store_delete(wasm_store_t *store)
return;
}
DEINIT_VEC(store->modules, wasm_module_vec_delete);
DEINIT_VEC(store->instances, wasm_instance_vec_delete);
DEINIT_VEC(store->modules, wasm_module_vec_delete);
if (store->foreigns) {
bh_vector_destroy(store->foreigns);
wasm_runtime_free(store->foreigns);
@ -1905,7 +1905,7 @@ wasm_trap_new_internal(wasm_store_t *store,
}
/* fill in message */
if (strlen(error_info) > 0) {
if (error_info && strlen(error_info) > 0) {
if (!(trap->message = malloc_internal(sizeof(wasm_byte_vec_t)))) {
goto failed;
}

View File

@ -1196,20 +1196,21 @@ wasm_runtime_unload(WASMModuleCommon *module)
WASMModuleInstanceCommon *
wasm_runtime_instantiate_internal(WASMModuleCommon *module, bool is_sub_inst,
uint32 stack_size, uint32 heap_size,
char *error_buf, uint32 error_buf_size)
WASMExecEnv *exec_env_main, uint32 stack_size,
uint32 heap_size, char *error_buf,
uint32 error_buf_size)
{
#if WASM_ENABLE_INTERP != 0
if (module->module_type == Wasm_Module_Bytecode)
return (WASMModuleInstanceCommon *)wasm_instantiate(
(WASMModule *)module, is_sub_inst, stack_size, heap_size, error_buf,
error_buf_size);
(WASMModule *)module, is_sub_inst, exec_env_main, stack_size,
heap_size, error_buf, error_buf_size);
#endif
#if WASM_ENABLE_AOT != 0
if (module->module_type == Wasm_Module_AoT)
return (WASMModuleInstanceCommon *)aot_instantiate(
(AOTModule *)module, is_sub_inst, stack_size, heap_size, error_buf,
error_buf_size);
(AOTModule *)module, is_sub_inst, exec_env_main, stack_size,
heap_size, error_buf, error_buf_size);
#endif
set_error_buf(error_buf, error_buf_size,
"Instantiate module failed, invalid module type");
@ -1222,7 +1223,7 @@ wasm_runtime_instantiate(WASMModuleCommon *module, uint32 stack_size,
uint32 error_buf_size)
{
return wasm_runtime_instantiate_internal(
module, false, stack_size, heap_size, error_buf, error_buf_size);
module, false, NULL, stack_size, heap_size, error_buf, error_buf_size);
}
void
@ -2331,12 +2332,6 @@ wasm_set_exception(WASMModuleInstance *module_inst, const char *exception)
if (exec_env) {
wasm_cluster_spread_exception(exec_env, exception ? false : true);
}
#if WASM_ENABLE_SHARED_MEMORY
if (exception) {
notify_stale_threads_on_exception(
(WASMModuleInstanceCommon *)module_inst);
}
#endif
#else
(void)exec_env;
#endif
@ -2486,6 +2481,62 @@ wasm_runtime_get_custom_data(WASMModuleInstanceCommon *module_inst_comm)
return module_inst->custom_data;
}
uint32
wasm_runtime_module_malloc_internal(WASMModuleInstanceCommon *module_inst,
WASMExecEnv *exec_env, uint32 size,
void **p_native_addr)
{
#if WASM_ENABLE_INTERP != 0
if (module_inst->module_type == Wasm_Module_Bytecode)
return wasm_module_malloc_internal((WASMModuleInstance *)module_inst,
exec_env, size, p_native_addr);
#endif
#if WASM_ENABLE_AOT != 0
if (module_inst->module_type == Wasm_Module_AoT)
return aot_module_malloc_internal((AOTModuleInstance *)module_inst,
exec_env, size, p_native_addr);
#endif
return 0;
}
uint32
wasm_runtime_module_realloc_internal(WASMModuleInstanceCommon *module_inst,
WASMExecEnv *exec_env, uint32 ptr,
uint32 size, void **p_native_addr)
{
#if WASM_ENABLE_INTERP != 0
if (module_inst->module_type == Wasm_Module_Bytecode)
return wasm_module_realloc_internal((WASMModuleInstance *)module_inst,
exec_env, ptr, size, p_native_addr);
#endif
#if WASM_ENABLE_AOT != 0
if (module_inst->module_type == Wasm_Module_AoT)
return aot_module_realloc_internal((AOTModuleInstance *)module_inst,
exec_env, ptr, size, p_native_addr);
#endif
return 0;
}
void
wasm_runtime_module_free_internal(WASMModuleInstanceCommon *module_inst,
WASMExecEnv *exec_env, uint32 ptr)
{
#if WASM_ENABLE_INTERP != 0
if (module_inst->module_type == Wasm_Module_Bytecode) {
wasm_module_free_internal((WASMModuleInstance *)module_inst, exec_env,
ptr);
return;
}
#endif
#if WASM_ENABLE_AOT != 0
if (module_inst->module_type == Wasm_Module_AoT) {
aot_module_free_internal((AOTModuleInstance *)module_inst, exec_env,
ptr);
return;
}
#endif
}
uint32
wasm_runtime_module_malloc(WASMModuleInstanceCommon *module_inst, uint32 size,
void **p_native_addr)
@ -3144,6 +3195,21 @@ uint32_t
wasm_runtime_get_wasi_exit_code(WASMModuleInstanceCommon *module_inst)
{
WASIContext *wasi_ctx = wasm_runtime_get_wasi_ctx(module_inst);
#if WASM_ENABLE_THREAD_MGR != 0
WASMCluster *cluster;
WASMExecEnv *exec_env;
exec_env = wasm_runtime_get_exec_env_singleton(module_inst);
if (exec_env && (cluster = wasm_exec_env_get_cluster(exec_env))) {
/**
* The main thread may exit earlier than other threads, and
* the exit_code of wasi_ctx may be changed by another thread
* when it runs into wasi_proc_exit. Here we wait until all
* other threads exit to avoid getting an invalid exit_code.
*/
wasm_cluster_wait_for_all_except_self(cluster, exec_env);
}
#endif
return wasi_ctx->exit_code;
}
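For embedders, a hedged sketch of where this exit code is typically read; creating `module_inst` and the argument array is assumed to happen elsewhere:

```c
#include <stdio.h>
#include "wasm_export.h"

/* Sketch: run the wasi entry point and report its exit code. */
static void
run_and_report(wasm_module_inst_t module_inst, int32_t argc, char *argv[])
{
    if (!wasm_application_execute_main(module_inst, argc, argv)) {
        printf("exception: %s\n", wasm_runtime_get_exception(module_inst));
        return;
    }
    /* With thread-mgr enabled, the call below waits for sibling threads,
       so the value is stable even if another thread ran proc_exit */
    printf("wasi exit code: %u\n",
           wasm_runtime_get_wasi_exit_code(module_inst));
}
```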

View File

@ -498,8 +498,9 @@ wasm_runtime_unload(WASMModuleCommon *module);
/* Internal API */
WASMModuleInstanceCommon *
wasm_runtime_instantiate_internal(WASMModuleCommon *module, bool is_sub_inst,
uint32 stack_size, uint32 heap_size,
char *error_buf, uint32 error_buf_size);
WASMExecEnv *exec_env_main, uint32 stack_size,
uint32 heap_size, char *error_buf,
uint32 error_buf_size);
/* Internal API */
void
@ -508,8 +509,8 @@ wasm_runtime_deinstantiate_internal(WASMModuleInstanceCommon *module_inst,
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN WASMModuleInstanceCommon *
wasm_runtime_instantiate(WASMModuleCommon *module, uint32 stack_size,
uint32 heap_size, char *error_buf,
wasm_runtime_instantiate(WASMModuleCommon *module, uint32 default_stack_size,
uint32 host_managed_heap_size, char *error_buf,
uint32 error_buf_size);
/* See wasm_export.h for description */
@ -675,6 +676,23 @@ wasm_runtime_set_custom_data(WASMModuleInstanceCommon *module_inst,
WASM_RUNTIME_API_EXTERN void *
wasm_runtime_get_custom_data(WASMModuleInstanceCommon *module_inst);
/* Internal API */
uint32
wasm_runtime_module_malloc_internal(WASMModuleInstanceCommon *module_inst,
WASMExecEnv *exec_env, uint32 size,
void **p_native_addr);
/* Internal API */
uint32
wasm_runtime_module_realloc_internal(WASMModuleInstanceCommon *module_inst,
WASMExecEnv *exec_env, uint32 ptr,
uint32 size, void **p_native_addr);
/* Internal API */
void
wasm_runtime_module_free_internal(WASMModuleInstanceCommon *module_inst,
WASMExecEnv *exec_env, uint32 ptr);
/* See wasm_export.h for description */
WASM_RUNTIME_API_EXTERN uint32
wasm_runtime_module_malloc(WASMModuleInstanceCommon *module_inst, uint32 size,

View File

@ -5,6 +5,9 @@
#include "bh_log.h"
#include "wasm_shared_memory.h"
#if WASM_ENABLE_THREAD_MGR != 0
#include "../libraries/thread-mgr/thread_manager.h"
#endif
static bh_list shared_memory_list_head;
static bh_list *const shared_memory_list = &shared_memory_list_head;
@ -18,26 +21,20 @@ enum {
/* clang-format on */
typedef struct AtomicWaitInfo {
korp_mutex wait_list_lock;
bh_list wait_list_head;
bh_list *wait_list;
/* WARNING: insertion into the list is allowed only in acquire_wait_info,
otherwise there will be a data race as described in PR #2016 */
} AtomicWaitInfo;
typedef struct AtomicWaitNode {
bh_list_link l;
uint8 status;
korp_mutex wait_lock;
korp_cond wait_cond;
} AtomicWaitNode;
typedef struct AtomicWaitAddressArgs {
uint32 index;
void **addr;
} AtomicWaitAddressArgs;
/* Atomic wait map */
static HashMap *wait_map;
static korp_mutex wait_map_lock;
static uint32
wait_address_hash(void *address);
@ -54,17 +51,11 @@ wasm_shared_memory_init()
if (os_mutex_init(&shared_memory_list_lock) != 0)
return false;
if (os_mutex_init(&wait_map_lock) != 0) {
os_mutex_destroy(&shared_memory_list_lock);
return false;
}
/* wait map not exists, create new map */
if (!(wait_map = bh_hash_map_create(32, true, (HashFunc)wait_address_hash,
(KeyEqualFunc)wait_address_equal, NULL,
destroy_wait_info))) {
os_mutex_destroy(&shared_memory_list_lock);
os_mutex_destroy(&wait_map_lock);
return false;
}
@ -74,11 +65,8 @@ wasm_shared_memory_init()
void
wasm_shared_memory_destroy()
{
bh_hash_map_destroy(wait_map);
os_mutex_destroy(&shared_memory_list_lock);
os_mutex_destroy(&wait_map_lock);
if (wait_map) {
bh_hash_map_destroy(wait_map);
}
}
static WASMSharedMemNode *
@ -101,61 +89,6 @@ search_module(WASMModuleCommon *module)
return NULL;
}
static void
wait_map_address_count_callback(void *key, void *value,
void *p_total_elem_count)
{
*(uint32 *)p_total_elem_count = *(uint32 *)p_total_elem_count + 1;
}
static void
create_list_of_waiter_addresses(void *key, void *value, void *user_data)
{
AtomicWaitAddressArgs *data = (AtomicWaitAddressArgs *)user_data;
data->addr[data->index++] = key;
}
void
notify_stale_threads_on_exception(WASMModuleInstanceCommon *module_inst)
{
AtomicWaitAddressArgs args = { 0 };
uint32 i = 0, total_elem_count = 0;
uint64 total_elem_count_size = 0;
os_mutex_lock(&wait_map_lock); /* Make the two traversals atomic */
/* count number of addresses in wait_map */
bh_hash_map_traverse(wait_map, wait_map_address_count_callback,
(void *)&total_elem_count);
if (!total_elem_count) {
os_mutex_unlock(&wait_map_lock);
return;
}
/* allocate memory */
total_elem_count_size = (uint64)sizeof(void *) * total_elem_count;
if (total_elem_count_size >= UINT32_MAX
|| !(args.addr = wasm_runtime_malloc((uint32)total_elem_count_size))) {
LOG_ERROR(
"failed to allocate memory for list of atomic wait addresses");
os_mutex_unlock(&wait_map_lock);
return;
}
/* set values in list of addresses */
bh_hash_map_traverse(wait_map, create_list_of_waiter_addresses, &args);
os_mutex_unlock(&wait_map_lock);
/* notify */
for (i = 0; i < args.index; i++) {
wasm_runtime_atomic_notify(module_inst, args.addr[i], UINT32_MAX);
}
/* free memory allocated to args data */
wasm_runtime_free(args.addr);
}
WASMSharedMemNode *
wasm_module_get_shared_memory(WASMModuleCommon *module)
{
@ -274,7 +207,7 @@ notify_wait_list(bh_list *wait_list, uint32 count)
AtomicWaitNode *node, *next;
uint32 i, notify_count = count;
if ((count == UINT32_MAX) || (count > wait_list->len))
if (count > wait_list->len)
notify_count = wait_list->len;
node = bh_list_first_elem(wait_list);
@ -285,11 +218,9 @@ notify_wait_list(bh_list *wait_list, uint32 count)
bh_assert(node);
next = bh_list_elem_next(node);
os_mutex_lock(&node->wait_lock);
node->status = S_NOTIFIED;
/* wakeup */
os_cond_signal(&node->wait_cond);
os_mutex_unlock(&node->wait_lock);
node = next;
}
@ -298,18 +229,15 @@ notify_wait_list(bh_list *wait_list, uint32 count)
}
static AtomicWaitInfo *
acquire_wait_info(void *address, bool create)
acquire_wait_info(void *address, AtomicWaitNode *wait_node)
{
AtomicWaitInfo *wait_info = NULL;
bh_list_status ret;
os_mutex_lock(&wait_map_lock); /* Make find + insert atomic */
if (address)
wait_info = (AtomicWaitInfo *)bh_hash_map_find(wait_map, address);
if (!create) {
os_mutex_unlock(&wait_map_lock);
if (!wait_node) {
return wait_info;
}
@ -317,7 +245,7 @@ acquire_wait_info(void *address, bool create)
if (!wait_info) {
if (!(wait_info = (AtomicWaitInfo *)wasm_runtime_malloc(
sizeof(AtomicWaitInfo)))) {
goto fail1;
return NULL;
}
memset(wait_info, 0, sizeof(AtomicWaitInfo));
@ -325,33 +253,19 @@ acquire_wait_info(void *address, bool create)
wait_info->wait_list = &wait_info->wait_list_head;
ret = bh_list_init(wait_info->wait_list);
bh_assert(ret == BH_LIST_SUCCESS);
/* init wait list lock */
if (0 != os_mutex_init(&wait_info->wait_list_lock)) {
goto fail2;
}
(void)ret;
if (!bh_hash_map_insert(wait_map, address, (void *)wait_info)) {
goto fail3;
wasm_runtime_free(wait_info);
return NULL;
}
}
os_mutex_unlock(&wait_map_lock);
bh_assert(wait_info);
ret = bh_list_insert(wait_info->wait_list, wait_node);
bh_assert(ret == BH_LIST_SUCCESS);
(void)ret;
return wait_info;
fail3:
os_mutex_destroy(&wait_info->wait_list_lock);
fail2:
wasm_runtime_free(wait_info);
fail1:
os_mutex_unlock(&wait_map_lock);
return NULL;
}
static void
@ -365,27 +279,25 @@ destroy_wait_info(void *wait_info)
while (node) {
next = bh_list_elem_next(node);
os_mutex_destroy(&node->wait_lock);
os_cond_destroy(&node->wait_cond);
wasm_runtime_free(node);
node = next;
}
os_mutex_destroy(&((AtomicWaitInfo *)wait_info)->wait_list_lock);
wasm_runtime_free(wait_info);
}
}
static bool
map_remove_wait_info(HashMap *wait_map_, AtomicWaitInfo *wait_info,
void *address)
static void
map_try_release_wait_info(HashMap *wait_map_, AtomicWaitInfo *wait_info,
void *address)
{
if (wait_info->wait_list->len > 0) {
return false;
return;
}
bh_hash_map_remove(wait_map_, address, NULL, NULL);
return true;
destroy_wait_info(wait_info);
}
uint32
@ -396,7 +308,11 @@ wasm_runtime_atomic_wait(WASMModuleInstanceCommon *module, void *address,
AtomicWaitInfo *wait_info;
AtomicWaitNode *wait_node;
WASMSharedMemNode *node;
bool check_ret, is_timeout, no_wait, removed_from_map;
#if WASM_ENABLE_THREAD_MGR != 0
WASMExecEnv *exec_env;
#endif
uint64 timeout_left, timeout_wait, timeout_1sec;
bool check_ret, is_timeout, no_wait;
bh_assert(module->module_type == Wasm_Module_Bytecode
|| module->module_type == Wasm_Module_AoT);
@ -418,81 +334,107 @@ wasm_runtime_atomic_wait(WASMModuleInstanceCommon *module, void *address,
return -1;
}
/* acquire the wait info, create new one if not exists */
wait_info = acquire_wait_info(address, true);
#if WASM_ENABLE_THREAD_MGR != 0
exec_env =
wasm_clusters_search_exec_env((WASMModuleInstanceCommon *)module_inst);
bh_assert(exec_env);
#endif
node = search_module((WASMModuleCommon *)module_inst->module);
bh_assert(node);
/* Lock the shared_mem_lock for the whole atomic wait process,
and use it for os_cond_reltimedwait */
os_mutex_lock(&node->shared_mem_lock);
no_wait = (!wait64 && *(uint32 *)address != (uint32)expect)
|| (wait64 && *(uint64 *)address != expect);
if (no_wait) {
os_mutex_unlock(&node->shared_mem_lock);
return 1;
}
if (!(wait_node = wasm_runtime_malloc(sizeof(AtomicWaitNode)))) {
os_mutex_unlock(&node->shared_mem_lock);
wasm_runtime_set_exception(module, "failed to create wait node");
return -1;
}
memset(wait_node, 0, sizeof(AtomicWaitNode));
if (0 != os_cond_init(&wait_node->wait_cond)) {
os_mutex_unlock(&node->shared_mem_lock);
wasm_runtime_free(wait_node);
wasm_runtime_set_exception(module, "failed to init wait cond");
return -1;
}
wait_node->status = S_WAITING;
/* Acquire the wait info, create new one if not exists */
wait_info = acquire_wait_info(address, wait_node);
if (!wait_info) {
os_mutex_unlock(&node->shared_mem_lock);
os_cond_destroy(&wait_node->wait_cond);
wasm_runtime_free(wait_node);
wasm_runtime_set_exception(module, "failed to acquire wait_info");
return -1;
}
node = search_module((WASMModuleCommon *)module_inst->module);
os_mutex_lock(&node->shared_mem_lock);
no_wait = (!wait64 && *(uint32 *)address != (uint32)expect)
|| (wait64 && *(uint64 *)address != expect);
os_mutex_unlock(&node->shared_mem_lock);
/* unit of timeout is nsec, convert it to usec */
timeout_left = (uint64)timeout / 1000;
timeout_1sec = 1e6;
if (no_wait) {
return 1;
while (1) {
if (timeout < 0) {
/* wait forever until it is notified or terminated;
here we keep waiting and checking every second */
os_cond_reltimedwait(&wait_node->wait_cond, &node->shared_mem_lock,
(uint64)timeout_1sec);
if (wait_node->status == S_NOTIFIED /* notified by atomic.notify */
#if WASM_ENABLE_THREAD_MGR != 0
/* terminated by other thread */
|| wasm_cluster_is_thread_terminated(exec_env)
#endif
) {
break;
}
}
else {
timeout_wait =
timeout_left < timeout_1sec ? timeout_left : timeout_1sec;
os_cond_reltimedwait(&wait_node->wait_cond, &node->shared_mem_lock,
timeout_wait);
if (wait_node->status == S_NOTIFIED /* notified by atomic.notify */
|| timeout_left <= timeout_wait /* time out */
#if WASM_ENABLE_THREAD_MGR != 0
/* terminated by other thread */
|| wasm_cluster_is_thread_terminated(exec_env)
#endif
) {
break;
}
timeout_left -= timeout_wait;
}
}
else {
bh_list_status ret;
if (!(wait_node = wasm_runtime_malloc(sizeof(AtomicWaitNode)))) {
wasm_runtime_set_exception(module, "failed to create wait node");
return -1;
}
memset(wait_node, 0, sizeof(AtomicWaitNode));
if (0 != os_mutex_init(&wait_node->wait_lock)) {
wasm_runtime_free(wait_node);
return -1;
}
if (0 != os_cond_init(&wait_node->wait_cond)) {
os_mutex_destroy(&wait_node->wait_lock);
wasm_runtime_free(wait_node);
return -1;
}
wait_node->status = S_WAITING;
os_mutex_lock(&wait_info->wait_list_lock);
ret = bh_list_insert(wait_info->wait_list, wait_node);
os_mutex_unlock(&wait_info->wait_list_lock);
bh_assert(ret == BH_LIST_SUCCESS);
(void)ret;
}
/* condition wait start */
os_mutex_lock(&wait_node->wait_lock);
os_cond_reltimedwait(&wait_node->wait_cond, &wait_node->wait_lock,
timeout < 0 ? BHT_WAIT_FOREVER
: (uint64)timeout / 1000);
is_timeout = wait_node->status == S_WAITING ? true : false;
os_mutex_unlock(&wait_node->wait_lock);
os_mutex_lock(&node->shared_mem_lock);
os_mutex_lock(&wait_info->wait_list_lock);
check_ret = is_wait_node_exists(wait_info->wait_list, wait_node);
bh_assert(check_ret);
(void)check_ret;
/* Remove wait node */
/* Remove wait node from wait list */
bh_list_remove(wait_info->wait_list, wait_node);
os_mutex_destroy(&wait_node->wait_lock);
os_cond_destroy(&wait_node->wait_cond);
wasm_runtime_free(wait_node);
/* Release wait info if no wait nodes attached */
removed_from_map = map_remove_wait_info(wait_map, wait_info, address);
os_mutex_unlock(&wait_info->wait_list_lock);
if (removed_from_map)
destroy_wait_info(wait_info);
/* Release wait info if no wait nodes are attached */
map_try_release_wait_info(wait_map, wait_info, address);
os_mutex_unlock(&node->shared_mem_lock);
(void)check_ret;
return is_timeout ? 2 : 0;
}
@ -509,35 +451,41 @@ wasm_runtime_atomic_notify(WASMModuleInstanceCommon *module, void *address,
bh_assert(module->module_type == Wasm_Module_Bytecode
|| module->module_type == Wasm_Module_AoT);
node = search_module((WASMModuleCommon *)module_inst->module);
if (node)
os_mutex_lock(&node->shared_mem_lock);
out_of_bounds =
((uint8 *)address < module_inst->memories[0]->memory_data
|| (uint8 *)address + 4 > module_inst->memories[0]->memory_data_end);
if (out_of_bounds) {
if (node)
os_mutex_unlock(&node->shared_mem_lock);
wasm_runtime_set_exception(module, "out of bounds memory access");
return -1;
}
wait_info = acquire_wait_info(address, false);
/* Nobody wait on this address */
if (!wait_info) {
if (node)
os_mutex_unlock(&node->shared_mem_lock);
/* Currently we have only one memory instance */
if (!module_inst->memories[0]->is_shared) {
/* Always return 0 for unshared linear memory since there is
no way to create a waiter on it */
return 0;
}
os_mutex_lock(&wait_info->wait_list_lock);
notify_result = notify_wait_list(wait_info->wait_list, count);
os_mutex_unlock(&wait_info->wait_list_lock);
node = search_module((WASMModuleCommon *)module_inst->module);
bh_assert(node);
if (node)
/* Lock the shared_mem_lock for the whole atomic notify process,
and use it for os_cond_signal */
os_mutex_lock(&node->shared_mem_lock);
wait_info = acquire_wait_info(address, NULL);
/* Nobody wait on this address */
if (!wait_info) {
os_mutex_unlock(&node->shared_mem_lock);
return 0;
}
/* Notify each wait node in the wait list */
notify_result = notify_wait_list(wait_info->wait_list, count);
os_mutex_unlock(&node->shared_mem_lock);
return notify_result;
}

View File

@ -39,9 +39,6 @@ wasm_shared_memory_init();
void
wasm_shared_memory_destroy();
void
notify_stale_threads_on_exception(WASMModuleInstanceCommon *module);
WASMSharedMemNode *
wasm_module_get_shared_memory(WASMModuleCommon *module);

View File

@ -671,9 +671,16 @@ bool
check_suspend_flags(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
{
LLVMValueRef terminate_addr, terminate_flags, flag, offset, res;
LLVMBasicBlockRef terminate_check_block, non_terminate_block;
LLVMBasicBlockRef terminate_block, non_terminate_block;
AOTFuncType *aot_func_type = func_ctx->aot_func->func_type;
LLVMBasicBlockRef terminate_block;
bool is_shared_memory =
comp_ctx->comp_data->memories[0].memory_flags & 0x02 ? true : false;
/* Only need to check the suspend flags when memory is shared since
shared memory must be enabled for multi-threading */
if (!is_shared_memory) {
return true;
}
/* Offset of suspend_flags */
offset = I32_FIVE;
@ -694,36 +701,27 @@ check_suspend_flags(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
if (!(terminate_flags =
LLVMBuildLoad2(comp_ctx->builder, I32_TYPE, terminate_addr,
"terminate_flags"))) {
aot_set_last_error("llvm build bit cast failed");
aot_set_last_error("llvm build LOAD failed");
return false;
}
/* Set terminate_flags memory access to volatile, so that the value
will always be loaded from memory rather than a register */
LLVMSetVolatile(terminate_flags, true);
CREATE_BLOCK(terminate_check_block, "terminate_check");
MOVE_BLOCK_AFTER_CURR(terminate_check_block);
CREATE_BLOCK(non_terminate_block, "non_terminate");
MOVE_BLOCK_AFTER_CURR(non_terminate_block);
BUILD_ICMP(LLVMIntSGT, terminate_flags, I32_ZERO, res, "need_terminate");
BUILD_COND_BR(res, terminate_check_block, non_terminate_block);
/* Move builder to terminate check block */
SET_BUILDER_POS(terminate_check_block);
CREATE_BLOCK(terminate_block, "terminate");
MOVE_BLOCK_AFTER_CURR(terminate_block);
if (!(flag = LLVMBuildAnd(comp_ctx->builder, terminate_flags, I32_ONE,
"termination_flag"))) {
aot_set_last_error("llvm build AND failed");
return false;
}
BUILD_ICMP(LLVMIntSGT, flag, I32_ZERO, res, "need_terminate");
BUILD_COND_BR(res, terminate_block, non_terminate_block);
CREATE_BLOCK(non_terminate_block, "non_terminate");
MOVE_BLOCK_AFTER_CURR(non_terminate_block);
CREATE_BLOCK(terminate_block, "terminate");
MOVE_BLOCK_AFTER_CURR(terminate_block);
BUILD_ICMP(LLVMIntEQ, flag, I32_ZERO, res, "flag_terminate");
BUILD_COND_BR(res, non_terminate_block, terminate_block);
/* Move builder to terminate block */
SET_BUILDER_POS(terminate_block);
@ -731,7 +729,7 @@ check_suspend_flags(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
goto fail;
}
/* Move builder to terminate block */
/* Move builder to non terminate block */
SET_BUILDER_POS(non_terminate_block);
return true;

View File

@ -0,0 +1,5 @@
# Classic interpreter
## stack format
![](./images/stack_format_ci.svg)

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long


File diff suppressed because one or more lines are too long


File diff suppressed because it is too large

File diff suppressed because one or more lines are too long


File diff suppressed because it is too large

File diff suppressed because one or more lines are too long


View File

@ -0,0 +1,22 @@
# Wasm exports
The internal data structure for Wasm exports:
![](./images/wasm_exports.svg)
## Setup exports for Module
The array data structure pointed to by `WASMModule::exports` is set up while loading a module. Basically the runtime loads the export section from the module file content and constructs an array of the C struct `WASMExport`.
A `WASMExport` item contains three elements that map to the Wasm file export section structure:
- name: the name exposed to the outside
- kind: one of the 4 export types: function, global, memory, table
- index: as all 4 export types are organized in arrays, this is the index into the target export type's array
## Function exports
### use function exports
Function exports are often used in two situations:
1. **call by host**: the runtime API `wasm_runtime_lookup_function` walks through the array of `WASMModuleInstance::export_functions` and compares the exported name with the target symbol name given in the function parameter. If an array item matches the name, it returns the value of the field `function`, which points to the associated function instance (`WASMFunctionInstance`)
2. **import by another module**: when linking multiple modules, the runtime saves the pointer to the exported `WASMFunctionInstance` in the local `WASMFunctionInstance` of the importing module.
### setup for instance
The data structure pointed to by `WASMModuleInstance::export_functions` is set up when instantiating a module instance.
The runtime walks through the `WASMModule::exports` array and finds all items whose kind equals "function". For each match it creates a `WASMExportFuncInstance` node, finds the associated `WASMFunctionInstance` object and saves its address in the field `function`.
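A minimal host-side sketch of the lookup flow described above; the export name `add`, its two i32 parameters and the stack size are illustrative assumptions:

```c
#include <stdio.h>
#include "wasm_export.h"

/* Sketch: call an exported wasm function by name from the host. */
static bool
call_exported_add(wasm_module_inst_t module_inst)
{
    /* Walks WASMModuleInstance::export_functions and matches by name */
    wasm_function_inst_t func =
        wasm_runtime_lookup_function(module_inst, "add", NULL);
    if (!func)
        return false;

    wasm_exec_env_t exec_env =
        wasm_runtime_create_exec_env(module_inst, 16 * 1024);
    if (!exec_env)
        return false;

    uint32_t argv[2] = { 1, 2 }; /* arguments in, result comes back in argv[0] */
    bool ok = wasm_runtime_call_wasm(exec_env, func, 2, argv);
    if (ok)
        printf("add(1, 2) = %u\n", argv[0]);
    else
        printf("exception: %s\n", wasm_runtime_get_exception(module_inst));

    wasm_runtime_destroy_exec_env(exec_env);
    return ok;
}
```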

View File

@ -0,0 +1,47 @@
# Wasm Function
## Internal data structure
![](./images/wasm_function.svg)
## Module level data (function)
**WASMModule**: Data structure created for loading a module.
- `WASMFunction **functions`: initialized from the Wasm file function section
- `WASMImport *import_functions`: initialized from the Wasm file import section. The runtime will try to resolve the imports from the native API registration, refer to [Export native API to WASM application](../../../doc/export_native_api.md).
**WASMFunction**: represents a Wasm function located in the Wasm file code section, and tracks the link to the compiled function body.
**WASMImport**: represents an imported Wasm function, which can be resolved as a native function or as another Wasm module's exported function.
## Instance level data (function)
**WASMModuleInstance**: Data structure created for instantiating a module
- `WASMModuleInstanceExtra::functions`: combines the imported and internal functions into a single array of the structure `WASMFunctionInstance`
- `WASMModuleInstance::import_func_ptrs`: pointer array for resolved function imports. This array is referenced when calling imported native functions. Note it is initialized with the module-level resolved imports, but may point to different native functions later due to c-api calls.
## Execution paths
**Interpreter**:
- Execute internal bytecode function:
```
WASMModuleInstance::e
-> WASMModuleInstanceExtra::functions[..]
-> WASMFunctionInstance::func
-> WASMFunction::code
```
- Execute imported function from other module:
```
WASMModuleInstance::e
-> WASMModuleInstanceExtra::functions[..]
(WASMFunctionInstance flag indicates an import)
-> WASMFunctionInstance::import_func_inst
-> WASMModuleInstance(second)::func
-> WASMFunction (second module)::code
```
- Execute imported native function:
```
WASMModuleInstance::e
-> WASMModuleInstanceExtra::functions[..]
(flag indicates imported native)
WASMModuleInstance::import_func_ptrs[..]
-> native function
```
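The imported-native path above relies on the host registering native APIs before instantiation; a minimal sketch of such a registration (the `env.log_i32` symbol and its body are illustrative assumptions, not part of the runtime):

```c
#include <stdio.h>
#include <stdint.h>
#include "wasm_export.h"

/* Illustrative native implementation; once registered it is resolved into
   WASMModuleInstance::import_func_ptrs for apps that import env.log_i32. */
static int32_t
log_i32_wrapper(wasm_exec_env_t exec_env, int32_t value)
{
    (void)exec_env;
    printf("wasm app logged: %d\n", value);
    return value;
}

static NativeSymbol native_symbols[] = {
    /* symbol, function pointer, signature ("(i)i" = one i32 in, i32 out) */
    { "log_i32", log_i32_wrapper, "(i)i", NULL },
};

/* Must run before instantiating the module that imports env.log_i32 */
static bool
register_host_natives(void)
{
    return wasm_runtime_register_natives(
        "env", native_symbols, sizeof(native_symbols) / sizeof(NativeSymbol));
}
```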

View File

@ -0,0 +1,4 @@
# Wasm globals
![](./images/wasm_globals.svg)

File diff suppressed because it is too large

View File

@ -30,7 +30,7 @@
#define BUILD_COND_BR(value_if, block_then, block_else) \
do { \
if (!GEN_INSN(CMP, cc->cmp_reg, value_if, NEW_CONST(cc, 0)) \
if (!GEN_INSN(CMP, cc->cmp_reg, value_if, NEW_CONST(I32, 0)) \
|| !GEN_INSN(BNE, cc->cmp_reg, jit_basic_block_label(block_then), \
jit_basic_block_label(block_else))) { \
jit_set_last_error(cc, "generate bne insn failed"); \
@ -904,6 +904,42 @@ check_copy_arities(const JitBlock *block_dst, JitFrame *jit_frame)
}
}
#if WASM_ENABLE_THREAD_MGR != 0
bool
jit_check_suspend_flags(JitCompContext *cc)
{
JitReg exec_env, suspend_flags, terminate_flag, offset;
JitBasicBlock *terminate_block, *cur_basic_block;
JitFrame *jit_frame = cc->jit_frame;
cur_basic_block = cc->cur_basic_block;
terminate_block = jit_cc_new_basic_block(cc, 0);
if (!terminate_block) {
return false;
}
gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp);
exec_env = cc->exec_env_reg;
suspend_flags = jit_cc_new_reg_I32(cc);
terminate_flag = jit_cc_new_reg_I32(cc);
offset = jit_cc_new_const_I32(cc, offsetof(WASMExecEnv, suspend_flags));
GEN_INSN(LDI32, suspend_flags, exec_env, offset);
GEN_INSN(AND, terminate_flag, suspend_flags, NEW_CONST(I32, 1));
GEN_INSN(CMP, cc->cmp_reg, terminate_flag, NEW_CONST(I32, 0));
GEN_INSN(BNE, cc->cmp_reg, jit_basic_block_label(terminate_block), 0);
cc->cur_basic_block = terminate_block;
GEN_INSN(RETURN, NEW_CONST(I32, 0));
cc->cur_basic_block = cur_basic_block;
return true;
}
#endif
static bool
handle_op_br(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip)
{
@ -986,6 +1022,13 @@ fail:
bool
jit_compile_op_br(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip)
{
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
return false;
#endif
return handle_op_br(cc, br_depth, p_frame_ip)
&& handle_next_reachable_block(cc, p_frame_ip);
}
@ -1105,6 +1148,12 @@ jit_compile_op_br_if(JitCompContext *cc, uint32 br_depth,
jit_insn_delete(insn_select);
}
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
return false;
#endif
SET_BUILDER_POS(if_basic_block);
SET_BB_BEGIN_BCIP(if_basic_block, *p_frame_ip - 1);
@ -1144,6 +1193,12 @@ jit_compile_op_br_table(JitCompContext *cc, uint32 *br_depths, uint32 br_count,
uint32 i = 0;
JitOpndLookupSwitch *opnd = NULL;
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
return false;
#endif
cur_basic_block = cc->cur_basic_block;
POP_I32(value);

View File

@ -5,6 +5,7 @@
#include "jit_emit_function.h"
#include "jit_emit_exception.h"
#include "jit_emit_control.h"
#include "../jit_frontend.h"
#include "../jit_codegen.h"
#include "../../interpreter/wasm_runtime.h"
@ -232,6 +233,12 @@ jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
bool is_pointer_arg;
bool return_value = false;
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
goto fail;
#endif
if (func_idx < wasm_module->import_function_count) {
/* The function to call is an import function */
func_import = &wasm_module->import_functions[func_idx].u.function;
@ -275,6 +282,12 @@ jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
goto fail;
}
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
goto fail;
#endif
return true;
}
@ -416,6 +429,12 @@ jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
}
}
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
goto fail;
#endif
/* Clear part of memory regs and table regs as their values
may be changed in the function call */
if (cc->cur_wasm_module->possible_memory_grow)
@ -540,6 +559,12 @@ jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
GEN_INSN(STI32, func_idx, cc->exec_env_reg,
NEW_CONST(I32, offsetof(WASMExecEnv, jit_cache) + 4));
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
goto fail;
#endif
block_import = jit_cc_new_basic_block(cc, 0);
block_nonimport = jit_cc_new_basic_block(cc, 0);
func_return = jit_cc_new_basic_block(cc, 0);
@ -742,6 +767,12 @@ jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
goto fail;
}
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
goto fail;
#endif
/* Clear part of memory regs and table regs as their values
may be changed in the function call */
if (cc->cur_wasm_module->possible_memory_grow)

View File

@ -9,6 +9,7 @@
#include "../jit_frontend.h"
#include "../jit_codegen.h"
#include "../../interpreter/wasm_runtime.h"
#include "jit_emit_control.h"
#ifndef OS_ENABLE_HW_BOUND_CHECK
static JitReg
@ -60,6 +61,14 @@ fail:
}
#endif
#if WASM_ENABLE_SHARED_MEMORY != 0
static void
set_load_or_store_atomic(JitInsn *load_or_store_inst)
{
load_or_store_inst->flags_u8 |= 0x1;
}
#endif
#if UINTPTR_MAX == UINT64_MAX
static JitReg
check_and_seek_on_64bit_platform(JitCompContext *cc, JitReg addr, JitReg offset,
@ -177,23 +186,36 @@ fail:
return 0;
}
#define CHECK_ALIGNMENT(maddr, memory_data, offset1) \
#if UINTPTR_MAX == UINT64_MAX
#define CHECK_ALIGNMENT(offset1) \
do { \
GEN_INSN(ADD, maddr, memory_data, offset1); \
JitReg align_mask = NEW_CONST(I64, ((uint64)1 << align) - 1); \
JitReg AND_res = jit_cc_new_reg_I64(cc); \
GEN_INSN(AND, AND_res, maddr, align_mask); \
GEN_INSN(AND, AND_res, offset1, align_mask); \
GEN_INSN(CMP, cc->cmp_reg, AND_res, NEW_CONST(I64, 0)); \
if (!jit_emit_exception(cc, EXCE_UNALIGNED_ATOMIC, JIT_OP_BNE, \
cc->cmp_reg, NULL)) \
goto fail; \
} while (0)
#else
#define CHECK_ALIGNMENT(offset1) \
do { \
JitReg align_mask = NEW_CONST(I32, (1 << align) - 1); \
JitReg AND_res = jit_cc_new_reg_I32(cc); \
GEN_INSN(AND, AND_res, offset1, align_mask); \
GEN_INSN(CMP, cc->cmp_reg, AND_res, NEW_CONST(I32, 0)); \
if (!jit_emit_exception(cc, EXCE_UNALIGNED_ATOMIC, JIT_OP_BNE, \
cc->cmp_reg, NULL)) \
goto fail; \
} while (0)
#endif
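Logically, the check emitted by these macros corresponds to the following host-side sketch (a plain C restatement, not code the JIT emits):

```c
#include <stdbool.h>
#include <stdint.h>

/* Sketch of the alignment rule enforced for atomic accesses: the effective
   offset must be aligned to the natural access size (1 << align bytes),
   otherwise the unaligned-atomic exception (EXCE_UNALIGNED_ATOMIC) is raised. */
static bool
atomic_access_is_aligned(uint64_t offset1, uint32_t align)
{
    uint64_t align_mask = (UINT64_C(1) << align) - 1;
    return (offset1 & align_mask) == 0;
}
```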
bool
jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool sign, bool atomic)
{
JitReg addr, offset1, value, memory_data;
JitInsn *load_insn = NULL;
POP_I32(addr);
@ -201,6 +223,11 @@ jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
if (!offset1) {
goto fail;
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic) {
CHECK_ALIGNMENT(offset1);
}
#endif
memory_data = get_memory_data_reg(cc->jit_frame, 0);
@ -209,30 +236,30 @@ jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
case 1:
{
if (sign) {
GEN_INSN(LDI8, value, memory_data, offset1);
load_insn = GEN_INSN(LDI8, value, memory_data, offset1);
}
else {
GEN_INSN(LDU8, value, memory_data, offset1);
load_insn = GEN_INSN(LDU8, value, memory_data, offset1);
}
break;
}
case 2:
{
if (sign) {
GEN_INSN(LDI16, value, memory_data, offset1);
load_insn = GEN_INSN(LDI16, value, memory_data, offset1);
}
else {
GEN_INSN(LDU16, value, memory_data, offset1);
load_insn = GEN_INSN(LDU16, value, memory_data, offset1);
}
break;
}
case 4:
{
if (sign) {
GEN_INSN(LDI32, value, memory_data, offset1);
load_insn = GEN_INSN(LDI32, value, memory_data, offset1);
}
else {
GEN_INSN(LDU32, value, memory_data, offset1);
load_insn = GEN_INSN(LDU32, value, memory_data, offset1);
}
break;
}
@ -243,6 +270,13 @@ jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
}
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic && load_insn)
set_load_or_store_atomic(load_insn);
#else
(void)load_insn;
#endif
PUSH_I32(value);
return true;
fail:
@ -254,6 +288,7 @@ jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool sign, bool atomic)
{
JitReg addr, offset1, value, memory_data;
JitInsn *load_insn = NULL;
POP_I32(addr);
@ -261,6 +296,11 @@ jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
if (!offset1) {
goto fail;
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic) {
CHECK_ALIGNMENT(offset1);
}
#endif
memory_data = get_memory_data_reg(cc->jit_frame, 0);
@ -269,40 +309,40 @@ jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
case 1:
{
if (sign) {
GEN_INSN(LDI8, value, memory_data, offset1);
load_insn = GEN_INSN(LDI8, value, memory_data, offset1);
}
else {
GEN_INSN(LDU8, value, memory_data, offset1);
load_insn = GEN_INSN(LDU8, value, memory_data, offset1);
}
break;
}
case 2:
{
if (sign) {
GEN_INSN(LDI16, value, memory_data, offset1);
load_insn = GEN_INSN(LDI16, value, memory_data, offset1);
}
else {
GEN_INSN(LDU16, value, memory_data, offset1);
load_insn = GEN_INSN(LDU16, value, memory_data, offset1);
}
break;
}
case 4:
{
if (sign) {
GEN_INSN(LDI32, value, memory_data, offset1);
load_insn = GEN_INSN(LDI32, value, memory_data, offset1);
}
else {
GEN_INSN(LDU32, value, memory_data, offset1);
load_insn = GEN_INSN(LDU32, value, memory_data, offset1);
}
break;
}
case 8:
{
if (sign) {
GEN_INSN(LDI64, value, memory_data, offset1);
load_insn = GEN_INSN(LDI64, value, memory_data, offset1);
}
else {
GEN_INSN(LDU64, value, memory_data, offset1);
load_insn = GEN_INSN(LDU64, value, memory_data, offset1);
}
break;
}
@ -313,6 +353,13 @@ jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
}
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic && load_insn)
set_load_or_store_atomic(load_insn);
#else
(void)load_insn;
#endif
PUSH_I64(value);
return true;
fail:
@ -370,6 +417,7 @@ jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool atomic)
{
JitReg value, addr, offset1, memory_data;
JitInsn *store_insn = NULL;
POP_I32(value);
POP_I32(addr);
@ -378,23 +426,28 @@ jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
if (!offset1) {
goto fail;
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic) {
CHECK_ALIGNMENT(offset1);
}
#endif
memory_data = get_memory_data_reg(cc->jit_frame, 0);
switch (bytes) {
case 1:
{
GEN_INSN(STI8, value, memory_data, offset1);
store_insn = GEN_INSN(STI8, value, memory_data, offset1);
break;
}
case 2:
{
GEN_INSN(STI16, value, memory_data, offset1);
store_insn = GEN_INSN(STI16, value, memory_data, offset1);
break;
}
case 4:
{
GEN_INSN(STI32, value, memory_data, offset1);
store_insn = GEN_INSN(STI32, value, memory_data, offset1);
break;
}
default:
@ -403,6 +456,12 @@ jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
goto fail;
}
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic && store_insn)
set_load_or_store_atomic(store_insn);
#else
(void)store_insn;
#endif
return true;
fail:
@ -414,6 +473,7 @@ jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes, bool atomic)
{
JitReg value, addr, offset1, memory_data;
JitInsn *store_insn = NULL;
POP_I64(value);
POP_I32(addr);
@ -422,6 +482,11 @@ jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
if (!offset1) {
goto fail;
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic) {
CHECK_ALIGNMENT(offset1);
}
#endif
if (jit_reg_is_const(value) && bytes < 8) {
value = NEW_CONST(I32, (int32)jit_cc_get_const_I64(cc, value));
@ -432,22 +497,22 @@ jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
switch (bytes) {
case 1:
{
GEN_INSN(STI8, value, memory_data, offset1);
store_insn = GEN_INSN(STI8, value, memory_data, offset1);
break;
}
case 2:
{
GEN_INSN(STI16, value, memory_data, offset1);
store_insn = GEN_INSN(STI16, value, memory_data, offset1);
break;
}
case 4:
{
GEN_INSN(STI32, value, memory_data, offset1);
store_insn = GEN_INSN(STI32, value, memory_data, offset1);
break;
}
case 8:
{
GEN_INSN(STI64, value, memory_data, offset1);
store_insn = GEN_INSN(STI64, value, memory_data, offset1);
break;
}
default:
@ -456,6 +521,12 @@ jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
goto fail;
}
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (atomic && store_insn)
set_load_or_store_atomic(store_insn);
#else
(void)store_insn;
#endif
return true;
fail:
@ -774,10 +845,153 @@ fail:
#endif
#if WASM_ENABLE_SHARED_MEMORY != 0
#define GEN_AT_RMW_INSN(op, op_type, bytes, result, value, memory_data, \
offset1) \
do { \
switch (bytes) { \
case 1: \
{ \
insn = GEN_INSN(AT_##op##U8, result, value, memory_data, \
offset1); \
break; \
} \
case 2: \
{ \
insn = GEN_INSN(AT_##op##U16, result, value, memory_data, \
offset1); \
break; \
} \
case 4: \
{ \
if (op_type == VALUE_TYPE_I32) \
insn = GEN_INSN(AT_##op##I32, result, value, memory_data, \
offset1); \
else \
insn = GEN_INSN(AT_##op##U32, result, value, memory_data, \
offset1); \
break; \
} \
case 8: \
{ \
insn = GEN_INSN(AT_##op##I64, result, value, memory_data, \
offset1); \
break; \
} \
default: \
{ \
bh_assert(0); \
goto fail; \
} \
} \
} while (0)
bool
jit_compile_op_atomic_rmw(JitCompContext *cc, uint8 atomic_op, uint8 op_type,
uint32 align, uint32 offset, uint32 bytes)
{
JitReg addr, offset1, memory_data, value, result, eax_hreg, rax_hreg,
ebx_hreg, rbx_hreg;
JitInsn *insn = NULL;
bool is_i32 = op_type == VALUE_TYPE_I32;
bool is_logical_op = atomic_op == AtomicRMWBinOpAnd
|| atomic_op == AtomicRMWBinOpOr
|| atomic_op == AtomicRMWBinOpXor;
/* currently we only implement atomic rmw on x86-64 target */
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
/* For atomic logical binary ops, the generated loop implicitly uses
* rax in the cmpxchg instruction and rbx for storing the temp value */
eax_hreg = jit_codegen_get_hreg_by_name("eax");
rax_hreg = jit_codegen_get_hreg_by_name("rax");
ebx_hreg = jit_codegen_get_hreg_by_name("ebx");
rbx_hreg = jit_codegen_get_hreg_by_name("rbx");
bh_assert(op_type == VALUE_TYPE_I32 || op_type == VALUE_TYPE_I64);
if (op_type == VALUE_TYPE_I32) {
POP_I32(value);
}
else {
POP_I64(value);
}
POP_I32(addr);
offset1 = check_and_seek(cc, addr, offset, bytes);
if (!offset1) {
goto fail;
}
CHECK_ALIGNMENT(offset1);
memory_data = get_memory_data_reg(cc->jit_frame, 0);
if (op_type == VALUE_TYPE_I32)
result = jit_cc_new_reg_I32(cc);
else
result = jit_cc_new_reg_I64(cc);
switch (atomic_op) {
case AtomicRMWBinOpAdd:
{
GEN_AT_RMW_INSN(ADD, op_type, bytes, result, value, memory_data,
offset1);
break;
}
case AtomicRMWBinOpSub:
{
GEN_AT_RMW_INSN(SUB, op_type, bytes, result, value, memory_data,
offset1);
break;
}
case AtomicRMWBinOpAnd:
{
GEN_AT_RMW_INSN(AND, op_type, bytes, result, value, memory_data,
offset1);
break;
}
case AtomicRMWBinOpOr:
{
GEN_AT_RMW_INSN(OR, op_type, bytes, result, value, memory_data,
offset1);
break;
}
case AtomicRMWBinOpXor:
{
GEN_AT_RMW_INSN(XOR, op_type, bytes, result, value, memory_data,
offset1);
break;
}
case AtomicRMWBinOpXchg:
{
GEN_AT_RMW_INSN(XCHG, op_type, bytes, result, value, memory_data,
offset1);
break;
}
default:
{
bh_assert(0);
goto fail;
}
}
if (is_logical_op
&& (!insn
|| !jit_lock_reg_in_insn(cc, insn, is_i32 ? eax_hreg : rax_hreg)
|| !jit_lock_reg_in_insn(cc, insn, is_i32 ? ebx_hreg : rbx_hreg))) {
jit_set_last_error(
cc, "generate atomic logical insn or lock ra&rb hreg failed");
goto fail;
}
if (op_type == VALUE_TYPE_I32)
PUSH_I32(result);
else
PUSH_I64(result);
return true;
#endif /* defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) */
fail:
return false;
}
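For the logical ops (AND/OR/XOR) the x86-64 backend lowers the RMW to a cmpxchg retry loop, which is why eax/rax and ebx/rbx are reserved above. A rough C-level equivalent of those semantics using GCC/Clang builtins (a sketch, not the emitted code):

#include <stdint.h>

static uint32_t
atomic_and_u32_sketch(volatile uint32_t *p, uint32_t v)
{
    uint32_t old = __atomic_load_n(p, __ATOMIC_RELAXED);
    uint32_t tmp;

    do {
        /* tmp plays the role of ebx/rbx, old the role of eax/rax */
        tmp = old & v;
    } while (!__atomic_compare_exchange_n(p, &old, tmp, true /* weak */,
                                          __ATOMIC_SEQ_CST,
                                          __ATOMIC_SEQ_CST));
    return old; /* value read before the update, pushed as the wasm result */
}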
@ -785,6 +999,93 @@ bool
jit_compile_op_atomic_cmpxchg(JitCompContext *cc, uint8 op_type, uint32 align,
uint32 offset, uint32 bytes)
{
JitReg addr, offset1, memory_data, value, expect, result;
bool is_i32 = op_type == VALUE_TYPE_I32;
/* currently we only implement atomic cmpxchg on x86-64 target */
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
/* cmpxchg uses register al/ax/eax/rax to hold the expected value
* parameter, and the read result is also stored in al/ax/eax/rax */
JitReg eax_hreg = jit_codegen_get_hreg_by_name("eax");
JitReg rax_hreg = jit_codegen_get_hreg_by_name("rax");
JitInsn *insn = NULL;
bh_assert(op_type == VALUE_TYPE_I32 || op_type == VALUE_TYPE_I64);
if (is_i32) {
POP_I32(value);
POP_I32(expect);
result = jit_cc_new_reg_I32(cc);
}
else {
POP_I64(value);
POP_I64(expect);
result = jit_cc_new_reg_I64(cc);
}
POP_I32(addr);
offset1 = check_and_seek(cc, addr, offset, bytes);
if (!offset1) {
goto fail;
}
CHECK_ALIGNMENT(offset1);
memory_data = get_memory_data_reg(cc->jit_frame, 0);
GEN_INSN(MOV, is_i32 ? eax_hreg : rax_hreg, expect);
switch (bytes) {
case 1:
{
insn = GEN_INSN(AT_CMPXCHGU8, value, is_i32 ? eax_hreg : rax_hreg,
memory_data, offset1);
break;
}
case 2:
{
insn = GEN_INSN(AT_CMPXCHGU16, value, is_i32 ? eax_hreg : rax_hreg,
memory_data, offset1);
break;
}
case 4:
{
if (op_type == VALUE_TYPE_I32)
insn =
GEN_INSN(AT_CMPXCHGI32, value, is_i32 ? eax_hreg : rax_hreg,
memory_data, offset1);
else
insn =
GEN_INSN(AT_CMPXCHGU32, value, is_i32 ? eax_hreg : rax_hreg,
memory_data, offset1);
break;
}
case 8:
{
insn = GEN_INSN(AT_CMPXCHGI64, value, is_i32 ? eax_hreg : rax_hreg,
memory_data, offset1);
break;
}
default:
{
bh_assert(0);
goto fail;
}
}
if (!insn
|| !jit_lock_reg_in_insn(cc, insn, is_i32 ? eax_hreg : rax_hreg)) {
jit_set_last_error(cc, "generate cmpxchg insn or lock ra hreg failed");
goto fail;
}
GEN_INSN(MOV, result, is_i32 ? eax_hreg : rax_hreg);
if (is_i32)
PUSH_I32(result);
else
PUSH_I64(result);
return true;
#endif /* defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64) */
fail:
return false;
}
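Whether or not the store happens, the value read from memory is what atomic.rmw.cmpxchg must return, which matches x86 cmpxchg leaving the loaded value in al/ax/eax/rax. A short C sketch of those semantics with a GCC/Clang builtin (illustrative only):

#include <stdint.h>

static uint32_t
atomic_cmpxchg_u32_sketch(volatile uint32_t *p, uint32_t expect, uint32_t repl)
{
    uint32_t old = expect;

    /* Stores repl only when *p == expect; old always ends up holding the
       value that was actually read from memory. */
    __atomic_compare_exchange_n(p, &old, repl, false,
                                __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
    return old;
}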
@ -812,8 +1113,10 @@ jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
JitReg offset1 = check_and_seek(cc, addr, offset, bytes);
if (!offset1)
goto fail;
JitReg maddr = jit_cc_new_reg_I64(cc);
CHECK_ALIGNMENT(maddr, memory_data, offset1);
CHECK_ALIGNMENT(offset1);
JitReg maddr = jit_cc_new_reg_ptr(cc);
GEN_INSN(ADD, maddr, memory_data, offset1);
// Prepare `wasm_runtime_atomic_wait` arguments
JitReg res = jit_cc_new_reg_I32(cc);
@ -835,6 +1138,12 @@ jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
goto fail;
PUSH_I32(res);
#if WASM_ENABLE_THREAD_MGR != 0
/* Insert suspend check point */
if (!jit_check_suspend_flags(cc))
goto fail;
#endif
return true;
fail:
return false;
@ -854,8 +1163,10 @@ jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
JitReg offset1 = check_and_seek(cc, addr, offset, bytes);
if (!offset1)
goto fail;
JitReg maddr = jit_cc_new_reg_I64(cc);
CHECK_ALIGNMENT(maddr, memory_data, offset1);
CHECK_ALIGNMENT(offset1);
JitReg maddr = jit_cc_new_reg_ptr(cc);
GEN_INSN(ADD, maddr, memory_data, offset1);
// Prepare `wasm_runtime_atomic_notify` arguments
JitReg res = jit_cc_new_reg_I32(cc);
@ -879,4 +1190,11 @@ jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
fail:
return false;
}
bool
jit_compiler_op_atomic_fence(JitCompContext *cc)
{
GEN_INSN(FENCE);
return true;
}
#endif
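The new FENCE IR op, together with the interpreter change from a release fence to a seq_cst fence later in this patch, implements wasm's atomic.fence, which requires sequentially consistent ordering. A host-side equivalent in C11, shown as a sketch:

#include <stdatomic.h>

static inline void
wasm_atomic_fence_sketch(void)
{
    /* Full barrier: orders all preceding and following atomic accesses */
    atomic_thread_fence(memory_order_seq_cst);
}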

View File

@ -80,6 +80,9 @@ jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
bool
jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
uint32 bytes);
bool
jit_compiler_op_atomic_fence(JitCompContext *cc);
#endif
#ifdef __cplusplus

View File

@ -16,11 +16,13 @@ if (WAMR_BUILD_TARGET STREQUAL "X86_64" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
FetchContent_Declare(
asmjit
GIT_REPOSITORY https://github.com/asmjit/asmjit.git
GIT_TAG c1019f1642a588107148f64ba54584b0ae3ec8d1
)
else ()
FetchContent_Declare(
asmjit
GIT_REPOSITORY https://github.com/asmjit/asmjit.git
GIT_TAG c1019f1642a588107148f64ba54584b0ae3ec8d1
PATCH_COMMAND git apply ${IWASM_FAST_JIT_DIR}/asmjit_sgx_patch.diff
)
endif ()

View File

@ -114,7 +114,10 @@ jit_dump_insn(JitCompContext *cc, JitInsn *insn)
switch (insn->opcode) {
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) \
case JIT_OP_##NAME: \
os_printf(" %-15s", #NAME); \
if (insn->flags_u8 & 0x1) \
os_printf(" ATOMIC %-8s", #NAME); \
else \
os_printf(" %-15s", #NAME); \
jit_dump_insn_##OPND_KIND(cc, insn, OPND_NUM); \
break;
#include "jit_ir.def"
@ -319,7 +322,9 @@ jit_pass_dump(JitCompContext *cc)
os_printf("JIT.COMPILER.DUMP: PASS_NO=%d PREV_PASS=%s\n\n", pass_no,
pass_name);
jit_dump_cc(cc);
os_printf("\n");
return true;
}

View File

@ -223,18 +223,37 @@ get_memory_data_reg(JitFrame *frame, uint32 mem_idx)
{
JitCompContext *cc = frame->cc;
JitReg module_inst_reg = get_module_inst_reg(frame);
uint32 memory_data_offset =
(uint32)offsetof(WASMModuleInstance, global_table_data.bytes)
+ (uint32)offsetof(WASMMemoryInstance, memory_data);
uint32 memory_data_offset;
bh_assert(mem_idx == 0);
#if WASM_ENABLE_SHARED_MEMORY != 0
uint32 memories_offset = (uint32)offsetof(WASMModuleInstance, memories);
JitReg memories_addr = jit_cc_new_reg_ptr(cc);
JitReg memories_0_addr = jit_cc_new_reg_ptr(cc);
memory_data_offset = (uint32)offsetof(WASMMemoryInstance, memory_data);
if (!frame->memory_regs[mem_idx].memory_data) {
frame->memory_regs[mem_idx].memory_data =
cc->memory_regs[mem_idx].memory_data;
/* module_inst->memories */
GEN_INSN(LDPTR, memories_addr, module_inst_reg,
NEW_CONST(I32, memories_offset));
/* module_inst->memories[0] */
GEN_INSN(LDPTR, memories_0_addr, memories_addr, NEW_CONST(I32, 0));
/* memories[0]->memory_data */
GEN_INSN(LDPTR, frame->memory_regs[mem_idx].memory_data,
memories_0_addr, NEW_CONST(I32, memory_data_offset));
}
#else
memory_data_offset =
(uint32)offsetof(WASMModuleInstance, global_table_data.bytes)
+ (uint32)offsetof(WASMMemoryInstance, memory_data);
if (!frame->memory_regs[mem_idx].memory_data) {
frame->memory_regs[mem_idx].memory_data =
cc->memory_regs[mem_idx].memory_data;
GEN_INSN(LDPTR, frame->memory_regs[mem_idx].memory_data,
module_inst_reg, NEW_CONST(I32, memory_data_offset));
}
#endif
return frame->memory_regs[mem_idx].memory_data;
}
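With shared memory enabled, the LDPTR chain re-reads the data pointer through module_inst->memories[0] each time the register is refreshed, so a memory.grow performed by another thread is observed. The emitted loads correspond roughly to this C sketch (using the runtime structs referenced above):

static uint8 *
shared_memory_data_sketch(WASMModuleInstance *module_inst)
{
    /* module_inst->memories -> memories[0] -> memory_data */
    return module_inst->memories[0]->memory_data;
}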
@ -1078,6 +1097,39 @@ read_leb(JitCompContext *cc, const uint8 *buf, const uint8 *buf_end,
res = (int64)res64; \
} while (0)
#if WASM_ENABLE_SHARED_MEMORY != 0
#define COMPILE_ATOMIC_RMW(OP, NAME) \
case WASM_OP_ATOMIC_RMW_I32_##NAME: \
bytes = 4; \
op_type = VALUE_TYPE_I32; \
goto OP_ATOMIC_##OP; \
case WASM_OP_ATOMIC_RMW_I64_##NAME: \
bytes = 8; \
op_type = VALUE_TYPE_I64; \
goto OP_ATOMIC_##OP; \
case WASM_OP_ATOMIC_RMW_I32_##NAME##8_U: \
bytes = 1; \
op_type = VALUE_TYPE_I32; \
goto OP_ATOMIC_##OP; \
case WASM_OP_ATOMIC_RMW_I32_##NAME##16_U: \
bytes = 2; \
op_type = VALUE_TYPE_I32; \
goto OP_ATOMIC_##OP; \
case WASM_OP_ATOMIC_RMW_I64_##NAME##8_U: \
bytes = 1; \
op_type = VALUE_TYPE_I64; \
goto OP_ATOMIC_##OP; \
case WASM_OP_ATOMIC_RMW_I64_##NAME##16_U: \
bytes = 2; \
op_type = VALUE_TYPE_I64; \
goto OP_ATOMIC_##OP; \
case WASM_OP_ATOMIC_RMW_I64_##NAME##32_U: \
bytes = 4; \
op_type = VALUE_TYPE_I64; \
OP_ATOMIC_##OP : bin_op = AtomicRMWBinOp##OP; \
goto build_atomic_rmw;
#endif
static bool
jit_compile_func(JitCompContext *cc)
{
@ -2096,6 +2148,8 @@ jit_compile_func(JitCompContext *cc)
case WASM_OP_ATOMIC_FENCE:
/* Skip memory index */
frame_ip++;
if (!jit_compiler_op_atomic_fence(cc))
return false;
break;
case WASM_OP_ATOMIC_I32_LOAD:
bytes = 4;
@ -2192,15 +2246,12 @@ jit_compile_func(JitCompContext *cc)
return false;
break;
/* TODO */
/*
COMPILE_ATOMIC_RMW(Add, ADD);
COMPILE_ATOMIC_RMW(Sub, SUB);
COMPILE_ATOMIC_RMW(And, AND);
COMPILE_ATOMIC_RMW(Or, OR);
COMPILE_ATOMIC_RMW(Xor, XOR);
COMPILE_ATOMIC_RMW(Xchg, XCHG);
*/
build_atomic_rmw:
if (!jit_compile_op_atomic_rmw(cc, bin_op, op_type,

View File

@ -108,6 +108,17 @@ typedef enum FloatArithmetic {
FLOAT_MAX,
} FloatArithmetic;
#if WASM_ENABLE_SHARED_MEMORY != 0
typedef enum AtomicRMWBinOp {
AtomicRMWBinOpAdd,
AtomicRMWBinOpSub,
AtomicRMWBinOpAnd,
AtomicRMWBinOpOr,
AtomicRMWBinOpXor,
AtomicRMWBinOpXchg
} AtomicRMWBinOp;
#endif
/**
* Translate instructions in a function. The translated block must
* end with a branch instruction whose targets are offsets relating to

View File

@ -10,7 +10,11 @@
/**
* Operand kinds of instructions.
*/
enum { JIT_OPND_KIND_Reg, JIT_OPND_KIND_VReg, JIT_OPND_KIND_LookupSwitch };
enum {
JIT_OPND_KIND_Reg,
JIT_OPND_KIND_VReg,
JIT_OPND_KIND_LookupSwitch,
};
/**
* Operand kind of each instruction.
@ -45,6 +49,18 @@ static const uint8 insn_opnd_first_use[] = {
jit_calloc(offsetof(JitInsn, _opnd._opnd_VReg._reg) \
+ sizeof(JitReg) * (OPND_NUM))
JitInsn *
_jit_insn_new_Reg_0(JitOpcode opc)
{
JitInsn *insn = JIT_INSN_NEW_Reg(0);
if (insn) {
insn->opcode = opc;
}
return insn;
}
JitInsn *
_jit_insn_new_Reg_1(JitOpcode opc, JitReg r0)
{

View File

@ -200,6 +200,50 @@ INSN(CALLBC, Reg, 4, 2)
INSN(RETURNBC, Reg, 3, 0)
INSN(RETURN, Reg, 1, 0)
#if WASM_ENABLE_SHARED_MEMORY != 0
/* Atomic Memory Accesses */
/* op1(replacement val) op2(expected val) op3(mem data) op4(offset)
* and on x86, the result is stored in register al/ax/eax/rax */
INSN(AT_CMPXCHGU8, Reg, 4, 0)
INSN(AT_CMPXCHGU16, Reg, 4, 0)
INSN(AT_CMPXCHGI32, Reg, 4, 0)
INSN(AT_CMPXCHGU32, Reg, 4, 0)
INSN(AT_CMPXCHGI64, Reg, 4, 0)
/* rmw operations:
* op1(read value) op2(operand value) op3(mem data) op4(offset) */
INSN(AT_ADDU8, Reg, 4, 1)
INSN(AT_ADDU16, Reg, 4, 1)
INSN(AT_ADDI32, Reg, 4, 1)
INSN(AT_ADDU32, Reg, 4, 1)
INSN(AT_ADDI64, Reg, 4, 1)
INSN(AT_SUBU8, Reg, 4, 1)
INSN(AT_SUBU16, Reg, 4, 1)
INSN(AT_SUBI32, Reg, 4, 1)
INSN(AT_SUBU32, Reg, 4, 1)
INSN(AT_SUBI64, Reg, 4, 1)
INSN(AT_ANDU8, Reg, 4, 1)
INSN(AT_ANDU16, Reg, 4, 1)
INSN(AT_ANDI32, Reg, 4, 1)
INSN(AT_ANDU32, Reg, 4, 1)
INSN(AT_ANDI64, Reg, 4, 1)
INSN(AT_ORU8, Reg, 4, 1)
INSN(AT_ORU16, Reg, 4, 1)
INSN(AT_ORI32, Reg, 4, 1)
INSN(AT_ORU32, Reg, 4, 1)
INSN(AT_ORI64, Reg, 4, 1)
INSN(AT_XORU8, Reg, 4, 1)
INSN(AT_XORU16, Reg, 4, 1)
INSN(AT_XORI32, Reg, 4, 1)
INSN(AT_XORU32, Reg, 4, 1)
INSN(AT_XORI64, Reg, 4, 1)
INSN(AT_XCHGU8, Reg, 4, 1)
INSN(AT_XCHGU16, Reg, 4, 1)
INSN(AT_XCHGI32, Reg, 4, 1)
INSN(AT_XCHGU32, Reg, 4, 1)
INSN(AT_XCHGI64, Reg, 4, 1)
INSN(FENCE, Reg, 0, 0)
#endif
#undef INSN
/**

View File

@ -313,7 +313,8 @@ typedef struct JitInsn {
/* Opcode of the instruction. */
uint16 opcode;
/* Reserved field that may be used by optimizations locally. */
/* Reserved field that may be used by optimizations locally.
* bit_0 (the least significant bit) is the atomic flag for load/store */
uint8 flags_u8;
/* The unique ID of the instruction. */
@ -346,6 +347,9 @@ typedef enum JitOpcode {
* Helper functions for creating new instructions. Don't call them
* directly. Use jit_insn_new_NAME, such as jit_insn_new_MOV instead.
*/
JitInsn *
_jit_insn_new_Reg_0(JitOpcode opc);
JitInsn *
_jit_insn_new_Reg_1(JitOpcode opc, JitReg r0);
JitInsn *
@ -368,31 +372,35 @@ _jit_insn_new_LookupSwitch_1(JitOpcode opc, JitReg value, uint32 num);
* Instruction creation functions jit_insn_new_NAME, where NAME is the
* name of the instruction defined in jit_ir.def.
*/
#define ARG_DECL_Reg_0
#define ARG_LIST_Reg_0
#define ARG_DECL_Reg_1 JitReg r0
#define ARG_LIST_Reg_1 r0
#define ARG_LIST_Reg_1 , r0
#define ARG_DECL_Reg_2 JitReg r0, JitReg r1
#define ARG_LIST_Reg_2 r0, r1
#define ARG_LIST_Reg_2 , r0, r1
#define ARG_DECL_Reg_3 JitReg r0, JitReg r1, JitReg r2
#define ARG_LIST_Reg_3 r0, r1, r2
#define ARG_LIST_Reg_3 , r0, r1, r2
#define ARG_DECL_Reg_4 JitReg r0, JitReg r1, JitReg r2, JitReg r3
#define ARG_LIST_Reg_4 r0, r1, r2, r3
#define ARG_LIST_Reg_4 , r0, r1, r2, r3
#define ARG_DECL_Reg_5 JitReg r0, JitReg r1, JitReg r2, JitReg r3, JitReg r4
#define ARG_LIST_Reg_5 r0, r1, r2, r3, r4
#define ARG_LIST_Reg_5 , r0, r1, r2, r3, r4
#define ARG_DECL_VReg_1 JitReg r0, int n
#define ARG_LIST_VReg_1 r0, n
#define ARG_LIST_VReg_1 , r0, n
#define ARG_DECL_VReg_2 JitReg r0, JitReg r1, int n
#define ARG_LIST_VReg_2 r0, r1, n
#define ARG_LIST_VReg_2 , r0, r1, n
#define ARG_DECL_LookupSwitch_1 JitReg value, uint32 num
#define ARG_LIST_LookupSwitch_1 value, num
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) \
static inline JitInsn *jit_insn_new_##NAME( \
ARG_DECL_##OPND_KIND##_##OPND_NUM) \
{ \
return _jit_insn_new_##OPND_KIND##_##OPND_NUM( \
JIT_OP_##NAME, ARG_LIST_##OPND_KIND##_##OPND_NUM); \
#define ARG_LIST_LookupSwitch_1 , value, num
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) \
static inline JitInsn *jit_insn_new_##NAME( \
ARG_DECL_##OPND_KIND##_##OPND_NUM) \
{ \
return _jit_insn_new_##OPND_KIND##_##OPND_NUM( \
JIT_OP_##NAME ARG_LIST_##OPND_KIND##_##OPND_NUM); \
}
#include "jit_ir.def"
#undef INSN
#undef ARG_DECL_Reg_0
#undef ARG_LIST_Reg_0
#undef ARG_DECL_Reg_1
#undef ARG_LIST_Reg_1
#undef ARG_DECL_Reg_2
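Moving the comma into the ARG_LIST_* macros is what makes the zero-operand FENCE instruction representable: the opcode is always the first argument and each list supplies its own leading comma, so the empty case expands cleanly. A compilable miniature of the trick (all names invented for illustration):

#include <stdio.h>

static void new_insn_0(int opc) { printf("opc=%d\n", opc); }
static void new_insn_1(int opc, int r0) { printf("opc=%d r0=%d\n", opc, r0); }

#define ARG_DECL_0 void
#define ARG_LIST_0
#define ARG_DECL_1 int r0
#define ARG_LIST_1 , r0

#define INSN(NAME, OPND_NUM, OPC)                       \
    static inline void make_##NAME(ARG_DECL_##OPND_NUM) \
    {                                                   \
        new_insn_##OPND_NUM(OPC ARG_LIST_##OPND_NUM);   \
    }

INSN(FENCE, 0, 100) /* zero operands: no dangling comma after OPC */
INSN(MOV, 1, 101)

int main(void)
{
    make_FENCE();
    make_MOV(7);
    return 0;
}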

View File

@ -410,6 +410,13 @@ collect_distances(RegallocContext *rc, JitBasicBlock *basic_block)
JIT_FOREACH_INSN(basic_block, insn)
{
#if WASM_ENABLE_SHARED_MEMORY != 0
/* The fence insn doesn't have any operands, hence no regs are involved */
if (insn->opcode == JIT_OP_FENCE) {
continue;
}
#endif
JitRegVec regvec = jit_insn_opnd_regs(insn);
unsigned i;
JitReg *regp;
@ -737,6 +744,13 @@ allocate_for_basic_block(RegallocContext *rc, JitBasicBlock *basic_block,
JIT_FOREACH_INSN_REVERSE(basic_block, insn)
{
#if WASM_ENABLE_SHARED_MEMORY != 0
/* The fence insn doesn't have any operands, hence no regs are involved */
if (insn->opcode == JIT_OP_FENCE) {
continue;
}
#endif
JitRegVec regvec = jit_insn_opnd_regs(insn);
unsigned first_use = jit_insn_opnd_first_use(insn);
unsigned i;

View File

@ -467,14 +467,14 @@ wasm_runtime_set_wasi_ns_lookup_pool(wasm_module_t module, const char *ns_lookup
* Instantiate a WASM module.
*
* @param module the WASM module to instantiate
* @param stack_size the default stack size of the module instance when the
* @param default_stack_size the default stack size of the module instance when the
* exec env's operation stack isn't created by the user, e.g. APIs
* wasm_application_execute_main() and wasm_application_execute_func()
* create the operation stack internally with the stack size specified
* here, while wasm_runtime_create_exec_env() creates the operation
* stack with the stack size specified by its own parameter; in that
* case the stack size specified here is ignored.
* @param heap_size the default heap size of the module instance, a heap will
* @param host_managed_heap_size the default heap size of the module instance, a heap will
* be created besides the app memory space. Both wasm app and native
* function can allocate memory from the heap.
* @param error_buf buffer to output the error info if failed
@ -484,7 +484,7 @@ wasm_runtime_set_wasi_ns_lookup_pool(wasm_module_t module, const char *ns_lookup
*/
WASM_RUNTIME_API_EXTERN wasm_module_inst_t
wasm_runtime_instantiate(const wasm_module_t module,
uint32_t stack_size, uint32_t heap_size,
uint32_t default_stack_size, uint32_t host_managed_heap_size,
char *error_buf, uint32_t error_buf_size);
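Only the parameter names change here; the call itself is unchanged. A typical invocation, as a hedged usage sketch (sizes arbitrary, error handling trimmed):

#include <stdio.h>
#include "wasm_export.h"

static wasm_module_inst_t
instantiate_sketch(wasm_module_t module)
{
    char error_buf[128];
    wasm_module_inst_t inst =
        wasm_runtime_instantiate(module, 64 * 1024 /* default stack */,
                                 128 * 1024 /* host-managed heap */,
                                 error_buf, sizeof(error_buf));
    if (!inst)
        printf("instantiate failed: %s\n", error_buf);
    return inst;
}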
/**

View File

@ -602,6 +602,9 @@ struct WASMModule {
since no need to enable llvm jit compilation for Mode_Interp and
Mode_Fast_JIT, so as to improve performance for them */
bool enable_llvm_jit_compilation;
/* The count of groups that have finished compiling the fast jit
functions in their group */
uint32 fast_jit_ready_groups;
#endif
};

View File

@ -28,12 +28,23 @@ typedef float64 CellType_F64;
#define BR_TABLE_TMP_BUF_LEN 32
#if WASM_ENABLE_THREAD_MGR == 0
#define get_linear_mem_size() linear_mem_size
#else
/**
* Load the memory data size at each boundary check in
* multi-threading mode, since it may be changed by other
* threads via memory.grow
*/
#define get_linear_mem_size() memory->memory_data_size
#endif
#if !defined(OS_ENABLE_HW_BOUND_CHECK) \
|| WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0
#define CHECK_MEMORY_OVERFLOW(bytes) \
do { \
uint64 offset1 = (uint64)offset + (uint64)addr; \
if (offset1 + bytes <= (uint64)linear_mem_size) \
if (offset1 + bytes <= (uint64)get_linear_mem_size()) \
/* If offset1 is in valid range, maddr must also \
be in valid range, no need to check it again. */ \
maddr = memory->memory_data + offset1; \
@ -41,15 +52,15 @@ typedef float64 CellType_F64;
goto out_of_bounds; \
} while (0)
#define CHECK_BULK_MEMORY_OVERFLOW(start, bytes, maddr) \
do { \
uint64 offset1 = (uint32)(start); \
if (offset1 + bytes <= (uint64)linear_mem_size) \
/* App heap space is not valid space for \
bulk memory operation */ \
maddr = memory->memory_data + offset1; \
else \
goto out_of_bounds; \
#define CHECK_BULK_MEMORY_OVERFLOW(start, bytes, maddr) \
do { \
uint64 offset1 = (uint32)(start); \
if (offset1 + bytes <= (uint64)get_linear_mem_size()) \
/* App heap space is not valid space for \
bulk memory operation */ \
maddr = memory->memory_data + offset1; \
else \
goto out_of_bounds; \
} while (0)
#else
#define CHECK_MEMORY_OVERFLOW(bytes) \
@ -1079,12 +1090,14 @@ wasm_interp_call_func_import(WASMModuleInstance *module_inst,
/* Record the current frame_ip, so when an exception occurs, \
the debugger can know the exact opcode that caused the exception */ \
frame_ip_orig = frame_ip; \
os_mutex_lock(&exec_env->wait_lock); \
while (exec_env->current_status->signal_flag == WAMR_SIG_SINGSTEP \
&& exec_env->current_status->step_count++ == 1) { \
exec_env->current_status->step_count = 0; \
SYNC_ALL_TO_FRAME(); \
wasm_cluster_thread_waiting_run(exec_env); \
} \
os_mutex_unlock(&exec_env->wait_lock); \
goto *handle_table[*frame_ip++]; \
} while (0)
#else
@ -1095,12 +1108,14 @@ wasm_interp_call_func_import(WASMModuleInstance *module_inst,
#define HANDLE_OP(opcode) case opcode:
#if WASM_ENABLE_THREAD_MGR != 0 && WASM_ENABLE_DEBUG_INTERP != 0
#define HANDLE_OP_END() \
os_mutex_lock(&exec_env->wait_lock); \
if (exec_env->current_status->signal_flag == WAMR_SIG_SINGSTEP \
&& exec_env->current_status->step_count++ == 2) { \
exec_env->current_status->step_count = 0; \
SYNC_ALL_TO_FRAME(); \
wasm_cluster_thread_waiting_run(exec_env); \
} \
os_mutex_unlock(&exec_env->wait_lock); \
continue
#else
#define HANDLE_OP_END() continue
@ -1130,22 +1145,16 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
#if WASM_ENABLE_SHARED_MEMORY != 0
WASMSharedMemNode *node =
wasm_module_get_shared_memory((WASMModuleCommon *)module->module);
#else
void *node = NULL;
#endif
WASMMemoryInstance *memory = wasm_get_default_memory(module);
uint8 *global_data = module->global_data;
#if !defined(OS_ENABLE_HW_BOUND_CHECK) \
|| WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0 \
|| WASM_ENABLE_BULK_MEMORY != 0
uint32 num_bytes_per_page =
memory ? wasm_get_num_bytes_per_page(memory, node) : 0;
uint32 linear_mem_size =
memory ? wasm_get_linear_memory_size(memory, node) : 0;
uint32 linear_mem_size = memory ? memory->memory_data_size : 0;
#endif
WASMType **wasm_types = module->module->types;
WASMGlobalInstance *globals = module->e->globals, *global;
uint8 *global_data = module->global_data;
uint8 opcode_IMPDEP = WASM_OP_IMPDEP;
WASMInterpFrame *frame = NULL;
/* Points to this special opcode so as to jump to the
@ -2132,8 +2141,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
#if !defined(OS_ENABLE_HW_BOUND_CHECK) \
|| WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0 \
|| WASM_ENABLE_BULK_MEMORY != 0
linear_mem_size =
num_bytes_per_page * memory->cur_page_count;
linear_mem_size = memory->memory_data_size;
#endif
}
@ -3137,6 +3145,10 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
offset = (uint64)(uint32)POP_I32();
addr = (uint32)POP_I32();
#if WASM_ENABLE_THREAD_MGR != 0
linear_mem_size = memory->memory_data_size;
#endif
#ifndef OS_ENABLE_HW_BOUND_CHECK
CHECK_BULK_MEMORY_OVERFLOW(addr, bytes, maddr);
#else
@ -3175,6 +3187,10 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
src = POP_I32();
dst = POP_I32();
#if WASM_ENABLE_THREAD_MGR != 0
linear_mem_size = memory->memory_data_size;
#endif
#ifndef OS_ENABLE_HW_BOUND_CHECK
CHECK_BULK_MEMORY_OVERFLOW(src, len, msrc);
CHECK_BULK_MEMORY_OVERFLOW(dst, len, mdst);
@ -3202,6 +3218,10 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
fill_val = POP_I32();
dst = POP_I32();
#if WASM_ENABLE_THREAD_MGR != 0
linear_mem_size = memory->memory_data_size;
#endif
#ifndef OS_ENABLE_HW_BOUND_CHECK
CHECK_BULK_MEMORY_OVERFLOW(dst, len, mdst);
#else
@ -3424,7 +3444,8 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
ret = wasm_runtime_atomic_notify(
(WASMModuleInstanceCommon *)module, maddr,
notify_count);
bh_assert((int32)ret >= 0);
if (ret == (uint32)-1)
goto got_exception;
PUSH_I32(ret);
break;
@ -3481,7 +3502,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
{
/* Skip the memory index */
frame_ip++;
os_atomic_thread_fence(os_memory_order_release);
os_atomic_thread_fence(os_memory_order_seq_cst);
break;
}
@ -3588,7 +3609,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 4, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&node->shared_mem_lock);
STORE_U32(maddr, frame_sp[1]);
STORE_U32(maddr, sval);
os_mutex_unlock(&node->shared_mem_lock);
}
break;
@ -3629,8 +3650,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
CHECK_BULK_MEMORY_OVERFLOW(addr + offset, 8, maddr);
CHECK_ATOMIC_MEMORY_ACCESS();
os_mutex_lock(&node->shared_mem_lock);
PUT_I64_TO_ADDR((uint32 *)maddr,
GET_I64_FROM_ADDR(frame_sp + 1));
PUT_I64_TO_ADDR((uint32 *)maddr, sval);
os_mutex_unlock(&node->shared_mem_lock);
}
break;
@ -3731,9 +3751,8 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)LOAD_I64(maddr);
if (readv == expect) {
if (readv == expect)
STORE_I64(maddr, sval);
}
os_mutex_unlock(&node->shared_mem_lock);
}
PUSH_I64(readv);
@ -3878,7 +3897,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
|| WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0 \
|| WASM_ENABLE_BULK_MEMORY != 0
if (memory)
linear_mem_size = num_bytes_per_page * memory->cur_page_count;
linear_mem_size = memory->memory_data_size;
#endif
if (wasm_copy_exception(module, NULL))
goto got_exception;

View File

@ -19,12 +19,23 @@ typedef int64 CellType_I64;
typedef float32 CellType_F32;
typedef float64 CellType_F64;
#if WASM_ENABLE_THREAD_MGR == 0
#define get_linear_mem_size() linear_mem_size
#else
/**
* Load the memory data size at each boundary check in
* multi-threading mode, since it may be changed by other
* threads via memory.grow
*/
#define get_linear_mem_size() memory->memory_data_size
#endif
#if !defined(OS_ENABLE_HW_BOUND_CHECK) \
|| WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0
#define CHECK_MEMORY_OVERFLOW(bytes) \
do { \
uint64 offset1 = (uint64)offset + (uint64)addr; \
if (offset1 + bytes <= (uint64)linear_mem_size) \
if (offset1 + bytes <= (uint64)get_linear_mem_size()) \
/* If offset1 is in valid range, maddr must also \
be in valid range, no need to check it again. */ \
maddr = memory->memory_data + offset1; \
@ -35,7 +46,7 @@ typedef float64 CellType_F64;
#define CHECK_BULK_MEMORY_OVERFLOW(start, bytes, maddr) \
do { \
uint64 offset1 = (uint32)(start); \
if (offset1 + bytes <= linear_mem_size) \
if (offset1 + bytes <= get_linear_mem_size()) \
/* App heap space is not valid space for \
bulk memory operation */ \
maddr = memory->memory_data + offset1; \
@ -1158,23 +1169,16 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
#if WASM_ENABLE_SHARED_MEMORY != 0
WASMSharedMemNode *node =
wasm_module_get_shared_memory((WASMModuleCommon *)module->module);
#else
void *node = NULL;
#endif
WASMMemoryInstance *memory = wasm_get_default_memory(module);
#if !defined(OS_ENABLE_HW_BOUND_CHECK) \
|| WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0 \
|| WASM_ENABLE_BULK_MEMORY != 0
uint32 num_bytes_per_page =
memory ? wasm_get_num_bytes_per_page(memory, node) : 0;
uint32 linear_mem_size =
memory ? wasm_get_linear_memory_size(memory, node) : 0;
uint32 linear_mem_size = memory ? memory->memory_data_size : 0;
#endif
uint8 *global_data = module->global_data;
WASMGlobalInstance *globals = module->e ? module->e->globals : NULL;
WASMGlobalInstance *global;
uint8 *global_data = module->global_data;
uint8 opcode_IMPDEP = WASM_OP_IMPDEP;
WASMInterpFrame *frame = NULL;
/* Points to this special opcode so as to jump to the
@ -1906,8 +1910,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
#if !defined(OS_ENABLE_HW_BOUND_CHECK) \
|| WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0 \
|| WASM_ENABLE_BULK_MEMORY != 0
linear_mem_size =
num_bytes_per_page * memory->cur_page_count;
linear_mem_size = memory->memory_data_size;
#endif
}
@ -2989,6 +2992,10 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
offset = (uint64)POP_I32();
addr = POP_I32();
#if WASM_ENABLE_THREAD_MGR
linear_mem_size = memory->memory_data_size;
#endif
#ifndef OS_ENABLE_HW_BOUND_CHECK
CHECK_BULK_MEMORY_OVERFLOW(addr, bytes, maddr);
#else
@ -3026,6 +3033,10 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
src = POP_I32();
dst = POP_I32();
#if WASM_ENABLE_THREAD_MGR
linear_mem_size = memory->memory_data_size;
#endif
#ifndef OS_ENABLE_HW_BOUND_CHECK
CHECK_BULK_MEMORY_OVERFLOW(src, len, msrc);
CHECK_BULK_MEMORY_OVERFLOW(dst, len, mdst);
@ -3052,6 +3063,10 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
fill_val = POP_I32();
dst = POP_I32();
#if WASM_ENABLE_THREAD_MGR
linear_mem_size = memory->memory_data_size;
#endif
#ifndef OS_ENABLE_HW_BOUND_CHECK
CHECK_BULK_MEMORY_OVERFLOW(dst, len, mdst);
#else
@ -3266,7 +3281,8 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
ret = wasm_runtime_atomic_notify(
(WASMModuleInstanceCommon *)module, maddr,
notify_count);
bh_assert((int32)ret >= 0);
if (ret == (uint32)-1)
goto got_exception;
PUSH_I32(ret);
break;
@ -3321,7 +3337,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
}
case WASM_OP_ATOMIC_FENCE:
{
os_atomic_thread_fence(os_memory_order_release);
os_atomic_thread_fence(os_memory_order_seq_cst);
break;
}
@ -3569,9 +3585,8 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
os_mutex_lock(&node->shared_mem_lock);
readv = (uint64)LOAD_I64(maddr);
if (readv == expect) {
if (readv == expect)
STORE_I64(maddr, sval);
}
os_mutex_unlock(&node->shared_mem_lock);
}
PUSH_I64(readv);
@ -3821,7 +3836,7 @@ wasm_interp_call_func_bytecode(WASMModuleInstance *module,
|| WASM_CPU_SUPPORTS_UNALIGNED_ADDR_ACCESS == 0 \
|| WASM_ENABLE_BULK_MEMORY != 0
if (memory)
linear_mem_size = num_bytes_per_page * memory->cur_page_count;
linear_mem_size = memory->memory_data_size;
#endif
if (wasm_copy_exception(module, NULL))
goto got_exception;

View File

@ -3182,6 +3182,11 @@ orcjit_thread_callback(void *arg)
return NULL;
}
}
#if WASM_ENABLE_JIT != 0 && WASM_ENABLE_LAZY_JIT != 0
os_mutex_lock(&module->tierup_wait_lock);
module->fast_jit_ready_groups++;
os_mutex_unlock(&module->tierup_wait_lock);
#endif
#endif
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
@ -3209,9 +3214,11 @@ orcjit_thread_callback(void *arg)
}
}
/* Wait until init_llvm_jit_functions_stage2 finishes */
/* Wait until init_llvm_jit_functions_stage2 finishes and all
fast jit functions are compiled */
os_mutex_lock(&module->tierup_wait_lock);
while (!(module->llvm_jit_inited && module->enable_llvm_jit_compilation)) {
while (!(module->llvm_jit_inited && module->enable_llvm_jit_compilation
&& module->fast_jit_ready_groups >= group_stride)) {
os_cond_reltimedwait(&module->tierup_wait_cond,
&module->tierup_wait_lock, 10000);
if (module->orcjit_stop_compiling) {
@ -8239,12 +8246,13 @@ re_scan:
goto fail;
}
if (func_idx == cur_func_idx + module->import_function_count) {
/* Refer to a forward-declared function */
if (func_idx >= cur_func_idx + module->import_function_count) {
WASMTableSeg *table_seg = module->table_segments;
bool func_declared = false;
uint32 j;
/* Check whether current function is declared */
/* Check whether the function is declared in table segs */
for (i = 0; i < module->table_seg_count; i++, table_seg++) {
if (table_seg->elem_type == VALUE_TYPE_FUNCREF
&& wasm_elem_is_declarative(table_seg->mode)) {
@ -8256,6 +8264,17 @@ re_scan:
}
}
}
if (!func_declared) {
/* Check whether the function is exported */
for (i = 0; i < module->export_count; i++) {
if (module->exports[i].kind == EXPORT_KIND_FUNC
&& module->exports[i].index == func_idx) {
func_declared = true;
break;
}
}
}
if (!func_declared) {
set_error_buf(error_buf, error_buf_size,
"undeclared function reference");

View File

@ -2025,6 +2025,11 @@ orcjit_thread_callback(void *arg)
return NULL;
}
}
#if WASM_ENABLE_JIT != 0 && WASM_ENABLE_LAZY_JIT != 0
os_mutex_lock(&module->tierup_wait_lock);
module->fast_jit_ready_groups++;
os_mutex_unlock(&module->tierup_wait_lock);
#endif
#endif
#if WASM_ENABLE_FAST_JIT != 0 && WASM_ENABLE_JIT != 0 \
@ -2052,9 +2057,11 @@ orcjit_thread_callback(void *arg)
}
}
/* Wait until init_llvm_jit_functions_stage2 finishes */
/* Wait until init_llvm_jit_functions_stage2 finishes and all
fast jit functions are compiled */
os_mutex_lock(&module->tierup_wait_lock);
while (!(module->llvm_jit_inited && module->enable_llvm_jit_compilation)) {
while (!(module->llvm_jit_inited && module->enable_llvm_jit_compilation
&& module->fast_jit_ready_groups >= group_stride)) {
os_cond_reltimedwait(&module->tierup_wait_cond,
&module->tierup_wait_lock, 10000);
if (module->orcjit_stop_compiling) {
@ -6400,12 +6407,13 @@ re_scan:
goto fail;
}
if (func_idx == cur_func_idx + module->import_function_count) {
/* Refer to a forward-declared function */
if (func_idx >= cur_func_idx + module->import_function_count) {
WASMTableSeg *table_seg = module->table_segments;
bool func_declared = false;
uint32 j;
/* Check whether current function is declared */
/* Check whether the function is declared in table segs */
for (i = 0; i < module->table_seg_count; i++, table_seg++) {
if (table_seg->elem_type == VALUE_TYPE_FUNCREF
&& wasm_elem_is_declarative(table_seg->mode)) {
@ -6418,10 +6426,17 @@ re_scan:
}
}
if (!func_declared) {
set_error_buf(error_buf, error_buf_size,
"undeclared function reference");
goto fail;
/* Check whether the function is exported */
for (i = 0; i < module->export_count; i++) {
if (module->exports[i].kind == EXPORT_KIND_FUNC
&& module->exports[i].index == func_idx) {
func_declared = true;
break;
}
}
}
bh_assert(func_declared);
(void)func_declared;
}
#if WASM_ENABLE_FAST_INTERP != 0

View File

@ -1003,7 +1003,7 @@ lookup_post_instantiate_func(WASMModuleInstance *module_inst,
static bool
execute_post_instantiate_functions(WASMModuleInstance *module_inst,
bool is_sub_inst)
bool is_sub_inst, WASMExecEnv *exec_env_main)
{
WASMFunctionInstance *start_func = module_inst->e->start_function;
WASMFunctionInstance *initialize_func = NULL;
@ -1012,11 +1012,11 @@ execute_post_instantiate_functions(WASMModuleInstance *module_inst,
#if WASM_ENABLE_LIBC_WASI != 0
WASMModule *module = module_inst->module;
#endif
#ifdef OS_ENABLE_HW_BOUND_CHECK
WASMModuleInstanceCommon *module_inst_main = NULL;
WASMExecEnv *exec_env_tls = NULL;
#ifdef OS_ENABLE_HW_BOUND_CHECK
WASMExecEnv *exec_env_tls = wasm_runtime_get_exec_env_tls();
#endif
WASMExecEnv *exec_env = NULL;
WASMExecEnv *exec_env = NULL, *exec_env_created = NULL;
bool ret = false;
#if WASM_ENABLE_LIBC_WASI != 0
@ -1057,25 +1057,46 @@ execute_post_instantiate_functions(WASMModuleInstance *module_inst,
return true;
}
#ifdef OS_ENABLE_HW_BOUND_CHECK
if (is_sub_inst) {
exec_env = exec_env_tls = wasm_runtime_get_exec_env_tls();
if (exec_env_tls) {
/* Temporarily replace exec_env_tls's module inst to current
module inst to avoid checking failure when calling the
wasm functions, and ensure that the exec_env's module inst
is the correct one. */
module_inst_main = exec_env_tls->module_inst;
exec_env_tls->module_inst = (WASMModuleInstanceCommon *)module_inst;
}
}
bh_assert(exec_env_main);
#ifdef OS_ENABLE_HW_BOUND_CHECK
bh_assert(exec_env_tls == exec_env_main);
(void)exec_env_tls;
#endif
if (!exec_env
&& !(exec_env =
wasm_exec_env_create((WASMModuleInstanceCommon *)module_inst,
module_inst->default_wasm_stack_size))) {
wasm_set_exception(module_inst, "allocate memory failed");
return false;
exec_env = exec_env_main;
/* Temporarily replace the parent exec_env's module inst with the
current module inst to avoid a check failure when calling the
wasm functions, and ensure that the exec_env's module inst
is the correct one. */
module_inst_main = exec_env_main->module_inst;
exec_env->module_inst = (WASMModuleInstanceCommon *)module_inst;
}
else {
/* Try using the existing exec_env */
#ifdef OS_ENABLE_HW_BOUND_CHECK
exec_env = exec_env_tls;
#endif
#if WASM_ENABLE_THREAD_MGR != 0
if (!exec_env)
exec_env = wasm_clusters_search_exec_env(
(WASMModuleInstanceCommon *)module_inst);
#endif
if (!exec_env) {
if (!(exec_env = exec_env_created = wasm_exec_env_create(
(WASMModuleInstanceCommon *)module_inst,
module_inst->default_wasm_stack_size))) {
wasm_set_exception(module_inst, "allocate memory failed");
return false;
}
}
else {
/* Temporarily replace exec_env's module inst with current
module inst to ensure that the exec_env's module inst
is the correct one. */
module_inst_main = exec_env->module_inst;
exec_env->module_inst = (WASMModuleInstanceCommon *)module_inst;
}
}
/* Execute the start function for both the main instance and sub instance */
@ -1101,23 +1122,23 @@ execute_post_instantiate_functions(WASMModuleInstance *module_inst,
ret = true;
fail:
#ifdef OS_ENABLE_HW_BOUND_CHECK
if (is_sub_inst && exec_env_tls) {
bh_assert(exec_env == exec_env_tls);
/* Restore the exec_env_tls's module inst */
exec_env_tls->module_inst = module_inst_main;
if (is_sub_inst) {
/* Restore the parent exec_env's module inst */
exec_env_main->module_inst = module_inst_main;
}
else {
if (module_inst_main)
/* Restore the existing exec_env's module inst */
exec_env->module_inst = module_inst_main;
if (exec_env_created)
wasm_exec_env_destroy(exec_env_created);
}
else
wasm_exec_env_destroy(exec_env);
#else
wasm_exec_env_destroy(exec_env);
#endif
return ret;
}
static bool
execute_malloc_function(WASMModuleInstance *module_inst,
execute_malloc_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
WASMFunctionInstance *malloc_func,
WASMFunctionInstance *retain_func, uint32 size,
uint32 *p_result)
@ -1125,6 +1146,8 @@ execute_malloc_function(WASMModuleInstance *module_inst,
#ifdef OS_ENABLE_HW_BOUND_CHECK
WASMExecEnv *exec_env_tls = wasm_runtime_get_exec_env_tls();
#endif
WASMExecEnv *exec_env_created = NULL;
WASMModuleInstanceCommon *module_inst_old = NULL;
uint32 argv[2], argc;
bool ret;
@ -1143,55 +1166,119 @@ execute_malloc_function(WASMModuleInstance *module_inst,
argc = 2;
}
if (exec_env) {
#ifdef OS_ENABLE_HW_BOUND_CHECK
if (exec_env_tls != NULL) {
bh_assert(exec_env_tls->module_inst
== (WASMModuleInstanceCommon *)module_inst);
ret = wasm_call_function(exec_env_tls, malloc_func, argc, argv);
if (retain_func && ret) {
ret = wasm_call_function(exec_env_tls, retain_func, 1, argv);
if (exec_env_tls) {
bh_assert(exec_env_tls == exec_env);
}
}
else
#endif
{
ret = wasm_create_exec_env_and_call_function(module_inst, malloc_func,
argc, argv);
if (retain_func && ret) {
ret = wasm_create_exec_env_and_call_function(module_inst,
retain_func, 1, argv);
bh_assert(exec_env->module_inst
== (WASMModuleInstanceCommon *)module_inst);
}
else {
/* Try using the existing exec_env */
#ifdef OS_ENABLE_HW_BOUND_CHECK
exec_env = exec_env_tls;
#endif
#if WASM_ENABLE_THREAD_MGR != 0
if (!exec_env)
exec_env = wasm_clusters_search_exec_env(
(WASMModuleInstanceCommon *)module_inst);
#endif
if (!exec_env) {
if (!(exec_env = exec_env_created = wasm_exec_env_create(
(WASMModuleInstanceCommon *)module_inst,
module_inst->default_wasm_stack_size))) {
wasm_set_exception(module_inst, "allocate memory failed");
return false;
}
}
else {
/* Temporarily replace exec_env's module inst with current
module inst to ensure that the exec_env's module inst
is the correct one. */
module_inst_old = exec_env->module_inst;
exec_env->module_inst = (WASMModuleInstanceCommon *)module_inst;
}
}
ret = wasm_call_function(exec_env, malloc_func, argc, argv);
if (retain_func && ret)
ret = wasm_call_function(exec_env, retain_func, 1, argv);
if (module_inst_old)
/* Restore the existing exec_env's module inst */
exec_env->module_inst = module_inst_old;
if (exec_env_created)
wasm_exec_env_destroy(exec_env_created);
if (ret)
*p_result = argv[0];
return ret;
}
static bool
execute_free_function(WASMModuleInstance *module_inst,
execute_free_function(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
WASMFunctionInstance *free_func, uint32 offset)
{
#ifdef OS_ENABLE_HW_BOUND_CHECK
WASMExecEnv *exec_env_tls = wasm_runtime_get_exec_env_tls();
#endif
WASMExecEnv *exec_env_created = NULL;
WASMModuleInstanceCommon *module_inst_old = NULL;
uint32 argv[2];
bool ret;
argv[0] = offset;
if (exec_env) {
#ifdef OS_ENABLE_HW_BOUND_CHECK
if (exec_env_tls != NULL) {
bh_assert(exec_env_tls->module_inst
== (WASMModuleInstanceCommon *)module_inst);
return wasm_call_function(exec_env_tls, free_func, 1, argv);
}
else
if (exec_env_tls) {
bh_assert(exec_env_tls == exec_env);
}
#endif
{
return wasm_create_exec_env_and_call_function(module_inst, free_func, 1,
argv);
bh_assert(exec_env->module_inst
== (WASMModuleInstanceCommon *)module_inst);
}
else {
/* Try using the existing exec_env */
#ifdef OS_ENABLE_HW_BOUND_CHECK
exec_env = exec_env_tls;
#endif
#if WASM_ENABLE_THREAD_MGR != 0
if (!exec_env)
exec_env = wasm_clusters_search_exec_env(
(WASMModuleInstanceCommon *)module_inst);
#endif
if (!exec_env) {
if (!(exec_env = exec_env_created = wasm_exec_env_create(
(WASMModuleInstanceCommon *)module_inst,
module_inst->default_wasm_stack_size))) {
wasm_set_exception(module_inst, "allocate memory failed");
return false;
}
}
else {
/* Temporarily replace exec_env's module inst with current
module inst to ensure that the exec_env's module inst
is the correct one. */
module_inst_old = exec_env->module_inst;
exec_env->module_inst = (WASMModuleInstanceCommon *)module_inst;
}
}
ret = wasm_call_function(exec_env, free_func, 1, argv);
if (module_inst_old)
/* Restore the existing exec_env's module inst */
exec_env->module_inst = module_inst_old;
if (exec_env_created)
wasm_exec_env_destroy(exec_env_created);
return ret;
}
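execute_malloc_function and execute_free_function now follow the same pattern: use the caller-provided exec_env when there is one, otherwise borrow an existing exec_env (TLS or cluster) and temporarily point it at the current module instance, creating a fresh exec_env only as a last resort. The borrow-and-restore step, reduced to a sketch (helper name invented, fields as used above):

static bool
call_with_borrowed_exec_env_sketch(WASMExecEnv *exec_env,
                                   WASMModuleInstanceCommon *cur_inst,
                                   bool (*do_call)(WASMExecEnv *))
{
    WASMModuleInstanceCommon *saved = exec_env->module_inst;
    bool ret;

    /* Point the borrowed exec_env at the current module instance ... */
    exec_env->module_inst = cur_inst;
    ret = do_call(exec_env);
    /* ... and always restore the original owner afterwards. */
    exec_env->module_inst = saved;
    return ret;
}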
#if WASM_ENABLE_MULTI_MODULE != 0
@ -1210,7 +1297,7 @@ sub_module_instantiate(WASMModule *module, WASMModuleInstance *module_inst,
WASMModuleInstance *sub_module_inst = NULL;
sub_module_inst =
wasm_instantiate(sub_module, false, stack_size, heap_size,
wasm_instantiate(sub_module, false, NULL, stack_size, heap_size,
error_buf, error_buf_size);
if (!sub_module_inst) {
LOG_DEBUG("instantiate %s failed",
@ -1481,7 +1568,7 @@ set_running_mode(WASMModuleInstance *module_inst, RunningMode running_mode,
os_mutex_lock(&module->tierup_wait_lock);
while (!module->llvm_jit_inited) {
os_cond_reltimedwait(&module->tierup_wait_cond,
&module->tierup_wait_lock, 10);
&module->tierup_wait_lock, 10000);
if (module->orcjit_stop_compiling) {
/* init_llvm_jit_functions_stage2 failed */
os_mutex_unlock(&module->tierup_wait_lock);
@ -1555,7 +1642,8 @@ wasm_set_running_mode(WASMModuleInstance *module_inst, RunningMode running_mode)
* Instantiate module
*/
WASMModuleInstance *
wasm_instantiate(WASMModule *module, bool is_sub_inst, uint32 stack_size,
wasm_instantiate(WASMModule *module, bool is_sub_inst,
WASMExecEnv *exec_env_main, uint32 stack_size,
uint32 heap_size, char *error_buf, uint32 error_buf_size)
{
WASMModuleInstance *module_inst;
@ -2049,7 +2137,8 @@ wasm_instantiate(WASMModule *module, bool is_sub_inst, uint32 stack_size,
&module_inst->e->functions[module->start_function];
}
if (!execute_post_instantiate_functions(module_inst, is_sub_inst)) {
if (!execute_post_instantiate_functions(module_inst, is_sub_inst,
exec_env_main)) {
set_error_buf(error_buf, error_buf_size, module_inst->cur_exception);
goto fail;
}
@ -2089,7 +2178,7 @@ wasm_deinstantiate(WASMModuleInstance *module_inst, bool is_sub_inst)
func_ptrs and fast_jit_func_ptrs of the instance, to avoid
accessing the freed memory in the jit backend compilation
threads */
if (!is_sub_inst) {
{
WASMModule *module = module_inst->module;
WASMModuleInstance *instance_prev = NULL, *instance;
os_mutex_lock(&module->instance_list_lock);
@ -2346,39 +2435,6 @@ wasm_call_function(WASMExecEnv *exec_env, WASMFunctionInstance *function,
return !wasm_copy_exception(module_inst, NULL);
}
bool
wasm_create_exec_env_and_call_function(WASMModuleInstance *module_inst,
WASMFunctionInstance *func,
unsigned argc, uint32 argv[])
{
WASMExecEnv *exec_env = NULL, *existing_exec_env = NULL;
bool ret;
#if defined(OS_ENABLE_HW_BOUND_CHECK)
existing_exec_env = exec_env = wasm_runtime_get_exec_env_tls();
#elif WASM_ENABLE_THREAD_MGR != 0
existing_exec_env = exec_env =
wasm_clusters_search_exec_env((WASMModuleInstanceCommon *)module_inst);
#endif
if (!existing_exec_env) {
if (!(exec_env =
wasm_exec_env_create((WASMModuleInstanceCommon *)module_inst,
module_inst->default_wasm_stack_size))) {
wasm_set_exception(module_inst, "allocate memory failed");
return false;
}
}
ret = wasm_runtime_call_wasm(exec_env, func, argc, argv);
/* don't destroy the exec_env if it isn't created in this function */
if (!existing_exec_env)
wasm_exec_env_destroy(exec_env);
return ret;
}
#if WASM_ENABLE_PERF_PROFILING != 0
void
wasm_dump_perf_profiling(const WASMModuleInstance *module_inst)
@ -2426,8 +2482,9 @@ wasm_dump_perf_profiling(const WASMModuleInstance *module_inst)
#endif
uint32
wasm_module_malloc(WASMModuleInstance *module_inst, uint32 size,
void **p_native_addr)
wasm_module_malloc_internal(WASMModuleInstance *module_inst,
WASMExecEnv *exec_env, uint32 size,
void **p_native_addr)
{
WASMMemoryInstance *memory = wasm_get_default_memory(module_inst);
uint8 *addr = NULL;
@ -2443,7 +2500,7 @@ wasm_module_malloc(WASMModuleInstance *module_inst, uint32 size,
}
else if (module_inst->e->malloc_function && module_inst->e->free_function) {
if (!execute_malloc_function(
module_inst, module_inst->e->malloc_function,
module_inst, exec_env, module_inst->e->malloc_function,
module_inst->e->retain_function, size, &offset)) {
return 0;
}
@ -2471,8 +2528,9 @@ wasm_module_malloc(WASMModuleInstance *module_inst, uint32 size,
}
uint32
wasm_module_realloc(WASMModuleInstance *module_inst, uint32 ptr, uint32 size,
void **p_native_addr)
wasm_module_realloc_internal(WASMModuleInstance *module_inst,
WASMExecEnv *exec_env, uint32 ptr, uint32 size,
void **p_native_addr)
{
WASMMemoryInstance *memory = wasm_get_default_memory(module_inst);
uint8 *addr = NULL;
@ -2488,6 +2546,7 @@ wasm_module_realloc(WASMModuleInstance *module_inst, uint32 ptr, uint32 size,
}
/* Only support realloc in WAMR's app heap */
(void)exec_env;
if (!addr) {
if (memory->heap_handle
@ -2506,7 +2565,8 @@ wasm_module_realloc(WASMModuleInstance *module_inst, uint32 ptr, uint32 size,
}
void
wasm_module_free(WASMModuleInstance *module_inst, uint32 ptr)
wasm_module_free_internal(WASMModuleInstance *module_inst,
WASMExecEnv *exec_env, uint32 ptr)
{
if (ptr) {
WASMMemoryInstance *memory = wasm_get_default_memory(module_inst);
@ -2516,12 +2576,6 @@ wasm_module_free(WASMModuleInstance *module_inst, uint32 ptr)
return;
}
#if WASM_ENABLE_SHARED_MEMORY != 0
WASMSharedMemNode *node = wasm_module_get_shared_memory(
(WASMModuleCommon *)module_inst->module);
if (node)
os_mutex_lock(&node->shared_mem_lock);
#endif
addr = memory->memory_data + ptr;
if (memory->heap_handle && memory->heap_data <= addr
@ -2531,16 +2585,33 @@ wasm_module_free(WASMModuleInstance *module_inst, uint32 ptr)
else if (module_inst->e->malloc_function
&& module_inst->e->free_function && memory->memory_data <= addr
&& addr < memory->memory_data_end) {
execute_free_function(module_inst, module_inst->e->free_function,
ptr);
execute_free_function(module_inst, exec_env,
module_inst->e->free_function, ptr);
}
#if WASM_ENABLE_SHARED_MEMORY != 0
if (node)
os_mutex_unlock(&node->shared_mem_lock);
#endif
}
}
uint32
wasm_module_malloc(WASMModuleInstance *module_inst, uint32 size,
void **p_native_addr)
{
return wasm_module_malloc_internal(module_inst, NULL, size, p_native_addr);
}
uint32
wasm_module_realloc(WASMModuleInstance *module_inst, uint32 ptr, uint32 size,
void **p_native_addr)
{
return wasm_module_realloc_internal(module_inst, NULL, ptr, size,
p_native_addr);
}
void
wasm_module_free(WASMModuleInstance *module_inst, uint32 ptr)
{
wasm_module_free_internal(module_inst, NULL, ptr);
}
uint32
wasm_module_dup_data(WASMModuleInstance *module_inst, const char *src,
uint32 size)

View File

@ -400,7 +400,8 @@ void
wasm_unload(WASMModule *module);
WASMModuleInstance *
wasm_instantiate(WASMModule *module, bool is_sub_inst, uint32 stack_size,
wasm_instantiate(WASMModule *module, bool is_sub_inst,
WASMExecEnv *exec_env_main, uint32 stack_size,
uint32 heap_size, char *error_buf, uint32 error_buf_size);
void
@ -432,11 +433,6 @@ bool
wasm_call_function(WASMExecEnv *exec_env, WASMFunctionInstance *function,
unsigned argc, uint32 argv[]);
bool
wasm_create_exec_env_and_call_function(WASMModuleInstance *module_inst,
WASMFunctionInstance *function,
unsigned argc, uint32 argv[]);
void
wasm_set_exception(WASMModuleInstance *module, const char *exception);
@ -455,6 +451,20 @@ wasm_get_exception(WASMModuleInstance *module);
bool
wasm_copy_exception(WASMModuleInstance *module_inst, char *exception_buf);
uint32
wasm_module_malloc_internal(WASMModuleInstance *module_inst,
WASMExecEnv *exec_env, uint32 size,
void **p_native_addr);
uint32
wasm_module_realloc_internal(WASMModuleInstance *module_inst,
WASMExecEnv *exec_env, uint32 ptr, uint32 size,
void **p_native_addr);
void
wasm_module_free_internal(WASMModuleInstance *module_inst,
WASMExecEnv *exec_env, uint32 ptr);
uint32
wasm_module_malloc(WASMModuleInstance *module_inst, uint32 size,
void **p_native_addr);

View File

@ -581,7 +581,7 @@ pthread_create_wrapper(wasm_exec_env_t exec_env,
#endif
if (!(new_module_inst = wasm_runtime_instantiate_internal(
module, true, stack_size, 0, NULL, 0)))
module, true, exec_env, stack_size, 0, NULL, 0)))
return -1;
/* Set custom_data to new module instance */

View File

@ -3,10 +3,9 @@
# Copyright (C) 2023 Amazon.com Inc. or its affiliates. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set -ueo pipefail
set -eo pipefail
CC="${CC:=/opt/wasi-sdk/bin/clang}"
files=("tcp_udp.c" "nslookup.c")
WASI_SYSROOT=${WASI_SYSROOT:=~/dev/wasi-libc/sysroot}
for file in "${files[@]}"
do
@ -14,7 +13,6 @@ do
$CC \
--target=wasm32-wasi-threads \
-I../inc \
--sysroot $WASI_SYSROOT \
../src/wasi/wasi_socket_ext.c -pthread -ftls-model=local-exec \
-Wl,--allow-undefined \
-Wl,--strip-all,--no-entry \

View File

@ -90,7 +90,7 @@ thread_spawn_wrapper(wasm_exec_env_t exec_env, uint32 start_arg)
stack_size = ((WASMModuleInstance *)module_inst)->default_wasm_stack_size;
if (!(new_module_inst = wasm_runtime_instantiate_internal(
module, true, stack_size, 0, NULL, 0)))
module, true, exec_env, stack_size, 0, NULL, 0)))
return -1;
wasm_runtime_set_custom_data_internal(

core/iwasm/libraries/lib-wasi-threads/test/build.sh Normal file → Executable file
View File

@ -5,16 +5,21 @@
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
set -eo pipefail
CC=${CC:=/opt/wasi-sdk/bin/clang}
WASI_SYSROOT=${WASI_SYSROOT:=~/dev/wasi-libc/sysroot}
WAMR_DIR=../../../../..
for test_c in *.c; do
test_wasm="$(basename $test_c .c).wasm"
if [ $test_wasm = "linear_memory_size_update.wasm" ]; then
thread_start_file=""
else
thread_start_file=$WAMR_DIR/samples/wasi-threads/wasm-apps/wasi_thread_start.S
fi
echo "Compiling $test_c to $test_wasm"
$CC \
--sysroot $WASI_SYSROOT \
-target wasm32-wasi-threads \
-pthread -ftls-model=local-exec \
-z stack-size=32768 \
@ -25,6 +30,6 @@ for test_c in *.c; do
-Wl,--export=malloc \
-Wl,--export=free \
-I $WAMR_DIR/samples/wasi-threads/wasm-apps \
$WAMR_DIR/samples/wasi-threads/wasm-apps/wasi_thread_start.S \
$thread_start_file \
$test_c -o $test_wasm
done

View File

@ -6,9 +6,15 @@
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>
#include <limits.h>
#if USE_CUSTOM_SYNC_PRIMITIVES != 0
#include "sync_primitives.h"
#else
#include <pthread.h>
#endif
#include "wasi_thread_start.h"
@ -23,7 +29,6 @@ static bool termination_by_trap;
static bool termination_in_main_thread;
static blocking_task_type_t blocking_task_type;
#define TIMEOUT_SECONDS 10ll
#define NUM_THREADS 3
static pthread_barrier_t barrier;
@ -36,15 +41,14 @@ void
run_long_task()
{
if (blocking_task_type == BLOCKING_TASK_BUSY_WAIT) {
for (int i = 0; i < TIMEOUT_SECONDS; i++)
sleep(1);
for (;;) {
}
}
else if (blocking_task_type == BLOCKING_TASK_ATOMIC_WAIT) {
__builtin_wasm_memory_atomic_wait32(
0, 0, TIMEOUT_SECONDS * 1000 * 1000 * 1000);
__builtin_wasm_memory_atomic_wait32(0, 0, -1);
}
else {
sleep(TIMEOUT_SECONDS);
sleep(UINT_MAX);
}
}

View File

@ -11,7 +11,12 @@
#include <stdio.h>
#include <assert.h>
#include <stdbool.h>
#if USE_CUSTOM_SYNC_PRIMITIVES != 0
#include "sync_primitives.h"
#else
#include <pthread.h>
#endif
#include "wasi_thread_start.h"

View File

@ -0,0 +1,94 @@
/*
* Copyright (C) 2023 Amazon.com Inc. or its affiliates. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
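/* Test outline: a worker thread starts first; the main thread then grows the
 * linear memory via a large allocation, and the worker performs a memory.copy
 * into the newly grown region, checking that it observes the updated linear
 * memory size after the growth happened on another thread. */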
#include <stdlib.h>
#include <pthread.h>
typedef enum {
APP_STARTED,
THREAD_STARTED,
MEMORY_ALLOCATED,
} app_state_t;
typedef struct {
pthread_cond_t cond;
pthread_mutex_t mutex;
app_state_t state;
char *data;
} context_t;
void
context_init(context_t *ctx)
{
pthread_cond_init(&ctx->cond, NULL);
pthread_mutex_init(&ctx->mutex, NULL);
ctx->state = APP_STARTED;
ctx->data = NULL;
}
void
context_destroy(context_t *ctx)
{
pthread_cond_destroy(&ctx->cond);
pthread_mutex_destroy(&ctx->mutex);
if (ctx->data) {
free(ctx->data);
}
}
void
context_set_state(context_t *ctx, app_state_t state)
{
pthread_mutex_lock(&ctx->mutex);
ctx->state = state;
pthread_mutex_unlock(&ctx->mutex);
pthread_cond_signal(&ctx->cond);
}
void
context_wait_for_state(context_t *ctx, app_state_t state)
{
pthread_mutex_lock(&ctx->mutex);
while (ctx->state != state) {
pthread_cond_wait(&ctx->cond, &ctx->mutex);
}
pthread_mutex_unlock(&ctx->mutex);
}
void *
fnc(void *p)
{
context_t *ctx = (context_t *)p;
context_set_state(ctx, THREAD_STARTED);
context_wait_for_state(ctx, MEMORY_ALLOCATED);
// trigger memory.copy
__builtin_memcpy(ctx->data + 512 * 1024, ctx->data + 1024, 1024);
return NULL;
}
int
main()
{
context_t ctx;
context_init(&ctx);
pthread_t th;
pthread_create(&th, NULL, fnc, &ctx);
context_wait_for_state(&ctx, THREAD_STARTED);
// trigger memory.grow
ctx.data = calloc(1024 * 1024, 1);
context_set_state(&ctx, MEMORY_ALLOCATED);
pthread_join(th, NULL);
context_destroy(&ctx);
return 0;
}

View File

@ -0,0 +1,91 @@
/*
* Copyright (C) 2023 Amazon.com Inc. or its affiliates. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include <stdbool.h>
/* Mutex */
typedef int pthread_mutex_t;
int
pthread_mutex_init(pthread_mutex_t *mutex, void *unused)
{
*mutex = 0;
return 0;
}
int
pthread_mutex_destroy(pthread_mutex_t *mutex)
{
return 0;
}
static bool
try_pthread_mutex_lock(pthread_mutex_t *mutex)
{
int expected = 0;
return __atomic_compare_exchange_n(mutex, &expected, 1, false,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}
int
pthread_mutex_lock(pthread_mutex_t *mutex)
{
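    /* Contended path: keep retrying the CAS; while the lock word still reads 1
     * (held), block on the Wasm atomic wait until an unlock notifies us. */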
while (!try_pthread_mutex_lock(mutex))
__builtin_wasm_memory_atomic_wait32(mutex, 1, -1);
return 0;
}
int
pthread_mutex_unlock(pthread_mutex_t *mutex)
{
__atomic_store_n(mutex, 0, __ATOMIC_SEQ_CST);
__builtin_wasm_memory_atomic_notify(mutex, 1);
return 0;
}
/* Barrier */
typedef struct {
int count;
int num_threads;
int mutex;
int ready;
} pthread_barrier_t;
int
pthread_barrier_init(pthread_barrier_t *barrier, void *unused, int num_threads)
{
barrier->count = 0;
barrier->num_threads = num_threads;
barrier->ready = 0;
pthread_mutex_init(&barrier->mutex, NULL);
return 0;
}
int
pthread_barrier_wait(pthread_barrier_t *barrier)
{
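    /* The last thread to arrive publishes ready = 1 and wakes the `count`
     * earlier arrivals; every other thread blocks until ready is non-zero. */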
bool no_wait = false;
int count;
pthread_mutex_lock(&barrier->mutex);
count = barrier->count++;
if (barrier->count >= barrier->num_threads) {
no_wait = true;
barrier->count = 0;
}
pthread_mutex_unlock(&barrier->mutex);
if (no_wait) {
__atomic_store_n(&barrier->ready, 1, __ATOMIC_SEQ_CST);
__builtin_wasm_memory_atomic_notify(&barrier->ready, count);
return 0;
}
__builtin_wasm_memory_atomic_wait32(&barrier->ready, 0, -1);
return 0;
}

View File

@ -11,6 +11,7 @@
#include <stdio.h>
#include <assert.h>
#include <stdbool.h>
#include <pthread.h>
#include "wasi_thread_start.h"
@ -29,6 +30,7 @@ typedef struct {
int *pval;
} shared_t;
pthread_mutex_t mutex;
int *vals[NUM_THREADS];
void
@ -39,7 +41,6 @@ __wasi_thread_start_C(int thread_id, int *start_arg)
for (int i = 0; i < NUM_ITER; i++)
__atomic_fetch_add(data->count, 1, __ATOMIC_SEQ_CST);
vals[data->iteration] = malloc(sizeof(int));
*vals[data->iteration] = data->iteration;
__atomic_store_n(&data->th_done, 1, __ATOMIC_SEQ_CST);
@ -53,6 +54,14 @@ main(int argc, char **argv)
int thread_ids[NUM_THREADS];
int *count = calloc(1, sizeof(int));
assert(count != NULL && "Failed to call calloc");
assert(pthread_mutex_init(&mutex, NULL) == 0 && "Failed to init mutex");
for (int i = 0; i < NUM_THREADS; i++) {
vals[i] = malloc(sizeof(int));
assert(vals[i] != NULL && "Failed to call calloc");
}
for (int i = 0; i < NUM_THREADS; i++) {
assert(start_args_init(&data[i].base)
&& "Stack allocation for thread failed");
@ -82,5 +91,7 @@ main(int argc, char **argv)
}
free(count);
assert(pthread_mutex_destroy(&mutex) == 0 && "Failed to destroy mutex");
return EXIT_SUCCESS;
}

View File

@ -2655,8 +2655,8 @@ wasmtime_ssp_poll_oneoff(
pfds[i] = (struct pollfd){
.fd = fd_number(fos[i]),
.events = s->u.type == __WASI_EVENTTYPE_FD_READ
? POLLRDNORM
: POLLWRNORM,
? POLLIN
: POLLOUT,
};
}
else {
@ -2767,7 +2767,7 @@ wasmtime_ssp_poll_oneoff(
__WASI_EVENT_FD_READWRITE_HANGUP,
};
}
else if ((pfds[i].revents & (POLLRDNORM | POLLWRNORM)) != 0) {
else if ((pfds[i].revents & (POLLIN | POLLOUT)) != 0) {
// Read or write possible.
out[(*nevents)++] = (__wasi_event_t){
.userdata = in[i].userdata,

View File

@ -143,8 +143,8 @@ allocate_aux_stack(WASMExecEnv *exec_env, uint32 *start, uint32 *size)
wasm_exec_env_get_module_inst(exec_env);
uint32 stack_end;
stack_end =
wasm_runtime_module_malloc(module_inst, cluster->stack_size, NULL);
stack_end = wasm_runtime_module_malloc_internal(module_inst, exec_env,
cluster->stack_size, NULL);
*start = stack_end + cluster->stack_size;
*size = cluster->stack_size;
@ -188,7 +188,8 @@ free_aux_stack(WASMExecEnv *exec_env, uint32 start)
bh_assert(start >= cluster->stack_size);
wasm_runtime_module_free(module_inst, start - cluster->stack_size);
wasm_runtime_module_free_internal(module_inst, exec_env,
start - cluster->stack_size);
return true;
#else
@ -381,9 +382,9 @@ wasm_cluster_add_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
return ret;
}
/* The caller should lock cluster->lock for thread safety */
bool
wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
static bool
wasm_cluster_del_exec_env_internal(WASMCluster *cluster, WASMExecEnv *exec_env,
bool can_destroy_cluster)
{
bool ret = true;
bh_assert(exec_env->cluster == cluster);
@ -406,13 +407,26 @@ wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
if (bh_list_remove(&cluster->exec_env_list, exec_env) != 0)
ret = false;
if (cluster->exec_env_list.len == 0) {
/* exec_env_list empty, destroy the cluster */
wasm_cluster_destroy(cluster);
if (can_destroy_cluster) {
if (cluster->exec_env_list.len == 0) {
/* exec_env_list empty, destroy the cluster */
wasm_cluster_destroy(cluster);
}
}
else {
/* Don't destroy cluster as cluster->lock is being used */
}
return ret;
}
/* The caller should lock cluster->lock for thread safety */
bool
wasm_cluster_del_exec_env(WASMCluster *cluster, WASMExecEnv *exec_env)
{
return wasm_cluster_del_exec_env_internal(cluster, exec_env, true);
}
static WASMExecEnv *
wasm_cluster_search_exec_env(WASMCluster *cluster,
WASMModuleInstanceCommon *module_inst)
@ -495,7 +509,7 @@ wasm_cluster_spawn_exec_env(WASMExecEnv *exec_env)
#endif
if (!(new_module_inst = wasm_runtime_instantiate_internal(
module, true, stack_size, 0, NULL, 0))) {
module, true, exec_env, stack_size, 0, NULL, 0))) {
goto fail1;
}
@ -525,6 +539,9 @@ wasm_cluster_spawn_exec_env(WASMExecEnv *exec_env)
goto fail4;
}
/* Inherit suspend_flags of parent thread */
new_exec_env->suspend_flags.flags = exec_env->suspend_flags.flags;
if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
goto fail4;
@ -557,7 +574,7 @@ wasm_cluster_destroy_spawned_exec_env(WASMExecEnv *exec_env)
/* Free aux stack space */
free_aux_stack(exec_env, exec_env->aux_stack_bottom.bottom);
/* Remove exec_env */
wasm_cluster_del_exec_env(cluster, exec_env);
wasm_cluster_del_exec_env_internal(cluster, exec_env, false);
/* Destroy exec_env */
wasm_exec_env_destroy_internal(exec_env);
/* Routine exit, destroy instance */
@ -617,7 +634,7 @@ thread_manager_start_routine(void *arg)
/* Free aux stack space */
free_aux_stack(exec_env, exec_env->aux_stack_bottom.bottom);
/* Remove exec_env */
wasm_cluster_del_exec_env(cluster, exec_env);
wasm_cluster_del_exec_env_internal(cluster, exec_env, false);
/* Destroy exec_env */
wasm_exec_env_destroy_internal(exec_env);
/* Routine exit, destroy instance */
@ -674,6 +691,9 @@ wasm_cluster_create_thread(WASMExecEnv *exec_env,
new_exec_env->aux_stack_bottom.bottom = UINT32_MAX;
}
/* Inherit suspend_flags of parent thread */
new_exec_env->suspend_flags.flags = exec_env->suspend_flags.flags;
if (!wasm_cluster_add_exec_env(cluster, new_exec_env))
goto fail3;
@ -700,7 +720,7 @@ wasm_cluster_create_thread(WASMExecEnv *exec_env,
return 0;
fail4:
wasm_cluster_del_exec_env(cluster, new_exec_env);
wasm_cluster_del_exec_env_internal(cluster, new_exec_env, false);
fail3:
/* free the allocated aux stack space */
if (alloc_aux_stack)
@ -787,18 +807,12 @@ notify_debug_instance_exit(WASMExecEnv *exec_env)
void
wasm_cluster_thread_waiting_run(WASMExecEnv *exec_env)
{
os_mutex_lock(&exec_env->wait_lock);
/* Wake up debugger thread after we get the lock, otherwise we may miss the
* signal from debugger thread, see
* https://github.com/bytecodealliance/wasm-micro-runtime/issues/1860 */
exec_env->current_status->running_status = STATUS_STOP;
notify_debug_instance(exec_env);
while (!wasm_cluster_thread_is_running(exec_env)) {
os_cond_wait(&exec_env->wait_cond, &exec_env->wait_lock);
}
os_mutex_unlock(&exec_env->wait_lock);
}
void
@ -971,7 +985,7 @@ wasm_cluster_exit_thread(WASMExecEnv *exec_env, void *retval)
/* Free aux stack space */
free_aux_stack(exec_env, exec_env->aux_stack_bottom.bottom);
/* Remove exec_env */
wasm_cluster_del_exec_env(cluster, exec_env);
wasm_cluster_del_exec_env_internal(cluster, exec_env, false);
/* Destroy exec_env */
wasm_exec_env_destroy_internal(exec_env);
/* Routine exit, destroy instance */
@ -988,12 +1002,12 @@ static void
set_thread_cancel_flags(WASMExecEnv *exec_env)
{
os_mutex_lock(&exec_env->wait_lock);
/* Set the termination flag */
#if WASM_ENABLE_DEBUG_INTERP != 0
wasm_cluster_thread_send_signal(exec_env, WAMR_SIG_TERM);
#else
exec_env->suspend_flags.flags |= 0x01;
#endif
exec_env->suspend_flags.flags |= 0x01;
os_mutex_unlock(&exec_env->wait_lock);
}

View File

@ -180,6 +180,9 @@ wasm_cluster_destroy_exenv_status(WASMCurrentEnvStatus *status);
void
wasm_cluster_send_signal_all(WASMCluster *cluster, uint32 signo);
/* This function must be called with exec_env->wait_lock locked, otherwise we
* may miss the signal from debugger thread, see
* https://github.com/bytecodealliance/wasm-micro-runtime/issues/1860 */
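/* Illustrative call-site sketch (hypothetical, not part of this change):
 *     os_mutex_lock(&exec_env->wait_lock);
 *     wasm_cluster_thread_waiting_run(exec_env);
 *     os_mutex_unlock(&exec_env->wait_lock);
 * Holding wait_lock across the call ensures the debugger's wakeup cannot slip
 * in between the status update and the wait. */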
void
wasm_cluster_thread_waiting_run(WASMExecEnv *exec_env);

View File

@ -1 +1 @@
tensorflow==2.10.1
tensorflow==2.11.1

View File

@ -153,7 +153,7 @@ static void
adjust_ptr(uint8 **p_ptr, intptr_t offset)
{
if (*p_ptr)
*p_ptr += offset;
*p_ptr = (uint8 *)((intptr_t)(*p_ptr) + offset);
}
int

View File

@ -215,7 +215,7 @@ unlinkat(int fd, const char *path, int flag)
}
int
utimensat(int fd, const char *path, const struct timespec ts[2], int flag)
utimensat(int fd, const char *path, const struct timespec *ts, int flag)
{
errno = ENOSYS;
return -1;
@ -238,7 +238,7 @@ ftruncate(int fd, off_t length)
#endif
int
futimens(int fd, const struct timespec times[2])
futimens(int fd, const struct timespec *times)
{
errno = ENOSYS;
return -1;

View File

@ -116,12 +116,19 @@ os_thread_exit(void *retval);
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58016 */
#if __GNUC_PREREQ(4, 9)
#define BH_HAS_STD_ATOMIC
#elif __GNUC_PREREQ(4, 7)
#define os_memory_order_acquire __ATOMIC_ACQUIRE
#define os_memory_order_release __ATOMIC_RELEASE
#define os_memory_order_seq_cst __ATOMIC_SEQ_CST
#define os_atomic_thread_fence __atomic_thread_fence
#endif /* end of __GNUC_PREREQ(4, 9) */
#endif /* end of defined(__GNUC_PREREQ) */
#if defined(BH_HAS_STD_ATOMIC) && !defined(__cplusplus)
#include <stdatomic.h>
#define os_memory_order_acquire memory_order_acquire
#define os_memory_order_release memory_order_release
#define os_memory_order_seq_cst memory_order_seq_cst
#define os_atomic_thread_fence atomic_thread_fence
#endif
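/* Illustrative usage (assumes one of the mappings above is in effect):
 *     os_atomic_thread_fence(os_memory_order_seq_cst);
 * which expands to atomic_thread_fence(memory_order_seq_cst) on the stdatomic
 * path, or __atomic_thread_fence(__ATOMIC_SEQ_CST) on the GCC builtin path. */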

View File

@ -63,7 +63,9 @@ os_set_print_function(os_print_function_t pf);
char *
strcpy(char *dest, const char *src);
#define os_memory_order_acquire __ATOMIC_ACQUIRE
#define os_memory_order_release __ATOMIC_RELEASE
#define os_memory_order_seq_cst __ATOMIC_SEQ_CST
#define os_atomic_thread_fence __atomic_thread_fence
#ifdef __cplusplus

View File

@ -6,7 +6,7 @@
#include "bh_assert.h"
void
bh_assert_internal(int v, const char *file_name, int line_number,
bh_assert_internal(int64 v, const char *file_name, int line_number,
const char *expr_string)
{
if (v)

View File

@ -14,10 +14,10 @@ extern "C" {
#if BH_DEBUG != 0
void
bh_assert_internal(int v, const char *file_name, int line_number,
bh_assert_internal(int64 v, const char *file_name, int line_number,
const char *expr_string);
#define bh_assert(expr) \
bh_assert_internal((int)(uintptr_t)(expr), __FILE__, __LINE__, #expr)
bh_assert_internal((int64)(uintptr_t)(expr), __FILE__, __LINE__, #expr)
#else
#define bh_assert(expr) (void)0
#endif /* end of BH_DEBUG */

View File

@ -33,6 +33,9 @@ bh_hash_map_create(uint32 size, bool use_lock, HashFunc hash_func,
HashMap *map;
uint64 total_size;
if (size < HASH_MAP_MIN_SIZE)
size = HASH_MAP_MIN_SIZE;
if (size > HASH_MAP_MAX_SIZE) {
LOG_ERROR("HashMap create failed: size is too large.\n");
return NULL;

View File

@ -12,6 +12,9 @@
extern "C" {
#endif
/* Minimum initial size of hash map */
#define HASH_MAP_MIN_SIZE 4
/* Maximum initial size of hash map */
#define HASH_MAP_MAX_SIZE 65536

View File

@ -6,6 +6,6 @@
#ifndef _WAMR_VERSION_H_
#define _WAMR_VERSION_H_
#define WAMR_VERSION_MAJOR 1
#define WAMR_VERSION_MINOR 1
#define WAMR_VERSION_PATCH 2
#define WAMR_VERSION_MINOR 2
#define WAMR_VERSION_PATCH 1
#endif

View File

@ -1,11 +1,16 @@
Build WAMR vmcore (iwasm)
=========================
It is recommended to use the [WAMR SDK](../wamr-sdk) tools to build a project that integrates the WAMR. This document introduces how to build the WAMR minimal product which is vmcore only (no app-framework and app-mgr) for multiple platforms.
# Build WAMR vmcore
WAMR vmcore is a set of runtime libraries for loading and running Wasm modules. This document introduces how to build the WAMR vmcore.
References:
- [how to build iwasm](../product-mini/README.md): building different target platforms such as Linux, Windows, Mac etc
- [Blog: Introduction to WAMR running modes](https://bytecodealliance.github.io/wamr.dev/blog/introduction-to-wamr-running-modes/)
## WAMR vmcore cmake building configurations
By including the script `runtime_lib.cmake` under folder [build-scripts](../build-scripts) in CMakeList.txt, it is easy to build minimal product with cmake.
By including the script `runtime_lib.cmake` under folder [build-scripts](../build-scripts) in CMakeList.txt, it is easy to use vmcore to build host software with cmake.
```cmake
# add this into your CMakeList.txt
@ -42,6 +47,7 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM
- **WAMR_BUILD_AOT**=1/0, enable AOT or not, default to enable if not set
- **WAMR_BUILD_JIT**=1/0, enable LLVM JIT or not, default to disable if not set
- **WAMR_BUILD_FAST_JIT**=1/0, enable Fast JIT or not, default to disable if not set
- **WAMR_BUILD_FAST_JIT**=1 and **WAMR_BUILD_JIT**=1, enable Multi-tier JIT, default to disable if not set
#### **Configure LIBC**
@ -80,6 +86,16 @@ cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM
- **WAMR_BUILD_LIB_PTHREAD_SEMAPHORE**=1/0, default to disable if not set
> Note: This feature depends on `lib-pthread`, which will be enabled automatically when this feature is enabled.
#### **Enable lib wasi-threads**
- **WAMR_BUILD_LIB_WASI_THREADS**=1/0, default to disable if not set
> Note: The features that lib wasi-threads depends on, such as `shared memory` and the `thread manager`, will be enabled automatically.
#### **Enable lib wasi-nn**
- **WAMR_BUILD_WASI_NN**=1/0, default to disable if not set
#### **Enable lib wasi-nn GPU mode**
- **WASI_NN_ENABLE_GPU**=1/0, default to disable if not set
#### **Disable boundary check with hardware trap**
- **WAMR_DISABLE_HW_BOUND_CHECK**=1/0, default to enable if not set and supported by platform
> Note: By default, only 64-bit linux/darwin/android/windows/vxworks platforms enable the boundary check with hardware trap feature, and the wamrc tool generates AOT code without boundary check instructions for all 64-bit targets except SGX to improve performance. The boundary check covers the linear memory access boundary and the native stack access boundary, if `WAMR_DISABLE_STACK_HW_BOUND_CHECK` below isn't set.
@ -195,460 +211,3 @@ Or if we want to enable interpreter, disable AOT and WASI, and build as X86_32,
``` Bash
cmake .. -DWAMR_BUILD_INTERP=1 -DWAMR_BUILD_AOT=0 -DWAMR_BUILD_LIBC_WASI=0 -DWAMR_BUILD_TARGET=X86_32
```
## Cross compilation
If you are building for the ARM architecture on an X86 development machine, you can use `CMAKE_TOOLCHAIN_FILE` to set the toolchain file for cross compiling.
```
cmake .. -DCMAKE_TOOLCHAIN_FILE=$TOOL_CHAIN_FILE \
-DWAMR_BUILD_PLATFORM=linux \
-DWAMR_BUILD_TARGET=ARM
```
Refer to toolchain sample file [`samples/simple/profiles/arm-interp/toolchain.cmake`](../samples/simple/profiles/arm-interp/toolchain.cmake) for how to build mini product for ARM target architecture.
If you compile for ESP-IDF, make sure to set the right toolchain file for the chip you're using (e.g. `$IDF_PATH/tools/cmake/toolchain-esp32c3.cmake`).
Note that all ESP-IDF toolchain files live under `$IDF_PATH/tools/cmake/`.
Linux
-------------------------
First of all please install the dependent packages.
Run command below in Ubuntu-18.04:
``` Bash
sudo apt install build-essential cmake g++-multilib libgcc-8-dev lib32gcc-8-dev
```
Or in Ubuntu-16.04:
``` Bash
sudo apt install build-essential cmake g++-multilib libgcc-5-dev lib32gcc-5-dev
```
Or in Fedora:
``` Bash
sudo dnf install glibc-devel.i686
```
After installing dependencies, build the source code:
``` Bash
cd product-mini/platforms/linux/
mkdir build && cd build
cmake ..
make
# iwasm is generated under current directory
```
By default in Linux, the `fast interpreter`, `AOT` and `Libc WASI` are enabled, and JIT is disabled.
And the build target is set to X86_64 or X86_32 depending on the platform's bitwidth.
There are six running modes supported in total: fast interpreter, classic interpreter, AOT, LLVM JIT, Fast JIT and Multi-tier JIT.
(1) To run a wasm file with `fast interpreter` mode - build iwasm with the default build and then:
```Bash
iwasm <wasm file>
```
Or
```Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_INTERP=1
make
```
(2) To disable `fast interpreter` and enable `classic interpreter` instead:
``` Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_FAST_INTERP=0
make
```
(3) To run an AOT file, firstly please refer to [Build wamrc AOT compiler](../README.md#build-wamrc-aot-compiler) to build wamrc, and then:
```Bash
wamrc -o <AOT file> <WASM file>
iwasm <AOT file>
```
(4) To enable the `LLVM JIT` mode, firstly we should build the LLVM library:
``` Bash
cd product-mini/platforms/linux/
./build_llvm.sh (The llvm source code is cloned under <wamr_root_dir>/core/deps/llvm and auto built)
```
Then pass argument `-DWAMR_BUILD_JIT=1` to cmake to enable LLVM JIT:
``` Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_JIT=1
make
```
Note:
By default, the LLVM Orc JIT with Lazy compilation is enabled to speed up the launching process and reduce
the JIT compilation time: backend threads are created to compile the WASM functions in parallel, and in the
main thread a function in the module is not compiled until it is first called and has not yet been
compiled by the compilation threads.
To disable the Lazy compilation:
``` Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=0
make
```
With this setting, all the WASM functions are compiled before the main thread starts to run the wasm module.
(5) To enable the `Fast JIT` mode:
``` Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_FAST_JIT=1
make
```
The Fast JIT is a lightweight JIT engine with quick startup, small footprint and good portability, and achieves ~50% of the AOT performance.
(6) To enable the `Multi-tier JIT` mode:
``` Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_FAST_JIT=1 -DWAMR_BUILD_JIT=1
make
```
The Multi-tier JIT is a two-level JIT tier-up engine: it launches Fast JIT to run the wasm module as soon as possible and creates backend threads to compile the LLVM JIT functions at the same time; once the LLVM JIT functions are compiled, the runtime gradually switches execution from the Fast JIT jitted code to the LLVM JIT jitted code, so as to gain the best performance.
Linux SGX (Intel Software Guard Extension)
-------------------------
Please see [Build and Port WAMR vmcore for Linux SGX](./linux_sgx.md) for the details.
MacOS
-------------------------
Make sure to install Xcode from the App Store first, and install cmake.
If you use Homebrew, install cmake from the command line:
``` Bash
brew install cmake
```
Then build the source codes:
``` Bash
cd product-mini/platforms/darwin/
mkdir build
cd build
cmake ..
make
# iwasm is generated under current directory
```
By default in MacOS, the `fast interpreter`, `AOT` and `Libc WASI` are enabled, and JIT is disabled.
And the build target is set to X86_64 or X86_32 depending on the platform's bitwidth.
To run a wasm file with interpreter mode:
```Bash
iwasm <wasm file>
```
To run an AOT file, firstly please refer to [Build wamrc AOT compiler](../README.md#build-wamrc-aot-compiler) to build wamrc, and then:
```Bash
wamrc -o <AOT file> <WASM file>
iwasm <AOT file>
```
Note:
For how to build the `JIT` mode and `classic interpreter` mode, please refer to [Build iwasm on Linux](./build_wamr.md#linux).
WAMR provides some features which can be easily configured by passing options to cmake, please see [WAMR vmcore cmake building configurations](./build_wamr.md#wamr-vmcore-cmake-building-configurations) for details. Currently in MacOS, interpreter, AOT, and builtin libc are enabled by default.
Windows
-------------------------
Make sure `MSVC` and `cmake` are installed and available in the command line environment
Then build the source codes:
``` Bash
cd product-mini/platforms/windows/
mkdir build
cd build
cmake ..
cmake --build . --config Release
# ./Release/iwasm.exe is generated
```
By default in Windows, the `fast interpreter`, `AOT` and `Libc WASI` are enabled, and JIT is disabled.
To run a wasm file with interpreter mode:
```Bash
iwasm.exe <wasm file>
```
To run an AOT file, firstly please refer to [Build wamrc AOT compiler](../README.md#build-wamrc-aot-compiler) to build wamrc, and then:
```Bash
wamrc.exe -o <AOT file> <WASM file>
iwasm.exe <AOT file>
```
Note:
For how to build the `JIT` mode and `classic interpreter` mode, please refer to [Build iwasm on Linux](./build_wamr.md#linux).
WAMR provides some features which can be easily configured by passing options to cmake, please see [WAMR vmcore cmake building configurations](./build_wamr.md#wamr-vmcore-cmake-building-configurations) for details. Currently in Windows, interpreter, AOT, and builtin libc are enabled by default.
MinGW
-------------------------
First make sure the correct CMake package is installed; the following commands
are valid for the MSYS2 build environment:
```Bash
pacman -R cmake
pacman -S mingw-w64-x86_64-cmake
pacman -S mingw-w64-x86_64-gcc
pacman -S make git
```
Then follow the build instructions for Windows above, and add the following
arguments for cmake:
```Bash
cmake .. -G"Unix Makefiles" \
-DWAMR_DISABLE_HW_BOUND_CHECK=1
```
Note that WASI will be disabled until further work is done towards full MinGW support.
- Since memory access boundary check with hardware trap feature is disabled, when generating the AOT file with `wamrc`, the `--bounds-checks=1` flag should be added to generate the memory access boundary check instructions to ensure the sandbox security:
```bash
wamrc --bounds-checks=1 -o <aot_file> <wasm_file>
```
- Compiler complaining about missing `UnwindInfoAddress` field in `RUNTIME_FUNCTION`
struct (winnt.h).
VxWorks
-------------------------
VxWorks 7 SR0620 release is validated.
First you need to build a VSB. Make sure *UTILS_UNIX* layer is added in the VSB.
After the VSB is built, export the VxWorks toolchain path by:
```bash
export PATH=<vsb_dir_path>/host/vx-compiler/bin:$PATH
```
Now switch to iwasm source tree to build the source code:
```bash
cd product-mini/platforms/vxworks/
mkdir build
cd build
cmake ..
make
```
Create a VIP based on the VSB. Make sure the following components are added:
* INCLUDE_POSIX_PTHREADS
* INCLUDE_POSIX_PTHREAD_SCHEDULER
* INCLUDE_SHARED_DATA
* INCLUDE_SHL
Copy the generated iwasm executable, the test WASM binary as well as the needed
shared libraries (libc.so.1, libllvm.so.1 or libgnu.so.1 depending on the VSB,
libunix.so.1) to a supported file system (eg: romfs).
Note:
WAMR provides some features which can be easily configured by passing options to cmake, please see [WAMR vmcore cmake building configurations](./build_wamr.md#wamr-vmcore-cmake-building-configurations) for details. Currently in VxWorks, interpreter and builtin libc are enabled by default.
Zephyr
-------------------------
You need to prepare Zephyr first as described here https://docs.zephyrproject.org/latest/getting_started/index.html#get-zephyr-and-install-python-dependencies.
After that you need to point the `ZEPHYR_BASE` variable to e.g. `~/zephyrproject/zephyr`. Also, it is important that you have `west` available for subsequent actions.
``` Bash
cd <wamr_root_dir>/product-mini/platforms/zephyr/simple
# Execute the ./build_and_run.sh script with board name as parameter. Here take x86 as example:
./build_and_run.sh x86
```
If you want to use the Espressif toolchain (esp32 or esp32c3), you can most conveniently install it with `west`:
``` Bash
cd $ZEPHYR_BASE
west espressif install
```
After that set `ESPRESSIF_TOOLCHAIN_PATH` according to the output, for example `~/.espressif/tools/zephyr`.
Note:
WAMR provides some features which can be easily configured by passing options to cmake, please see [WAMR vmcore cmake building configurations](./build_wamr.md#wamr-vmcore-cmake-building-configurations) for details. Currently in Zephyr, interpreter, AOT and builtin libc are enabled by default.
AliOS-Things
-------------------------
1. a developerkit board is needed for testing
2. download the AliOS-Things code
``` Bash
git clone https://github.com/alibaba/AliOS-Things.git
```
3. copy <wamr_root_dir>/product-mini/platforms/alios-things directory to AliOS-Things/middleware, and rename it as iwasm
``` Bash
cp -a <wamr_root_dir>/product-mini/platforms/alios-things middleware/iwasm
```
4. create a link to <wamr_root_dir> in middleware/iwasm/ and rename it to wamr
``` Bash
ln -s <wamr_root_dir> middleware/iwasm/wamr
```
5. modify file app/example/helloworld/helloworld.c, patch as:
``` C
#include <stdbool.h>
#include <aos/kernel.h>
extern bool iwasm_init();
int application_start(int argc, char *argv[])
{
int count = 0;
iwasm_init();
...
}
```
6. modify file app/example/helloworld/aos.mk
``` C
$(NAME)_COMPONENTS := osal_aos iwasm
```
7. build source code and run
For linux host:
``` Bash
aos make helloworld@linuxhost -c config
aos make
./out/helloworld@linuxhost/binary/helloworld@linuxhost.elf
```
For developerkit:
Modify file middleware/iwasm/aos.mk, patch as:
``` C
WAMR_BUILD_TARGET := THUMBV7M
```
``` Bash
aos make helloworld@developerkit -c config
aos make
```
download the binary to the developerkit board and check the output from the serial port
RT-Thread
-------------------------
1. Get rt-thread [system codes](https://github.com/RT-Thread/rt-thread).
2. Enable the WAMR software package with the menuconfig tool provided by RT-Thread.
* Environment in Linux, run command below:
```bash
scons --menuconfig
```
* Environment in Windows ConEmu, run command below:
```bash
menuconfig
```
Select and enable `WAMR` in:
* RT-Thread online packages
* tools packages
* WebAssembly Micro Runtime (WAMR)
3. Configure `WAMR` with the menuconfig tool.
You can choose the iwasm features below:
* Enable testing parameters of iwasm
* Enable interpreter Mode / Fast interpreter Mode
* Use built-libc
* Enable AOT
4. Exit the menuconfig tool and save the configuration, then update and download the package.
```bash
pkgs --update
```
5. Build the project and download the binary to the board.
```bash
scons
```
or build the project with 8 threads using the command below:
```bash
scons -j8
```
After the project is built, you will get a binary file named `rtthread.bin`, which you can then download to the MCU board.
Android
-------------------------
WAMR is able to generate a shared library that supports the Android platform.
- need an [android SDK](https://developer.android.com/studio). Go and get the "Command line tools only"
- look for a command named *sdkmanager* and download the components below. You may need to check the version numbers and pick other versions
- "build-tools;29.0.3"
- "cmake;3.10.2.4988404"
- "ndk;latest"
- "patcher;v4"
- "platform-tools"
- "platforms;android-29"
- add bin/ of the downloaded cmake to $PATH
- export ANDROID_HOME=/the/path/of/downloaded/sdk/
- export ANDROID_NDK_LATEST_HOME=/the/path/of/downloaded/sdk/ndk/2x.xxx/
- ready to go
Using the commands below, you are able to compile with the default configurations. Any compilation requirement can be satisfied by modifying product-mini/platforms/android/CMakeLists.txt. For example, changing ${WAMR_BUILD_TARGET} in the CMakeLists file produces libraries that support different ABIs.
``` shell
$ cd product-mini/platforms/android/
$ mkdir build
$ cd build
$ cmake ..
$ make
$ # check output in distribution/wasm
$ # include/ includes all necessary header files
$ # lib includes libiwasm.so
```
NuttX
-------------------------
WAMR is integrated with NuttX; just enable WAMR in the Kconfig option (Application Configuration/Interpreters).
ESP-IDF
-------------------------
WAMR integrates with ESP-IDF both for the XTENSA and RISC-V chips (esp32x and esp32c3 respectively).
In order to use this, you need at least version 4.3.1 of ESP-IDF.
If you don't have it installed, follow the instructions [here](https://docs.espressif.com/projects/esp-idf/en/latest/esp32/get-started/#get-started-get-prerequisites).
ESP-IDF also installs the toolchains needed for compiling WAMR and ESP-IDF.
A small demonstration of how to use WAMR and ESP-IDF can be found under [product_mini](/product-mini/platforms/esp-idf).
The demo builds WAMR for ESP-IDF and runs a small wasm program.
In order to run it for your specific Espressif chip, edit the [build_and_run.sh](/product-mini/platforms/esp-idf/build_and_run.sh) file and put the correct toolchain file (see #Cross-compilation) and `IDF_TARGET`.
Before compiling it is also necessary to call ESP-IDF's `export.sh` script to bring all compile time relevant information in scope.
Docker
-------------------------
[Docker](https://www.docker.com/) will download all the dependencies and build WAMR Core on your behalf.
Make sure you have Docker installed on your machine: [macOS](https://docs.docker.com/docker-for-mac/install/), [Windows](https://docs.docker.com/docker-for-windows/install/) or [Linux](https://docs.docker.com/install/linux/docker-ce/ubuntu/).
Build *iwasm* with the Docker image:
``` Bash
$ cd ci
$ ./build_wamr.sh
$ ls ../build_out/
```
*build_wamr.sh* will generate *linux* compatible libraries ( libiwasm.so and
libvmlib.a ) and an executable binary (*iwasm*) and copy *iwasm* to
*build_out*. All original generated files are still under
*product-mini/platforms/linux/build*.
FreeBSD
-------------------------
First, install the dependent packages:
```shell
sudo pkg install gcc cmake wget
```
Then you can run the following commands to build iwasm with default configurations:
```shell
cd product-mini/platforms/freebsd
mkdir build && cd build
cmake ..
make
```

View File

@ -1,5 +1,8 @@
Memory model and memory usage tuning
=====================================
# Memory model and memory usage tuning
References:
- [Blog: Understand WAMR heap](https://bytecodealliance.github.io/wamr.dev/blog/understand-the-wamr-heaps/)
- [Blog: Understand WAMR stacks](https://bytecodealliance.github.io/wamr.dev/blog/understand-the-wamr-stacks/)
## The memory model

59
doc/pthread_impls.md Normal file
View File

@ -0,0 +1,59 @@
# Pthread implementations
WAMR has two pthread implementations available as of this writing.
These implementations are not ABI-compatible. You at least need to rebuild
your wasm modules when migrating from one pthread implementation to another.
For new users, we recommend using (or at least experimenting with)
the new wasi-threads based implementation.
In the future, we might remove the old implementation.
## WAMR lib-pthread (old)
* The pthread API is directly implemented as host functions in WAMR.
(`WAMR_BUILD_LIB_PTHREAD`)
* Only a minimal API is implemented as of this writing.
(e.g. no pthread barriers)
* WAMR-specific ABI
* [Known limitations](pthread_library.md#known-limits)
## wasi-threads (new)
* The pthread API is implemented in wasi-libc, based on
[wasi-threads](https://github.com/WebAssembly/wasi-threads)
and [WASM threads](https://github.com/WebAssembly/threads) proposals.
* It requires a recent-enough version of wasi-libc. The experimental support
is included in
[wasi-sdk 20.0](https://github.com/WebAssembly/wasi-sdk/releases/tag/wasi-sdk-20)
or later.
To build your application, cmake users can use the
[cmake toolchain file](https://github.com/WebAssembly/wasi-sdk/blob/main/wasi-sdk-pthread.cmake)
provided by wasi-sdk (see the sketch after this list).
* wasi-threads is implemented as a host function in WAMR.
(`WAMR_BUILD_LIB_WASI_THREADS`)
* The ABI is specified in the wasi-threads proposal.
You can run the same wasm modules on other runtimes which implement
the proposal. (wasmtime, toywasm, ...)
* Basically more feature-rich and complete than WAMR lib-pthread.
**EXCEPTION**: `pthread_exit` is not available as of this writing.
If `pthread_exit` is important for your use cases, please speak up in
the [GitHub issue](https://github.com/WebAssembly/wasi-threads/issues/7).
**EXCEPTION**: For threads created by `pthread_create`, the AUX stack
(aka C shadow stack) overflow detection mechanism is disabled as of
this writing.
If it's important for your use cases, please speak up in the
[GitHub issue](https://github.com/WebAssembly/wasi-threads/issues/12).
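As a rough illustration, here is a minimal, hypothetical example (the file name and build details are assumptions): plain pthread source code like this builds against either implementation, only the build flags differ.
``` C
/* hello_thread.c - minimal sketch, nothing WAMR-specific */
#include <pthread.h>
#include <stdio.h>

static void *
worker(void *arg)
{
    printf("hello from worker %d\n", *(int *)arg);
    return NULL;
}

int
main(void)
{
    pthread_t th;
    int id = 1;

    if (pthread_create(&th, NULL, worker, &id) != 0)
        return 1;
    pthread_join(th, NULL);
    return 0;
}
```
For wasi-threads, such code is typically compiled with `--target=wasm32-wasi-threads -pthread` (as the test scripts in this commit do) and run on a runtime built with `WAMR_BUILD_LIB_WASI_THREADS=1`.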
# References
* https://github.com/bytecodealliance/wasm-micro-runtime/issues/1790

View File

@ -1,5 +1,8 @@
# WAMR pthread library
**Note**: This document describes the old pthread implementation.
See [Pthread implementations](pthread_impls.md).
WAMR provides a built-in library to support pthread APIs. You can call pthread APIs in your application source code.
## Build and run

View File

@ -4,11 +4,11 @@
sockets. A socket is an abstract representation of the local endpoint of a
network communication path.
Currently, WAMR supports a limited set of the well-known functions:
`accept()`, `bind()`, `connect()`, `listen()`, `recv()`, `send()`, `shutdown()`
and `socket()`. Users can call those functions in WebAssembly code directly.
Those WebAssembly socket calls will be dispatched to the imported
functions and eventually will be implemented by host socket APIs.
Currently, WAMR supports some Socket API features:
- Support TCP and UDP
- Support IPv4 and IPv6
- Support get/set socket options
- Support access control
This document introduces a way to support the _Berkeley/POSIX Socket API_ in
WebAssembly code.
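As a hedged illustration (a minimal sketch only; the address and port are arbitrary, and the exact set of available helpers depends on the libc and socket extension in use), WebAssembly code calls the familiar POSIX functions directly:
``` C
/* Minimal TCP client sketch; 127.0.0.1:1234 is an arbitrary example address. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int
main(void)
{
    struct sockaddr_in addr = { 0 };
    int fd = socket(AF_INET, SOCK_STREAM, 0);

    if (fd < 0)
        return 1;

    addr.sin_family = AF_INET;
    addr.sin_port = htons(1234);
    addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);

    if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) == 0)
        send(fd, "hello", 5, 0);

    close(fd);
    return 0;
}
```
When running such a program under iwasm, remember to allow the target addresses with the `--addr-pool` option described below.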
@ -86,4 +86,4 @@ Similarly to running _iwasm_ outside of an enclave, the allowed address ranges a
$ iwasm --addr-pool=1.2.3.4/15,2.3.4.6/16 socket_example.wasm
```
Refer to [socket api sample](../samples/socket-api) for the compilation of the Wasm applications and [_iwasm_ for Intel SGX](../product-mini/platforms/linux-sgx) for the Wasm runtime.
Refer to [socket api sample](../samples/socket-api) for the compilation of the Wasm applications and [_iwasm_ for Intel SGX](../product-mini/platforms/linux-sgx) for the Wasm runtime.

View File

@ -1,5 +1,9 @@
# WAMR source debugging
References:
- [Blog: WAMR source debugging basic](https://bytecodealliance.github.io/wamr.dev/blog/wamr-source-debugging-basic/)
- [Blog: Debugging wasm with VSCode](https://bytecodealliance.github.io/wamr.dev/blog/debugging-wasm-with-vscode/)
WAMR supports source level debugging based on DWARF (normally used in C/C++/Rust); source map (normally used in AssemblyScript) is not supported.
**The lldb's ability to debug wasm application is based on the patch [Add class WasmProcess for WebAssembly debugging](https://reviews.llvm.org/D78801). Thanks very much to the author @paolosev for such a great work!**
@ -36,16 +40,29 @@ iwasm -g=127.0.0.1:1234 test.wasm
# Use port = 0 to allow a random assigned debug port
```
4. Build customized lldb (assume you have already cloned llvm)
4. Build customized lldb
``` bash
cd ${WAMR_ROOT}/core/deps/llvm
git apply ../../../build-scripts/lldb-wasm.patch
mkdir build-lldb && cd build-lldb
cmake -DCMAKE_BUILD_TYPE:STRING="Release" -DLLVM_ENABLE_PROJECTS="clang;lldb" -DLLVM_TARGETS_TO_BUILD:STRING="X86;WebAssembly" -DLLVM_ENABLE_LIBXML2:BOOL=ON ../llvm
make -j $(nproc)
git clone --branch release/13.x --depth=1 https://github.com/llvm/llvm-project
cd llvm-project
git apply ${WAMR_ROOT}/build-scripts/lldb-wasm.patch
mkdir build-lldb
cmake -S ./llvm -B build-lldb \
-G Ninja \
-DCMAKE_BUILD_TYPE=Release -DLLVM_ENABLE_PROJECTS="clang;lldb" \
-DLLVM_TARGETS_TO_BUILD:STRING="X86;WebAssembly" \
-DCMAKE_EXPORT_COMPILE_COMMANDS=ON -DLLVM_BUILD_BENCHMARKS:BOOL=OFF \
-DLLVM_BUILD_DOCS:BOOL=OFF -DLLVM_BUILD_EXAMPLES:BOOL=OFF \
-DLLVM_BUILD_LLVM_DYLIB:BOOL=OFF -DLLVM_BUILD_TESTS:BOOL=OFF \
-DLLVM_ENABLE_BINDINGS:BOOL=OFF -DLLVM_INCLUDE_BENCHMARKS:BOOL=OFF \
-DLLVM_INCLUDE_DOCS:BOOL=OFF -DLLVM_INCLUDE_EXAMPLES:BOOL=OFF \
-DLLVM_INCLUDE_TESTS:BOOL=OFF -DLLVM_ENABLE_LIBXML2:BOOL=ON
cmake --build build-lldb --target lldb --parallel $(nproc)
# The lldb is generated under build-lldb/bin/lldb
```
> Note: If using `CommandLineTools` on MacOS, make sure only one SDK is present in `/Library/Developer/CommandLineTools/SDKs`.
> You can download pre-built `wamr-lldb` binaries from [here](https://github.com/bytecodealliance/wasm-micro-runtime/releases).
5. Launch customized lldb and connect to iwasm
``` bash
lldb
@ -53,8 +70,6 @@ lldb
```
Then you can use lldb commands to debug your applications. Please refer to [lldb document](https://lldb.llvm.org/use/tutorial.html) for command usage.
> Known issue: `step over` on some function may be treated as `step in`, it will be fixed later.
## Debugging with AOT
> Note: AOT debugging is experimental and only a few debugging capabilities are supported.

View File

@ -1183,9 +1183,10 @@ class BasicTestSuite(unittest.TestCase):
self._wasm_store, module, imports, create_null_pointer(wasm_trap_t)
)
wasm_instance_delete(instance)
wasm_module_delete(module)
self.assertIsNullPointer(instance)
def test_wasm_instance_new_pos(self):
binary = load_module_file(MODULE_BINARY)
module = wasm_module_new(self._wasm_store, binary)
@ -1227,9 +1228,10 @@ class BasicTestSuite(unittest.TestCase):
create_null_pointer(wasm_trap_t),
)
wasm_instance_delete(instance)
wasm_module_delete(module)
self.assertIsNullPointer(instance)
# test those APIs in advanced:
# wasm_instance_delete
# wasm_instance_exports

452
product-mini/README.md Normal file
View File

@ -0,0 +1,452 @@
# Build iwasm
iwasm is the executable binary built with the WAMR VMcore; it supports WASI and a command line interface. Refer to [**how to build wamr vmcore**](../doc/build_wamr.md) for all the supported CMAKE compilation variables.
If you are building for the ARM architecture on an X86 development machine, you can use `CMAKE_TOOLCHAIN_FILE` to set the toolchain file for cross compiling.
```
cmake .. -DCMAKE_TOOLCHAIN_FILE=$TOOL_CHAIN_FILE \
-DWAMR_BUILD_PLATFORM=linux \
-DWAMR_BUILD_TARGET=ARM
```
Refer to toolchain sample file [`samples/simple/profiles/arm-interp/toolchain.cmake`](../samples/simple/profiles/arm-interp/toolchain.cmake) for how to build mini product for ARM target architecture.
If you compile for ESP-IDF, make sure to set the right toolchain file for the chip you're using (e.g. `$IDF_PATH/tools/cmake/toolchain-esp32c3.cmake`).
Note that all ESP-IDF toolchain files live under `$IDF_PATH/tools/cmake/`.
## Linux
First of all please install the dependent packages.
Run command below in Ubuntu-18.04:
``` Bash
sudo apt install build-essential cmake g++-multilib libgcc-8-dev lib32gcc-8-dev
```
Or in Ubuntu-16.04:
``` Bash
sudo apt install build-essential cmake g++-multilib libgcc-5-dev lib32gcc-5-dev
```
Or in Fedora:
``` Bash
sudo dnf install glibc-devel.i686
```
After installing dependencies, build the source code:
``` Bash
cd product-mini/platforms/linux/
mkdir build && cd build
cmake ..
make
# iwasm is generated under current directory
```
By default in Linux, the `fast interpreter`, `AOT` and `Libc WASI` are enabled, and JIT is disabled.
And the build target is set to X86_64 or X86_32 depending on the platform's bitwidth.
There are six running modes supported in total: fast interpreter, classic interpreter, AOT, LLVM JIT, Fast JIT and Multi-tier JIT.
(1) To run a wasm file with `fast interpreter` mode - build iwasm with the default build and then:
```Bash
iwasm <wasm file>
```
Or
```Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_INTERP=1
make
```
(2) To disable `fast interpreter` and enable `classic interpreter` instead:
``` Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_FAST_INTERP=0
make
```
(3) To run an AOT file, firstly please refer to [Build wamrc AOT compiler](../wamr-compiler/README.md) to build wamrc, and then:
```Bash
wamrc -o <AOT file> <WASM file>
iwasm <AOT file>
```
(4) To enable the `LLVM JIT` mode, firstly we should build the LLVM library:
``` Bash
cd product-mini/platforms/linux/
./build_llvm.sh (The llvm source code is cloned under <wamr_root_dir>/core/deps/llvm and auto built)
```
Then pass argument `-DWAMR_BUILD_JIT=1` to cmake to enable LLVM JIT:
``` Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_JIT=1
make
```
Note:
By default, the LLVM Orc JIT with Lazy compilation is enabled to speed up the launching process and reduce
the JIT compilation time: backend threads are created to compile the WASM functions in parallel, and in the
main thread a function in the module is not compiled until it is first called and has not yet been
compiled by the compilation threads.
To disable the Lazy compilation:
``` Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=0
make
```
With this setting, all the WASM functions are compiled before the main thread starts to run the wasm module.
(5) To enable the `Fast JIT` mode:
``` Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_FAST_JIT=1
make
```
The Fast JIT is a lightweight JIT engine with quick startup, small footprint and good portability, and achieves ~50% of the AOT performance.
(6) To enable the `Multi-tier JIT` mode:
``` Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_FAST_JIT=1 -DWAMR_BUILD_JIT=1
make
```
The Multi-tier JIT is a two-level JIT tier-up engine: it launches Fast JIT to run the wasm module as soon as possible and creates backend threads to compile the LLVM JIT functions at the same time; once the LLVM JIT functions are compiled, the runtime gradually switches execution from the Fast JIT jitted code to the LLVM JIT jitted code, so as to gain the best performance.
## Linux SGX (Intel Software Guard Extension)
Please see [Build and Port WAMR vmcore for Linux SGX](../doc/linux_sgx.md) for the details.
## MacOS
Make sure to install Xcode from the App Store first, and install cmake.
If you use Homebrew, install cmake from the command line:
``` Bash
brew install cmake
```
Then build the source codes:
``` Bash
cd product-mini/platforms/darwin/
mkdir build
cd build
cmake ..
make
# iwasm is generated under current directory
```
By default in MacOS, the `fast interpreter`, `AOT` and `Libc WASI` are enabled, and JIT is disabled.
And the build target is set to X86_64 or X86_32 depending on the platform's bitwidth.
To run a wasm file with interpreter mode:
```Bash
iwasm <wasm file>
```
To run an AOT file, firstly please refer to [Build wamrc AOT compiler](../wamr-compiler/README.md) to build wamrc, and then:
```Bash
wamrc -o <AOT file> <WASM file>
iwasm <AOT file>
```
Note:
For how to build the `JIT` mode and `classic interpreter` mode, please refer to [Build iwasm on Linux](../doc/build_wamr.md#linux).
WAMR provides some features which can be easily configured by passing options to cmake, please see [WAMR vmcore cmake building configurations](../doc/build_wamr.md#wamr-vmcore-cmake-building-configurations) for details. Currently in MacOS, interpreter, AOT, and builtin libc are enabled by default.
## Windows
Make sure `MSVC` and `cmake` are installed and available in the command line environment
Then build the source codes:
``` Bash
cd product-mini/platforms/windows/
mkdir build
cd build
cmake ..
cmake --build . --config Release
# ./Release/iwasm.exe is generated
```
By default in Windows, the `fast interpreter`, `AOT` and `Libc WASI` are enabled, and JIT is disabled.
To run a wasm file with interpreter mode:
```Bash
iwasm.exe <wasm file>
```
To run an AOT file, firstly please refer to [Build wamrc AOT compiler](../wamr-compiler/README.md) to build wamrc, and then:
```Bash
wamrc.exe -o <AOT file> <WASM file>
iwasm.exe <AOT file>
```
Note:
For how to build the `JIT` mode and `classic interpreter` mode, please refer to [Build iwasm on Linux](../doc/build_wamr.md#linux).
WAMR provides some features which can be easily configured by passing options to cmake, please see [WAMR vmcore cmake building configurations](../doc/build_wamr.md#wamr-vmcore-cmake-building-configurations) for details. Currently in Windows, interpreter, AOT, and builtin libc are enabled by default.
## MinGW
First make sure the correct CMake package is installed; the following commands
are valid for the MSYS2 build environment:
```Bash
pacman -R cmake
pacman -S mingw-w64-x86_64-cmake
pacman -S mingw-w64-x86_64-gcc
pacman -S make git
```
Then follow the build instructions for Windows above, and add the following
arguments for cmake:
```Bash
cmake .. -G"Unix Makefiles" \
-DWAMR_DISABLE_HW_BOUND_CHECK=1
```
Note that WASI will be disabled until further work is done towards full MinGW support.
- Since memory access boundary check with hardware trap feature is disabled, when generating the AOT file with `wamrc`, the `--bounds-checks=1` flag should be added to generate the memory access boundary check instructions to ensure the sandbox security:
```bash
wamrc --bounds-checks=1 -o <aot_file> <wasm_file>
```
- Compiler complaining about missing `UnwindInfoAddress` field in `RUNTIME_FUNCTION`
struct (winnt.h).
## VxWorks
VxWorks 7 SR0620 release is validated.
First you need to build a VSB. Make sure *UTILS_UNIX* layer is added in the VSB.
After the VSB is built, export the VxWorks toolchain path by:
```bash
export PATH=<vsb_dir_path>/host/vx-compiler/bin:$PATH
```
Now switch to iwasm source tree to build the source code:
```bash
cd product-mini/platforms/vxworks/
mkdir build
cd build
cmake ..
make
```
Create a VIP based on the VSB. Make sure the following components are added:
* INCLUDE_POSIX_PTHREADS
* INCLUDE_POSIX_PTHREAD_SCHEDULER
* INCLUDE_SHARED_DATA
* INCLUDE_SHL
Copy the generated iwasm executable, the test WASM binary as well as the needed
shared libraries (libc.so.1, libllvm.so.1 or libgnu.so.1 depending on the VSB,
libunix.so.1) to a supported file system (eg: romfs).
Note:
WAMR provides some features which can be easily configured by passing options to cmake, please see [WAMR vmcore cmake building configurations](../doc/build_wamr.md#wamr-vmcore-cmake-building-configurations) for details. Currently in VxWorks, interpreter and builtin libc are enabled by default.
## Zephyr
You need to prepare Zephyr first as described [here](https://docs.zephyrproject.org/latest/getting_started/index.html#get-zephyr-and-install-python-dependencies).
After that you need to point the `ZEPHYR_BASE` variable to e.g. `~/zephyrproject/zephyr`. Also, it is important that you have `west` available for subsequent actions.
``` Bash
cd <wamr_root_dir>/product-mini/platforms/zephyr/simple
# Execute the ./build_and_run.sh script with board name as parameter. Here take x86 as example:
./build_and_run.sh x86
```
The [Zephyr SDK](https://github.com/zephyrproject-rtos/sdk-ng) provides toolchains for all supported targets. Follow the instructions in the [documentation](https://docs.zephyrproject.org/latest/develop/getting_started/index.html#install-zephyr-sdk) to ensure it is installed and configured correctly.
Note:
WAMR provides some features which can be easily configured by passing options to cmake, please see [WAMR vmcore cmake building configurations](../doc/build_wamr.md#wamr-vmcore-cmake-building-configurations) for details. Currently in Zephyr, interpreter, AOT and builtin libc are enabled by default.
## RT-Thread
1. Get rt-thread [system codes](https://github.com/RT-Thread/rt-thread).
2. Enable the WAMR software package with the menuconfig tool provided by RT-Thread.
* Environment in Linux, run command below:
```bash
scons --menuconfig
```
* Environment in Windows ConEmu, run command below:
```bash
menuconfig
```
Select and enable `WAMR` in:
* RT-Thread online packages
* tools packages
* WebAssembly Micro Runtime (WAMR)
3. Configure `WAMR` with the menuconfig tool.
You can choose the iwasm features below:
* Enable testing parameters of iwasm
* Enable interpreter Mode / Fast interpreter Mode
* Use built-libc
* Enable AOT
4. Exit the menuconfig tool and save the configuration, then update and download the package.
```bash
pkgs --update
```
5. Build the project and download the binary to the board.
```bash
scons
```
or build the project with 8 threads using the command below:
```bash
scons -j8
```
After the project is built, you will get a binary file named `rtthread.bin`, which you can then download to the MCU board.
## Android
WAMR is able to generate a shared library that supports the Android platform.
- need an [android SDK](https://developer.android.com/studio). Go and get the "Command line tools only"
- look for a command named *sdkmanager* and download the components below. You may need to check the version numbers and pick other versions
- "build-tools;29.0.3"
- "cmake;3.10.2.4988404"
- "ndk;latest"
- "patcher;v4"
- "platform-tools"
- "platforms;android-29"
- add bin/ of the downloaded cmake to $PATH
- export ANDROID_HOME=/the/path/of/downloaded/sdk/
- export ANDROID_NDK_LATEST_HOME=/the/path/of/downloaded/sdk/ndk/2x.xxx/
- ready to go
Using the commands below, you are able to compile with the default configurations. Any compilation requirement can be satisfied by modifying product-mini/platforms/android/CMakeLists.txt. For example, changing ${WAMR_BUILD_TARGET} in the CMakeLists file produces libraries that support different ABIs.
``` shell
$ cd product-mini/platforms/android/
$ mkdir build
$ cd build
$ cmake ..
$ make
$ # check output in distribution/wasm
$ # include/ includes all necessary header files
$ # lib includes libiwasm.so
```
## NuttX
WAMR is integrated with NuttX; just enable WAMR in the Kconfig option (Application Configuration/Interpreters).
## ESP-IDF
WAMR integrates with ESP-IDF both for the XTENSA and RISC-V chips (esp32x and esp32c3 respectively).
In order to use this, you need at least version 4.3.1 of ESP-IDF.
If you don't have it installed, follow the instructions [here](https://docs.espressif.com/projects/esp-idf/en/latest/esp32/get-started/#get-started-get-prerequisites).
ESP-IDF also installs the toolchains needed for compiling WAMR and ESP-IDF.
A small demonstration of how to use WAMR and ESP-IDF can be found under [product_mini](./platforms/esp-idf).
The demo builds WAMR for ESP-IDF and runs a small wasm program.
In order to run it for your specific Espressif chip, edit the [build_and_run.sh](./platforms/esp-idf/build_and_run.sh) file and put the correct toolchain file (see #Cross-compilation) and `IDF_TARGET`.
Before compiling it is also necessary to call ESP-IDF's `export.sh` script to bring all compile time relevant information in scope.
## Docker
[Docker](https://www.docker.com/) will download all the dependencies and build WAMR Core on your behalf.
Make sure you have Docker installed on your machine: [macOS](https://docs.docker.com/docker-for-mac/install/), [Windows](https://docs.docker.com/docker-for-windows/install/) or [Linux](https://docs.docker.com/install/linux/docker-ce/ubuntu/).
Build *iwasm* with the Docker image:
``` Bash
$ cd ci
$ ./build_wamr.sh
$ ls ../build_out/
```
*build_wamr.sh* will generate *linux* compatible libraries ( libiwasm.so and
libvmlib.a ) and an executable binary (*iwasm*) and copy *iwasm* to
*build_out*. All original generated files are still under
*product-mini/platforms/linux/build*.
## FreeBSD
First, install the dependent packages:
```shell
sudo pkg install gcc cmake wget
```
Then you can run the following commands to build iwasm with default configurations:
```shell
cd product-mini/platforms/freebsd
mkdir build && cd build
cmake ..
make
```
## AliOS-Things
1. a developerkit board is needed for testing
2. download the AliOS-Things code
``` Bash
git clone https://github.com/alibaba/AliOS-Things.git
```
3. copy <wamr_root_dir>/product-mini/platforms/alios-things directory to AliOS-Things/middleware, and rename it as iwasm
``` Bash
cp -a <wamr_root_dir>/product-mini/platforms/alios-things middleware/iwasm
```
4. create a link to <wamr_root_dir> in middleware/iwasm/ and rename it to wamr
``` Bash
ln -s <wamr_root_dir> middleware/iwasm/wamr
```
5. modify file app/example/helloworld/helloworld.c, patch as:
``` C
#include <stdbool.h>
#include <aos/kernel.h>
extern bool iwasm_init();
int application_start(int argc, char *argv[])
{
int count = 0;
iwasm_init();
...
}
```
6. modify file app/example/helloworld/aos.mk
``` C
$(NAME)_COMPONENTS := osal_aos iwasm
```
7. build source code and run
For linux host:
``` Bash
aos make helloworld@linuxhost -c config
aos make
./out/helloworld@linuxhost/binary/helloworld@linuxhost.elf
```
For developerkit:
Modify file middleware/iwasm/aos.mk, patch as:
``` C
WAMR_BUILD_TARGET := THUMBV7M
```
``` Bash
aos make helloworld@developerkit -c config
aos make
```
download the binary to the developerkit board and check the output from the serial port

View File

@ -3,4 +3,5 @@
# Copyright (C) 2020 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
/usr/bin/env python3 -m pip install --user -r ../../../build-scripts/requirements.txt
/usr/bin/env python3 ../../../build-scripts/build_llvm.py --platform android "$@"

View File

@ -3,4 +3,5 @@
# Copyright (C) 2020 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
/usr/bin/env python3 -m pip install --user -r ../../../build-scripts/requirements.txt
/usr/bin/env python3 ../../../build-scripts/build_llvm.py --platform darwin "$@"

View File

@ -3,4 +3,5 @@
# Copyright (C) 2020 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
/usr/bin/env python3 -m pip install --user -r ../../../build-scripts/requirements.txt
/usr/bin/env python3 ../../../build-scripts/build_llvm.py "$@"

View File

@ -16,6 +16,7 @@ set (CMAKE_SHARED_LIBRARY_LINK_C_FLAGS "")
set (CMAKE_SHARED_LIBRARY_LINK_CXX_FLAGS "")
set (CMAKE_C_STANDARD 99)
set (CMAKE_CXX_STANDARD 14)
# Set WAMR_BUILD_TARGET, currently values supported:
# "X86_64", "AMD_64", "X86_32", "AARCH64[sub]", "ARM[sub]", "THUMB[sub]",

Some files were not shown because too many files have changed in this diff.