Import WAMR Fast JIT (#1343)

Import WAMR Fast JIT which is a lightweight JIT with quick startup, small footprint,
relatively good performance (~40% to ~50% of LLVM JIT) and good portability.

Platforms supported: Linux, macOS and Linux SGX.
Arch supported: x86-64.
This commit is contained in:
Wenyong Huang 2022-08-02 16:03:50 +08:00 committed by GitHub
parent 1c6d10095e
commit bf28030993
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
68 changed files with 22563 additions and 65 deletions

View File

@ -454,7 +454,7 @@ jobs:
runs-on: ubuntu-20.04
strategy:
matrix:
test_option: [$DEFAULT_TEST_OPTIONS, $SIMD_TEST_OPTIONS]
test_option: [$DEFAULT_TEST_OPTIONS]
steps:
- name: checkout
uses: actions/checkout@v3
@ -488,7 +488,13 @@ jobs:
runs-on: ubuntu-20.04
strategy:
matrix:
test_option: [$MULTI_MODULES_TEST_OPTIONS, $THREADS_TEST_OPTIONS]
running_mode: ["classic-interp", "fast-interp", "jit", "aot"]
test_option:
[
$MULTI_MODULES_TEST_OPTIONS,
$SIMD_TEST_OPTIONS,
$THREADS_TEST_OPTIONS,
]
steps:
- name: checkout
uses: actions/checkout@v3
@ -513,7 +519,7 @@ jobs:
run: sudo apt install -y ninja-build
- name: run spec tests
run: ./test_wamr.sh ${{ matrix.test_option }}
run: ./test_wamr.sh ${{ matrix.test_option }} -t ${{ matrix.running_mode }}
working-directory: ./tests/wamr-test-suites
spec_test_x86_32:
@ -522,6 +528,7 @@ jobs:
runs-on: ubuntu-20.04
strategy:
matrix:
running_mode: ["classic-interp", "fast-interp", "jit", "aot"]
test_option: [$DEFAULT_TEST_OPTIONS, $THREADS_TEST_OPTIONS]
steps:
- name: checkout
@ -553,5 +560,5 @@ jobs:
sudo apt install -y g++-multilib lib32gcc-9-dev ninja-build
- name: run spec tests
run: ./test_wamr.sh ${{ env.X86_32_TARGET_TEST_OPTIONS }} ${{ matrix.test_option }}
run: ./test_wamr.sh ${{ env.X86_32_TARGET_TEST_OPTIONS }} ${{ matrix.test_option }} -t ${{ matrix.running_mode }}
working-directory: ./tests/wamr-test-suites

2
.gitignore vendored
View File

@ -26,3 +26,5 @@ tests/wamr-test-suites/workspace
!/test-tools/wamr-ide/VSCode-Extension/.vscode
samples/socket-api/wasm-src/inc/pthread.h
**/__pycache__

View File

@ -13,6 +13,8 @@ WAMR project reused some components from other open source project:
- **WebAssembly debugging patch for LLDB**: for extending the ability of LLDB to support wasm debugging
- **libuv**: for the WASI Libc with uvwasi implementation
- **uvwasi**: for the WASI Libc with uvwasi implementation
- **asmjit**: for the Fast JIT x86-64 codegen implementation
- **zydis**: for the Fast JIT x86-64 codegen implementation
The WAMR fast interpreter is a clean room development. We would like to acknowledge the inspiration from the [WASM3](https://github.com/wasm3/wasm3) open source project for the approach of pre-calculated operand stack location.
@ -29,6 +31,8 @@ The WAMR fast interpreter is a clean room development. We would acknowledge the
| WebAssembly debugging patch for LLDB | unspecified | unspecified | https://reviews.llvm.org/D78801 | |
| libuv | v1.42.0 | v1.44.1 | https://github.com/libuv/libuv | https://www.cvedetails.com/vendor/15402/Libuv-Project.html |
| uvwasi | unspecified | v0.0.12 | https://github.com/nodejs/uvwasi | |
| asmjit | unspecified | unspecified | https://github.com/asmjit/asmjit | |
| zydis | unspecified | e14a07895136182a5b53e181eec3b1c6e0b434de | https://github.com/zyantific/zydis | |
## Licenses
@ -79,3 +83,9 @@ The WAMR fast interpreter is a clean room development. We would acknowledge the
### uvwasi
[LICENSE](./core/iwasm/libraries/libc-uvwasi/LICENSE_UVWASI)
### asmjit
[LICENSE](./core/iwasm/fast-jit/cg/LICENSE_ASMJIT)
### zydis
[LICENSE](./core/iwasm/fast-jit/cg/LICENSE_ZYDIS)

View File

@ -7,7 +7,7 @@ WebAssembly Micro Runtime
[BA]: https://bytecodealliance.org/
WebAssembly Micro Runtime (WAMR) is a lightweight standalone WebAssembly (WASM) runtime with small footprint, high performance and highly configurable features for applications cross from embedded, IoT, edge to Trusted Execution Environment (TEE), smart contract, cloud native and so on. It includes a few parts as below:
- The [**"iwasm" VM core**](./README.md#iwasm-vm-core) to run WASM applications, supporting interpreter mode, AOT mode (Ahead-of-Time compilation) and JIT mode (Just-in-Time compilation)
- The [**"iwasm" VM core**](./README.md#iwasm-vm-core) to run WASM applications, supporting interpreter mode, AOT mode (Ahead-of-Time compilation) and JIT modes (Just-in-Time compilation, LLVM JIT and Fast JIT are supported)
- The [**"wamrc" AOT compiler**](./README.md#build-wamrc-aot-compiler) to compile WASM file into AOT file for best performance and smaller runtime footprint, which is run by "iwasm" VM Core

View File

@ -138,6 +138,8 @@ if (WAMR_BUILD_JIT EQUAL 1)
else ()
message (" WAMR LLVM MC JIT enabled")
endif ()
elseif (WAMR_BUILD_FAST_JIT EQUAL 1)
message (" WAMR Fast JIT enabled")
else ()
message (" WAMR JIT disabled")
endif ()

View File

@ -50,7 +50,11 @@ if (NOT DEFINED WAMR_BUILD_TARGET)
endif ()
################ optional according to settings ################
if (WAMR_BUILD_INTERP EQUAL 1 OR WAMR_BUILD_JIT EQUAL 1)
if (WAMR_BUILD_INTERP EQUAL 1 OR WAMR_BUILD_JIT EQUAL 1
OR WAMR_BUILD_FAST_JIT EQUAL 1)
if (WAMR_BUILD_FAST_JIT EQUAL 1)
set (WAMR_BUILD_FAST_INTERP 0)
endif ()
include (${IWASM_DIR}/interpreter/iwasm_interp.cmake)
endif ()
@ -61,6 +65,10 @@ if (WAMR_BUILD_AOT EQUAL 1)
endif ()
endif ()
if (NOT WAMR_BUILD_JIT EQUAL 1 AND WAMR_BUILD_FAST_JIT EQUAL 1)
include (${IWASM_DIR}/fast-jit/iwasm_fast_jit.cmake)
endif ()
if (WAMR_BUILD_APP_FRAMEWORK EQUAL 1)
include (${APP_FRAMEWORK_DIR}/app_framework.cmake)
include (${SHARED_DIR}/coap/lib_coap.cmake)
@ -139,6 +147,7 @@ set (source_all
${IWASM_INTERP_SOURCE}
${IWASM_AOT_SOURCE}
${IWASM_COMPL_SOURCE}
${IWASM_FAST_JIT_SOURCE}
${WASM_APP_LIB_SOURCE_ALL}
${NATIVE_INTERFACE_SOURCE}
${APP_MGR_SOURCE}

View File

@ -94,6 +94,18 @@
#define WASM_ENABLE_LAZY_JIT 0
#endif
/* Default feature switches for the Fast JIT; each may be overridden by
 * the build system before this header is included. */
#ifndef WASM_ENABLE_FAST_JIT
#define WASM_ENABLE_FAST_JIT 0
#endif

#ifndef WASM_ENABLE_FAST_JIT_DUMP
#define WASM_ENABLE_FAST_JIT_DUMP 0
#endif

#ifndef FAST_JIT_DEFAULT_CODE_CACHE_SIZE
/* 10 MB default code cache. Parenthesized so the macro expands safely
 * inside larger expressions (e.g. division or comparison). */
#define FAST_JIT_DEFAULT_CODE_CACHE_SIZE (10 * 1024 * 1024)
#endif

#ifndef WASM_ENABLE_WAMR_COMPILER
#define WASM_ENABLE_WAMR_COMPILER 0
#endif

View File

@ -73,6 +73,7 @@ wasm_exec_env_create_internal(struct WASMModuleInstanceCommon *module_inst,
#if WASM_ENABLE_MEMORY_TRACING != 0
wasm_runtime_dump_exec_env_mem_consumption(exec_env);
#endif
return exec_env;
#if WASM_ENABLE_THREAD_MGR != 0

View File

@ -84,6 +84,17 @@ typedef struct WASMExecEnv {
void **native_symbol;
#endif
#if WASM_ENABLE_FAST_JIT != 0
/**
* Cache for
* - jit native operations in 32-bit target which hasn't 64-bit
* int/float registers, mainly for the operations of double and int64,
* such as F64TOI64, F32TOI64, I64 MUL/REM, and so on.
* - SSE instructions.
**/
uint64 jit_cache[2];
#endif
#if WASM_ENABLE_THREAD_MGR != 0
/* thread return value */
void *thread_ret_value;

View File

@ -27,6 +27,9 @@
#if WASM_ENABLE_SHARED_MEMORY != 0
#include "wasm_shared_memory.h"
#endif
#if WASM_ENABLE_FAST_JIT != 0
#include "../fast-jit/jit_compiler.h"
#endif
#include "../common/wasm_c_api_internal.h"
/**
@ -117,6 +120,10 @@ runtime_malloc(uint64 size, WASMModuleInstanceCommon *module_inst,
return mem;
}
#if WASM_ENABLE_FAST_JIT != 0
static JitCompOptions jit_options = { 0 };
#endif
#ifdef OS_ENABLE_HW_BOUND_CHECK
/* The exec_env of thread local storage, set before calling function
and used in signal handler, as we cannot get it from the argument
@ -259,8 +266,20 @@ wasm_runtime_env_init()
}
#endif
#if WASM_ENABLE_FAST_JIT != 0
if (!jit_compiler_init(&jit_options)) {
goto fail9;
}
#endif
return true;
#if WASM_ENABLE_FAST_JIT != 0
fail9:
#if WASM_ENABLE_REF_TYPES != 0
wasm_externref_map_destroy();
#endif
#endif
#if WASM_ENABLE_REF_TYPES != 0
fail8:
#endif
@ -321,6 +340,10 @@ wasm_runtime_init()
void
wasm_runtime_destroy()
{
#if WASM_ENABLE_FAST_JIT != 0
jit_compiler_destroy();
#endif
#if WASM_ENABLE_REF_TYPES != 0
wasm_externref_map_destroy();
#endif
@ -368,6 +391,10 @@ wasm_runtime_full_init(RuntimeInitArgs *init_args)
&init_args->mem_alloc_option))
return false;
#if WASM_ENABLE_FAST_JIT != 0
jit_options.code_cache_size = init_args->fast_jit_code_cache_size;
#endif
if (!wasm_runtime_env_init()) {
wasm_runtime_memory_destroy();
return false;

View File

@ -0,0 +1,42 @@
diff --git a/src/asmjit/core/cpuinfo.cpp b/src/asmjit/core/cpuinfo.cpp
index 7bf7407..ae2160b 100644
--- a/src/asmjit/core/cpuinfo.cpp
+++ b/src/asmjit/core/cpuinfo.cpp
@@ -9,13 +9,13 @@
#if !defined(_WIN32)
#include <errno.h>
- #include <sys/utsname.h>
+ //#include <sys/utsname.h>
#include <unistd.h>
#endif
// Required by `getauxval()` on Linux.
#if defined(__linux__)
- #include <sys/auxv.h>
+ //#include <sys/auxv.h>
#endif
//! Required to detect CPU and features on Apple platforms.
diff --git a/src/asmjit/core/globals.cpp b/src/asmjit/core/globals.cpp
index 2bbd0c0..e6b69e5 100644
--- a/src/asmjit/core/globals.cpp
+++ b/src/asmjit/core/globals.cpp
@@ -105,6 +105,8 @@ ASMJIT_FAVOR_SIZE const char* DebugUtils::errorAsString(Error err) noexcept {
#endif
}
+extern "C" int os_printf(const char *message, ...);
+
// DebugUtils - Debug Output
// =========================
@@ -112,7 +114,7 @@ ASMJIT_FAVOR_SIZE void DebugUtils::debugOutput(const char* str) noexcept {
#if defined(_WIN32)
::OutputDebugStringA(str);
#else
- ::fputs(str, stderr);
+ os_printf(str);
#endif
}

View File

@ -0,0 +1,17 @@
Copyright (c) 2008-2020 The AsmJit Authors
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

View File

@ -0,0 +1,23 @@
The MIT License (MIT)
Copyright (c) 2014-2021 Florian Bernd
Copyright (c) 2014-2021 Joel Höner
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,345 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_compare.h"
#include "jit_emit_function.h"
#include "../jit_frontend.h"
#include "../jit_codegen.h"
/**
 * Emit IR comparing the top integer stack operand(s) and push an i32
 * boolean (0/1) result. For INT_EQZ the right-hand side is the constant
 * zero; otherwise both operands are popped from the operand stack.
 *
 * @param cc the JIT compilation context
 * @param cond the comparison condition (must be in [INT_EQZ, INT_GE_U])
 * @param is64Bit true to compare i64 operands, false for i32
 *
 * @return true on success, false on failure (error set in cc)
 */
static bool
jit_compile_op_compare_integer(JitCompContext *cc, IntCond cond, bool is64Bit)
{
    JitReg lhs, rhs, res, const_zero, const_one;

    if (cond < INT_EQZ || cond > INT_GE_U) {
        /* NOTE(review): fixed typo in error message
           ("comparation" -> "comparison") */
        jit_set_last_error(cc, "unsupported comparison operation");
        goto fail;
    }

    res = jit_cc_new_reg_I32(cc);
    const_zero = NEW_CONST(I32, 0);
    const_one = NEW_CONST(I32, 1);

    if (is64Bit) {
        if (INT_EQZ == cond) {
            rhs = NEW_CONST(I64, 0);
        }
        else {
            POP_I64(rhs);
        }
        POP_I64(lhs);
    }
    else {
        if (INT_EQZ == cond) {
            rhs = NEW_CONST(I32, 0);
        }
        else {
            POP_I32(rhs);
        }
        POP_I32(lhs);
    }

    /* Compare once, then select 1 or 0 based on the condition flags */
    GEN_INSN(CMP, cc->cmp_reg, lhs, rhs);
    switch (cond) {
        case INT_EQ:
        case INT_EQZ:
        {
            GEN_INSN(SELECTEQ, res, cc->cmp_reg, const_one, const_zero);
            break;
        }
        case INT_NE:
        {
            GEN_INSN(SELECTNE, res, cc->cmp_reg, const_one, const_zero);
            break;
        }
        case INT_LT_S:
        {
            GEN_INSN(SELECTLTS, res, cc->cmp_reg, const_one, const_zero);
            break;
        }
        case INT_LT_U:
        {
            GEN_INSN(SELECTLTU, res, cc->cmp_reg, const_one, const_zero);
            break;
        }
        case INT_GT_S:
        {
            GEN_INSN(SELECTGTS, res, cc->cmp_reg, const_one, const_zero);
            break;
        }
        case INT_GT_U:
        {
            GEN_INSN(SELECTGTU, res, cc->cmp_reg, const_one, const_zero);
            break;
        }
        case INT_LE_S:
        {
            GEN_INSN(SELECTLES, res, cc->cmp_reg, const_one, const_zero);
            break;
        }
        case INT_LE_U:
        {
            GEN_INSN(SELECTLEU, res, cc->cmp_reg, const_one, const_zero);
            break;
        }
        case INT_GE_S:
        {
            GEN_INSN(SELECTGES, res, cc->cmp_reg, const_one, const_zero);
            break;
        }
        default: /* INT_GE_U */
        {
            GEN_INSN(SELECTGEU, res, cc->cmp_reg, const_one, const_zero);
            break;
        }
    }

    PUSH_I32(res);
    return true;
/* POP_*/PUSH_* macros jump here on error */
fail:
    return false;
}
/* Compile an i32 comparison opcode; thin wrapper over the shared
 * integer-compare emitter with is64Bit = false. */
bool
jit_compile_op_i32_compare(JitCompContext *cc, IntCond cond)
{
    return jit_compile_op_compare_integer(cc, cond, false);
}

/* Compile an i64 comparison opcode; thin wrapper over the shared
 * integer-compare emitter with is64Bit = true. */
bool
jit_compile_op_i64_compare(JitCompContext *cc, IntCond cond)
{
    return jit_compile_op_compare_integer(cc, cond, true);
}
/* Host helpers for f32/f64 eq/ne, called natively from JITed code when
 * the operands are not compile-time constants. IEEE-754 semantics: any
 * NaN operand makes '==' false and '!=' true. */

static int32
float_cmp_eq(float f1, float f2)
{
    /* NaN compares unequal to everything, including itself */
    return (isnan(f1) || isnan(f2)) ? 0 : (f1 == f2);
}

static int32
float_cmp_ne(float f1, float f2)
{
    return (isnan(f1) || isnan(f2)) ? 1 : (f1 != f2);
}

static int32
double_cmp_eq(double d1, double d2)
{
    return (isnan(d1) || isnan(d2)) ? 0 : (d1 == d2);
}

static int32
double_cmp_ne(double d1, double d2)
{
    return (isnan(d1) || isnan(d2)) ? 1 : (d1 != d2);
}
/* Emit IR for a float comparison whose operands are not both constants.
 * FLOAT_EQ/FLOAT_NE need full NaN handling, so they are lowered to a
 * native call to the float_cmp_*/double_cmp_* helpers above; the ordered
 * relational conditions are lowered to a CMP plus a signed SELECT.
 * Pushes an i32 boolean result; returns false on emission failure. */
static bool
jit_compile_op_compare_float_point(JitCompContext *cc, FloatCond cond,
                                   JitReg lhs, JitReg rhs)
{
    JitReg res, args[2], const_zero, const_one;
    JitRegKind kind;
    void *func;

    if (cond == FLOAT_EQ || cond == FLOAT_NE) {
        /* Pick the float or double helper based on the operand kind */
        kind = jit_reg_kind(lhs);
        if (cond == FLOAT_EQ)
            func = (kind == JIT_REG_KIND_F32) ? (void *)float_cmp_eq
                                              : (void *)double_cmp_eq;
        else
            func = (kind == JIT_REG_KIND_F32) ? (void *)float_cmp_ne
                                              : (void *)double_cmp_ne;

        res = jit_cc_new_reg_I32(cc);
        args[0] = lhs;
        args[1] = rhs;

        if (!jit_emit_callnative(cc, func, res, args, 2)) {
            goto fail;
        }
    }
    else {
        res = jit_cc_new_reg_I32(cc);
        const_zero = NEW_CONST(I32, 0);
        const_one = NEW_CONST(I32, 1);
        switch (cond) {
            /* LT/LE swap the operand order so only the "greater" selects
               are needed */
            case FLOAT_LT:
            {
                GEN_INSN(CMP, cc->cmp_reg, rhs, lhs);
                GEN_INSN(SELECTGTS, res, cc->cmp_reg, const_one, const_zero);
                break;
            }
            case FLOAT_GT:
            {
                GEN_INSN(CMP, cc->cmp_reg, lhs, rhs);
                GEN_INSN(SELECTGTS, res, cc->cmp_reg, const_one, const_zero);
                break;
            }
            case FLOAT_LE:
            {
                GEN_INSN(CMP, cc->cmp_reg, rhs, lhs);
                GEN_INSN(SELECTGES, res, cc->cmp_reg, const_one, const_zero);
                break;
            }
            case FLOAT_GE:
            {
                GEN_INSN(CMP, cc->cmp_reg, lhs, rhs);
                GEN_INSN(SELECTGES, res, cc->cmp_reg, const_one, const_zero);
                break;
            }
            default:
            {
                bh_assert(!"unknown FloatCond");
                goto fail;
            }
        }
    }

    PUSH_I32(res);
    return true;
/* PUSH_I32 jumps here on error */
fail:
    return false;
}
/* Compile an f32 comparison opcode. If both operands are compile-time
 * constants the comparison is folded to a constant 0/1 here; otherwise
 * emission is delegated to jit_compile_op_compare_float_point().
 * Returns false on failure. */
bool
jit_compile_op_f32_compare(JitCompContext *cc, FloatCond cond)
{
    JitReg res, const_zero, const_one;
    JitReg lhs, rhs;

    POP_F32(rhs);
    POP_F32(lhs);

    if (jit_reg_is_const_val(lhs) && jit_reg_is_const_val(rhs)) {
        /* Constant folding: evaluate with host float semantics */
        float32 lvalue = jit_cc_get_const_F32(cc, lhs);
        float32 rvalue = jit_cc_get_const_F32(cc, rhs);

        const_zero = NEW_CONST(I32, 0);
        const_one = NEW_CONST(I32, 1);

        switch (cond) {
            case FLOAT_EQ:
            {
                res = (lvalue == rvalue) ? const_one : const_zero;
                break;
            }
            case FLOAT_NE:
            {
                res = (lvalue != rvalue) ? const_one : const_zero;
                break;
            }
            case FLOAT_LT:
            {
                res = (lvalue < rvalue) ? const_one : const_zero;
                break;
            }
            case FLOAT_GT:
            {
                res = (lvalue > rvalue) ? const_one : const_zero;
                break;
            }
            case FLOAT_LE:
            {
                res = (lvalue <= rvalue) ? const_one : const_zero;
                break;
            }
            case FLOAT_GE:
            {
                res = (lvalue >= rvalue) ? const_one : const_zero;
                break;
            }
            default:
            {
                bh_assert(!"unknown FloatCond");
                goto fail;
            }
        }

        PUSH_I32(res);
        return true;
    }

    return jit_compile_op_compare_float_point(cc, cond, lhs, rhs);
/* POP_*/PUSH_* macros jump here on error */
fail:
    return false;
}
/* Compile an f64 comparison opcode. Mirrors jit_compile_op_f32_compare:
 * fold when both operands are constants, otherwise delegate to
 * jit_compile_op_compare_float_point(). Returns false on failure. */
bool
jit_compile_op_f64_compare(JitCompContext *cc, FloatCond cond)
{
    JitReg res, const_zero, const_one;
    JitReg lhs, rhs;

    POP_F64(rhs);
    POP_F64(lhs);

    if (jit_reg_is_const_val(lhs) && jit_reg_is_const_val(rhs)) {
        /* Constant folding: evaluate with host double semantics */
        float64 lvalue = jit_cc_get_const_F64(cc, lhs);
        float64 rvalue = jit_cc_get_const_F64(cc, rhs);

        const_zero = NEW_CONST(I32, 0);
        const_one = NEW_CONST(I32, 1);

        switch (cond) {
            case FLOAT_EQ:
            {
                res = (lvalue == rvalue) ? const_one : const_zero;
                break;
            }
            case FLOAT_NE:
            {
                res = (lvalue != rvalue) ? const_one : const_zero;
                break;
            }
            case FLOAT_LT:
            {
                res = (lvalue < rvalue) ? const_one : const_zero;
                break;
            }
            case FLOAT_GT:
            {
                res = (lvalue > rvalue) ? const_one : const_zero;
                break;
            }
            case FLOAT_LE:
            {
                res = (lvalue <= rvalue) ? const_one : const_zero;
                break;
            }
            case FLOAT_GE:
            {
                res = (lvalue >= rvalue) ? const_one : const_zero;
                break;
            }
            default:
            {
                bh_assert(!"unknown FloatCond");
                goto fail;
            }
        }

        PUSH_I32(res);
        return true;
    }

    return jit_compile_op_compare_float_point(cc, cond, lhs, rhs);
/* POP_*/PUSH_* macros jump here on error */
fail:
    return false;
}

View File

@ -0,0 +1,32 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
/* Declarations of the Fast JIT emitters for WASM comparison opcodes.
 * Each emitter pops its operand(s) from the compilation operand stack,
 * pushes an i32 boolean result, and returns false on failure. */
#ifndef _JIT_EMIT_COMPARE_H_
#define _JIT_EMIT_COMPARE_H_

#include "../jit_compiler.h"
#include "../jit_frontend.h"

#ifdef __cplusplus
extern "C" {
#endif

bool
jit_compile_op_i32_compare(JitCompContext *cc, IntCond cond);

bool
jit_compile_op_i64_compare(JitCompContext *cc, IntCond cond);

bool
jit_compile_op_f32_compare(JitCompContext *cc, FloatCond cond);

bool
jit_compile_op_f64_compare(JitCompContext *cc, FloatCond cond);

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_COMPARE_H_ */

View File

@ -0,0 +1,47 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_const.h"
#include "../jit_frontend.h"
/* Emitters for the WASM *.const opcodes: wrap the immediate in a JIT
 * constant register and push it. The PUSH_* macros contain a hidden
 * error path that jumps to the fail label (no explicit goto appears in
 * these bodies). */

/* Push an i32 constant; returns false on failure. */
bool
jit_compile_op_i32_const(JitCompContext *cc, int32 i32_const)
{
    JitReg value = NEW_CONST(I32, i32_const);
    PUSH_I32(value);
    return true;
fail:
    return false;
}

/* Push an i64 constant; returns false on failure. */
bool
jit_compile_op_i64_const(JitCompContext *cc, int64 i64_const)
{
    JitReg value = NEW_CONST(I64, i64_const);
    PUSH_I64(value);
    return true;
fail:
    return false;
}

/* Push an f32 constant; returns false on failure. */
bool
jit_compile_op_f32_const(JitCompContext *cc, float32 f32_const)
{
    JitReg value = NEW_CONST(F32, f32_const);
    PUSH_F32(value);
    return true;
fail:
    return false;
}

/* Push an f64 constant; returns false on failure. */
bool
jit_compile_op_f64_const(JitCompContext *cc, float64 f64_const)
{
    JitReg value = NEW_CONST(F64, f64_const);
    PUSH_F64(value);
    return true;
fail:
    return false;
}

View File

@ -0,0 +1,31 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
/* Declarations of the Fast JIT emitters for WASM *.const opcodes.
 * Each emitter pushes the given immediate as a constant register and
 * returns false on failure. */
#ifndef _JIT_EMIT_CONST_H_
#define _JIT_EMIT_CONST_H_

#include "../jit_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

bool
jit_compile_op_i32_const(JitCompContext *cc, int32 i32_const);

bool
jit_compile_op_i64_const(JitCompContext *cc, int64 i64_const);

bool
jit_compile_op_f32_const(JitCompContext *cc, float32 f32_const);

bool
jit_compile_op_f64_const(JitCompContext *cc, float64 f64_const);

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_CONST_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,56 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
/* Declarations of the Fast JIT emitters for WASM control opcodes
 * (block/loop/if, else, end, br, br_if, br_table, return, unreachable).
 * Emitters that consume bytecode advance *p_frame_ip. */
#ifndef _JIT_EMIT_CONTROL_H_
#define _JIT_EMIT_CONTROL_H_

#include "../jit_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

/* merge_cmp_and_if: fuse a preceding comparison into the if's branch */
bool
jit_compile_op_block(JitCompContext *cc, uint8 **p_frame_ip,
                     uint8 *frame_ip_end, uint32 label_type, uint32 param_count,
                     uint8 *param_types, uint32 result_count,
                     uint8 *result_types, bool merge_cmp_and_if);

bool
jit_compile_op_else(JitCompContext *cc, uint8 **p_frame_ip);

bool
jit_compile_op_end(JitCompContext *cc, uint8 **p_frame_ip);

bool
jit_compile_op_br(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip);

/* merge_cmp_and_br_if: fuse a preceding comparison into the branch */
bool
jit_compile_op_br_if(JitCompContext *cc, uint32 br_depth,
                     bool merge_cmp_and_br_if, uint8 **p_frame_ip);

bool
jit_compile_op_br_table(JitCompContext *cc, uint32 *br_depths, uint32 br_count,
                        uint8 **p_frame_ip);

bool
jit_compile_op_return(JitCompContext *cc, uint8 **p_frame_ip);

bool
jit_compile_op_unreachable(JitCompContext *cc, uint8 **p_frame_ip);

/* Resume compilation at the next reachable block after dead code */
bool
jit_handle_next_reachable_block(JitCompContext *cc, uint8 **p_frame_ip);

#if WASM_ENABLE_THREAD_MGR != 0
bool
jit_check_suspend_flags(JitCompContext *cc);
#endif

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_CONTROL_H_ */

View File

@ -0,0 +1,660 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_conversion.h"
#include "jit_emit_exception.h"
#include "jit_emit_function.h"
#include "../jit_codegen.h"
#include "../jit_frontend.h"
/* Range-check boundaries for float/double -> integer truncation.
 * NOTE(review): these appear to be the representable float/double values
 * just outside each integer type's range, so "value <= MIN" or
 * "value >= MAX" detects overflow before truncating — confirm against
 * the WASM trunc semantics if modifying. */
#define F32_I32_S_MIN (-2147483904.0f)
#define F32_I32_S_MAX (2147483648.0f)
#define F32_I32_U_MIN (-1.0f)
#define F32_I32_U_MAX (4294967296.0f)
#define F32_I64_S_MIN (-9223373136366403584.0f)
#define F32_I64_S_MAX (9223372036854775808.0f)
#define F32_I64_U_MIN (-1.0f)
#define F32_I64_U_MAX (18446744073709551616.0f)

#define F64_I32_S_MIN (-2147483649.0)
#define F64_I32_S_MAX (2147483648.0)
#define F64_I32_U_MIN (-1.0)
#define F64_I32_U_MAX (4294967296.0)
#define F64_I64_S_MIN (-9223372036854777856.0)
#define F64_I64_S_MAX (9223372036854775808.0)
#define F64_I64_U_MIN (-1.0)
#define F64_I64_U_MAX (18446744073709551616.0)

/* Generators for the signatures of the native conversion helpers,
 * e.g. FP_TO_INT_SAT(float, int32, f32, i32) declares
 * "static int32 i32_trunc_f32_sat(float fp)". */
#define FP_TO_INT(f_ty, i_ty, f_nm, i_nm) \
    static i_ty i_nm##_trunc_##f_nm(f_ty fp)

#define INT_TO_FP(i_ty, f_ty, i_nm, f_nm) \
    static f_ty f_nm##_convert_##i_nm(i_ty i)

#define FP_TO_INT_SAT(f_ty, i_ty, f_nm, i_nm) \
    static i_ty i_nm##_trunc_##f_nm##_sat(f_ty fp)
/* Wrappers giving the isnan() macro a fixed function signature so it can
 * be called natively from JITed code. */
static int
local_isnan(double value)
{
    return isnan(value);
}

static int
local_isnanf(float value)
{
    return isnan(value);
}
/* Early-return helpers for the saturating conversion functions below.
 * Each returns the saturated value for its special case (NaN -> 0,
 * infinity/out-of-range -> the integer type's min or max). */
#define RETURN_IF_NANF(fp) \
    if (local_isnanf(fp)) { \
        return 0;           \
    }

#define RETURN_IF_NAN(fp) \
    if (local_isnan(fp)) { \
        return 0;          \
    }

#define RETURN_IF_INF(fp, i_min, i_max) \
    if (isinf(fp)) {                    \
        return fp < 0 ? i_min : i_max;  \
    }

#define RETURN_IF_MIN(fp, f_min, i_min) \
    if (fp <= f_min) {                  \
        return i_min;                   \
    }

#define RETURN_IF_MAX(fp, f_max, i_max) \
    if (fp >= f_max) {                  \
        return i_max;                   \
    }
/* Native conversion helpers called from JITed code.
 * The *_sat variants implement the WASM trunc_sat semantics
 * (NaN -> 0, clamp on overflow); the plain FP_TO_INT variants are used
 * only after an emitted range check, so they truncate directly. */

FP_TO_INT_SAT(float, int32, f32, i32)
{
    RETURN_IF_NANF(fp)
    RETURN_IF_INF(fp, INT32_MIN, INT32_MAX)
    RETURN_IF_MIN(fp, F32_I32_S_MIN, INT32_MIN)
    RETURN_IF_MAX(fp, F32_I32_S_MAX, INT32_MAX)
    return (int32)fp;
}

FP_TO_INT_SAT(float, uint32, f32, u32)
{
    RETURN_IF_NANF(fp)
    RETURN_IF_INF(fp, 0, UINT32_MAX)
    RETURN_IF_MIN(fp, F32_I32_U_MIN, 0)
    RETURN_IF_MAX(fp, F32_I32_U_MAX, UINT32_MAX)
    return (uint32)fp;
}

FP_TO_INT_SAT(double, int32, f64, i32)
{
    RETURN_IF_NAN(fp)
    RETURN_IF_INF(fp, INT32_MIN, INT32_MAX)
    RETURN_IF_MIN(fp, F64_I32_S_MIN, INT32_MIN)
    RETURN_IF_MAX(fp, F64_I32_S_MAX, INT32_MAX)
    return (int32)fp;
}

FP_TO_INT_SAT(double, uint32, f64, u32)
{
    RETURN_IF_NAN(fp)
    RETURN_IF_INF(fp, 0, UINT32_MAX)
    RETURN_IF_MIN(fp, F64_I32_U_MIN, 0)
    RETURN_IF_MAX(fp, F64_I32_U_MAX, UINT32_MAX)
    return (uint32)fp;
}

FP_TO_INT_SAT(float, int64, f32, i64)
{
    RETURN_IF_NANF(fp)
    RETURN_IF_INF(fp, INT64_MIN, INT64_MAX)
    RETURN_IF_MIN(fp, F32_I64_S_MIN, INT64_MIN)
    RETURN_IF_MAX(fp, F32_I64_S_MAX, INT64_MAX)
    return (int64)fp;
}

FP_TO_INT(float, uint64, f32, u64)
{
    return (uint64)fp;
}

FP_TO_INT_SAT(float, uint64, f32, u64)
{
    RETURN_IF_NANF(fp)
    RETURN_IF_INF(fp, 0, UINT64_MAX)
    RETURN_IF_MIN(fp, F32_I64_U_MIN, 0)
    RETURN_IF_MAX(fp, F32_I64_U_MAX, UINT64_MAX)
    return (uint64)fp;
}

FP_TO_INT_SAT(double, int64, f64, i64)
{
    /* Consistency fix: use the double NaN check here (the original used
     * RETURN_IF_NANF, which narrows the operand to float before testing,
     * unlike the f64 -> i32/u32 variants above). */
    RETURN_IF_NAN(fp)
    RETURN_IF_INF(fp, INT64_MIN, INT64_MAX)
    RETURN_IF_MIN(fp, F64_I64_S_MIN, INT64_MIN)
    RETURN_IF_MAX(fp, F64_I64_S_MAX, INT64_MAX)
    return (int64)fp;
}

FP_TO_INT(double, uint64, f64, u64)
{
    return (uint64)fp;
}

FP_TO_INT_SAT(double, uint64, f64, u64)
{
    /* Consistency fix: double NaN check (was RETURN_IF_NANF) */
    RETURN_IF_NAN(fp)
    RETURN_IF_INF(fp, 0, UINT64_MAX)
    RETURN_IF_MIN(fp, F64_I64_U_MIN, 0)
    RETURN_IF_MAX(fp, F64_I64_U_MAX, UINT64_MAX)
    return (uint64)fp;
}

/* u64 -> float/double conversions have no direct IR opcode, so they are
 * performed by native call */
INT_TO_FP(uint64, float, u64, f32)
{
    return (float)i;
}

INT_TO_FP(uint64, double, u64, f64)
{
    return (double)i;
}
/* Compile i32.wrap_i64: truncate the top i64 operand to its low 32 bits
 * and push the i32 result. Returns false on failure (POP/PUSH macros
 * jump to the fail label on error). */
bool
jit_compile_op_i32_wrap_i64(JitCompContext *cc)
{
    JitReg num, res;

    POP_I64(num);

    res = jit_cc_new_reg_I32(cc);
    GEN_INSN(I64TOI32, res, num);
    PUSH_I32(res);

    return true;
fail:
    return false;
}
/* Emit the runtime checks required by the non-saturating trunc opcodes:
 * throw INVALID_CONVERSION_TO_INTEGER if the float value is NaN, and
 * INTEGER_OVERFLOW if it lies outside (min_fp, max_fp). The NaN test is
 * done via a native call to local_isnan/local_isnanf because the IR has
 * no NaN-test instruction here. Returns false on emission failure. */
static bool
jit_compile_check_value_range(JitCompContext *cc, JitReg value, JitReg min_fp,
                              JitReg max_fp)
{
    JitReg nan_ret = jit_cc_new_reg_I32(cc);
    JitRegKind kind = jit_reg_kind(value);
    bool emit_ret = false;

    bh_assert(JIT_REG_KIND_F32 == kind || JIT_REG_KIND_F64 == kind);

    /* If value is NaN, throw exception */
    if (JIT_REG_KIND_F32 == kind)
        emit_ret = jit_emit_callnative(cc, local_isnanf, nan_ret, &value, 1);
    else
        emit_ret = jit_emit_callnative(cc, local_isnan, nan_ret, &value, 1);
    if (!emit_ret)
        goto fail;

    GEN_INSN(CMP, cc->cmp_reg, nan_ret, NEW_CONST(I32, 1));
    if (!jit_emit_exception(cc, JIT_EXCE_INVALID_CONVERSION_TO_INTEGER,
                            JIT_OP_BEQ, cc->cmp_reg, NULL))
        goto fail;

    /* If value is out of integer range, throw exception */
    GEN_INSN(CMP, cc->cmp_reg, min_fp, value);
    if (!jit_emit_exception(cc, JIT_EXCE_INTEGER_OVERFLOW, JIT_OP_BGES,
                            cc->cmp_reg, NULL))
        goto fail;

    GEN_INSN(CMP, cc->cmp_reg, value, max_fp);
    if (!jit_emit_exception(cc, JIT_EXCE_INTEGER_OVERFLOW, JIT_OP_BGES,
                            cc->cmp_reg, NULL))
        goto fail;

    return true;
fail:
    return false;
}
/* Compile i32.trunc_f32_s/u and i32.trunc_sat_f32_s/u.
 * Non-saturating: emit NaN/range checks, then an F32TOI32/F32TOU32 IR op.
 * Saturating: lower to a native call to i32/u32_trunc_f32_sat.
 * Returns false on failure. */
bool
jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool sat)
{
    JitReg value, res;

    POP_F32(value);

    res = jit_cc_new_reg_I32(cc);
    if (!sat) {
        JitReg min_fp = NEW_CONST(F32, sign ? F32_I32_S_MIN : F32_I32_U_MIN);
        JitReg max_fp = NEW_CONST(F32, sign ? F32_I32_S_MAX : F32_I32_U_MAX);

        if (!jit_compile_check_value_range(cc, value, min_fp, max_fp))
            goto fail;

        if (sign)
            GEN_INSN(F32TOI32, res, value);
        else
            GEN_INSN(F32TOU32, res, value);
    }
    else {
        if (!jit_emit_callnative(cc,
                                 sign ? (void *)i32_trunc_f32_sat
                                      : (void *)u32_trunc_f32_sat,
                                 res, &value, 1))
            goto fail;
    }

    PUSH_I32(res);
    return true;
fail:
    return false;
}
/* Compile i32.trunc_f64_s/u and i32.trunc_sat_f64_s/u; f64 counterpart
 * of jit_compile_op_i32_trunc_f32. Returns false on failure. */
bool
jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool sat)
{
    JitReg value, res;

    POP_F64(value);

    res = jit_cc_new_reg_I32(cc);
    if (!sat) {
        JitReg min_fp = NEW_CONST(F64, sign ? F64_I32_S_MIN : F64_I32_U_MIN);
        JitReg max_fp = NEW_CONST(F64, sign ? F64_I32_S_MAX : F64_I32_U_MAX);

        if (!jit_compile_check_value_range(cc, value, min_fp, max_fp))
            goto fail;

        if (sign)
            GEN_INSN(F64TOI32, res, value);
        else
            GEN_INSN(F64TOU32, res, value);
    }
    else {
        if (!jit_emit_callnative(cc,
                                 sign ? (void *)i32_trunc_f64_sat
                                      : (void *)u32_trunc_f64_sat,
                                 res, &value, 1))
            goto fail;
    }

    PUSH_I32(res);
    return true;
fail:
    return false;
}
/* Compile i64.extend_i32_s/u: sign- or zero-extend the top i32 operand
 * to i64. Returns false on failure. */
bool
jit_compile_op_i64_extend_i32(JitCompContext *cc, bool sign)
{
    JitReg num, res;

    POP_I32(num);

    res = jit_cc_new_reg_I64(cc);
    if (sign)
        GEN_INSN(I32TOI64, res, num);
    else
        GEN_INSN(U32TOI64, res, num);
    PUSH_I64(res);

    return true;
fail:
    return false;
}
/* Compile i64.extend8_s/extend16_s/extend32_s: sign-extend the low
 * `bitwidth` bits of the top i64 operand, via a narrow-then-widen pair
 * of IR ops through a 32-bit temporary. Returns false on failure. */
bool
jit_compile_op_i64_extend_i64(JitCompContext *cc, int8 bitwidth)
{
    JitReg value, tmp, res;

    POP_I64(value);

    tmp = jit_cc_new_reg_I32(cc);
    res = jit_cc_new_reg_I64(cc);

    switch (bitwidth) {
        case 8:
        {
            GEN_INSN(I64TOI8, tmp, value);
            GEN_INSN(I8TOI64, res, tmp);
            break;
        }
        case 16:
        {
            GEN_INSN(I64TOI16, tmp, value);
            GEN_INSN(I16TOI64, res, tmp);
            break;
        }
        case 32:
        {
            GEN_INSN(I64TOI32, tmp, value);
            GEN_INSN(I32TOI64, res, tmp);
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }

    PUSH_I64(res);
    return true;
fail:
    return false;
}
/* Compile i32.extend8_s/extend16_s: sign-extend the low `bitwidth` bits
 * of the top i32 operand via a narrow-then-widen pair of IR ops.
 * Returns false on failure. */
bool
jit_compile_op_i32_extend_i32(JitCompContext *cc, int8 bitwidth)
{
    JitReg value, tmp, res;

    POP_I32(value);

    tmp = jit_cc_new_reg_I32(cc);
    res = jit_cc_new_reg_I32(cc);

    switch (bitwidth) {
        case 8:
        {
            GEN_INSN(I32TOI8, tmp, value);
            GEN_INSN(I8TOI32, res, tmp);
            break;
        }
        case 16:
        {
            GEN_INSN(I32TOI16, tmp, value);
            GEN_INSN(I16TOI32, res, tmp);
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }

    PUSH_I32(res);
    return true;
fail:
    return false;
}
/* Compile i64.trunc_f32_s/u and i64.trunc_sat_f32_s/u.
 * Signed non-saturating uses the F32TOI64 IR op; the unsigned case has
 * no IR op and is lowered to a native call to u64_trunc_f32 (after the
 * range check). Saturating cases call the *_sat helpers natively.
 * Returns false on failure. */
bool
jit_compile_op_i64_trunc_f32(JitCompContext *cc, bool sign, bool sat)
{
    JitReg value, res;

    POP_F32(value);

    res = jit_cc_new_reg_I64(cc);
    if (!sat) {
        JitReg min_fp = NEW_CONST(F32, sign ? F32_I64_S_MIN : F32_I64_U_MIN);
        JitReg max_fp = NEW_CONST(F32, sign ? F32_I64_S_MAX : F32_I64_U_MAX);

        if (!jit_compile_check_value_range(cc, value, min_fp, max_fp))
            goto fail;

        if (sign) {
            GEN_INSN(F32TOI64, res, value);
        }
        else {
            if (!jit_emit_callnative(cc, u64_trunc_f32, res, &value, 1))
                goto fail;
        }
    }
    else {
        if (!jit_emit_callnative(cc,
                                 sign ? (void *)i64_trunc_f32_sat
                                      : (void *)u64_trunc_f32_sat,
                                 res, &value, 1))
            goto fail;
    }

    PUSH_I64(res);
    return true;
fail:
    return false;
}
/* Compile i64.trunc_f64_s/u and i64.trunc_sat_f64_s/u; f64 counterpart
 * of jit_compile_op_i64_trunc_f32. Returns false on failure. */
bool
jit_compile_op_i64_trunc_f64(JitCompContext *cc, bool sign, bool sat)
{
    JitReg value, res;

    POP_F64(value);

    res = jit_cc_new_reg_I64(cc);
    if (!sat) {
        JitReg min_fp = NEW_CONST(F64, sign ? F64_I64_S_MIN : F64_I64_U_MIN);
        JitReg max_fp = NEW_CONST(F64, sign ? F64_I64_S_MAX : F64_I64_U_MAX);

        if (!jit_compile_check_value_range(cc, value, min_fp, max_fp))
            goto fail;

        if (sign) {
            GEN_INSN(F64TOI64, res, value);
        }
        else {
            if (!jit_emit_callnative(cc, u64_trunc_f64, res, &value, 1))
                goto fail;
        }
    }
    else {
        if (!jit_emit_callnative(cc,
                                 sign ? (void *)i64_trunc_f64_sat
                                      : (void *)u64_trunc_f64_sat,
                                 res, &value, 1))
            goto fail;
    }

    PUSH_I64(res);
    return true;
fail:
    return false;
}
/* Compile f32.convert_i32_s/u: convert the top i32 operand to f32.
 * Returns false on failure. */
bool
jit_compile_op_f32_convert_i32(JitCompContext *cc, bool sign)
{
    JitReg value, res;

    POP_I32(value);

    res = jit_cc_new_reg_F32(cc);
    if (sign) {
        GEN_INSN(I32TOF32, res, value);
    }
    else {
        GEN_INSN(U32TOF32, res, value);
    }
    PUSH_F32(res);

    return true;
fail:
    return false;
}

/* Compile f32.convert_i64_s/u. The unsigned case has no IR op and is
 * lowered to a native call to f32_convert_u64. Returns false on
 * failure. */
bool
jit_compile_op_f32_convert_i64(JitCompContext *cc, bool sign)
{
    JitReg value, res;

    POP_I64(value);

    res = jit_cc_new_reg_F32(cc);
    if (sign) {
        GEN_INSN(I64TOF32, res, value);
    }
    else {
        if (!jit_emit_callnative(cc, f32_convert_u64, res, &value, 1)) {
            goto fail;
        }
    }
    PUSH_F32(res);

    return true;
fail:
    return false;
}
/* Compile f32.demote_f64: narrow the top f64 operand to f32.
 * Returns false on failure. */
bool
jit_compile_op_f32_demote_f64(JitCompContext *cc)
{
    JitReg value, res;

    POP_F64(value);

    res = jit_cc_new_reg_F32(cc);
    GEN_INSN(F64TOF32, res, value);
    PUSH_F32(res);

    return true;
fail:
    return false;
}

/* Compile f64.convert_i32_s/u. Returns false on failure. */
bool
jit_compile_op_f64_convert_i32(JitCompContext *cc, bool sign)
{
    JitReg value, res;

    POP_I32(value);

    res = jit_cc_new_reg_F64(cc);
    if (sign)
        GEN_INSN(I32TOF64, res, value);
    else
        GEN_INSN(U32TOF64, res, value);
    PUSH_F64(res);

    return true;
fail:
    return false;
}

/* Compile f64.convert_i64_s/u. The unsigned case has no IR op and is
 * lowered to a native call to f64_convert_u64. Returns false on
 * failure. */
bool
jit_compile_op_f64_convert_i64(JitCompContext *cc, bool sign)
{
    JitReg value, res;

    POP_I64(value);

    res = jit_cc_new_reg_F64(cc);
    if (sign) {
        GEN_INSN(I64TOF64, res, value);
    }
    else {
        if (!jit_emit_callnative(cc, f64_convert_u64, res, &value, 1)) {
            goto fail;
        }
    }
    PUSH_F64(res);

    return true;
fail:
    return false;
}

/* Compile f64.promote_f32: widen the top f32 operand to f64.
 * Returns false on failure. */
bool
jit_compile_op_f64_promote_f32(JitCompContext *cc)
{
    JitReg value, res;

    POP_F32(value);

    res = jit_cc_new_reg_F64(cc);
    GEN_INSN(F32TOF64, res, value);
    PUSH_F64(res);

    return true;
fail:
    return false;
}
/* Reinterpret opcodes: bit-cast between same-width integer and float
 * operands without changing the bit pattern. Each returns false on
 * failure (POP/PUSH macros jump to the fail label on error). */

/* Compile i64.reinterpret_f64. */
bool
jit_compile_op_i64_reinterpret_f64(JitCompContext *cc)
{
    JitReg value, res;

    POP_F64(value);

    res = jit_cc_new_reg_I64(cc);
    GEN_INSN(F64CASTI64, res, value);
    PUSH_I64(res);

    return true;
fail:
    return false;
}

/* Compile i32.reinterpret_f32. */
bool
jit_compile_op_i32_reinterpret_f32(JitCompContext *cc)
{
    JitReg value, res;

    POP_F32(value);

    res = jit_cc_new_reg_I32(cc);
    GEN_INSN(F32CASTI32, res, value);
    PUSH_I32(res);

    return true;
fail:
    return false;
}

/* Compile f64.reinterpret_i64. */
bool
jit_compile_op_f64_reinterpret_i64(JitCompContext *cc)
{
    JitReg value, res;

    POP_I64(value);

    res = jit_cc_new_reg_F64(cc);
    GEN_INSN(I64CASTF64, res, value);
    PUSH_F64(res);

    return true;
fail:
    return false;
}

/* Compile f32.reinterpret_i32. */
bool
jit_compile_op_f32_reinterpret_i32(JitCompContext *cc)
{
    JitReg value, res;

    POP_I32(value);

    res = jit_cc_new_reg_F32(cc);
    GEN_INSN(I32CASTF32, res, value);
    PUSH_F32(res);

    return true;
fail:
    return false;
}

View File

@ -0,0 +1,73 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_CONVERSION_H_
#define _JIT_EMIT_CONVERSION_H_
#include "../jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Compilers for the wasm numeric conversion opcodes.  Each function
 * pops its operand(s) from the compile-time operand stack, emits the
 * corresponding IR, pushes the result, and returns false on failure. */

/* i32.wrap_i64 */
bool
jit_compile_op_i32_wrap_i64(JitCompContext *cc);

/* i32.trunc_f32_s/u; `sat` selects the saturating (trunc_sat) form */
bool
jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool sat);

/* i32.trunc_f64_s/u; `sat` selects the saturating (trunc_sat) form */
bool
jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool sat);

/* i64.extend_i32_s/u */
bool
jit_compile_op_i64_extend_i32(JitCompContext *comp_ctx, bool sign);

/* i64.extend8_s / i64.extend16_s / i64.extend32_s (bitwidth = 8/16/32) */
bool
jit_compile_op_i64_extend_i64(JitCompContext *comp_ctx, int8 bitwidth);

/* i32.extend8_s / i32.extend16_s (bitwidth = 8/16) */
bool
jit_compile_op_i32_extend_i32(JitCompContext *comp_ctx, int8 bitwidth);

/* i64.trunc_f32_s/u; `sat` selects the saturating form */
bool
jit_compile_op_i64_trunc_f32(JitCompContext *cc, bool sign, bool sat);

/* i64.trunc_f64_s/u; `sat` selects the saturating form */
bool
jit_compile_op_i64_trunc_f64(JitCompContext *cc, bool sign, bool sat);

/* f32.convert_i32_s/u */
bool
jit_compile_op_f32_convert_i32(JitCompContext *comp_ctx, bool sign);

/* f32.convert_i64_s/u */
bool
jit_compile_op_f32_convert_i64(JitCompContext *comp_ctx, bool sign);

/* f32.demote_f64 */
bool
jit_compile_op_f32_demote_f64(JitCompContext *comp_ctx);

/* f64.convert_i32_s/u */
bool
jit_compile_op_f64_convert_i32(JitCompContext *comp_ctx, bool sign);

/* f64.convert_i64_s/u */
bool
jit_compile_op_f64_convert_i64(JitCompContext *comp_ctx, bool sign);

/* f64.promote_f32 */
bool
jit_compile_op_f64_promote_f32(JitCompContext *comp_ctx);

/* i64.reinterpret_f64 (bit cast) */
bool
jit_compile_op_i64_reinterpret_f64(JitCompContext *comp_ctx);

/* i32.reinterpret_f32 (bit cast) */
bool
jit_compile_op_i32_reinterpret_f32(JitCompContext *comp_ctx);

/* f64.reinterpret_i64 (bit cast) */
bool
jit_compile_op_f64_reinterpret_i64(JitCompContext *comp_ctx);

/* f32.reinterpret_i32 (bit cast) */
bool
jit_compile_op_f32_reinterpret_i32(JitCompContext *comp_ctx);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_CONVERSION_H_ */

View File

@ -0,0 +1,78 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_exception.h"
#include "../jit_frontend.h"
/* Emit an instruction that transfers control to the basic block which
 * throws `exception_id`.
 *
 * If `jit_opcode` is one of the conditional branches in
 * [JIT_OP_BEQ, JIT_OP_BLEU], `cond_br_if` must be cc->cmp_reg and the
 * branch falls through to `cond_br_else_block` (or to the next insn if
 * NULL) when the condition does not hold.  If it is JIT_OP_JMP an
 * unconditional jump is emitted.  The throw-block target label is not
 * known yet, so operand 0 is used as a placeholder and the emitted
 * instruction is recorded in cc->incoming_insns_for_exec_bbs so it can
 * be patched when the exception block is created.
 *
 * Returns false (with cc's last error set) on failure. */
bool
jit_emit_exception(JitCompContext *cc, int32 exception_id, uint8 jit_opcode,
                   JitReg cond_br_if, JitBasicBlock *cond_br_else_block)
{
    JitInsn *insn = NULL;
    JitIncomingInsn *incoming_insn;
    JitReg else_label;

    bh_assert(exception_id < JIT_EXCE_NUM);

    if (jit_opcode >= JIT_OP_BEQ && jit_opcode <= JIT_OP_BLEU) {
        /* Conditional branches test the comparison-result register. */
        bh_assert(cond_br_if == cc->cmp_reg);
        else_label =
            cond_br_else_block ? jit_basic_block_label(cond_br_else_block) : 0;
        /* Operand 1 (the taken-target label) is the 0 placeholder,
           patched later via the incoming-insn list. */
        switch (jit_opcode) {
            case JIT_OP_BEQ:
                insn = GEN_INSN(BEQ, cond_br_if, 0, else_label);
                break;
            case JIT_OP_BNE:
                insn = GEN_INSN(BNE, cond_br_if, 0, else_label);
                break;
            case JIT_OP_BGTS:
                insn = GEN_INSN(BGTS, cond_br_if, 0, else_label);
                break;
            case JIT_OP_BGES:
                insn = GEN_INSN(BGES, cond_br_if, 0, else_label);
                break;
            case JIT_OP_BLTS:
                insn = GEN_INSN(BLTS, cond_br_if, 0, else_label);
                break;
            case JIT_OP_BLES:
                insn = GEN_INSN(BLES, cond_br_if, 0, else_label);
                break;
            case JIT_OP_BGTU:
                insn = GEN_INSN(BGTU, cond_br_if, 0, else_label);
                break;
            case JIT_OP_BGEU:
                insn = GEN_INSN(BGEU, cond_br_if, 0, else_label);
                break;
            case JIT_OP_BLTU:
                insn = GEN_INSN(BLTU, cond_br_if, 0, else_label);
                break;
            case JIT_OP_BLEU:
                insn = GEN_INSN(BLEU, cond_br_if, 0, else_label);
                break;
        }
        if (!insn) {
            jit_set_last_error(cc, "generate cond br insn failed");
            return false;
        }
    }
    else if (jit_opcode == JIT_OP_JMP) {
        /* Unconditional jump to the throw block (label patched later). */
        insn = GEN_INSN(JMP, 0);
        if (!insn) {
            jit_set_last_error(cc, "generate jmp insn failed");
            return false;
        }
    }

    /* Record the emitted branch so the throw block for exception_id can
       back-patch its target.  NOTE(review): if jit_opcode is neither a
       branch nor JMP, `insn` stays NULL and a NULL entry is recorded —
       presumably callers never pass such an opcode; confirm. */
    incoming_insn = jit_calloc(sizeof(JitIncomingInsn));
    if (!incoming_insn) {
        jit_set_last_error(cc, "allocate memory failed");
        return false;
    }
    incoming_insn->insn = insn;
    incoming_insn->next = cc->incoming_insns_for_exec_bbs[exception_id];
    cc->incoming_insns_for_exec_bbs[exception_id] = incoming_insn;
    return true;
}

View File

@ -0,0 +1,23 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_EXCEPTION_H_
#define _JIT_EMIT_EXCEPTION_H_
#include "../jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Emit a (conditional) branch to the basic block that throws
 * `exception_id`; see jit_emit_exception.c for details.  Returns false
 * on failure. */
bool
jit_emit_exception(JitCompContext *cc, int32 exception_id, uint8 jit_opcode,
                   JitReg cond_br_if, JitBasicBlock *cond_br_else_block);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_EXCEPTION_H_ */

View File

@ -0,0 +1,535 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_function.h"
#include "jit_emit_exception.h"
#include "../jit_frontend.h"
#include "../jit_codegen.h"
#include "../../interpreter/wasm_runtime.h"
extern bool
jit_invoke_native(WASMExecEnv *exec_env, uint32 func_idx,
WASMInterpFrame *prev_frame);
/* Prepare parameters for the function to call */
/* Prepare parameters for the function to call: pop the arguments from
 * the compile-time operand stack (last parameter first) and store them
 * into the outgoing-argument area of the callee frame, then commit
 * sp/ip so the callee sees a consistent frame.  Returns false on
 * failure. */
static bool
pre_call(JitCompContext *cc, const WASMType *func_type)
{
    JitReg value;
    uint32 i, outs_off;
    /* Start just past the last argument cell of the callee frame and
       walk backwards as arguments are popped in reverse order. */
    outs_off =
        cc->total_frame_size + offsetof(WASMInterpFrame, lp)
        + wasm_get_cell_num(func_type->types, func_type->param_count) * 4;
    for (i = 0; i < func_type->param_count; i++) {
        /* Pop params in reverse: the top of stack is the last param. */
        switch (func_type->types[func_type->param_count - 1 - i]) {
            case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
            case VALUE_TYPE_EXTERNREF:
            case VALUE_TYPE_FUNCREF:
#endif
                POP_I32(value);
                outs_off -= 4;
                GEN_INSN(STI32, value, cc->fp_reg, NEW_CONST(I32, outs_off));
                break;
            case VALUE_TYPE_I64:
                POP_I64(value);
                outs_off -= 8;
                GEN_INSN(STI64, value, cc->fp_reg, NEW_CONST(I32, outs_off));
                break;
            case VALUE_TYPE_F32:
                POP_F32(value);
                outs_off -= 4;
                GEN_INSN(STF32, value, cc->fp_reg, NEW_CONST(I32, outs_off));
                break;
            case VALUE_TYPE_F64:
                POP_F64(value);
                outs_off -= 8;
                GEN_INSN(STF64, value, cc->fp_reg, NEW_CONST(I32, outs_off));
                break;
            default:
                bh_assert(0);
                goto fail;
        }
    }
    /* Commit sp as the callee may use it to store the results */
    gen_commit_sp_ip(cc->jit_frame);
    return true;
fail:
    return false;
}
/* Push results */
/* Push results: after a call returns, push the callee's results onto
 * the compile-time operand stack.  The first result may already live
 * in `first_res` (the call's return register); any remaining results
 * are loaded back from the frame at the cell index tracked by `n`.
 * Returns false on failure. */
static bool
post_return(JitCompContext *cc, const WASMType *func_type, JitReg first_res)
{
    uint32 i, n;
    JitReg value;
    /* n = current top-of-stack cell index within the frame locals. */
    n = cc->jit_frame->sp - cc->jit_frame->lp;
    for (i = 0; i < func_type->result_count; i++) {
        switch (func_type->types[func_type->param_count + i]) {
            case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
            case VALUE_TYPE_EXTERNREF:
            case VALUE_TYPE_FUNCREF:
#endif
                if (i == 0 && first_res) {
                    /* First result comes straight from the return reg. */
                    bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_I32);
                    value = first_res;
                }
                else {
                    value = jit_cc_new_reg_I32(cc);
                    GEN_INSN(LDI32, value, cc->fp_reg,
                             NEW_CONST(I32, offset_of_local(n)));
                }
                PUSH_I32(value);
                n++;
                break;
            case VALUE_TYPE_I64:
                if (i == 0 && first_res) {
                    bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_I64);
                    value = first_res;
                }
                else {
                    value = jit_cc_new_reg_I64(cc);
                    GEN_INSN(LDI64, value, cc->fp_reg,
                             NEW_CONST(I32, offset_of_local(n)));
                }
                PUSH_I64(value);
                n += 2; /* i64 occupies two 4-byte cells */
                break;
            case VALUE_TYPE_F32:
                if (i == 0 && first_res) {
                    bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_F32);
                    value = first_res;
                }
                else {
                    value = jit_cc_new_reg_F32(cc);
                    GEN_INSN(LDF32, value, cc->fp_reg,
                             NEW_CONST(I32, offset_of_local(n)));
                }
                PUSH_F32(value);
                n++;
                break;
            case VALUE_TYPE_F64:
                if (i == 0 && first_res) {
                    bh_assert(jit_reg_kind(first_res) == JIT_REG_KIND_F64);
                    value = first_res;
                }
                else {
                    value = jit_cc_new_reg_F64(cc);
                    GEN_INSN(LDF64, value, cc->fp_reg,
                             NEW_CONST(I32, offset_of_local(n)));
                }
                PUSH_F64(value);
                n += 2; /* f64 occupies two 4-byte cells */
                break;
            default:
                bh_assert(0);
                goto fail;
        }
    }
    /* Update the committed_sp as the callee has updated the frame sp */
    cc->jit_frame->committed_sp = cc->jit_frame->sp;
    return true;
fail:
    return false;
}
/* Translate a wasm `call` opcode.
 *
 * Imported functions are invoked through the native trampoline
 * jit_invoke_native (which reads arguments from the callee frame);
 * defined functions are invoked directly through their jitted code
 * pointer with CALLBC.  In both cases the arguments are spilled by
 * pre_call() and the results are pushed back by post_return().
 *
 * Fix vs. original: the import-call error paths used bare
 * `return false;` instead of this function's `goto fail;` convention
 * (behavior is identical; the exits are now consistent).  The two
 * separate `func_idx` vs. import_function_count checks were merged
 * into one if/else.
 *
 * Returns false on compile failure.  `tail_call` is currently ignored. */
bool
jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
{
    WASMModule *wasm_module = cc->cur_wasm_module;
    WASMFunctionImport *func_import;
    WASMFunction *func;
    WASMType *func_type;
    JitFrame *jit_frame = cc->jit_frame;
    JitReg native_ret;
    JitReg fast_jit_func_ptrs, jitted_code = 0;
    uint32 jitted_func_idx;

    if (func_idx < wasm_module->import_function_count) {
        /* Imported function: only its type is needed here. */
        func_import = &wasm_module->import_functions[func_idx].u.function;
        func_type = func_import->func_type;
    }
    else {
        /* Defined function: load its jitted code pointer,
           jitted_code = func_ptrs[func_idx - import_function_count] */
        fast_jit_func_ptrs = get_fast_jit_func_ptrs_reg(jit_frame);
        jitted_code = jit_cc_new_reg_ptr(cc);
        jitted_func_idx = func_idx - wasm_module->import_function_count;
        GEN_INSN(LDPTR, jitted_code, fast_jit_func_ptrs,
                 NEW_CONST(I32, (uint32)sizeof(void *) * jitted_func_idx));
        func = wasm_module->functions[jitted_func_idx];
        func_type = func->func_type;
    }

    /* Spill the arguments into the callee frame and commit sp/ip. */
    if (!pre_call(cc, func_type)) {
        goto fail;
    }

    if (func_idx < wasm_module->import_function_count) {
        JitReg arg_regs[3];

        native_ret = jit_cc_new_reg_I32(cc);
        arg_regs[0] = cc->exec_env_reg;
        arg_regs[1] = NEW_CONST(I32, func_idx);
        arg_regs[2] = cc->fp_reg;
        if (!jit_emit_callnative(cc, jit_invoke_native, native_ret, arg_regs,
                                 3)) {
            goto fail;
        }

        /* Convert bool to uint32 */
        GEN_INSN(AND, native_ret, native_ret, NEW_CONST(I32, 0xFF));
        /* Check whether there is exception thrown */
        GEN_INSN(CMP, cc->cmp_reg, native_ret, NEW_CONST(I32, 0));
        if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BEQ,
                                cc->cmp_reg, NULL)) {
            goto fail;
        }

        if (!post_return(cc, func_type, 0)) {
            goto fail;
        }
    }
    else {
        JitReg res = 0;

        /* Allocate a register of the right kind for the first result;
           remaining results are read back from the frame. */
        if (func_type->result_count > 0) {
            switch (func_type->types[func_type->param_count]) {
                case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
                case VALUE_TYPE_EXTERNREF:
                case VALUE_TYPE_FUNCREF:
#endif
                    res = jit_cc_new_reg_I32(cc);
                    break;
                case VALUE_TYPE_I64:
                    res = jit_cc_new_reg_I64(cc);
                    break;
                case VALUE_TYPE_F32:
                    res = jit_cc_new_reg_F32(cc);
                    break;
                case VALUE_TYPE_F64:
                    res = jit_cc_new_reg_F64(cc);
                    break;
                default:
                    bh_assert(0);
                    goto fail;
            }
        }

        GEN_INSN(CALLBC, res, 0, jitted_code);

        if (!post_return(cc, func_type, res)) {
            goto fail;
        }
    }

    /* Clear part of memory regs and table regs as their values
       may be changed in the function call */
    if (cc->cur_wasm_module->possible_memory_grow)
        clear_memory_regs(jit_frame);
    clear_table_regs(jit_frame);

    /* Ignore tail call currently */
    (void)tail_call;
    return true;
fail:
    return false;
}
/* Compute the argv pointer handed to the indirect-call helper.
 * The argument vector reuses the stack area of the next frame:
 * argv = fp + total_frame_size + offsetof(WASMInterpFrame, lp). */
static JitReg
pack_argv(JitCompContext *cc)
{
    JitReg argv_reg;
    uint32 base;

    base = cc->total_frame_size + offsetof(WASMInterpFrame, lp);
    argv_reg = jit_cc_new_reg_ptr(cc);
    GEN_INSN(ADD, argv_reg, cc->fp_reg, NEW_CONST(PTR, base));
    return argv_reg;
}
/* Push the results that the indirect-call helper left in `argv` onto
 * the compile-time operand stack, in result order.  `offset_by_cell`
 * advances by the byte size of each result.  Returns false on
 * failure. */
static bool
unpack_argv(JitCompContext *cc, const WASMType *func_type, JitReg argv)
{
    uint32 i, offset_by_cell = 0;
    JitReg value;
    /* push results in argv to stack */
    for (i = 0; i < func_type->result_count; i++) {
        switch (func_type->types[func_type->param_count + i]) {
            case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
            case VALUE_TYPE_EXTERNREF:
            case VALUE_TYPE_FUNCREF:
#endif
            {
                value = jit_cc_new_reg_I32(cc);
                GEN_INSN(LDI32, value, argv, NEW_CONST(I32, offset_by_cell));
                PUSH_I32(value);
                offset_by_cell += 4;
                break;
            }
            case VALUE_TYPE_I64:
            {
                value = jit_cc_new_reg_I64(cc);
                GEN_INSN(LDI64, value, argv, NEW_CONST(I32, offset_by_cell));
                PUSH_I64(value);
                offset_by_cell += 8;
                break;
            }
            case VALUE_TYPE_F32:
            {
                value = jit_cc_new_reg_F32(cc);
                GEN_INSN(LDF32, value, argv, NEW_CONST(I32, offset_by_cell));
                PUSH_F32(value);
                offset_by_cell += 4;
                break;
            }
            case VALUE_TYPE_F64:
            {
                value = jit_cc_new_reg_F64(cc);
                GEN_INSN(LDF64, value, argv, NEW_CONST(I32, offset_by_cell));
                PUSH_F64(value);
                offset_by_cell += 8;
                break;
            }
            default:
            {
                bh_assert(0);
                goto fail;
            }
        }
    }
    /* Update the committed_sp as the callee has updated the frame sp */
    cc->jit_frame->committed_sp = cc->jit_frame->sp;
    return true;
fail:
    return false;
}
/* Translate `call_indirect`: pop the table element index, spill the
 * arguments, then call the runtime helper jit_call_indirect which
 * resolves the table entry, checks its type against `type_idx`, and
 * performs the call with the packed argv area.  Results are unpacked
 * from argv back onto the operand stack.
 *
 * Fix vs. original: two error paths used bare `return false;` instead
 * of this function's `goto fail;` convention (behavior identical; the
 * exits are now consistent).
 *
 * Returns false on compile failure. */
bool
jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
                             uint32 tbl_idx)
{
    JitReg elem_idx, native_ret, argv, arg_regs[6];
    WASMType *func_type;

    POP_I32(elem_idx);

    func_type = cc->cur_wasm_module->types[type_idx];
    /* Spill arguments into the next frame's stack area. */
    if (!pre_call(cc, func_type)) {
        goto fail;
    }

    /* argv points at the reused next-frame stack area. */
    argv = pack_argv(cc);

    native_ret = jit_cc_new_reg_I32(cc);
    arg_regs[0] = cc->exec_env_reg;
    arg_regs[1] = NEW_CONST(I32, tbl_idx);
    arg_regs[2] = elem_idx;
    arg_regs[3] = NEW_CONST(I32, type_idx);
    arg_regs[4] = NEW_CONST(I32, func_type->param_cell_num);
    arg_regs[5] = argv;
    if (!jit_emit_callnative(cc, jit_call_indirect, native_ret, arg_regs, 6)) {
        goto fail;
    }

    /* Convert bool to uint32 */
    GEN_INSN(AND, native_ret, native_ret, NEW_CONST(I32, 0xFF));
    /* Check whether there is exception thrown */
    GEN_INSN(CMP, cc->cmp_reg, native_ret, NEW_CONST(I32, 0));
    if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BEQ,
                            cc->cmp_reg, NULL)) {
        goto fail;
    }

    if (!unpack_argv(cc, func_type, argv)) {
        goto fail;
    }

    /* Clear part of memory regs and table regs as their values
       may be changed in the function call */
    if (cc->cur_wasm_module->possible_memory_grow)
        clear_memory_regs(cc->jit_frame);
    clear_table_regs(cc->jit_frame);
    return true;
fail:
    return false;
}
#if WASM_ENABLE_REF_TYPES != 0
/* Translate `ref.null`: push the NULL_REF constant.  The `fail` label
 * is reachable only through the PUSH_I32 macro. */
bool
jit_compile_op_ref_null(JitCompContext *cc, uint32 ref_type)
{
    PUSH_I32(NEW_CONST(I32, NULL_REF));
    /* ref_type is not needed: both funcref and externref nulls are
       represented by the same NULL_REF value. */
    (void)ref_type;
    return true;
fail:
    return false;
}
/* Translate `ref.is_null`: pop a reference and push 1 if it equals
 * NULL_REF, otherwise 0. */
bool
jit_compile_op_ref_is_null(JitCompContext *cc)
{
    JitReg val, is_null;

    POP_I32(val);

    /* is_null = (val == NULL_REF) ? 1 : 0 */
    GEN_INSN(CMP, cc->cmp_reg, val, NEW_CONST(I32, NULL_REF));
    is_null = jit_cc_new_reg_I32(cc);
    GEN_INSN(SELECTEQ, is_null, cc->cmp_reg, NEW_CONST(I32, 1),
             NEW_CONST(I32, 0));

    PUSH_I32(is_null);
    return true;
fail:
    return false;
}
/* Translate `ref.func`: a function reference is represented by the
 * function index itself, pushed as an i32 constant.  The `fail` label
 * is reachable only through the PUSH_I32 macro. */
bool
jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx)
{
    PUSH_I32(NEW_CONST(I32, func_idx));
    return true;
fail:
    return false;
}
#endif
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
/* x86-64 implementation: emit a CALLNATIVE to `native_func`, moving up
 * to 6 parameters into the System V AMD64 argument hard registers
 * (rdi/rsi/rdx/rcx/r8/r9 for integers, xmm0-xmm5 for floats) and
 * reading the result from eax/rax/xmm0 according to its kind.
 * `res` may be 0 when the callee's result is ignored.
 * Returns false on failure. */
bool
jit_emit_callnative(JitCompContext *cc, void *native_func, JitReg res,
                    JitReg *params, uint32 param_count)
{
    JitInsn *insn;
    char *i64_arg_names[] = { "rdi", "rsi", "rdx", "rcx", "r8", "r9" };
    char *f32_arg_names[] = { "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5" };
    char *f64_arg_names[] = { "xmm0_f64", "xmm1_f64", "xmm2_f64",
                              "xmm3_f64", "xmm4_f64", "xmm5_f64" };
    JitReg i64_arg_regs[6], f32_arg_regs[6], f64_arg_regs[6], res_hreg = 0;
    JitReg eax_hreg = jit_codegen_get_hreg_by_name("eax");
    JitReg rax_hreg = jit_codegen_get_hreg_by_name("rax");
    JitReg xmm0_hreg = jit_codegen_get_hreg_by_name("xmm0");
    JitReg xmm0_f64_hreg = jit_codegen_get_hreg_by_name("xmm0_f64");
    uint32 i, i64_reg_idx, float_reg_idx;

    bh_assert(param_count <= 6);

    /* Resolve the hard registers for all six argument slots. */
    for (i = 0; i < 6; i++) {
        i64_arg_regs[i] = jit_codegen_get_hreg_by_name(i64_arg_names[i]);
        f32_arg_regs[i] = jit_codegen_get_hreg_by_name(f32_arg_names[i]);
        f64_arg_regs[i] = jit_codegen_get_hreg_by_name(f64_arg_names[i]);
    }

    /* Move each parameter into the next free integer or float argument
       register; i32 values are widened to i64 on the way in. */
    i64_reg_idx = float_reg_idx = 0;
    for (i = 0; i < param_count; i++) {
        switch (jit_reg_kind(params[i])) {
            case JIT_REG_KIND_I32:
                GEN_INSN(I32TOI64, i64_arg_regs[i64_reg_idx++], params[i]);
                break;
            case JIT_REG_KIND_I64:
                GEN_INSN(MOV, i64_arg_regs[i64_reg_idx++], params[i]);
                break;
            case JIT_REG_KIND_F32:
                GEN_INSN(MOV, f32_arg_regs[float_reg_idx++], params[i]);
                break;
            case JIT_REG_KIND_F64:
                GEN_INSN(MOV, f64_arg_regs[float_reg_idx++], params[i]);
                break;
            default:
                bh_assert(0);
                return false;
        }
    }

    /* Pick the hard register that will hold the callee's result. */
    if (res) {
        switch (jit_reg_kind(res)) {
            case JIT_REG_KIND_I32:
                res_hreg = eax_hreg;
                break;
            case JIT_REG_KIND_I64:
                res_hreg = rax_hreg;
                break;
            case JIT_REG_KIND_F32:
                res_hreg = xmm0_hreg;
                break;
            case JIT_REG_KIND_F64:
                res_hreg = xmm0_f64_hreg;
                break;
            default:
                bh_assert(0);
                return false;
        }
    }

    insn = GEN_INSN(CALLNATIVE, res_hreg,
                    NEW_CONST(PTR, (uintptr_t)native_func), param_count);
    if (!insn) {
        return false;
    }

    /* Attach the argument hard registers as extra operands of the call
       so later passes know they are live at the call site. */
    i64_reg_idx = float_reg_idx = 0;
    for (i = 0; i < param_count; i++) {
        switch (jit_reg_kind(params[i])) {
            case JIT_REG_KIND_I32:
            case JIT_REG_KIND_I64:
                *(jit_insn_opndv(insn, i + 2)) = i64_arg_regs[i64_reg_idx++];
                break;
            case JIT_REG_KIND_F32:
                *(jit_insn_opndv(insn, i + 2)) = f32_arg_regs[float_reg_idx++];
                break;
            case JIT_REG_KIND_F64:
                *(jit_insn_opndv(insn, i + 2)) = f64_arg_regs[float_reg_idx++];
                break;
            default:
                bh_assert(0);
                return false;
        }
    }

    /* Copy the result out of the return hard register. */
    if (res) {
        GEN_INSN(MOV, res, res_hreg);
    }

    return true;
}
#else
/* Generic (non-x86-64) implementation: emit a CALLNATIVE to
 * `native_func` and attach the parameters as extra operands; the
 * backend assigns argument registers itself.  Returns false on
 * failure. */
bool
jit_emit_callnative(JitCompContext *cc, void *native_func, JitReg res,
                    JitReg *params, uint32 param_count)
{
    JitInsn *call_insn;
    uint32 idx;

    bh_assert(param_count <= 6);

    call_insn = GEN_INSN(CALLNATIVE, res,
                         NEW_CONST(PTR, (uintptr_t)native_func), param_count);
    if (!call_insn)
        return false;

    /* Attach the parameters as extra operands of the call. */
    for (idx = 0; idx < param_count; idx++)
        *(jit_insn_opndv(call_insn, idx + 2)) = params[idx];

    return true;
}
#endif

View File

@ -0,0 +1,39 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_FUNCTION_H_
#define _JIT_EMIT_FUNCTION_H_
#include "../jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Translate a direct `call` (tail_call currently ignored). */
bool
jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call);

/* Translate `call_indirect` through table `tbl_idx` against type
 * `type_idx`. */
bool
jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
                             uint32 tbl_idx);

/* Translate `ref.null`. */
bool
jit_compile_op_ref_null(JitCompContext *cc, uint32 ref_type);

/* Translate `ref.is_null`. */
bool
jit_compile_op_ref_is_null(JitCompContext *cc);

/* Translate `ref.func`. */
bool
jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx);

/* Emit a call to a native helper with up to 6 parameters; `res` may be
 * 0 when the result is ignored. */
bool
jit_emit_callnative(JitCompContext *cc, void *native_func, JitReg res,
                    JitReg *params, uint32 param_count);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_FUNCTION_H_ */

View File

@ -0,0 +1,782 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_memory.h"
#include "jit_emit_exception.h"
#include "jit_emit_function.h"
#include "../jit_frontend.h"
#include "../jit_codegen.h"
#include "../../interpreter/wasm_runtime.h"
#ifndef OS_ENABLE_HW_BOUND_CHECK
/* Return the cached bound-check register that matches the access width
 * `bytes` (1/2/4/8/16) for memory `mem_idx`, or 0 on an unexpected
 * width. */
static JitReg
get_memory_boundary(JitCompContext *cc, uint32 mem_idx, uint32 bytes)
{
    JitReg bound;

    switch (bytes) {
        case 1:
            bound = get_mem_bound_check_1byte_reg(cc->jit_frame, mem_idx);
            break;
        case 2:
            bound = get_mem_bound_check_2bytes_reg(cc->jit_frame, mem_idx);
            break;
        case 4:
            bound = get_mem_bound_check_4bytes_reg(cc->jit_frame, mem_idx);
            break;
        case 8:
            bound = get_mem_bound_check_8bytes_reg(cc->jit_frame, mem_idx);
            break;
        case 16:
            bound = get_mem_bound_check_16bytes_reg(cc->jit_frame, mem_idx);
            break;
        default:
            bh_assert(0);
            goto fail;
    }

    return bound;
fail:
    return 0;
}
#endif
#if UINTPTR_MAX == UINT64_MAX
/* 64-bit host: compute the effective access offset addr + offset in
 * 64-bit arithmetic (addr zero-extended), so the sum cannot wrap.
 * Without hardware bound checks, also branch to the out-of-bounds
 * exception when the end of the access exceeds `memory_boundary`.
 * Returns the 64-bit offset register, or 0 on failure.
 * Note: the `fail` label only exists when the software bound check is
 * compiled in. */
static JitReg
check_and_seek_on_64bit_platform(JitCompContext *cc, JitReg addr, JitReg offset,
                                 JitReg memory_boundary)
{
    JitReg long_addr, offset1;
    /* long_addr = (int64_t)addr */
    long_addr = jit_cc_new_reg_I64(cc);
    GEN_INSN(U32TOI64, long_addr, addr);
    /* offset1 = offset + long_addr */
    offset1 = jit_cc_new_reg_I64(cc);
    GEN_INSN(ADD, offset1, offset, long_addr);
#ifndef OS_ENABLE_HW_BOUND_CHECK
    /* if (offset1 > memory_boundary) goto EXCEPTION */
    GEN_INSN(CMP, cc->cmp_reg, offset1, memory_boundary);
    if (!jit_emit_exception(cc, JIT_EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                            JIT_OP_BGTU, cc->cmp_reg, NULL)) {
        goto fail;
    }
#endif
    return offset1;
#ifndef OS_ENABLE_HW_BOUND_CHECK
fail:
    return 0;
#endif
}
#else
/* 32-bit host: compute offset1 = addr + offset in 32-bit arithmetic
 * and branch to the out-of-bounds exception when the addition wraps
 * (offset1 < addr) or, without hardware bound checks, when the access
 * end exceeds `memory_boundary`.  Returns the offset register, or 0 on
 * failure. */
static JitReg
check_and_seek_on_32bit_platform(JitCompContext *cc, JitReg addr, JitReg offset,
                                 JitReg memory_boundary)
{
    JitReg offset1;
    /* offset1 = offset + addr */
    offset1 = jit_cc_new_reg_I32(cc);
    GEN_INSN(ADD, offset1, offset, addr);
    /* if (offset1 < addr) goto EXCEPTION — detects u32 overflow */
    GEN_INSN(CMP, cc->cmp_reg, offset1, addr);
    if (!jit_emit_exception(cc, JIT_EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                            JIT_OP_BLTU, cc->cmp_reg, NULL)) {
        goto fail;
    }
#ifndef OS_ENABLE_HW_BOUND_CHECK
    /* if (offset1 > memory_boundary) goto EXCEPTION */
    GEN_INSN(CMP, cc->cmp_reg, offset1, memory_boundary);
    if (!jit_emit_exception(cc, JIT_EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                            JIT_OP_BGTU, cc->cmp_reg, NULL)) {
        goto fail;
    }
#endif
    return offset1;
fail:
    return 0;
}
#endif
/* Emit the bounds check for a `bytes`-wide access at addr + offset in
 * the default memory and return the register holding the effective
 * offset into memory_data (0 on failure).  With hardware bound checks
 * enabled, only the platform-specific offset computation is emitted. */
static JitReg
check_and_seek(JitCompContext *cc, JitReg addr, uint32 offset, uint32 bytes)
{
    JitReg memory_boundary = 0, offset1;
#ifndef OS_ENABLE_HW_BOUND_CHECK
    /* the default memory */
    uint32 mem_idx = 0;
#endif
#ifndef OS_ENABLE_HW_BOUND_CHECK
    /* ---------- check ---------- */
    /* 1. shortcut if the memory size is 0 */
    if (0 == cc->cur_wasm_module->memories[mem_idx].init_page_count) {
        JitReg memory_inst, cur_mem_page_count;
        /* if (cur_mem_page_count == 0) goto EXCEPTION */
        memory_inst = get_memory_inst_reg(cc->jit_frame, mem_idx);
        cur_mem_page_count = jit_cc_new_reg_I32(cc);
        GEN_INSN(LDI32, cur_mem_page_count, memory_inst,
                 NEW_CONST(I32, offsetof(WASMMemoryInstance, cur_page_count)));
        GEN_INSN(CMP, cc->cmp_reg, cur_mem_page_count, NEW_CONST(I32, 0));
        if (!jit_emit_exception(cc, JIT_EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
                                JIT_OP_BEQ, cc->cmp_reg, NULL)) {
            goto fail;
        }
    }
    /* 2. a complete boundary check */
    memory_boundary = get_memory_boundary(cc, mem_idx, bytes);
    if (!memory_boundary)
        goto fail;
#endif
#if UINTPTR_MAX == UINT64_MAX
    offset1 = check_and_seek_on_64bit_platform(cc, addr, NEW_CONST(I64, offset),
                                               memory_boundary);
    if (!offset1)
        goto fail;
#else
    offset1 = check_and_seek_on_32bit_platform(cc, addr, NEW_CONST(I32, offset),
                                               memory_boundary);
    if (!offset1)
        goto fail;
#endif
    return offset1;
fail:
    return 0;
}
/* Translate i32.load / i32.load8_s/u / i32.load16_s/u.  `bytes` is the
 * access width (1/2/4) and `sign` selects sign- vs zero-extension.
 * NOTE(review): `align` and `atomic` are currently unused here.
 * Pops the base address, emits the bounds check, then loads from
 * memory_data + effective offset and pushes the i32 result. */
bool
jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
                        uint32 bytes, bool sign, bool atomic)
{
    JitReg addr, offset1, value, memory_data;
    POP_I32(addr);
    /* Bounds-check and compute the effective offset (0 on failure). */
    offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1) {
        goto fail;
    }
    memory_data = get_memory_data_reg(cc->jit_frame, 0);
    value = jit_cc_new_reg_I32(cc);
    switch (bytes) {
        case 1:
        {
            if (sign) {
                GEN_INSN(LDI8, value, memory_data, offset1);
            }
            else {
                GEN_INSN(LDU8, value, memory_data, offset1);
            }
            break;
        }
        case 2:
        {
            if (sign) {
                GEN_INSN(LDI16, value, memory_data, offset1);
            }
            else {
                GEN_INSN(LDU16, value, memory_data, offset1);
            }
            break;
        }
        case 4:
        {
            if (sign) {
                GEN_INSN(LDI32, value, memory_data, offset1);
            }
            else {
                GEN_INSN(LDU32, value, memory_data, offset1);
            }
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }
    PUSH_I32(value);
    return true;
fail:
    return false;
}
/* Translate i64.load / i64.load8/16/32_s/u.  `bytes` is the access
 * width (1/2/4/8) and `sign` selects sign- vs zero-extension.
 * NOTE(review): `align` and `atomic` are currently unused here.
 * Pops the base address, emits the bounds check, then loads from
 * memory_data + effective offset and pushes the i64 result. */
bool
jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
                        uint32 bytes, bool sign, bool atomic)
{
    JitReg addr, offset1, value, memory_data;
    POP_I32(addr);
    /* Bounds-check and compute the effective offset (0 on failure). */
    offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1) {
        goto fail;
    }
    memory_data = get_memory_data_reg(cc->jit_frame, 0);
    value = jit_cc_new_reg_I64(cc);
    switch (bytes) {
        case 1:
        {
            if (sign) {
                GEN_INSN(LDI8, value, memory_data, offset1);
            }
            else {
                GEN_INSN(LDU8, value, memory_data, offset1);
            }
            break;
        }
        case 2:
        {
            if (sign) {
                GEN_INSN(LDI16, value, memory_data, offset1);
            }
            else {
                GEN_INSN(LDU16, value, memory_data, offset1);
            }
            break;
        }
        case 4:
        {
            if (sign) {
                GEN_INSN(LDI32, value, memory_data, offset1);
            }
            else {
                GEN_INSN(LDU32, value, memory_data, offset1);
            }
            break;
        }
        case 8:
        {
            if (sign) {
                GEN_INSN(LDI64, value, memory_data, offset1);
            }
            else {
                GEN_INSN(LDU64, value, memory_data, offset1);
            }
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }
    PUSH_I64(value);
    return true;
fail:
    return false;
}
/* Translate f32.load: pop the base address, bounds-check it, load a
 * 4-byte float and push the result. */
bool
jit_compile_op_f32_load(JitCompContext *cc, uint32 align, uint32 offset)
{
    JitReg base_addr, seek_off, dst, mem_data;

    POP_I32(base_addr);

    /* Bounds-check and compute the effective offset (0 on failure). */
    seek_off = check_and_seek(cc, base_addr, offset, 4);
    if (!seek_off)
        goto fail;

    mem_data = get_memory_data_reg(cc->jit_frame, 0);
    dst = jit_cc_new_reg_F32(cc);
    GEN_INSN(LDF32, dst, mem_data, seek_off);

    PUSH_F32(dst);
    return true;
fail:
    return false;
}
/* Translate f64.load: pop the base address, bounds-check it, load an
 * 8-byte float and push the result. */
bool
jit_compile_op_f64_load(JitCompContext *cc, uint32 align, uint32 offset)
{
    JitReg base_addr, seek_off, dst, mem_data;

    POP_I32(base_addr);

    /* Bounds-check and compute the effective offset (0 on failure). */
    seek_off = check_and_seek(cc, base_addr, offset, 8);
    if (!seek_off)
        goto fail;

    mem_data = get_memory_data_reg(cc->jit_frame, 0);
    dst = jit_cc_new_reg_F64(cc);
    GEN_INSN(LDF64, dst, mem_data, seek_off);

    PUSH_F64(dst);
    return true;
fail:
    return false;
}
/* Translate i32.store / i32.store8 / i32.store16.  `bytes` is the
 * access width (1/2/4).  NOTE(review): `align` and `atomic` are
 * currently unused here.  Pops the value and base address, emits the
 * bounds check, then stores to memory_data + effective offset. */
bool
jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
                         uint32 bytes, bool atomic)
{
    JitReg value, addr, offset1, memory_data;
    POP_I32(value);
    POP_I32(addr);
    /* Bounds-check and compute the effective offset (0 on failure). */
    offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1) {
        goto fail;
    }
    memory_data = get_memory_data_reg(cc->jit_frame, 0);
    switch (bytes) {
        case 1:
        {
            GEN_INSN(STI8, value, memory_data, offset1);
            break;
        }
        case 2:
        {
            GEN_INSN(STI16, value, memory_data, offset1);
            break;
        }
        case 4:
        {
            GEN_INSN(STI32, value, memory_data, offset1);
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }
    return true;
fail:
    return false;
}
/* Translate i64.store / i64.store8/16/32.  `bytes` is the access width
 * (1/2/4/8).  NOTE(review): `align` and `atomic` are currently unused
 * here.  Pops the value and base address, emits the bounds check, then
 * stores to memory_data + effective offset. */
bool
jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
                         uint32 bytes, bool atomic)
{
    JitReg value, addr, offset1, memory_data;
    POP_I64(value);
    POP_I32(addr);
    /* Bounds-check and compute the effective offset (0 on failure). */
    offset1 = check_and_seek(cc, addr, offset, bytes);
    if (!offset1) {
        goto fail;
    }
    /* A constant operand of a narrow store is re-wrapped as an i32
       constant so the truncated value is emitted directly. */
    if (jit_reg_is_const(value) && bytes < 8) {
        value = NEW_CONST(I32, (int32)jit_cc_get_const_I64(cc, value));
    }
    memory_data = get_memory_data_reg(cc->jit_frame, 0);
    switch (bytes) {
        case 1:
        {
            GEN_INSN(STI8, value, memory_data, offset1);
            break;
        }
        case 2:
        {
            GEN_INSN(STI16, value, memory_data, offset1);
            break;
        }
        case 4:
        {
            GEN_INSN(STI32, value, memory_data, offset1);
            break;
        }
        case 8:
        {
            GEN_INSN(STI64, value, memory_data, offset1);
            break;
        }
        default:
        {
            bh_assert(0);
            goto fail;
        }
    }
    return true;
fail:
    return false;
}
/* Translate f32.store: pop the value and base address, bounds-check,
 * then store the 4-byte float. */
bool
jit_compile_op_f32_store(JitCompContext *cc, uint32 align, uint32 offset)
{
    JitReg src, base_addr, seek_off, mem_data;

    POP_F32(src);
    POP_I32(base_addr);

    /* Bounds-check and compute the effective offset (0 on failure). */
    seek_off = check_and_seek(cc, base_addr, offset, 4);
    if (!seek_off)
        goto fail;

    mem_data = get_memory_data_reg(cc->jit_frame, 0);
    GEN_INSN(STF32, src, mem_data, seek_off);

    return true;
fail:
    return false;
}
/* Translate f64.store: pop the value and base address, bounds-check,
 * then store the 8-byte float. */
bool
jit_compile_op_f64_store(JitCompContext *cc, uint32 align, uint32 offset)
{
    JitReg src, base_addr, seek_off, mem_data;

    POP_F64(src);
    POP_I32(base_addr);

    /* Bounds-check and compute the effective offset (0 on failure). */
    seek_off = check_and_seek(cc, base_addr, offset, 8);
    if (!seek_off)
        goto fail;

    mem_data = get_memory_data_reg(cc->jit_frame, 0);
    GEN_INSN(STF64, src, mem_data, seek_off);

    return true;
fail:
    return false;
}
/* Translate memory.size: push the current page count of memory
 * `mem_idx`, read from WASMMemoryInstance::cur_page_count. */
bool
jit_compile_op_memory_size(JitCompContext *cc, uint32 mem_idx)
{
    JitReg mem_inst_reg, page_count;

    mem_inst_reg = get_memory_inst_reg(cc->jit_frame, mem_idx);

    page_count = jit_cc_new_reg_I32(cc);
    GEN_INSN(LDI32, page_count, mem_inst_reg,
             NEW_CONST(I32, offsetof(WASMMemoryInstance, cur_page_count)));
    PUSH_I32(page_count);

    return true;
fail:
    return false;
}
/* Translate memory.grow: call wasm_enlarge_memory with the popped page
 * delta; push the previous page count on success, or -1 on failure.
 * Cached memory registers are cleared afterwards because the memory
 * may have been reallocated. */
bool
jit_compile_op_memory_grow(JitCompContext *cc, uint32 mem_idx)
{
    JitReg memory_inst, grow_res, res;
    JitReg prev_page_count, inc_page_count, args[2];
    /* Get current page count */
    memory_inst = get_memory_inst_reg(cc->jit_frame, mem_idx);
    prev_page_count = jit_cc_new_reg_I32(cc);
    GEN_INSN(LDI32, prev_page_count, memory_inst,
             NEW_CONST(I32, offsetof(WASMMemoryInstance, cur_page_count)));
    /* Call wasm_enlarge_memory */
    POP_I32(inc_page_count);
    grow_res = jit_cc_new_reg_I32(cc);
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = inc_page_count;
    if (!jit_emit_callnative(cc, wasm_enlarge_memory, grow_res, args, 2)) {
        goto fail;
    }
    /* Convert bool to uint32 */
    GEN_INSN(AND, grow_res, grow_res, NEW_CONST(I32, 0xFF));
    /* return different values according to memory.grow result:
       res = grow_res != 0 ? prev_page_count : -1 */
    res = jit_cc_new_reg_I32(cc);
    GEN_INSN(CMP, cc->cmp_reg, grow_res, NEW_CONST(I32, 0));
    GEN_INSN(SELECTNE, res, cc->cmp_reg, prev_page_count,
             NEW_CONST(I32, (int32)-1));
    PUSH_I32(res);
    /* Ensure a refresh in next get memory related registers */
    clear_memory_regs(cc->jit_frame);
    return true;
fail:
    return false;
}
#if WASM_ENABLE_BULK_MEMORY != 0
/* Runtime helper backing memory.init: copy `len` bytes from data
 * segment `seg_idx` at `data_offset` into memory `mem_idx` at
 * `mem_offset`.  Returns 0 on success; on any out-of-bounds access
 * sets an "out of bounds memory access" exception on `inst` and
 * returns -1. */
static int
wasm_init_memory(WASMModuleInstance *inst, uint32 mem_idx, uint32 seg_idx,
                 uint32 len, uint32 mem_offset, uint32 data_offset)
{
    WASMMemoryInstance *mem_inst;
    WASMDataSeg *data_segment;
    uint32 mem_size;
    uint8 *mem_addr, *data_addr;
    /* if d + n > the length of mem.data
       (two comparisons to avoid u32 overflow of mem_offset + len) */
    mem_inst = inst->memories[mem_idx];
    mem_size = mem_inst->cur_page_count * mem_inst->num_bytes_per_page;
    if (mem_size < mem_offset || mem_size - mem_offset < len)
        goto out_of_bounds;
    /* if s + n > the length of data.data (same overflow-safe form) */
    bh_assert(seg_idx < inst->module->data_seg_count);
    data_segment = inst->module->data_segments[seg_idx];
    if (data_segment->data_length < data_offset
        || data_segment->data_length - data_offset < len)
        goto out_of_bounds;
    mem_addr = mem_inst->memory_data + mem_offset;
    data_addr = data_segment->data + data_offset;
    bh_memcpy_s(mem_addr, mem_size - mem_offset, data_addr, len);
    return 0;
out_of_bounds:
    wasm_set_exception(inst, "out of bounds memory access");
    return -1;
}
/* Translate memory.init: pop len/src/dst and call the wasm_init_memory
 * helper; branch to the already-thrown-exception block when the helper
 * returns a negative value. */
bool
jit_compile_op_memory_init(JitCompContext *cc, uint32 mem_idx, uint32 seg_idx)
{
    JitReg len, mem_offset, data_offset, res;
    JitReg args[6] = { 0 };
    /* Operand order on the stack: dst (bottom), src, len (top). */
    POP_I32(len);
    POP_I32(data_offset);
    POP_I32(mem_offset);
    res = jit_cc_new_reg_I32(cc);
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = NEW_CONST(I32, mem_idx);
    args[2] = NEW_CONST(I32, seg_idx);
    args[3] = len;
    args[4] = mem_offset;
    args[5] = data_offset;
    if (!jit_emit_callnative(cc, wasm_init_memory, res, args,
                             sizeof(args) / sizeof(args[0])))
        goto fail;
    /* res < 0 means the helper raised an exception. */
    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
    if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BLTS,
                            cc->cmp_reg, NULL))
        goto fail;
    return true;
fail:
    return false;
}
/* Translate data.drop: mark data segment `seg_idx` as dropped by
 * storing 0 into its data_length field. */
bool
jit_compile_op_data_drop(JitCompContext *cc, uint32 seg_idx)
{
    JitReg module_reg, segs_reg, seg_reg;

    module_reg = get_module_reg(cc->jit_frame);

    /* seg = module->data_segments[seg_idx] */
    segs_reg = jit_cc_new_reg_ptr(cc);
    seg_reg = jit_cc_new_reg_ptr(cc);
    GEN_INSN(LDPTR, segs_reg, module_reg,
             NEW_CONST(I32, offsetof(WASMModule, data_segments)));
    GEN_INSN(LDPTR, seg_reg, segs_reg,
             NEW_CONST(I32, seg_idx * sizeof(WASMDataSeg *)));

    /* seg->data_length = 0 */
    GEN_INSN(STI32, NEW_CONST(I32, 0), seg_reg,
             NEW_CONST(I32, offsetof(WASMDataSeg, data_length)));

    return true;
}
/* Runtime helper backing memory.copy: move `len` bytes from memory
 * `src_mem_idx` at `src_offset` to memory `dst_mem_idx` at
 * `dst_offset`; regions may overlap (memmove semantics).  Returns 0 on
 * success, -1 with an exception set on `inst` when either range is out
 * of bounds. */
static int
wasm_copy_memory(WASMModuleInstance *inst, uint32 src_mem_idx,
                 uint32 dst_mem_idx, uint32 len, uint32 src_offset,
                 uint32 dst_offset)
{
    WASMMemoryInstance *src_mem, *dst_mem;
    uint32 src_mem_size, dst_mem_size;
    uint8 *src_addr, *dst_addr;
    src_mem = inst->memories[src_mem_idx];
    dst_mem = inst->memories[dst_mem_idx];
    src_mem_size = src_mem->cur_page_count * src_mem->num_bytes_per_page;
    dst_mem_size = dst_mem->cur_page_count * dst_mem->num_bytes_per_page;
    /* if s + n > the length of mem.data
       (two comparisons to avoid u32 overflow of offset + len) */
    if (src_mem_size < src_offset || src_mem_size - src_offset < len)
        goto out_of_bounds;
    /* if d + n > the length of mem.data */
    if (dst_mem_size < dst_offset || dst_mem_size - dst_offset < len)
        goto out_of_bounds;
    src_addr = src_mem->memory_data + src_offset;
    dst_addr = dst_mem->memory_data + dst_offset;
    /* allowing the destination and source to overlap */
    bh_memmove_s(dst_addr, dst_mem_size - dst_offset, src_addr, len);
    return 0;
out_of_bounds:
    wasm_set_exception(inst, "out of bounds memory access");
    return -1;
}
/* Translate memory.copy: pop len/src/dst and call the wasm_copy_memory
 * helper; branch to the already-thrown-exception block when the helper
 * returns a negative value. */
bool
jit_compile_op_memory_copy(JitCompContext *cc, uint32 src_mem_idx,
                           uint32 dst_mem_idx)
{
    JitReg len, src, dst, res;
    JitReg args[6] = { 0 };
    /* Operand order on the stack: dst (bottom), src, len (top). */
    POP_I32(len);
    POP_I32(src);
    POP_I32(dst);
    res = jit_cc_new_reg_I32(cc);
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = NEW_CONST(I32, src_mem_idx);
    args[2] = NEW_CONST(I32, dst_mem_idx);
    args[3] = len;
    args[4] = src;
    args[5] = dst;
    if (!jit_emit_callnative(cc, wasm_copy_memory, res, args,
                             sizeof(args) / sizeof(args[0])))
        goto fail;
    /* res < 0 means the helper raised an exception. */
    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
    if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BLTS,
                            cc->cmp_reg, NULL))
        goto fail;
    return true;
fail:
    return false;
}
/* Runtime helper backing memory.fill: set `len` bytes of memory
 * `mem_idx` starting at `dst` to the byte value `val`.  Returns 0 on
 * success, -1 with an exception set on `inst` when the range is out of
 * bounds. */
static int
wasm_fill_memory(WASMModuleInstance *inst, uint32 mem_idx, uint32 len,
                 uint32 val, uint32 dst)
{
    WASMMemoryInstance *mem_inst;
    uint32 mem_size;
    uint8 *dst_addr;
    mem_inst = inst->memories[mem_idx];
    mem_size = mem_inst->cur_page_count * mem_inst->num_bytes_per_page;
    /* Overflow-safe form of: dst + len > mem_size */
    if (mem_size < dst || mem_size - dst < len)
        goto out_of_bounds;
    dst_addr = mem_inst->memory_data + dst;
    memset(dst_addr, val, len);
    return 0;
out_of_bounds:
    wasm_set_exception(inst, "out of bounds memory access");
    return -1;
}
/* Translate memory.fill: pop len/val/dst and call the wasm_fill_memory
 * helper; branch to the already-thrown-exception block when the helper
 * returns a negative value. */
bool
jit_compile_op_memory_fill(JitCompContext *cc, uint32 mem_idx)
{
    JitReg res, len, val, dst;
    JitReg args[5] = { 0 };
    /* Operand order on the stack: dst (bottom), val, len (top). */
    POP_I32(len);
    POP_I32(val);
    POP_I32(dst);
    res = jit_cc_new_reg_I32(cc);
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = NEW_CONST(I32, mem_idx);
    args[2] = len;
    args[3] = val;
    args[4] = dst;
    if (!jit_emit_callnative(cc, wasm_fill_memory, res, args,
                             sizeof(args) / sizeof(args[0])))
        goto fail;
    /* res < 0 means the helper raised an exception. */
    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
    if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BLTS,
                            cc->cmp_reg, NULL))
        goto fail;
    return true;
fail:
    return false;
}
#endif
#if WASM_ENABLE_SHARED_MEMORY != 0
/* Shared-memory atomic opcodes are not implemented in this frontend yet:
 * each emitter returns false so that compiling a function containing them
 * fails. */
bool
jit_compile_op_atomic_rmw(JitCompContext *cc, uint8 atomic_op, uint8 op_type,
                          uint32 align, uint32 offset, uint32 bytes)
{
    return false;
}

/* Not implemented: atomic compare-and-exchange. */
bool
jit_compile_op_atomic_cmpxchg(JitCompContext *cc, uint8 op_type, uint32 align,
                              uint32 offset, uint32 bytes)
{
    return false;
}

/* Not implemented: memory.atomic.wait32/wait64. */
bool
jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
                           uint32 offset, uint32 bytes)
{
    return false;
}

/* Not implemented: memory.atomic.notify.
 * NOTE(review): named jit_compiler_op_... unlike its jit_compile_op_...
 * siblings — probably unintentional, but the header matches. */
bool
jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
                              uint32 bytes)
{
    return false;
}
#endif

View File

@ -0,0 +1,89 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_MEMORY_H_
#define _JIT_EMIT_MEMORY_H_

#include "../jit_compiler.h"
#if WASM_ENABLE_SHARED_MEMORY != 0
#include "../../common/wasm_shared_memory.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

/* Fast JIT frontend emitters for WASM memory opcodes. Each returns true on
 * success and false on failure (compilation error recorded in cc). */

bool
jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
                        uint32 bytes, bool sign, bool atomic);

bool
jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
                        uint32 bytes, bool sign, bool atomic);

bool
jit_compile_op_f32_load(JitCompContext *cc, uint32 align, uint32 offset);

bool
jit_compile_op_f64_load(JitCompContext *cc, uint32 align, uint32 offset);

bool
jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
                         uint32 bytes, bool atomic);

bool
jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
                         uint32 bytes, bool atomic);

bool
jit_compile_op_f32_store(JitCompContext *cc, uint32 align, uint32 offset);

bool
jit_compile_op_f64_store(JitCompContext *cc, uint32 align, uint32 offset);

bool
jit_compile_op_memory_size(JitCompContext *cc, uint32 mem_idx);

bool
jit_compile_op_memory_grow(JitCompContext *cc, uint32 mem_idx);

#if WASM_ENABLE_BULK_MEMORY != 0
/* Bulk-memory proposal opcodes. */
bool
jit_compile_op_memory_init(JitCompContext *cc, uint32 mem_idx, uint32 seg_idx);

bool
jit_compile_op_data_drop(JitCompContext *cc, uint32 seg_idx);

bool
jit_compile_op_memory_copy(JitCompContext *cc, uint32 src_mem_idx,
                           uint32 dst_mem_idx);

bool
jit_compile_op_memory_fill(JitCompContext *cc, uint32 mem_idx);
#endif

#if WASM_ENABLE_SHARED_MEMORY != 0
/* Threads-proposal atomic opcodes (currently unimplemented stubs). */
bool
jit_compile_op_atomic_rmw(JitCompContext *cc, uint8 atomic_op, uint8 op_type,
                          uint32 align, uint32 offset, uint32 bytes);

bool
jit_compile_op_atomic_cmpxchg(JitCompContext *cc, uint8 op_type, uint32 align,
                              uint32 offset, uint32 bytes);

bool
jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
                           uint32 offset, uint32 bytes);

bool
jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
                              uint32 bytes);
#endif

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_MEMORY_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,76 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
/* NOTE(review): "NUMBERIC" spelling kept — it matches the include guard and
 * presumably the file name; renaming would touch other files. */
#ifndef _JIT_EMIT_NUMBERIC_H_
#define _JIT_EMIT_NUMBERIC_H_

#include "../jit_compiler.h"
#include "../jit_frontend.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Fast JIT frontend emitters for WASM numeric opcodes. Each returns true on
 * success and false on failure. */

bool
jit_compile_op_i32_clz(JitCompContext *cc);

bool
jit_compile_op_i32_ctz(JitCompContext *cc);

bool
jit_compile_op_i32_popcnt(JitCompContext *cc);

bool
jit_compile_op_i64_clz(JitCompContext *cc);

bool
jit_compile_op_i64_ctz(JitCompContext *cc);

bool
jit_compile_op_i64_popcnt(JitCompContext *cc);

/* p_frame_ip lets the emitter look ahead in the bytecode stream. */
bool
jit_compile_op_i32_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
                              uint8 **p_frame_ip);

bool
jit_compile_op_i64_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
                              uint8 **p_frame_ip);

bool
jit_compile_op_i32_bitwise(JitCompContext *cc, IntBitwise bitwise_op);

bool
jit_compile_op_i64_bitwise(JitCompContext *cc, IntBitwise bitwise_op);

bool
jit_compile_op_i32_shift(JitCompContext *cc, IntShift shift_op);

bool
jit_compile_op_i64_shift(JitCompContext *cc, IntShift shift_op);

bool
jit_compile_op_f32_math(JitCompContext *cc, FloatMath math_op);

bool
jit_compile_op_f64_math(JitCompContext *cc, FloatMath math_op);

bool
jit_compile_op_f32_arithmetic(JitCompContext *cc, FloatArithmetic arith_op);

bool
jit_compile_op_f64_arithmetic(JitCompContext *cc, FloatArithmetic arith_op);

bool
jit_compile_op_f32_copysign(JitCompContext *cc);

bool
jit_compile_op_f64_copysign(JitCompContext *cc);

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_NUMBERIC_H_ */

View File

@ -0,0 +1,130 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_parametric.h"
#include "../jit_frontend.h"
/* Pop one value from the frontend's WASM value stack (the compile-time
 * model, not the runtime stack) and its backing register.
 *
 * is_32bit selects which width class is legal for the popped value; the
 * popped type is validated against it. On success, *p_value receives the
 * popped JitReg (if p_value != NULL) and *p_type the WASM value type (if
 * p_type != NULL). Returns false on stack underflow or type mismatch, with
 * the error recorded via jit_set_last_error. */
static bool
pop_value_from_wasm_stack(JitCompContext *cc, bool is_32bit, JitReg *p_value,
                          uint8 *p_type)
{
    JitValue *jit_value;
    JitReg value;
    uint8 type;

    if (!jit_block_stack_top(&cc->block_stack)) {
        jit_set_last_error(cc, "WASM block stack underflow.");
        return false;
    }
    if (!jit_block_stack_top(&cc->block_stack)->value_stack.value_list_end) {
        jit_set_last_error(cc, "WASM data stack underflow.");
        return false;
    }

    jit_value = jit_value_stack_pop(
        &jit_block_stack_top(&cc->block_stack)->value_stack);
    type = jit_value->type;
    if (p_type != NULL) {
        *p_type = jit_value->type;
    }
    /* jit_value is heap-allocated by the value stack; free it now that the
       type has been captured. */
    wasm_runtime_free(jit_value);

    /* is_32: i32, f32, ref.func, ref.extern, v128 */
    if (is_32bit
        && !(type == VALUE_TYPE_I32 || type == VALUE_TYPE_F32
#if WASM_ENABLE_REF_TYPES != 0
             || type == VALUE_TYPE_FUNCREF || type == VALUE_TYPE_EXTERNREF
#endif
             || type == VALUE_TYPE_V128)) {
        jit_set_last_error(cc, "invalid WASM stack data type.");
        return false;
    }
    /* !is_32: i64, f64 */
    if (!is_32bit && !(type == VALUE_TYPE_I64 || type == VALUE_TYPE_F64)) {
        jit_set_last_error(cc, "invalid WASM stack data type.");
        return false;
    }

    /* Pop the register backing the value from the JIT frame, by width. */
    switch (type) {
        case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
        case VALUE_TYPE_FUNCREF:
        case VALUE_TYPE_EXTERNREF:
#endif
            value = pop_i32(cc->jit_frame);
            break;
        case VALUE_TYPE_I64:
            value = pop_i64(cc->jit_frame);
            break;
        case VALUE_TYPE_F32:
            value = pop_f32(cc->jit_frame);
            break;
        case VALUE_TYPE_F64:
            value = pop_f64(cc->jit_frame);
            break;
        default:
            bh_assert(0);
            return false;
    }

    if (p_value != NULL) {
        *p_value = value;
    }
    return true;
}
/* Translate opcode drop: discard the top value of the WASM value stack.
 * is_drop_32 indicates whether a 32-bit-class value is expected. */
bool
jit_compile_op_drop(JitCompContext *cc, bool is_drop_32)
{
    return pop_value_from_wasm_stack(cc, is_drop_32, NULL, NULL);
}
/* Translate opcode select: pop condition and two candidate values of equal
 * type, then emit a conditional select that yields the first value when the
 * condition is non-zero, else the second. */
bool
jit_compile_op_select(JitCompContext *cc, bool is_select_32)
{
    JitReg val1, val2, cond, selected;
    uint8 val1_type, val2_type;

    POP_I32(cond);

    /* Stack order: cond on top, then val2, then val1. */
    if (!pop_value_from_wasm_stack(cc, is_select_32, &val2, &val2_type)
        || !pop_value_from_wasm_stack(cc, is_select_32, &val1, &val1_type)) {
        return false;
    }

    if (val1_type != val2_type) {
        jit_set_last_error(cc, "invalid stack values with different type");
        return false;
    }

    /* Result register width matches the operands' type. */
    switch (val1_type) {
        case VALUE_TYPE_I32:
            selected = jit_cc_new_reg_I32(cc);
            break;
        case VALUE_TYPE_I64:
            selected = jit_cc_new_reg_I64(cc);
            break;
        case VALUE_TYPE_F32:
            selected = jit_cc_new_reg_F32(cc);
            break;
        case VALUE_TYPE_F64:
            selected = jit_cc_new_reg_F64(cc);
            break;
        default:
            bh_assert(0);
            return false;
    }

    /* selected = (cond != 0) ? val1 : val2 */
    GEN_INSN(CMP, cc->cmp_reg, cond, NEW_CONST(I32, 0));
    GEN_INSN(SELECTNE, selected, cc->cmp_reg, val1, val2);
    PUSH(selected, val1_type);
    return true;
fail:
    return false;
}

View File

@ -0,0 +1,25 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_PARAMETRIC_H_
#define _JIT_EMIT_PARAMETRIC_H_

#include "../jit_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Fast JIT frontend emitters for the WASM parametric opcodes (drop/select).
 * Each returns true on success, false on failure. */

bool
jit_compile_op_drop(JitCompContext *cc, bool is_drop_32);

bool
jit_compile_op_select(JitCompContext *cc, bool is_select_32);

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_PARAMETRIC_H_ */

View File

@ -0,0 +1,318 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_table.h"
#include "jit_emit_exception.h"
#include "jit_emit_function.h"
#include "../../interpreter/wasm_runtime.h"
#include "../jit_frontend.h"
#if WASM_ENABLE_REF_TYPES != 0
/* Translate opcode elem.drop: mark element segment tbl_seg_idx as dropped
 * by storing true into its is_dropped field at run time. */
bool
jit_compile_op_elem_drop(JitCompContext *cc, uint32 tbl_seg_idx)
{
    JitReg module, tbl_segs;

    module = get_module_reg(cc->jit_frame);

    /* Load module->table_segments, then write into the segment's
       is_dropped flag at a compile-time-constant offset. */
    tbl_segs = jit_cc_new_reg_ptr(cc);
    GEN_INSN(LDPTR, tbl_segs, module,
             NEW_CONST(I32, offsetof(WASMModule, table_segments)));
    GEN_INSN(STI32, NEW_CONST(I32, true), tbl_segs,
             NEW_CONST(I32, tbl_seg_idx * sizeof(WASMTableSeg)
                                + offsetof(WASMTableSeg, is_dropped)));
    return true;
}
/* Translate opcode table.get: pop an element index, bounds-check it against
 * the table's current size, and push the 32-bit element at that index. */
bool
jit_compile_op_table_get(JitCompContext *cc, uint32 tbl_idx)
{
    JitReg elem_idx, tbl_sz, tbl_data, elem_idx_long, offset, res;

    POP_I32(elem_idx);

    /* if (elem_idx >= tbl_sz) goto exception; */
    tbl_sz = get_table_cur_size_reg(cc->jit_frame, tbl_idx);
    GEN_INSN(CMP, cc->cmp_reg, elem_idx, tbl_sz);
    if (!jit_emit_exception(cc, JIT_EXCE_OUT_OF_BOUNDS_TABLE_ACCESS,
                            JIT_OP_BGEU, cc->cmp_reg, NULL))
        goto fail;

    /* Compute the byte offset in 64 bits so idx * 4 cannot wrap. */
    elem_idx_long = jit_cc_new_reg_I64(cc);
    GEN_INSN(I32TOI64, elem_idx_long, elem_idx);

    offset = jit_cc_new_reg_I64(cc);
    GEN_INSN(MUL, offset, elem_idx_long, NEW_CONST(I64, sizeof(uint32)));

    res = jit_cc_new_reg_I32(cc);
    tbl_data = get_table_data_reg(cc->jit_frame, tbl_idx);
    GEN_INSN(LDI32, res, tbl_data, offset);
    PUSH_I32(res);

    return true;
fail:
    return false;
}
/* Translate opcode table.set: pop a value and an element index,
 * bounds-check the index, and store the 32-bit value into the table. */
bool
jit_compile_op_table_set(JitCompContext *cc, uint32 tbl_idx)
{
    JitReg elem_idx, elem_val, tbl_sz, tbl_data, elem_idx_long, offset;

    POP_I32(elem_val);
    POP_I32(elem_idx);

    /* if (elem_idx >= tbl_sz) goto exception; */
    tbl_sz = get_table_cur_size_reg(cc->jit_frame, tbl_idx);
    GEN_INSN(CMP, cc->cmp_reg, elem_idx, tbl_sz);
    if (!jit_emit_exception(cc, JIT_EXCE_OUT_OF_BOUNDS_TABLE_ACCESS,
                            JIT_OP_BGEU, cc->cmp_reg, NULL))
        goto fail;

    /* Compute the byte offset in 64 bits so idx * 4 cannot wrap. */
    elem_idx_long = jit_cc_new_reg_I64(cc);
    GEN_INSN(I32TOI64, elem_idx_long, elem_idx);

    offset = jit_cc_new_reg_I64(cc);
    GEN_INSN(MUL, offset, elem_idx_long, NEW_CONST(I64, sizeof(uint32)));

    tbl_data = get_table_data_reg(cc->jit_frame, tbl_idx);
    GEN_INSN(STI32, elem_val, tbl_data, offset);

    return true;
fail:
    return false;
}
/* Runtime helper called from jitted code: copy len entries from element
 * segment elem_idx (starting at src) into table tbl_idx (starting at dst).
 * Returns 0 on success; returns -1 after setting an "out of bounds table
 * access" exception when either range is invalid. */
static int
wasm_init_table(WASMModuleInstance *inst, uint32 tbl_idx, uint32 elem_idx,
                uint32 dst, uint32 len, uint32 src)
{
    WASMTableInstance *tbl;
    uint32 tbl_sz;
    WASMTableSeg *elem;
    uint32 elem_len;

    tbl = inst->tables[tbl_idx];
    tbl_sz = tbl->cur_size;
    if (dst > tbl_sz || tbl_sz - dst < len)
        goto out_of_bounds;

    elem = inst->module->table_segments + elem_idx;
    elem_len = elem->function_count;
    if (src > elem_len || elem_len - src < len)
        goto out_of_bounds;

    /* Table elements live inline after base_addr; each is a uint32
       function index. */
    bh_memcpy_s((uint8 *)(tbl) + offsetof(WASMTableInstance, base_addr)
                    + dst * sizeof(uint32),
                (uint32)((tbl_sz - dst) * sizeof(uint32)),
                elem->func_indexes + src, (uint32)(len * sizeof(uint32)));
    return 0;
out_of_bounds:
    wasm_set_exception(inst, "out of bounds table access");
    return -1;
}
/* Translate opcode table.init: pop len/src/dst and emit a native call to
 * wasm_init_table(inst, tbl_idx, tbl_seg_idx, dst, len, src); branch to the
 * exception handler on a negative result. */
bool
jit_compile_op_table_init(JitCompContext *cc, uint32 tbl_idx,
                          uint32 tbl_seg_idx)
{
    JitReg len, src, dst, res;
    JitReg args[6] = { 0 };

    POP_I32(len);
    POP_I32(src);
    POP_I32(dst);

    res = jit_cc_new_reg_I32(cc);
    /* Argument order matches wasm_init_table's parameter list above. */
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = NEW_CONST(I32, tbl_idx);
    args[2] = NEW_CONST(I32, tbl_seg_idx);
    args[3] = dst;
    args[4] = len;
    args[5] = src;

    if (!jit_emit_callnative(cc, wasm_init_table, res, args,
                             sizeof(args) / sizeof(args[0])))
        goto fail;

    /* wasm_init_table returns -1 with the exception already set. */
    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
    if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BLTS,
                            cc->cmp_reg, NULL))
        goto fail;

    return true;
fail:
    return false;
}
/* Runtime helper called from jitted code: copy len entries between tables
 * (possibly the same table; bh_memmove_s allows overlap).
 * Returns 0 on success; returns -1 after setting an "out of bounds table
 * access" exception when either range is invalid. */
static int
wasm_copy_table(WASMModuleInstance *inst, uint32 src_tbl_idx,
                uint32 dst_tbl_idx, uint32 dst_offset, uint32 len,
                uint32 src_offset)
{
    WASMTableInstance *src_tbl, *dst_tbl;
    uint32 src_tbl_sz, dst_tbl_sz;

    src_tbl = inst->tables[src_tbl_idx];
    src_tbl_sz = src_tbl->cur_size;
    if (src_offset > src_tbl_sz || src_tbl_sz - src_offset < len)
        goto out_of_bounds;

    dst_tbl = inst->tables[dst_tbl_idx];
    dst_tbl_sz = dst_tbl->cur_size;
    if (dst_offset > dst_tbl_sz || dst_tbl_sz - dst_offset < len)
        goto out_of_bounds;

    /* Table elements live inline after base_addr, one uint32 each. */
    bh_memmove_s((uint8 *)(dst_tbl) + offsetof(WASMTableInstance, base_addr)
                     + dst_offset * sizeof(uint32),
                 (uint32)((dst_tbl_sz - dst_offset) * sizeof(uint32)),
                 (uint8 *)(src_tbl) + offsetof(WASMTableInstance, base_addr)
                     + src_offset * sizeof(uint32),
                 (uint32)(len * sizeof(uint32)));
    return 0;
out_of_bounds:
    wasm_set_exception(inst, "out of bounds table access");
    return -1;
}
/* Translate opcode table.copy: pop len/src/dst and emit a native call to
 * wasm_copy_table(inst, src_tbl_idx, dst_tbl_idx, dst, len, src); branch to
 * the exception handler on a negative result. */
bool
jit_compile_op_table_copy(JitCompContext *cc, uint32 src_tbl_idx,
                          uint32 dst_tbl_idx)
{
    JitReg len, src, dst, res;
    JitReg args[6] = { 0 };

    POP_I32(len);
    POP_I32(src);
    POP_I32(dst);

    res = jit_cc_new_reg_I32(cc);
    /* Argument order matches wasm_copy_table's parameter list above. */
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = NEW_CONST(I32, src_tbl_idx);
    args[2] = NEW_CONST(I32, dst_tbl_idx);
    args[3] = dst;
    args[4] = len;
    args[5] = src;

    if (!jit_emit_callnative(cc, wasm_copy_table, res, args,
                             sizeof(args) / sizeof(args[0])))
        goto fail;

    /* wasm_copy_table returns -1 with the exception already set. */
    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
    if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BLTS,
                            cc->cmp_reg, NULL))
        goto fail;

    return true;
fail:
    return false;
}
/* Translate opcode table.size: push the current size of table tbl_idx. */
bool
jit_compile_op_table_size(JitCompContext *cc, uint32 tbl_idx)
{
    JitReg res;

    res = get_table_cur_size_reg(cc->jit_frame, tbl_idx);
    PUSH_I32(res);

    return true;
/* reached from the PUSH_I32 macro on failure (presumably) */
fail:
    return false;
}
/* Translate opcode table.grow: pop n (delta) and val (init value), call
 * wasm_enlarge_table natively, and push the previous table size on success
 * or -1 on failure, per the WASM spec. */
bool
jit_compile_op_table_grow(JitCompContext *cc, uint32 tbl_idx)
{
    JitReg tbl_sz, n, val, enlarge_ret, res;
    JitReg args[4] = { 0 };

    POP_I32(n);
    POP_I32(val);

    /* Read the size BEFORE growing — this is the opcode's result. */
    tbl_sz = get_table_cur_size_reg(cc->jit_frame, tbl_idx);

    enlarge_ret = jit_cc_new_reg_I32(cc);
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = NEW_CONST(I32, tbl_idx);
    args[2] = n;
    args[3] = val;

    if (!jit_emit_callnative(cc, wasm_enlarge_table, enlarge_ret, args,
                             sizeof(args) / sizeof(args[0])))
        goto fail;

    /* Convert bool to uint32 */
    GEN_INSN(AND, enlarge_ret, enlarge_ret, NEW_CONST(I32, 0xFF));

    /* res = (enlarge_ret == 1) ? old size : -1 */
    res = jit_cc_new_reg_I32(cc);
    GEN_INSN(CMP, cc->cmp_reg, enlarge_ret, NEW_CONST(I32, 1));
    GEN_INSN(SELECTEQ, res, cc->cmp_reg, tbl_sz, NEW_CONST(I32, -1));
    PUSH_I32(res);

    /* Ensure a refresh in next get memory related registers */
    clear_table_regs(cc->jit_frame);
    return true;
fail:
    return false;
}
/* Runtime helper called from jitted code: set len entries of table tbl_idx,
 * starting at dst, to val. Returns 0 on success; returns -1 after setting
 * an "out of bounds table access" exception when the range is invalid. */
static int
wasm_fill_table(WASMModuleInstance *inst, uint32 tbl_idx, uint32 dst,
                uint32 val, uint32 len)
{
    WASMTableInstance *table = inst->tables[tbl_idx];
    uint32 size = table->cur_size;
    uint32 *elems;
    uint32 i;

    if (dst > size || size - dst < len) {
        wasm_set_exception(inst, "out of bounds table access");
        return -1;
    }

    elems = (uint32 *)(table->base_addr);
    for (i = 0; i < len; i++)
        elems[dst + i] = val;
    return 0;
}
/* Translate opcode table.fill: pop len/val/dst and emit a native call to
 * wasm_fill_table(inst, tbl_idx, dst, val, len); branch to the exception
 * handler on a negative result. */
bool
jit_compile_op_table_fill(JitCompContext *cc, uint32 tbl_idx)
{
    JitReg len, val, dst, res;
    JitReg args[5] = { 0 };

    POP_I32(len);
    POP_I32(val);
    POP_I32(dst);

    res = jit_cc_new_reg_I32(cc);
    /* Argument order matches wasm_fill_table's parameter list above. */
    args[0] = get_module_inst_reg(cc->jit_frame);
    args[1] = NEW_CONST(I32, tbl_idx);
    args[2] = dst;
    args[3] = val;
    args[4] = len;

    if (!jit_emit_callnative(cc, wasm_fill_table, res, args,
                             sizeof(args) / sizeof(args[0])))
        goto fail;

    /* wasm_fill_table returns -1 with the exception already set. */
    GEN_INSN(CMP, cc->cmp_reg, res, NEW_CONST(I32, 0));
    if (!jit_emit_exception(cc, JIT_EXCE_ALREADY_THROWN, JIT_OP_BLTS,
                            cc->cmp_reg, NULL))
        goto fail;

    return true;
fail:
    return false;
}
#endif

View File

@ -0,0 +1,47 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_TABLE_H_
#define _JIT_EMIT_TABLE_H_

#include "../jit_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

#if WASM_ENABLE_REF_TYPES != 0
/* Fast JIT frontend emitters for reference-types table opcodes. Each
 * returns true on success, false on failure. */

bool
jit_compile_op_elem_drop(JitCompContext *cc, uint32 tbl_seg_idx);

bool
jit_compile_op_table_get(JitCompContext *cc, uint32 tbl_idx);

bool
jit_compile_op_table_set(JitCompContext *cc, uint32 tbl_idx);

bool
jit_compile_op_table_init(JitCompContext *cc, uint32 tbl_idx,
                          uint32 tbl_seg_idx);

bool
jit_compile_op_table_copy(JitCompContext *cc, uint32 src_tbl_idx,
                          uint32 dst_tbl_idx);

bool
jit_compile_op_table_size(JitCompContext *cc, uint32 tbl_idx);

bool
jit_compile_op_table_grow(JitCompContext *cc, uint32 tbl_idx);

bool
jit_compile_op_table_fill(JitCompContext *cc, uint32 tbl_idx);
#endif

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif

View File

@ -0,0 +1,323 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_variable.h"
#include "jit_emit_exception.h"
#include "../jit_frontend.h"
/* Validate a local index against the function's params + declared locals;
 * on overflow record an error in cc and jump to the caller's fail label.
 * Requires a `fail:` label and a JitCompContext *cc in the calling scope. */
#define CHECK_LOCAL(idx)                                                    \
    do {                                                                    \
        if (idx                                                             \
            >= wasm_func->func_type->param_count + wasm_func->local_count) { \
            jit_set_last_error(cc, "local index out of range");             \
            goto fail;                                                      \
        }                                                                   \
    } while (0)
/* Return the WASM value type of local local_idx: parameters come first
 * (typed by the function signature), then the declared locals. */
static uint8
get_local_type(const WASMFunction *wasm_func, uint32 local_idx)
{
    uint32 param_count = wasm_func->func_type->param_count;

    if (local_idx < param_count)
        return wasm_func->func_type->types[local_idx];
    return wasm_func->local_types[local_idx - param_count];
}
/* Translate opcode local.get: read the local at its frame offset and push
 * it on the value stack, dispatching on the local's WASM type. */
bool
jit_compile_op_get_local(JitCompContext *cc, uint32 local_idx)
{
    WASMFunction *wasm_func = cc->cur_wasm_func;
    uint16 *local_offsets = wasm_func->local_offsets;
    uint16 local_offset;
    uint8 local_type;
    JitReg value = 0;

    CHECK_LOCAL(local_idx);

    local_offset = local_offsets[local_idx];
    local_type = get_local_type(wasm_func, local_idx);

    switch (local_type) {
        case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
        /* references are stored as 32-bit values here */
        case VALUE_TYPE_EXTERNREF:
        case VALUE_TYPE_FUNCREF:
#endif
            value = local_i32(cc->jit_frame, local_offset);
            break;
        case VALUE_TYPE_I64:
            value = local_i64(cc->jit_frame, local_offset);
            break;
        case VALUE_TYPE_F32:
            value = local_f32(cc->jit_frame, local_offset);
            break;
        case VALUE_TYPE_F64:
            value = local_f64(cc->jit_frame, local_offset);
            break;
        default:
            bh_assert(0);
            break;
    }

    PUSH(value, local_type);
    return true;
fail:
    return false;
}
/* Translate opcode local.set: pop a value of the local's type and store it
 * at the local's frame offset. */
bool
jit_compile_op_set_local(JitCompContext *cc, uint32 local_idx)
{
    WASMFunction *wasm_func = cc->cur_wasm_func;
    uint16 *local_offsets = wasm_func->local_offsets;
    uint16 local_offset;
    uint8 local_type;
    JitReg value;

    CHECK_LOCAL(local_idx);

    local_offset = local_offsets[local_idx];
    local_type = get_local_type(wasm_func, local_idx);

    switch (local_type) {
        case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
        /* references are stored as 32-bit values here */
        case VALUE_TYPE_EXTERNREF:
        case VALUE_TYPE_FUNCREF:
#endif
            POP_I32(value);
            set_local_i32(cc->jit_frame, local_offset, value);
            break;
        case VALUE_TYPE_I64:
            POP_I64(value);
            set_local_i64(cc->jit_frame, local_offset, value);
            break;
        case VALUE_TYPE_F32:
            POP_F32(value);
            set_local_f32(cc->jit_frame, local_offset, value);
            break;
        case VALUE_TYPE_F64:
            POP_F64(value);
            set_local_f64(cc->jit_frame, local_offset, value);
            break;
        default:
            bh_assert(0);
            break;
    }

    return true;
fail:
    return false;
}
/* Translate opcode local.tee: like local.set, but the value is pushed back
 * on the stack after being stored into the local. */
bool
jit_compile_op_tee_local(JitCompContext *cc, uint32 local_idx)
{
    WASMFunction *wasm_func = cc->cur_wasm_func;
    uint16 *local_offsets = wasm_func->local_offsets;
    uint16 local_offset;
    uint8 local_type;
    JitReg value = 0;

    CHECK_LOCAL(local_idx);

    local_offset = local_offsets[local_idx];
    local_type = get_local_type(wasm_func, local_idx);

    switch (local_type) {
        case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
        /* references are stored as 32-bit values here */
        case VALUE_TYPE_EXTERNREF:
        case VALUE_TYPE_FUNCREF:
#endif
            POP_I32(value);
            set_local_i32(cc->jit_frame, local_offset, value);
            PUSH_I32(value);
            break;
        case VALUE_TYPE_I64:
            POP_I64(value);
            set_local_i64(cc->jit_frame, local_offset, value);
            PUSH_I64(value);
            break;
        case VALUE_TYPE_F32:
            POP_F32(value);
            set_local_f32(cc->jit_frame, local_offset, value);
            PUSH_F32(value);
            break;
        case VALUE_TYPE_F64:
            POP_F64(value);
            set_local_f64(cc->jit_frame, local_offset, value);
            PUSH_F64(value);
            break;
        default:
            bh_assert(0);
            goto fail;
    }

    return true;
fail:
    return false;
}
/* Return the WASM value type of global global_idx: imported globals come
 * first, followed by the module's own globals. */
static uint8
get_global_type(const WASMModule *module, uint32 global_idx)
{
    uint32 import_count = module->import_global_count;

    return global_idx < import_count
               ? module->import_globals[global_idx].u.global.type
               : module->globals[global_idx - import_count].type;
}
/* Return the offset of global global_idx within the instance's global data
 * area; imported globals come first, followed by the module's own. */
static uint32
get_global_data_offset(const WASMModule *module, uint32 global_idx)
{
    uint32 import_count = module->import_global_count;

    return global_idx < import_count
               ? module->import_globals[global_idx].u.global.data_offset
               : module->globals[global_idx - import_count].data_offset;
}
/* Translate opcode global.get: load the global's value from the instance's
 * global data area at its compile-time-known offset and push it. */
bool
jit_compile_op_get_global(JitCompContext *cc, uint32 global_idx)
{
    uint32 data_offset;
    uint8 global_type = 0;
    JitReg value = 0;

    bh_assert(global_idx < cc->cur_wasm_module->import_global_count
                               + cc->cur_wasm_module->global_count);

    data_offset = get_global_data_offset(cc->cur_wasm_module, global_idx);
    global_type = get_global_type(cc->cur_wasm_module, global_idx);

    switch (global_type) {
        case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
        /* references are stored as 32-bit values here */
        case VALUE_TYPE_EXTERNREF:
        case VALUE_TYPE_FUNCREF:
#endif
        {
            value = jit_cc_new_reg_I32(cc);
            GEN_INSN(LDI32, value, get_global_data_reg(cc->jit_frame),
                     NEW_CONST(I32, data_offset));
            break;
        }
        case VALUE_TYPE_I64:
        {
            value = jit_cc_new_reg_I64(cc);
            GEN_INSN(LDI64, value, get_global_data_reg(cc->jit_frame),
                     NEW_CONST(I32, data_offset));
            break;
        }
        case VALUE_TYPE_F32:
        {
            value = jit_cc_new_reg_F32(cc);
            GEN_INSN(LDF32, value, get_global_data_reg(cc->jit_frame),
                     NEW_CONST(I32, data_offset));
            break;
        }
        case VALUE_TYPE_F64:
        {
            value = jit_cc_new_reg_F64(cc);
            GEN_INSN(LDF64, value, get_global_data_reg(cc->jit_frame),
                     NEW_CONST(I32, data_offset));
            break;
        }
        default:
        {
            jit_set_last_error(cc, "unexpected global type");
            goto fail;
        }
    }

    PUSH(value, global_type);
    return true;
fail:
    return false;
}
/* Translate opcode global.set: pop a value of the global's type and store
 * it into the instance's global data area.
 * is_aux_stack marks the global that backs the auxiliary stack pointer; in
 * that case overflow/underflow checks against the aux stack bound/bottom
 * are emitted before the store. */
bool
jit_compile_op_set_global(JitCompContext *cc, uint32 global_idx,
                          bool is_aux_stack)
{
    uint32 data_offset;
    uint8 global_type = 0;
    JitReg value = 0;

    bh_assert(global_idx < cc->cur_wasm_module->import_global_count
                               + cc->cur_wasm_module->global_count);

    data_offset = get_global_data_offset(cc->cur_wasm_module, global_idx);
    global_type = get_global_type(cc->cur_wasm_module, global_idx);

    switch (global_type) {
        case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
        /* references are stored as 32-bit values here */
        case VALUE_TYPE_EXTERNREF:
        case VALUE_TYPE_FUNCREF:
#endif
        {
            POP_I32(value);
            if (is_aux_stack) {
                JitReg aux_stack_bound = get_aux_stack_bound_reg(cc->jit_frame);
                JitReg aux_stack_bottom =
                    get_aux_stack_bottom_reg(cc->jit_frame);
                /* overflow when new sp <= bound (stack presumably grows
                   downward — confirm against the runtime's aux stack) */
                GEN_INSN(CMP, cc->cmp_reg, value, aux_stack_bound);
                if (!(jit_emit_exception(cc, JIT_EXCE_AUX_STACK_OVERFLOW,
                                         JIT_OP_BLEU, cc->cmp_reg, NULL)))
                    goto fail;
                /* underflow when new sp > bottom */
                GEN_INSN(CMP, cc->cmp_reg, value, aux_stack_bottom);
                if (!(jit_emit_exception(cc, JIT_EXCE_AUX_STACK_UNDERFLOW,
                                         JIT_OP_BGTU, cc->cmp_reg, NULL)))
                    goto fail;
            }
            GEN_INSN(STI32, value, get_global_data_reg(cc->jit_frame),
                     NEW_CONST(I32, data_offset));
            break;
        }
        case VALUE_TYPE_I64:
        {
            POP_I64(value);
            GEN_INSN(STI64, value, get_global_data_reg(cc->jit_frame),
                     NEW_CONST(I32, data_offset));
            break;
        }
        case VALUE_TYPE_F32:
        {
            POP_F32(value);
            GEN_INSN(STF32, value, get_global_data_reg(cc->jit_frame),
                     NEW_CONST(I32, data_offset));
            break;
        }
        case VALUE_TYPE_F64:
        {
            POP_F64(value);
            GEN_INSN(STF64, value, get_global_data_reg(cc->jit_frame),
                     NEW_CONST(I32, data_offset));
            break;
        }
        default:
        {
            jit_set_last_error(cc, "unexpected global type");
            goto fail;
        }
    }

    return true;
fail:
    return false;
}

View File

@ -0,0 +1,35 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_VARIABLE_H_
#define _JIT_EMIT_VARIABLE_H_

#include "../jit_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Fast JIT frontend emitters for WASM variable opcodes (local/global
 * access). Each returns true on success, false on failure. */

bool
jit_compile_op_get_local(JitCompContext *cc, uint32 local_idx);

bool
jit_compile_op_set_local(JitCompContext *cc, uint32 local_idx);

bool
jit_compile_op_tee_local(JitCompContext *cc, uint32 local_idx);

bool
jit_compile_op_get_global(JitCompContext *cc, uint32 global_idx);

/* is_aux_stack: the global is the auxiliary stack pointer, so bound checks
 * are emitted before the store. */
bool
jit_compile_op_set_global(JitCompContext *cc, uint32 global_idx,
                          bool is_aux_stack);

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_VARIABLE_H_ */

View File

@ -0,0 +1,95 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
# Build fragment for the WAMR Fast JIT engine: collects the engine's sources
# into IWASM_FAST_JIT_SOURCE and fetches the third-party codegen library
# (asmjit) plus, for dump builds, the zydis/zycore disassembler.
set (IWASM_FAST_JIT_DIR ${CMAKE_CURRENT_LIST_DIR})

add_definitions(-DWASM_ENABLE_FAST_JIT=1)
if (WAMR_BUILD_FAST_JIT_DUMP EQUAL 1)
    add_definitions(-DWASM_ENABLE_FAST_JIT_DUMP=1)
endif ()

include_directories (${IWASM_FAST_JIT_DIR})

# Third-party dependencies are only needed for the x86-64 codegen backend.
if (WAMR_BUILD_TARGET STREQUAL "X86_64" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
    include(FetchContent)

    # NOTE(review): no GIT_TAG is pinned for asmjit, so a fresh configure
    # tracks the upstream default branch — consider pinning a known commit
    # (zydis below is pinned).
    if (NOT WAMR_BUILD_PLATFORM STREQUAL "linux-sgx")
        FetchContent_Declare(
            asmjit
            GIT_REPOSITORY https://github.com/asmjit/asmjit.git
        )
    else ()
        # SGX builds apply a local patch to asmjit after cloning.
        FetchContent_Declare(
            asmjit
            GIT_REPOSITORY https://github.com/asmjit/asmjit.git
            PATCH_COMMAND git apply ${IWASM_FAST_JIT_DIR}/asmjit_sgx_patch.diff
        )
    endif ()
    FetchContent_GetProperties(asmjit)
    if (NOT asmjit_POPULATED)
        message ("-- Fetching asmjit ..")
        FetchContent_Populate(asmjit)
        # Trim asmjit down to the static x86-only feature set used here.
        add_definitions(-DASMJIT_STATIC)
        add_definitions(-DASMJIT_NO_DEPRECATED)
        add_definitions(-DASMJIT_NO_BUILDER)
        add_definitions(-DASMJIT_NO_COMPILER)
        add_definitions(-DASMJIT_NO_JIT)
        add_definitions(-DASMJIT_NO_LOGGING)
        add_definitions(-DASMJIT_NO_TEXT)
        add_definitions(-DASMJIT_NO_VALIDATION)
        add_definitions(-DASMJIT_NO_INTROSPECTION)
        add_definitions(-DASMJIT_NO_INTRINSICS)
        add_definitions(-DASMJIT_NO_AARCH64)
        add_definitions(-DASMJIT_NO_AARCH32)
        include_directories("${asmjit_SOURCE_DIR}/src")
        add_subdirectory(${asmjit_SOURCE_DIR} ${asmjit_BINARY_DIR} EXCLUDE_FROM_ALL)
        # asmjit translation units are compiled directly into this build.
        file (GLOB_RECURSE cpp_source_asmjit
            ${asmjit_SOURCE_DIR}/src/asmjit/core/*.cpp
            ${asmjit_SOURCE_DIR}/src/asmjit/x86/*.cpp
        )
    endif ()
    if (WAMR_BUILD_FAST_JIT_DUMP EQUAL 1)
        # zydis (with its dependency zycore) disassembles the generated
        # native code for the dump passes.
        FetchContent_Declare(
            zycore
            GIT_REPOSITORY https://github.com/zyantific/zycore-c.git
        )
        FetchContent_GetProperties(zycore)
        if (NOT zycore_POPULATED)
            message ("-- Fetching zycore ..")
            FetchContent_Populate(zycore)
            # NOTE(review): these ZYDIS_* options look copy-pasted into the
            # zycore section; harmless, but presumably meant for zydis only.
            option(ZYDIS_BUILD_TOOLS "" OFF)
            option(ZYDIS_BUILD_EXAMPLES "" OFF)
            include_directories("${zycore_SOURCE_DIR}/include")
            include_directories("${zycore_BINARY_DIR}")
            add_subdirectory(${zycore_SOURCE_DIR} ${zycore_BINARY_DIR} EXCLUDE_FROM_ALL)
            file (GLOB_RECURSE c_source_zycore ${zycore_SOURCE_DIR}/src/*.c)
        endif ()
        FetchContent_Declare(
            zydis
            GIT_REPOSITORY https://github.com/zyantific/zydis.git
            GIT_TAG e14a07895136182a5b53e181eec3b1c6e0b434de
        )
        FetchContent_GetProperties(zydis)
        if (NOT zydis_POPULATED)
            message ("-- Fetching zydis ..")
            FetchContent_Populate(zydis)
            option(ZYDIS_BUILD_TOOLS "" OFF)
            option(ZYDIS_BUILD_EXAMPLES "" OFF)
            include_directories("${zydis_BINARY_DIR}")
            include_directories("${zydis_SOURCE_DIR}/include")
            include_directories("${zydis_SOURCE_DIR}/src")
            add_subdirectory(${zydis_SOURCE_DIR} ${zydis_BINARY_DIR} EXCLUDE_FROM_ALL)
            file (GLOB_RECURSE c_source_zydis ${zydis_SOURCE_DIR}/src/*.c)
        endif ()
    endif ()
endif ()

# Engine sources: C IR/frontend code plus per-target C++ codegen.
file (GLOB c_source_jit ${IWASM_FAST_JIT_DIR}/*.c ${IWASM_FAST_JIT_DIR}/fe/*.c)

if (WAMR_BUILD_TARGET STREQUAL "X86_64" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
    file (GLOB_RECURSE cpp_source_jit_cg ${IWASM_FAST_JIT_DIR}/cg/x86-64/*.cpp)
else ()
    message (FATAL_ERROR "Fast JIT codegen for target ${WAMR_BUILD_TARGET} isn't implemented")
endif ()

set (IWASM_FAST_JIT_SOURCE ${c_source_jit} ${cpp_source_jit_cg}
     ${cpp_source_asmjit} ${c_source_zycore} ${c_source_zydis})

View File

@ -0,0 +1,65 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_codecache.h"
#include "mem_alloc.h"
#include "jit_compiler.h"
static void *code_cache_pool = NULL;
static uint32 code_cache_pool_size = 0;
static mem_allocator_t code_cache_pool_allocator = NULL;
/* Reserve the JIT code cache: a single mapping of code_cache_size bytes
 * managed by a mem_allocator placed over it. Returns true on success;
 * partial state is released on failure.
 * NOTE(review): the mapping is READ|WRITE|EXEC for its whole lifetime
 * (no W^X split) — confirm this is acceptable on all target platforms. */
bool
jit_code_cache_init(uint32 code_cache_size)
{
    int map_prot = MMAP_PROT_READ | MMAP_PROT_WRITE | MMAP_PROT_EXEC;
    int map_flags = MMAP_MAP_NONE;

    if (!(code_cache_pool =
              os_mmap(NULL, code_cache_size, map_prot, map_flags))) {
        return false;
    }

    if (!(code_cache_pool_allocator =
              mem_allocator_create(code_cache_pool, code_cache_size))) {
        os_munmap(code_cache_pool, code_cache_size);
        code_cache_pool = NULL;
        return false;
    }

    code_cache_pool_size = code_cache_size;
    return true;
}
void
jit_code_cache_destroy()
{
mem_allocator_destroy(code_cache_pool_allocator);
os_munmap(code_cache_pool, code_cache_pool_size);
}
/* Allocate size bytes from the executable code cache; returns NULL when the
 * cache is exhausted. */
void *
jit_code_cache_alloc(uint32 size)
{
    return mem_allocator_malloc(code_cache_pool_allocator, size);
}
/* Return a block obtained from jit_code_cache_alloc to the cache.
 * NULL is accepted and ignored. */
void
jit_code_cache_free(void *ptr)
{
    if (ptr)
        mem_allocator_free(code_cache_pool_allocator, ptr);
}
/* Compiler pass: publish the generated code by recording its start address
 * both on the WASM function and in the module's fast_jit_func_ptrs table
 * (indexed by defined-function index, i.e. minus imports). Always succeeds. */
bool
jit_pass_register_jitted_code(JitCompContext *cc)
{
    uint32 jit_func_idx =
        cc->cur_wasm_func_idx - cc->cur_wasm_module->import_function_count;

    cc->cur_wasm_func->fast_jit_jitted_code = cc->jitted_addr_begin;
    cc->cur_wasm_module->fast_jit_func_ptrs[jit_func_idx] =
        cc->jitted_addr_begin;
    return true;
}

View File

@ -0,0 +1,31 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_CODE_CACHE_H_
#define _JIT_CODE_CACHE_H_

#include "bh_platform.h"

#ifdef __cplusplus
extern "C" {
#endif

/* Create the executable code cache of the given size in bytes.
 * Returns true on success. */
bool
jit_code_cache_init(uint32 code_cache_size);

/* Destroy the code cache created by jit_code_cache_init. */
void
jit_code_cache_destroy();

/* Allocate size bytes from the code cache; NULL when exhausted. */
void *
jit_code_cache_alloc(uint32 size);

/* Free a block previously returned by jit_code_cache_alloc (NULL ok). */
void
jit_code_cache_free(void *ptr);

#ifdef __cplusplus
}
#endif

#endif /* end of _JIT_CODE_CACHE_H_ */

View File

@ -0,0 +1,22 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_compiler.h"
#include "jit_codegen.h"
/* Compiler pass: lower IR operations the target cannot encode directly
 * into supported forms (delegates to the codegen backend). */
bool
jit_pass_lower_cg(JitCompContext *cc)
{
    return jit_codegen_lower(cc);
}
/* Compiler pass: enable jitted-address annotations on labels, then emit
 * native code for the compilation context. Returns false if either step
 * fails. */
bool
jit_pass_codegen(JitCompContext *cc)
{
    return jit_annl_enable_jitted_addr(cc) && jit_codegen_gen_native(cc);
}

View File

@ -0,0 +1,84 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_CODEGEN_H_
#define _JIT_CODEGEN_H_

#include "bh_platform.h"
#include "jit_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

/**
 * Initialize codegen module, such as instruction encoder.
 *
 * @return true if succeeded; false if failed.
 */
bool
jit_codegen_init();

/**
 * Destroy codegen module, such as instruction encoder.
 */
void
jit_codegen_destroy();

/**
 * Get hard register information of each kind.
 *
 * @return the JitHardRegInfo array of each kind
 */
const JitHardRegInfo *
jit_codegen_get_hreg_info();

/**
 * Get hard register by name.
 *
 * @param name the name of the hard register
 *
 * @return the hard register of the name
 */
JitReg
jit_codegen_get_hreg_by_name(const char *name);

/**
 * Generate native code for the given compilation context
 *
 * @param cc the compilation context that is ready to do codegen
 *
 * @return true if succeeds, false otherwise
 */
bool
jit_codegen_gen_native(JitCompContext *cc);

/**
 * lower unsupported operations to supported ones for the target.
 *
 * @param cc the compilation context that is ready to do codegen
 *
 * @return true if succeeds, false otherwise
 */
bool
jit_codegen_lower(JitCompContext *cc);

/**
 * Dump native code in the given range to assembly.
 *
 * @param begin_addr begin address of the native code
 * @param end_addr end address of the native code
 */
void
jit_codegen_dump_native(void *begin_addr, void *end_addr);

/**
 * Glue trampoline from the interpreter into jitted code.
 *
 * @param self the current execution environment/thread handle
 * @param info switch information exchanged between interpreter and jit
 * @param pc the jitted code address to enter
 *
 * @return the result of the jitted execution
 */
int
jit_codegen_interp_jitted_glue(void *self, JitInterpSwitchInfo *info, void *pc);

#ifdef __cplusplus
}
#endif

#endif /* end of _JIT_CODEGEN_H_ */

View File

@ -0,0 +1,176 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_compiler.h"
#include "jit_ir.h"
#include "jit_codegen.h"
#include "jit_codecache.h"
#include "../interpreter/wasm.h"
/* One stage of the Fast JIT compilation pipeline. */
typedef struct JitCompilerPass {
    /* Name of the pass. */
    const char *name;
    /* The entry of the compiler pass. */
    bool (*run)(JitCompContext *cc);
} JitCompilerPass;

/* clang-format off */
/* Pass registry. Index 0 is a sentinel so that 0 can terminate the pass
 * sequences below. Indexes: 1 dump, 2 update_cfg, 3 frontend, 4 lower_cg,
 * 5 regalloc, 6 codegen, 7 register_jitted_code. */
static JitCompilerPass compiler_passes[] = {
    { NULL, NULL },
#define REG_PASS(name) { #name, jit_pass_##name }
    REG_PASS(dump),
    REG_PASS(update_cfg),
    REG_PASS(frontend),
    REG_PASS(lower_cg),
    REG_PASS(regalloc),
    REG_PASS(codegen),
    REG_PASS(register_jitted_code)
#undef REG_PASS
};

/* Number of compiler passes. */
#define COMPILER_PASS_NUM (sizeof(compiler_passes) / sizeof(compiler_passes[0]))

#if WASM_ENABLE_FAST_JIT_DUMP == 0
/* frontend -> lower_cg -> regalloc -> codegen -> register_jitted_code */
static const uint8 compiler_passes_without_dump[] = {
    3, 4, 5, 6, 7, 0
};
#else
/* Same pipeline with update_cfg after the frontend and a dump interleaved
 * between every stage. */
static const uint8 compiler_passes_with_dump[] = {
    3, 2, 1, 4, 1, 5, 1, 6, 1, 7, 0
};
#endif

/* The exported global data of JIT compiler. */
static JitGlobals jit_globals = {
#if WASM_ENABLE_FAST_JIT_DUMP == 0
    .passes = compiler_passes_without_dump,
#else
    .passes = compiler_passes_with_dump,
#endif
    .return_to_interp_from_jitted = NULL
};
/* clang-format on */
/* Run the configured sequence of compiler passes (jit_globals.passes,
   a 0-terminated array of pass numbers) on the given compilation
   context.  Stops and reports failure at the first pass that fails. */
static bool
apply_compiler_passes(JitCompContext *cc)
{
    const uint8 *pass_seq = jit_globals.passes;
    unsigned idx;

    for (idx = 0; pass_seq[idx]; idx++) {
        /* Record the position in the sequence so passes (e.g. dump)
           can report where they run. */
        cc->cur_pass_no = idx;
        bh_assert(pass_seq[idx] < COMPILER_PASS_NUM);

        if (!compiler_passes[pass_seq[idx]].run(cc)) {
            LOG_VERBOSE("JIT: compilation failed at pass[%td] = %s\n",
                        (ptrdiff_t)idx, compiler_passes[pass_seq[idx]].name);
            return false;
        }
    }

    return true;
}
/* Initialize the JIT compiler: set up the code cache (using the
   default size when the option is 0) and then the codegen backend.
   On backend failure the code cache is torn down again. */
bool
jit_compiler_init(const JitCompOptions *options)
{
    uint32 cache_size = FAST_JIT_DEFAULT_CODE_CACHE_SIZE;

    if (options->code_cache_size > 0)
        cache_size = options->code_cache_size;

    LOG_VERBOSE("JIT: compiler init with code cache size: %u\n", cache_size);

    if (!jit_code_cache_init(cache_size))
        return false;

    if (!jit_codegen_init()) {
        /* Undo the code cache initialization. */
        jit_code_cache_destroy();
        return false;
    }

    return true;
}
/* Tear down the JIT compiler: destroy the codegen backend first, then
   the code cache (reverse order of jit_compiler_init). */
void
jit_compiler_destroy()
{
    jit_codegen_destroy();
    jit_code_cache_destroy();
}
/* Return the exported global data of the JIT compiler. */
JitGlobals *
jit_compiler_get_jit_globals()
{
    return &jit_globals;
}

/* Return the name of pass number i, or NULL if i is out of range
   (index 0 is the sentinel entry whose name is NULL). */
const char *
jit_compiler_get_pass_name(unsigned i)
{
    return i < COMPILER_PASS_NUM ? compiler_passes[i].name : NULL;
}
/**
 * JIT-compile one wasm function.
 *
 * @param module the wasm module containing the function
 * @param func_idx module-level function index (imports included);
 *        must refer to a defined (non-import) function
 *
 * @return true if compilation succeeds, false otherwise
 */
bool
jit_compiler_compile(WASMModule *module, uint32 func_idx)
{
    JitCompContext *cc;
    char *last_error;
    bool ret = true;
    /* Initialize compilation context.  NOTE(review): 64 is presumably
       the initial hash-table/capacity hint of the context -- confirm
       against jit_cc_init. */
    if (!(cc = jit_calloc(sizeof(*cc))))
        return false;
    if (!jit_cc_init(cc, 64)) {
        jit_free(cc);
        return false;
    }
    /* module->functions[] holds only defined functions, hence the
       offset by the import count. */
    cc->cur_wasm_module = module;
    cc->cur_wasm_func =
        module->functions[func_idx - module->import_function_count];
    cc->cur_wasm_func_idx = func_idx;
    /* The memory space is known unchanged during this function when it
       neither grows memory nor calls other functions (which could grow
       it), or when the module can never grow its memory at all. */
    cc->mem_space_unchanged = (!cc->cur_wasm_func->has_op_memory_grow
                               && !cc->cur_wasm_func->has_op_func_call)
                              || (!module->possible_memory_grow);
    /* Apply compiler passes.  A recorded error is treated as failure
       even if all passes returned success. */
    if (!apply_compiler_passes(cc) || jit_get_last_error(cc)) {
        last_error = jit_get_last_error(cc);
        os_printf("fast jit compilation failed: %s\n",
                  last_error ? last_error : "unknown error");
        ret = false;
    }
    /* Delete the compilation context. */
    jit_cc_delete(cc);
    return ret;
}
/* JIT-compile every defined (non-import) function of the module,
   stopping at the first failure. */
bool
jit_compiler_compile_all(WASMModule *module)
{
    uint32 defined_idx;

    for (defined_idx = 0; defined_idx < module->function_count;
         defined_idx++) {
        /* jit_compiler_compile expects a module-level index, which
           counts the imported functions first. */
        uint32 func_idx = module->import_function_count + defined_idx;

        if (!jit_compiler_compile(module, func_idx))
            return false;
    }

    return true;
}
/* Thin wrapper over the codegen backend's interpreter<->jitted glue;
   the execution environment is passed through as the glue's `self`. */
int
jit_interp_switch_to_jitted(void *exec_env, JitInterpSwitchInfo *info, void *pc)
{
    return jit_codegen_interp_jitted_glue(exec_env, info, pc);
}

View File

@ -0,0 +1,143 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_COMPILER_H_
#define _JIT_COMPILER_H_
#include "bh_platform.h"
#include "../interpreter/wasm_runtime.h"
#include "jit_ir.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Global data shared between the JIT compiler and its users. */
typedef struct JitGlobals {
    /* Compiler pass sequence, the last element must be 0 */
    const uint8 *passes;
    /* Code address jitted code jumps to when returning to the
       interpreter.  NOTE(review): filled in by the codegen backend
       (initialized to NULL in jit_compiler.c); stored as char *,
       presumably for address arithmetic -- confirm. */
    char *return_to_interp_from_jitted;
} JitGlobals;
/**
 * Actions the interpreter should do when jitted code returns to
 * interpreter.
 */
typedef enum JitInterpAction {
    JIT_INTERP_ACTION_NORMAL, /* normal execution */
    JIT_INTERP_ACTION_THROWN, /* exception was thrown */
    JIT_INTERP_ACTION_CALL    /* call wasm function */
} JitInterpAction;
/**
 * Information exchanged between jitted code and interpreter.
 */
typedef struct JitInterpSwitchInfo {
    /* Points to the frame that is passed to jitted code and the frame
       that is returned from jitted code */
    void *frame;
    /* Output values from jitted code of different actions */
    union {
        /* IP and SP offsets for NORMAL */
        struct {
            int32 ip;
            int32 sp;
        } normal;
        /* Function called from jitted code for CALL */
        struct {
            void *function;
        } call;
        /* Returned integer and/or floating point values for RETURN. This
           is also used to pass return values from interpreter to jitted
           code if the caller is in jitted code and the callee is in
           interpreter. */
        struct {
            uint32 ival[2];
            uint32 fval[2];
            uint32 last_return_type;
        } ret;
    } out;
} JitInterpSwitchInfo;
/* Jit compiler options */
typedef struct JitCompOptions {
    /* Size in bytes of the code cache; 0 selects the default size
       (see jit_compiler_init). */
    uint32 code_cache_size;
    /* Optimization level.  NOTE(review): not read by the visible
       compiler code -- confirm intended use. */
    uint32 opt_level;
} JitCompOptions;
/* Initialize the JIT compiler (code cache and codegen backend). */
bool
jit_compiler_init(const JitCompOptions *option);

/* Destroy the JIT compiler and release its resources. */
void
jit_compiler_destroy();

/* Get the exported global data of the JIT compiler. */
JitGlobals *
jit_compiler_get_jit_globals();

/* Get the name of pass number i, or NULL if i is out of range. */
const char *
jit_compiler_get_pass_name(unsigned i);

/* JIT-compile the function at module-level index func_idx. */
bool
jit_compiler_compile(WASMModule *module, uint32 func_idx);

/* JIT-compile all defined functions of the module. */
bool
jit_compiler_compile_all(WASMModule *module);

/* Switch from the interpreter to the jitted code at pc. */
int
jit_interp_switch_to_jitted(void *self, JitInterpSwitchInfo *info, void *pc);
/*
* Pass declarations:
*/
/**
* Dump the compilation context.
*/
bool
jit_pass_dump(JitCompContext *cc);
/**
* Update CFG (usually before dump for better readability).
*/
bool
jit_pass_update_cfg(JitCompContext *cc);
/**
* Translate profiling result into MIR.
*/
bool
jit_pass_frontend(JitCompContext *cc);
/**
* Lower unsupported operations into supported ones.
*/
bool
jit_pass_lower_cg(JitCompContext *cc);
/**
* Register allocation.
*/
bool
jit_pass_regalloc(JitCompContext *cc);
/**
* Native code generation.
*/
bool
jit_pass_codegen(JitCompContext *cc);
/**
* Register the jitted code so that it can be executed.
*/
bool
jit_pass_register_jitted_code(JitCompContext *cc);
#ifdef __cplusplus
}
#endif
#endif /* end of _JIT_COMPILER_H_ */

View File

@ -0,0 +1,331 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_dump.h"
#include "jit_compiler.h"
#include "jit_codegen.h"
/* Print a human-readable representation of a register: constants as
   literal values, variable registers as a kind-specific prefix letter
   followed by the register number. */
void
jit_dump_reg(JitCompContext *cc, JitReg reg)
{
    unsigned kind = jit_reg_kind(reg);
    unsigned no = jit_reg_no(reg);
    switch (kind) {
        case JIT_REG_KIND_VOID:
            os_printf("VOID");
            break;
        case JIT_REG_KIND_I32:
            if (jit_reg_is_const(reg)) {
                /* I32 constants may carry a relocation value. */
                unsigned rel = jit_cc_get_const_I32_rel(cc, reg);
                os_printf("0x%x", jit_cc_get_const_I32(cc, reg));
                if (rel)
                    os_printf("(rel: 0x%x)", rel);
            }
            else
                os_printf("i%d", no);
            break;
        case JIT_REG_KIND_I64:
            if (jit_reg_is_const(reg))
                /* NOTE(review): %llx assumes long long is 64-bit on
                   all supported targets; PRIx64 would be more portable
                   -- confirm target platforms. */
                os_printf("0x%llxL", jit_cc_get_const_I64(cc, reg));
            else
                os_printf("I%d", no);
            break;
        case JIT_REG_KIND_F32:
            if (jit_reg_is_const(reg))
                os_printf("%f", jit_cc_get_const_F32(cc, reg));
            else
                os_printf("f%d", no);
            break;
        case JIT_REG_KIND_F64:
            if (jit_reg_is_const(reg))
                os_printf("%fL", jit_cc_get_const_F64(cc, reg));
            else
                os_printf("D%d", no);
            break;
        case JIT_REG_KIND_L32:
            /* Label register. */
            os_printf("L%d", no);
            break;
        default:
            bh_assert(!"Unsupported register kind.");
    }
}
/* Print the fixed register operands of a Reg-format instruction,
   comma-separated, terminated by a newline. */
static void
jit_dump_insn_Reg(JitCompContext *cc, JitInsn *insn, unsigned opnd_num)
{
    unsigned idx = 0;

    while (idx < opnd_num) {
        os_printf(idx == 0 ? " " : ", ");
        jit_dump_reg(cc, *(jit_insn_opnd(insn, idx)));
        idx++;
    }
    os_printf("\n");
}
/* Print the operands of a VReg-format (variable operand number)
   instruction.  The opnd_num from the opcode table covers only the
   fixed operands, so it is overwritten with the actual count queried
   from the instruction itself; the parameter exists for uniform
   dispatch from the INSN X-macro in jit_dump_insn. */
static void
jit_dump_insn_VReg(JitCompContext *cc, JitInsn *insn, unsigned opnd_num)
{
    unsigned i;
    opnd_num = jit_insn_opndv_num(insn);
    for (i = 0; i < opnd_num; i++) {
        os_printf(i == 0 ? " " : ", ");
        jit_dump_reg(cc, *(jit_insn_opndv(insn, i)));
    }
    os_printf("\n");
}
/* Print the operands of a LOOKUPSWITCH instruction: the switched
   value, the default target, then one "value: target" line per match
   pair.  opnd_num is unused -- the layout is fixed by the
   LookupSwitch format; the parameter exists for uniform dispatch from
   the INSN X-macro. */
static void
jit_dump_insn_LookupSwitch(JitCompContext *cc, JitInsn *insn, unsigned opnd_num)
{
    unsigned i;
    JitOpndLookupSwitch *opnd = jit_insn_opndls(insn);
    os_printf(" ");
    jit_dump_reg(cc, opnd->value);
    os_printf("\n%16s: ", "default");
    jit_dump_reg(cc, opnd->default_target);
    os_printf("\n");
    for (i = 0; i < opnd->match_pairs_num; i++) {
        os_printf("%18d: ", opnd->match_pairs[i].value);
        jit_dump_reg(cc, opnd->match_pairs[i].target);
        os_printf("\n");
    }
}
/* Print one instruction: its opcode name followed by its operands.
   The INSN X-macro list in jit_ir.def expands to one case per opcode,
   dispatching to the matching jit_dump_insn_<OPND_KIND> helper. */
void
jit_dump_insn(JitCompContext *cc, JitInsn *insn)
{
    switch (insn->opcode) {
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE)     \
    case JIT_OP_##NAME:                                \
        os_printf("    %-15s", #NAME);                 \
        jit_dump_insn_##OPND_KIND(cc, insn, OPND_NUM); \
        break;
#include "jit_ir.def"
#undef INSN
    }
}
/* Dump one basic block: its label, predecessor list, bytecode-range
   annotations, then either the generated native code (when the
   jitted_addr annotation is enabled, i.e. after codegen) or the IR
   instructions, and finally the successor list. */
void
jit_dump_basic_block(JitCompContext *cc, JitBasicBlock *block)
{
    unsigned i, label_index;
    void *begin_addr, *end_addr;
    JitBasicBlock *block_next;
    JitInsn *insn;
    JitRegVec preds = jit_basic_block_preds(block);
    JitRegVec succs = jit_basic_block_succs(block);
    JitReg label = jit_basic_block_label(block), label_next;
    JitReg *reg;
    jit_dump_reg(cc, label);
    os_printf(":\n    ; PREDS(");
    JIT_REG_VEC_FOREACH(preds, i, reg)
    {
        if (i > 0)
            os_printf(" ");
        jit_dump_reg(cc, *reg);
    }
    os_printf(")\n    ;");
    /* Bytecode range covered by this block, printed as offsets from
       the module's load address. */
    if (jit_annl_is_enabled_begin_bcip(cc))
        os_printf(" BEGIN_BCIP=0x%04tx",
                  *(jit_annl_begin_bcip(cc, label))
                      - (uint8 *)cc->cur_wasm_module->load_addr);
    if (jit_annl_is_enabled_end_bcip(cc))
        os_printf(" END_BCIP=0x%04tx",
                  *(jit_annl_end_bcip(cc, label))
                      - (uint8 *)cc->cur_wasm_module->load_addr);
    os_printf("\n");
    if (jit_annl_is_enabled_jitted_addr(cc)) {
        /* Native code generated: the end address of this block's code
           is the begin address of the physically next block's code. */
        begin_addr = *(jit_annl_jitted_addr(cc, label));
        if (label == cc->entry_label) {
            /* NOTE(review): index 2 appears to be the first normal
               block (0/1 being reserved) -- confirm the label
               numbering convention. */
            block_next = cc->_ann._label_basic_block[2];
            label_next = jit_basic_block_label(block_next);
            end_addr = *(jit_annl_jitted_addr(cc, label_next));
        }
        else if (label == cc->exit_label) {
            /* The exit block is the last code emitted. */
            end_addr = cc->jitted_addr_end;
        }
        else {
            label_index = jit_reg_no(label);
            if (label_index < jit_cc_label_num(cc) - 1)
                block_next = cc->_ann._label_basic_block[label_index + 1];
            else
                /* Last normal block: followed by block at index 1. */
                block_next = cc->_ann._label_basic_block[1];
            label_next = jit_basic_block_label(block_next);
            end_addr = *(jit_annl_jitted_addr(cc, label_next));
        }
        jit_codegen_dump_native(begin_addr, end_addr);
    }
    else {
        /* Dump IR. */
        JIT_FOREACH_INSN(block, insn) jit_dump_insn(cc, insn);
    }
    os_printf("    ; SUCCS(");
    JIT_REG_VEC_FOREACH(succs, i, reg)
    {
        if (i > 0)
            os_printf(" ");
        jit_dump_reg(cc, *reg);
    }
    os_printf(")\n\n");
}
/* Print a name for the function being compiled: the custom name
   section name when available, else a matching export name, else a
   numeric placeholder "$f<idx>". */
static void
dump_func_name(JitCompContext *cc)
{
    WASMModule *module = cc->cur_wasm_module;
    const char *name = NULL;
    uint32 i;

#if WASM_ENABLE_CUSTOM_NAME_SECTION != 0
    name = cc->cur_wasm_func->field_name;
#endif

    /* if custom name section is not generated,
       search symbols from export table */
    for (i = 0; !name && i < module->export_count; i++) {
        if (module->exports[i].kind == EXPORT_KIND_FUNC
            && module->exports[i].index == cc->cur_wasm_func_idx)
            name = module->exports[i].name;
    }

    if (name)
        os_printf("%s", name);
    else
        /* function name not exported, print number instead */
        os_printf("$f%d", cc->cur_wasm_func_idx);
}
/* Dump the whole IR of a compilation context: header statistics,
   the set of enabled annotations, then every basic block. */
static void
dump_cc_ir(JitCompContext *cc)
{
    unsigned i, end;
    JitBasicBlock *block;
    JitReg label;
    /* Names indexed by register kind, up to (excluding) L32. */
    const char *kind_names[] = { "VOID", "I32", "I64", "F32",
                                 "F64", "V64", "V128", "V256" };
    os_printf("; Function: ");
    dump_func_name(cc);
    os_printf("\n");
    os_printf("; Constant table sizes:");
    for (i = 0; i < JIT_REG_KIND_L32; i++)
        os_printf(" %s=%d", kind_names[i], cc->_const_val._num[i]);
    os_printf("\n; Label number: %d", jit_cc_label_num(cc));
    os_printf("\n; Instruction number: %d", jit_cc_insn_num(cc));
    os_printf("\n; Register numbers:");
    for (i = 0; i < JIT_REG_KIND_L32; i++)
        os_printf(" %s=%d", kind_names[i], jit_cc_reg_num(cc, i));
    /* The ANN_* X-macros from jit_ir.def expand to one enabled-check
       per annotation of each category. */
    os_printf("\n; Label annotations:");
#define ANN_LABEL(TYPE, NAME)           \
    if (jit_annl_is_enabled_##NAME(cc)) \
        os_printf(" %s", #NAME);
#include "jit_ir.def"
#undef ANN_LABEL
    os_printf("\n; Instruction annotations:");
#define ANN_INSN(TYPE, NAME)            \
    if (jit_anni_is_enabled_##NAME(cc)) \
        os_printf(" %s", #NAME);
#include "jit_ir.def"
#undef ANN_INSN
    os_printf("\n; Register annotations:");
#define ANN_REG(TYPE, NAME)             \
    if (jit_annr_is_enabled_##NAME(cc)) \
        os_printf(" %s", #NAME);
#include "jit_ir.def"
#undef ANN_REG
    os_printf("\n\n");
    if (jit_annl_is_enabled_next_label(cc)) {
        /* Blocks have been reordered, use that order to dump. */
        for (label = cc->entry_label; label;
             label = *(jit_annl_next_label(cc, label)))
            jit_dump_basic_block(cc, *(jit_annl_basic_block(cc, label)));
    }
    else {
        /* Otherwise, use the default order: entry, normal blocks,
           exit. */
        jit_dump_basic_block(cc, jit_cc_entry_basic_block(cc));
        JIT_FOREACH_BLOCK(cc, i, end, block) jit_dump_basic_block(cc, block);
        jit_dump_basic_block(cc, jit_cc_exit_basic_block(cc));
    }
}
/* Dump the whole compilation context.  Nothing is printed when only
   the entry and exit labels exist (no real content yet). */
void
jit_dump_cc(JitCompContext *cc)
{
    if (jit_cc_label_num(cc) <= 2)
        return;
    dump_cc_ir(cc);
}
/* The dump pass: print the compilation context produced by the
   previous pass.  It is interleaved between the other passes by the
   with-dump pass sequence in jit_compiler.c. */
bool
jit_pass_dump(JitCompContext *cc)
{
    const JitGlobals *jit_globals = jit_compiler_get_jit_globals();
    const uint8 *passes = jit_globals->passes;
    uint8 pass_no = cc->cur_pass_no;
    /* cur_pass_no is the position of this dump pass in the sequence;
       the pass whose output is being dumped is the one before it. */
    const char *pass_name =
        pass_no > 0 ? jit_compiler_get_pass_name(passes[pass_no - 1]) : "NULL";
#if defined(BUILD_TARGET_X86_64) || defined(BUILD_TARGET_AMD_64)
    if (!strcmp(pass_name, "lower_cg"))
        /* Ignore lower codegen pass as it does nothing in x86-64 */
        return true;
#endif
    os_printf("JIT.COMPILER.DUMP: PASS_NO=%d PREV_PASS=%s\n\n", pass_no,
              pass_name);
    jit_dump_cc(cc);
    os_printf("\n");
    return true;
}
/* The update_cfg pass: recompute the control flow graph, usually run
   before dump for better readability. */
bool
jit_pass_update_cfg(JitCompContext *cc)
{
    return jit_cc_update_cfg(cc);
}

View File

@ -0,0 +1,54 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_DUMP_H_
#define _JIT_DUMP_H_
#include "jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Dump a register.
*
* @param cc compilation context of the register
* @param reg register to be dumped
*/
void
jit_dump_reg(JitCompContext *cc, JitReg reg);
/**
* Dump an instruction.
*
* @param cc compilation context of the instruction
* @param insn instruction to be dumped
*/
void
jit_dump_insn(JitCompContext *cc, JitInsn *insn);
/**
* Dump a block.
*
* @param cc compilation context of the block
* @param block block to be dumped
*/
void
jit_dump_block(JitCompContext *cc, JitBlock *block);
/**
* Dump a compilation context.
*
* @param cc compilation context to be dumped
*/
void
jit_dump_cc(JitCompContext *cc);
#ifdef __cplusplus
}
#endif
#endif /* end of _JIT_DUMP_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,521 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_FRONTEND_H_
#define _JIT_FRONTEND_H_
#include "jit_utils.h"
#include "jit_ir.h"
#include "../interpreter/wasm_interp.h"
#if WASM_ENABLE_AOT != 0
#include "../aot/aot_runtime.h"
#endif
#if WASM_ENABLE_AOT == 0
typedef enum IntCond {
INT_EQZ = 0,
INT_EQ,
INT_NE,
INT_LT_S,
INT_LT_U,
INT_GT_S,
INT_GT_U,
INT_LE_S,
INT_LE_U,
INT_GE_S,
INT_GE_U
} IntCond;
typedef enum FloatCond {
FLOAT_EQ = 0,
FLOAT_NE,
FLOAT_LT,
FLOAT_GT,
FLOAT_LE,
FLOAT_GE,
FLOAT_UNO
} FloatCond;
#else
#define IntCond AOTIntCond
#define FloatCond AOTFloatCond
#endif
typedef enum IntArithmetic {
INT_ADD = 0,
INT_SUB,
INT_MUL,
INT_DIV_S,
INT_DIV_U,
INT_REM_S,
INT_REM_U
} IntArithmetic;
typedef enum V128Arithmetic {
V128_ADD = 0,
V128_SUB,
V128_MUL,
V128_DIV,
V128_NEG,
V128_MIN,
V128_MAX,
} V128Arithmetic;
typedef enum IntBitwise {
INT_AND = 0,
INT_OR,
INT_XOR,
} IntBitwise;
typedef enum V128Bitwise {
V128_NOT,
V128_AND,
V128_ANDNOT,
V128_OR,
V128_XOR,
V128_BITSELECT,
} V128Bitwise;
typedef enum IntShift {
INT_SHL = 0,
INT_SHR_S,
INT_SHR_U,
INT_ROTL,
INT_ROTR
} IntShift;
typedef enum FloatMath {
FLOAT_ABS = 0,
FLOAT_NEG,
FLOAT_CEIL,
FLOAT_FLOOR,
FLOAT_TRUNC,
FLOAT_NEAREST,
FLOAT_SQRT
} FloatMath;
typedef enum FloatArithmetic {
FLOAT_ADD = 0,
FLOAT_SUB,
FLOAT_MUL,
FLOAT_DIV,
FLOAT_MIN,
FLOAT_MAX,
} FloatArithmetic;
/* Exception IDs that jitted code can raise.  NOTE(review): these
   appear to mirror the runtime's exception identifiers -- confirm the
   ordering stays in sync with the interpreter/AOT exception table. */
typedef enum JitExceptionID {
    JIT_EXCE_UNREACHABLE = 0,
    JIT_EXCE_OUT_OF_MEMORY,
    JIT_EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
    JIT_EXCE_INTEGER_OVERFLOW,
    JIT_EXCE_INTEGER_DIVIDE_BY_ZERO,
    JIT_EXCE_INVALID_CONVERSION_TO_INTEGER,
    JIT_EXCE_INVALID_FUNCTION_TYPE_INDEX,
    JIT_EXCE_INVALID_FUNCTION_INDEX,
    JIT_EXCE_UNDEFINED_ELEMENT,
    JIT_EXCE_UNINITIALIZED_ELEMENT,
    JIT_EXCE_CALL_UNLINKED_IMPORT_FUNC,
    JIT_EXCE_NATIVE_STACK_OVERFLOW,
    JIT_EXCE_UNALIGNED_ATOMIC,
    JIT_EXCE_AUX_STACK_OVERFLOW,
    JIT_EXCE_AUX_STACK_UNDERFLOW,
    JIT_EXCE_OUT_OF_BOUNDS_TABLE_ACCESS,
    JIT_EXCE_OPERAND_STACK_OVERFLOW,
    JIT_EXCE_ALREADY_THROWN,
    /* Number of exception IDs (must remain last). */
    JIT_EXCE_NUM,
} JitExceptionID;
/**
* Translate instructions in a function. The translated block must
* end with a branch instruction whose targets are offsets relating to
* the end bcip of the translated block, which are integral constants.
* If a target of a branch is really a constant value (which should be
* rare), put it into a register and then jump to the register instead
* of using the constant value directly in the target. In the
* translation process, don't create any new labels. The code bcip of
* the begin and end of the translated block is stored in the
* jit_annl_begin_bcip and jit_annl_end_bcip annotations of the label
* of the block, which must be the same as the bcips used in
* profiling.
*
* NOTE: the function must explicitly set SP to correct value when the
* entry's bcip is the function's entry address.
*
* @param cc containing compilation context of generated IR
* @param entry entry of the basic block to be translated. If its
* value is NULL, the function will clean up any pass local data that
* might be created previously.
* @param is_reached a bitmap recording which bytecode has been
* reached as a block entry
*
* @return IR block containing translated instructions if succeeds,
* NULL otherwise
*/
JitBasicBlock *
jit_frontend_translate_func(JitCompContext *cc);
/**
* Generate a block leaving the compiled code, which must store the
* target bcip and other necessary information for switching to
* interpreter or other compiled code and then jump to the exit of the
* cc.
*
* @param cc the compilation context
* @param bcip the target bytecode instruction pointer
* @param sp_offset stack pointer offset at the beginning of the block
*
* @return the leaving block if succeeds, NULL otherwise
*/
JitBlock *
jit_frontend_gen_leaving_block(JitCompContext *cc, void *bcip,
unsigned sp_offset);
/**
* Lower the IR of the given compilation context.
*
* @param cc the compilation context
*
* @return true if succeeds, false otherwise
*/
bool
jit_frontend_lower(JitCompContext *cc);
JitReg
get_module_inst_reg(JitFrame *frame);
JitReg
get_module_reg(JitFrame *frame);
JitReg
get_fast_jit_func_ptrs_reg(JitFrame *frame);
JitReg
get_global_data_reg(JitFrame *frame);
JitReg
get_aux_stack_bound_reg(JitFrame *frame);
JitReg
get_aux_stack_bottom_reg(JitFrame *frame);
JitReg
get_memories_reg(JitFrame *frame);
JitReg
get_memory_inst_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_memory_data_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_memory_data_end_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_mem_bound_check_1byte_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_mem_bound_check_2bytes_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_mem_bound_check_4bytes_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_mem_bound_check_8bytes_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_mem_bound_check_16bytes_reg(JitFrame *frame, uint32 mem_idx);
JitReg
get_tables_reg(JitFrame *frame);
JitReg
get_table_inst_reg(JitFrame *frame, uint32 table_idx);
JitReg
get_table_data_reg(JitFrame *frame, uint32 table_idx);
JitReg
get_table_cur_size_reg(JitFrame *frame, uint32 table_idx);
void
clear_fixed_virtual_regs(JitFrame *frame);
void
clear_memory_regs(JitFrame *frame);
void
clear_table_regs(JitFrame *frame);
/**
* Get the offset from frame pointer to the n-th local variable slot.
*
* @param n the index to the local variable array
*
* @return the offset from frame pointer to the local variable slot
*/
static inline unsigned
offset_of_local(unsigned n)
{
    /* Each local/stack value slot is 4 bytes (i64/f64 values occupy
       two consecutive slots, see push_i64/set_local_i64). */
    return offsetof(WASMInterpFrame, lp) + n * 4;
}
/**
* Generate instruction to load an integer from the frame.
*
* This and the below gen_load_X functions generate instructions to
* load values from the frame into registers if the values have not
* been loaded yet.
*
* @param frame the frame information
* @param n slot index to the local variable array
*
* @return register holding the loaded value
*/
JitReg
gen_load_i32(JitFrame *frame, unsigned n);
/**
* Generate instruction to load a i64 integer from the frame.
*
* @param frame the frame information
* @param n slot index to the local variable array
*
* @return register holding the loaded value
*/
JitReg
gen_load_i64(JitFrame *frame, unsigned n);
/**
* Generate instruction to load a floating point value from the frame.
*
* @param frame the frame information
* @param n slot index to the local variable array
*
* @return register holding the loaded value
*/
JitReg
gen_load_f32(JitFrame *frame, unsigned n);
/**
* Generate instruction to load a double value from the frame.
*
* @param frame the frame information
* @param n slot index to the local variable array
*
* @return register holding the loaded value
*/
JitReg
gen_load_f64(JitFrame *frame, unsigned n);
/**
* Generate instructions to commit computation result to the frame.
* The general principle is to only commit values that will be used
* through the frame.
*
* @param frame the frame information
* @param begin the begin value slot to commit
* @param end the end value slot to commit
*/
void
gen_commit_values(JitFrame *frame, JitValueSlot *begin, JitValueSlot *end);
/**
* Generate instructions to commit SP and IP pointers to the frame.
*
* @param frame the frame information
*/
void
gen_commit_sp_ip(JitFrame *frame);
/**
* Generate commit instructions for the block end.
*
* @param frame the frame information
*/
static inline void
gen_commit_for_branch(JitFrame *frame)
{
gen_commit_values(frame, frame->lp, frame->sp);
}
/**
* Generate commit instructions for exception checks.
*
* @param frame the frame information
*/
static inline void
gen_commit_for_exception(JitFrame *frame)
{
gen_commit_values(frame, frame->lp, frame->lp + frame->max_locals);
gen_commit_sp_ip(frame);
}
/**
* Generate commit instructions to commit all status.
*
* @param frame the frame information
*/
static inline void
gen_commit_for_all(JitFrame *frame)
{
gen_commit_values(frame, frame->lp, frame->sp);
gen_commit_sp_ip(frame);
}
/* Forget all cached state: zero every local/stack value slot, reset
   the committed SP/IP markers, and clear the fixed virtual registers
   so nothing is assumed to still be live in registers. */
static inline void
clear_values(JitFrame *frame)
{
    size_t total_size =
        sizeof(JitValueSlot) * (frame->max_locals + frame->max_stacks);
    memset(frame->lp, 0, total_size);
    frame->committed_sp = NULL;
    frame->committed_ip = NULL;
    clear_fixed_virtual_regs(frame);
}
/* Push an i32 value: one slot, marked dirty for deferred commit to
   the interpreter frame (see gen_commit_values). */
static inline void
push_i32(JitFrame *frame, JitReg value)
{
    frame->sp->reg = value;
    frame->sp->dirty = 1;
    frame->sp++;
}
/* Push an i64 value: occupies two consecutive slots, both recording
   the same register. */
static inline void
push_i64(JitFrame *frame, JitReg value)
{
    frame->sp->reg = value;
    frame->sp->dirty = 1;
    frame->sp++;
    frame->sp->reg = value;
    frame->sp->dirty = 1;
    frame->sp++;
}
/* f32 shares the single-slot layout of i32. */
static inline void
push_f32(JitFrame *frame, JitReg value)
{
    push_i32(frame, value);
}
/* f64 shares the two-slot layout of i64. */
static inline void
push_f64(JitFrame *frame, JitReg value)
{
    push_i64(frame, value);
}
/* Pop an i32: move SP down one slot and load the value (gen_load_*
   emits a load only if the slot is not already in a register). */
static inline JitReg
pop_i32(JitFrame *frame)
{
    frame->sp--;
    return gen_load_i32(frame, frame->sp - frame->lp);
}
/* Pop an i64: two slots. */
static inline JitReg
pop_i64(JitFrame *frame)
{
    frame->sp -= 2;
    return gen_load_i64(frame, frame->sp - frame->lp);
}
/* Pop an f32: one slot. */
static inline JitReg
pop_f32(JitFrame *frame)
{
    frame->sp--;
    return gen_load_f32(frame, frame->sp - frame->lp);
}
/* Pop an f64: two slots. */
static inline JitReg
pop_f64(JitFrame *frame)
{
    frame->sp -= 2;
    return gen_load_f64(frame, frame->sp - frame->lp);
}
/* Drop n slots and clear them so no stale cached registers survive. */
static inline void
pop(JitFrame *frame, int n)
{
    frame->sp -= n;
    memset(frame->sp, 0, n * sizeof(*frame->sp));
}
static inline JitReg
local_i32(JitFrame *frame, int n)
{
return gen_load_i32(frame, n);
}
static inline JitReg
local_i64(JitFrame *frame, int n)
{
return gen_load_i64(frame, n);
}
static inline JitReg
local_f32(JitFrame *frame, int n)
{
return gen_load_f32(frame, n);
}
static inline JitReg
local_f64(JitFrame *frame, int n)
{
return gen_load_f64(frame, n);
}
/* Record that local slot n now holds the given i32 register; the
   store back to the interpreter frame is deferred via the dirty flag
   (see gen_commit_values).  Declared `inline` like every other static
   helper in this header -- without it, each including translation
   unit gets an unused static function and the associated warnings. */
static inline void
set_local_i32(JitFrame *frame, int n, JitReg val)
{
    frame->lp[n].reg = val;
    frame->lp[n].dirty = 1;
}
/* Same as set_local_i32, but an i64 occupies two consecutive slots,
   both recording the same register. */
static inline void
set_local_i64(JitFrame *frame, int n, JitReg val)
{
    frame->lp[n].reg = val;
    frame->lp[n].dirty = 1;
    frame->lp[n + 1].reg = val;
    frame->lp[n + 1].dirty = 1;
}
static inline void
set_local_f32(JitFrame *frame, int n, JitReg val)
{
set_local_i32(frame, n, val);
}
static inline void
set_local_f64(JitFrame *frame, int n, JitReg val)
{
set_local_i64(frame, n, val);
}
#define POP(jit_value, value_type) \
do { \
if (!jit_cc_pop_value(cc, value_type, &jit_value)) \
goto fail; \
} while (0)
#define POP_I32(v) POP(v, VALUE_TYPE_I32)
#define POP_I64(v) POP(v, VALUE_TYPE_I64)
#define POP_F32(v) POP(v, VALUE_TYPE_F32)
#define POP_F64(v) POP(v, VALUE_TYPE_F64)
#define POP_FUNCREF(v) POP(v, VALUE_TYPE_FUNCREF)
#define POP_EXTERNREF(v) POP(v, VALUE_TYPE_EXTERNREF)
#define PUSH(jit_value, value_type) \
do { \
if (!jit_cc_push_value(cc, value_type, jit_value)) \
goto fail; \
} while (0)
#define PUSH_I32(v) PUSH(v, VALUE_TYPE_I32)
#define PUSH_I64(v) PUSH(v, VALUE_TYPE_I64)
#define PUSH_F32(v) PUSH(v, VALUE_TYPE_F32)
#define PUSH_F64(v) PUSH(v, VALUE_TYPE_F64)
#define PUSH_FUNCREF(v) PUSH(v, VALUE_TYPE_FUNCREF)
#define PUSH_EXTERNREF(v) PUSH(v, VALUE_TYPE_EXTERNREF)
#endif

1403
core/iwasm/fast-jit/jit_ir.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,302 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
/**
* @file jit-ir.def
*
* @brief Definition of JIT IR instructions and annotations.
*/
/**
* @def INSN (NAME, OPND_KIND, OPND_NUM, FIRST_USE)
*
* Definition of IR instructions
*
* @param NAME name of the opcode
* @param OPND_KIND kind of the operand(s)
* @param OPND_NUM number of the operand(s)
* @param FIRST_USE index of the first use register
*
* @p OPND_KIND and @p OPND_NUM together determine the format of an
* instruction. There are four kinds of formats:
*
* 1) Reg: fixed-number register operands, @p OPND_NUM specifies the
* number of operands;
*
* 2) VReg: variable-number register operands, @p OPND_NUM specifies
* the number of fixed register operands;
*
* 3) TableSwitch: tableswitch instruction's format, @p OPND_NUM must
* be 1;
*
* 4) LookupSwitch: lookupswitch instruction's format, @p OPND_NUM
* must be 1.
*
* Instruction operands are all registers and they are organized in an
* order that all registers defined by the instruction, if any, appear
* before the registers used by the instruction. The @p FIRST_USE is
* the index of the first use register in the register vector sorted
* in this order. Use @c jit_insn_opnd_regs to get the register
* vector in this order and use @c jit_insn_opnd_first_use to get the
* index of the first use register.
*
* Every instruction with name @p NAME has the following definitions:
*
 * @c JIT_OP_NAME: the enum opcode of insn NAME
* @c jit_insn_new_NAME (...): creates a new instance of insn NAME
*
* An instruction is deleted by function:
*
* @c jit_insn_delete (@p insn)
*
* In the scope of this IR's terminology, operand and argument have
* different meanings. The operand is a general notation, which
* denotes every raw operand of an instruction, while the argument
* only denotes the variable part of operands of instructions of VReg
* kind. For example, a VReg instruction phi node "r0 = phi(r1, r2)"
* has three operands opnd[0]: r0, opnd[1]: r1 and opnd[2]: r2, but
* only two arguments arg[0]: r1 and arg[1]: r2. Operands or
* arguments of instructions with various formats can be access
* through the following APIs:
*
* @c jit_insn_opnd (@p insn, @p n): for Reg_N formats
* @c jit_insn_opndv (@p insn, @p n): for VReg_N formats
* @c jit_insn_opndv_num (@p insn): for VReg_N formats
* @c jit_insn_opndts (@p insn): for TableSwitch_1 format
* @c jit_insn_opndls (@p insn): for LookupSwitch_1 format
*/
#ifndef INSN
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE)
#endif
/* Move and conversion instructions that transfer values among
registers of the same kind (move) or different kinds (convert) */
INSN(MOV, Reg, 2, 1)
INSN(PHI, VReg, 1, 1)
/* conversion. will extend or truncate */
INSN(I8TOI32, Reg, 2, 1)
INSN(I8TOI64, Reg, 2, 1)
INSN(I16TOI32, Reg, 2, 1)
INSN(I16TOI64, Reg, 2, 1)
INSN(I32TOI8, Reg, 2, 1)
INSN(I32TOU8, Reg, 2, 1)
INSN(I32TOI16, Reg, 2, 1)
INSN(I32TOU16, Reg, 2, 1)
INSN(I32TOI64, Reg, 2, 1)
INSN(I32TOF32, Reg, 2, 1)
INSN(I32TOF64, Reg, 2, 1)
INSN(U32TOI64, Reg, 2, 1)
INSN(U32TOF32, Reg, 2, 1)
INSN(U32TOF64, Reg, 2, 1)
INSN(I64TOI8, Reg, 2, 1)
INSN(I64TOI16, Reg, 2, 1)
INSN(I64TOI32, Reg, 2, 1)
INSN(I64TOF32, Reg, 2, 1)
INSN(I64TOF64, Reg, 2, 1)
INSN(F32TOI32, Reg, 2, 1)
INSN(F32TOI64, Reg, 2, 1)
INSN(F32TOF64, Reg, 2, 1)
INSN(F32TOU32, Reg, 2, 1)
INSN(F64TOI32, Reg, 2, 1)
INSN(F64TOI64, Reg, 2, 1)
INSN(F64TOF32, Reg, 2, 1)
INSN(F64TOU32, Reg, 2, 1)
/**
* Re-interpret binary presentations:
* *(i32 *)&f32, *(i64 *)&f64, *(f32 *)&i32, *(f64 *)&i64
*/
INSN(I32CASTF32, Reg, 2, 1)
INSN(I64CASTF64, Reg, 2, 1)
INSN(F32CASTI32, Reg, 2, 1)
INSN(F64CASTI64, Reg, 2, 1)
/* Arithmetic and bitwise instructions: */
INSN(NEG, Reg, 2, 1)
INSN(NOT, Reg, 2, 1)
INSN(ADD, Reg, 3, 1)
INSN(SUB, Reg, 3, 1)
INSN(MUL, Reg, 3, 1)
INSN(DIV_S, Reg, 3, 1)
INSN(REM_S, Reg, 3, 1)
INSN(DIV_U, Reg, 3, 1)
INSN(REM_U, Reg, 3, 1)
INSN(SHL, Reg, 3, 1)
INSN(SHRS, Reg, 3, 1)
INSN(SHRU, Reg, 3, 1)
INSN(ROTL, Reg, 3, 1)
INSN(ROTR, Reg, 3, 1)
INSN(OR, Reg, 3, 1)
INSN(XOR, Reg, 3, 1)
INSN(AND, Reg, 3, 1)
INSN(CMP, Reg, 3, 1)
INSN(MAX, Reg, 3, 1)
INSN(MIN, Reg, 3, 1)
INSN(CLZ, Reg, 2, 1)
INSN(CTZ, Reg, 2, 1)
INSN(POPCNT, Reg, 2, 1)
/* Select instruction: */
INSN(SELECTEQ, Reg, 4, 1)
INSN(SELECTNE, Reg, 4, 1)
INSN(SELECTGTS, Reg, 4, 1)
INSN(SELECTGES, Reg, 4, 1)
INSN(SELECTLTS, Reg, 4, 1)
INSN(SELECTLES, Reg, 4, 1)
INSN(SELECTGTU, Reg, 4, 1)
INSN(SELECTGEU, Reg, 4, 1)
INSN(SELECTLTU, Reg, 4, 1)
INSN(SELECTLEU, Reg, 4, 1)
/* Memory access instructions: */
INSN(LDEXECENV, Reg, 1, 1)
INSN(LDJITINFO, Reg, 1, 1)
INSN(LDI8, Reg, 3, 1)
INSN(LDU8, Reg, 3, 1)
INSN(LDI16, Reg, 3, 1)
INSN(LDU16, Reg, 3, 1)
INSN(LDI32, Reg, 3, 1)
INSN(LDU32, Reg, 3, 1)
INSN(LDI64, Reg, 3, 1)
INSN(LDU64, Reg, 3, 1)
INSN(LDF32, Reg, 3, 1)
INSN(LDF64, Reg, 3, 1)
INSN(LDPTR, Reg, 3, 1)
INSN(LDV64, Reg, 3, 1)
INSN(LDV128, Reg, 3, 1)
INSN(LDV256, Reg, 3, 1)
INSN(STI8, Reg, 3, 0)
INSN(STI16, Reg, 3, 0)
INSN(STI32, Reg, 3, 0)
INSN(STI64, Reg, 3, 0)
INSN(STF32, Reg, 3, 0)
INSN(STF64, Reg, 3, 0)
INSN(STPTR, Reg, 3, 0)
INSN(STV64, Reg, 3, 1)
INSN(STV128, Reg, 3, 1)
INSN(STV256, Reg, 3, 1)
/* Control instructions */
INSN(JMP, Reg, 1, 0)
INSN(BEQ, Reg, 3, 0)
INSN(BNE, Reg, 3, 0)
INSN(BGTS, Reg, 3, 0)
INSN(BGES, Reg, 3, 0)
INSN(BLTS, Reg, 3, 0)
INSN(BLES, Reg, 3, 0)
INSN(BGTU, Reg, 3, 0)
INSN(BGEU, Reg, 3, 0)
INSN(BLTU, Reg, 3, 0)
INSN(BLEU, Reg, 3, 0)
INSN(LOOKUPSWITCH, LookupSwitch, 1, 0)
/* Call and return instructions */
INSN(CALLNATIVE, VReg, 2, 1)
INSN(CALLBC, Reg, 3, 2)
INSN(RETURNBC, Reg, 3, 0)
INSN(RETURN, Reg, 1, 0)
#undef INSN
/**
* @def ANN_LABEL (TYPE, NAME)
*
* Definition of label annotations.
*
* @param TYPE type of the annotation
* @param NAME name of the annotation
*
* Each defined annotation with name NAME has the following APIs:
*
* @c jit_annl_NAME (cc, label): accesses the annotation NAME of
* label @p label
* @c jit_annl_enable_NAME (cc): enables the annotation NAME
* @c jit_annl_disable_NAME (cc): disables the annotation NAME
* @c jit_annl_is_enabled_NAME (cc): check whether the annotation NAME
* is enabled
*/
/* X-macro list of label annotations; each entry expands to whatever the
   including file defines ANN_LABEL to be. */
#ifndef ANN_LABEL
#define ANN_LABEL(TYPE, NAME)
#endif
/* Basic Block of a label. */
ANN_LABEL(JitBasicBlock *, basic_block)
/* Predecessor number of the block that is only used in
   jit_cc_update_cfg for updating the CFG. */
ANN_LABEL(uint16, pred_num)
/* Execution frequency of a block. We can split critical edges with
   empty blocks so we don't need to store frequencies of edges. */
ANN_LABEL(uint16, freq)
/* Begin bytecode instruction pointer of the block. */
ANN_LABEL(uint8 *, begin_bcip)
/* End bytecode instruction pointer of the block. */
ANN_LABEL(uint8 *, end_bcip)
/* Stack pointer offset at the end of the block. */
ANN_LABEL(uint16, end_sp)
/* The label of the next physically adjacent block. */
ANN_LABEL(JitReg, next_label)
/* Compiled code address of the block. */
ANN_LABEL(void *, jitted_addr)
#undef ANN_LABEL
/**
 * @def ANN_INSN (TYPE, NAME)
 *
 * Definition of instruction annotations.
 *
 * @param TYPE type of the annotation
 * @param NAME name of the annotation
 *
 * Each defined annotation with name NAME has the following APIs:
 *
 * @c jit_anni_NAME (cc, insn): accesses the annotation NAME of
 * instruction @p insn
 * @c jit_anni_enable_NAME (cc): enables the annotation NAME
 * @c jit_anni_disable_NAME (cc): disables the annotation NAME
 * @c jit_anni_is_enabled_NAME (cc): check whether the annotation NAME
 * is enabled
 */
/* X-macro list of instruction annotations. */
#ifndef ANN_INSN
#define ANN_INSN(TYPE, NAME)
#endif
/* A private annotation for linking instructions with the same hash
   value, which is only used by the compilation context's hash table
   of instructions. */
ANN_INSN(JitInsn *, _hash_link)
#undef ANN_INSN
/**
 * @def ANN_REG (TYPE, NAME)
 *
 * Definition of register annotations.
 *
 * @param TYPE type of the annotation
 * @param NAME name of the annotation
 *
 * Each defined annotation with name NAME has the following APIs:
 *
 * @c jit_annr_NAME (cc, reg): accesses the annotation NAME of
 * register @p reg
 * @c jit_annr_enable_NAME (cc): enables the annotation NAME
 * @c jit_annr_disable_NAME (cc): disables the annotation NAME
 * @c jit_annr_is_enabled_NAME (cc): check whether the annotation NAME
 * is enabled
 */
/* X-macro list of register annotations. */
#ifndef ANN_REG
#define ANN_REG(TYPE, NAME)
#endif
/* Defining instruction of registers satisfying SSA property. */
ANN_REG(JitInsn *, def_insn)
#undef ANN_REG

1874
core/iwasm/fast-jit/jit_ir.h Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,840 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_utils.h"
#include "jit_compiler.h"
#if BH_DEBUG != 0
#define VREG_DEF_SANITIZER
#endif
/**
 * A uint32 stack for storing distances of occurrences of virtual
 * registers.
 */
typedef struct UintStack {
    /* Capacity of the stack (number of elements allocated). */
    uint32 capacity;
    /* Top index of the stack (number of elements in use). */
    uint32 top;
    /* Elements of the stack; allocated as a trailing flexible array
       via offsetof(UintStack, elem) in uint_stack_push. */
    uint32 elem[1];
} UintStack;
/**
 * Push a value onto the stack, growing the storage on demand.
 *
 * @param stack pointer to the stack pointer (may point to NULL for an
 *        empty stack; updated in place when the stack is reallocated)
 * @param val the value to push
 *
 * @return true on success, false on allocation failure
 */
static bool
uint_stack_push(UintStack **stack, unsigned val)
{
    unsigned old_cap = 0, old_top = 0;

    if (*stack) {
        old_cap = (*stack)->capacity;
        old_top = (*stack)->top;
    }
    bh_assert(old_top <= old_cap);

    if (old_top == old_cap) {
        /* Grow by ~1.5x; a fresh stack starts with 4 elements. */
        const unsigned elem_sz = sizeof((*stack)->elem[0]);
        unsigned grown = old_cap ? old_cap + old_cap / 2 : 4;
        UintStack *fresh =
            jit_malloc(offsetof(UintStack, elem) + elem_sz * grown);

        if (!fresh)
            return false;
        fresh->capacity = grown;
        fresh->top = old_top;
        if (*stack)
            memcpy(fresh->elem, (*stack)->elem, elem_sz * old_top);
        jit_free(*stack);
        *stack = fresh;
    }

    (*stack)->elem[(*stack)->top++] = val;
    return true;
}
/* Return the value at the top of the stack (stack must be non-empty). */
static int
uint_stack_top(UintStack *stack)
{
    unsigned last = stack->top - 1;
    return stack->elem[last];
}
/* Free the stack (if any) and reset the caller's pointer to NULL. */
static void
uint_stack_delete(UintStack **stack)
{
    UintStack *doomed = *stack;
    *stack = NULL;
    jit_free(doomed);
}
/* Pop the top element; frees the whole stack when it becomes empty. */
static void
uint_stack_pop(UintStack **stack)
{
    bh_assert((*stack)->top > 0);
    (*stack)->top--;
    /**
     * TODO: an empty distances stack means no instruction uses the
     * current JitReg anymore; shall we release the HardReg and clean
     * the VirtualReg information here?
     */
    if ((*stack)->top == 0)
        uint_stack_delete(stack);
}
/**
 * Information of a virtual register.
 */
typedef struct VirtualReg {
    /* The hard register allocated to this virtual register. 0 when the
       vreg currently lives in memory (or is unallocated). */
    JitReg hreg;
    /* The spill slot allocated to this virtual register, encoded as an
       I32 constant register. 0 when no slot has been allocated. */
    JitReg slot;
    /* The hard register allocated to global virtual registers. It is 0
       for local registers, whose lifetime is within one basic block. */
    JitReg global_hreg;
    /* Distances from the beginning of basic block of all occurrences of the
       virtual register in the basic block. */
    UintStack *distances;
} VirtualReg;
/**
 * Information of a hard register.
 */
typedef struct HardReg {
    /* The virtual register this hard register is allocated to. 0 when
       the hard register is free. */
    JitReg vreg;
} HardReg;
/**
 * Information of a spill slot.
 */
typedef struct SpillSlot {
    /* The virtual register this spill slot is allocated to. 0 when the
       slot is free. */
    JitReg vreg;
} SpillSlot;
/* Per-pass state of the local register allocator. */
typedef struct RegallocContext {
    /* The compiler context. */
    JitCompContext *cc;
    /* Information of virtual registers. The register allocation must
       not increase the virtual register number during the allocation
       process. */
    VirtualReg *vregs[JIT_REG_KIND_L32];
    /* Information of hard registers. */
    HardReg *hregs[JIT_REG_KIND_L32];
    /* Number of elements in the spill_slots array. */
    uint32 spill_slot_num;
    /* Information of spill slots. */
    SpillSlot *spill_slots;
    /* The last define-released hard register. */
    JitReg last_def_released_hreg;
} RegallocContext;
/**
 * Look up the VirtualReg record of a virtual register.
 *
 * @param rc the regalloc context
 * @param vreg the virtual register (must be a variable register)
 *
 * @return the VirtualReg structure of the given virtual register
 */
static VirtualReg *
rc_get_vr(RegallocContext *rc, JitReg vreg)
{
    bh_assert(jit_reg_is_variable(vreg));
    return &rc->vregs[jit_reg_kind(vreg)][jit_reg_no(vreg)];
}
/**
 * Look up the HardReg record of a hard register.
 *
 * @param rc the regalloc context
 * @param hreg the hard register (must be a variable hard register)
 *
 * @return the HardReg structure of the given hard register
 */
static HardReg *
rc_get_hr(RegallocContext *rc, JitReg hreg)
{
    bh_assert(jit_reg_is_variable(hreg) && jit_cc_is_hreg(rc->cc, hreg));
    return &rc->hregs[jit_reg_kind(hreg)][jit_reg_no(hreg)];
}
/**
 * Look up the SpillSlot record for a slot.
 *
 * @param rc the regalloc context
 * @param slot the constant register encoding the slot index
 *
 * @return the SpillSlot of the given slot
 */
static SpillSlot *
rc_get_spill_slot(RegallocContext *rc, JitReg slot)
{
    unsigned idx = jit_cc_get_const_I32(rc->cc, slot);
    bh_assert(idx < rc->spill_slot_num);
    return rc->spill_slots + idx;
}
/**
 * Number of consecutive spill slots occupied by a register of the
 * given register's kind (0 for kinds that cannot be spilled).
 *
 * @param reg a virtual register
 *
 * @return stride in the spill slots
 */
static unsigned
get_reg_stride(JitReg reg)
{
    /* Indexed by register kind. */
    static const uint8 kind_strides[] = { 0, 1, 2, 1, 2, 2, 4, 8, 0 };
    return kind_strides[jit_reg_kind(reg)];
}
/**
 * Allocate a spill slot for the given virtual register.
 *
 * Each slot is 4 bytes; a register occupies a naturally aligned run of
 * get_reg_stride(vreg) consecutive slots.
 *
 * @param rc the regalloc context
 * @param vreg the virtual register
 *
 * @return the spill slot encoded in a constant register, 0 on failure
 *         (OOM or frame spill area exhausted)
 */
static JitReg
rc_alloc_spill_slot(RegallocContext *rc, JitReg vreg)
{
    const unsigned stride = get_reg_stride(vreg);
    unsigned mask, new_num, i, j;
    SpillSlot *slots;
    bh_assert(stride > 0);
    /* First-fit scan: step by stride so every candidate run stays
       naturally aligned. */
    for (i = 0; i < rc->spill_slot_num; i += stride)
        for (j = i;; j++) {
            if (j == i + stride)
                /* Found a free slot for vreg. */
                goto found;
            /* NOTE(review): if spill_slot_num is ever not a multiple of
               stride, j can index past the end of the array here —
               confirm growth keeps spill_slot_num stride-aligned. */
            if (rc->spill_slots[j].vreg)
                break;
        }
    /* No free slot, increase the slot number. */
    mask = stride - 1; /* stride is a power of two */
    /* Align the slot index. */
    i = (rc->spill_slot_num + mask) & ~mask;
    /* Grow by ~1.5x, starting from 32 slots. */
    new_num = i == 0 ? 32 : i + i / 2;
    if (!(slots = jit_calloc(sizeof(*slots) * new_num)))
        return 0;
    if (rc->spill_slots)
        memcpy(slots, rc->spill_slots, sizeof(*slots) * rc->spill_slot_num);
    jit_free(rc->spill_slots);
    rc->spill_slots = slots;
    rc->spill_slot_num = new_num;
found:
    /* Now, i is the first slot for vreg. */
    /* 4 bytes per slot: reject if the run overflows the frame's spill
       cache area. */
    if ((i + stride) * 4 > rc->cc->spill_cache_size)
        /* No frame space for the spill area. */
        return 0;
    /* Allocate the slot(s) to vreg. */
    for (j = i; j < i + stride; j++)
        rc->spill_slots[j].vreg = vreg;
    return jit_cc_new_const_I32(rc->cc, i);
}
/**
 * Release a spill slot run (no-op when slot_reg is 0).
 *
 * @param rc the regalloc context
 * @param slot_reg the constant register encoding the slot index
 */
static void
rc_free_spill_slot(RegallocContext *rc, JitReg slot_reg)
{
    SpillSlot *slot;
    unsigned n, stride;

    if (!slot_reg)
        return;

    slot = rc_get_spill_slot(rc, slot_reg);
    /* The run length is determined by the kind of the vreg that owns
       the slot. */
    stride = get_reg_stride(slot->vreg);
    for (n = 0; n < stride; n++)
        slot[n].vreg = 0;
}
/* Release all memory owned by the regalloc context. */
static void
rc_destroy(RegallocContext *rc)
{
    unsigned kind, n;

    for (kind = JIT_REG_KIND_VOID; kind < JIT_REG_KIND_L32; kind++) {
        const unsigned num = jit_cc_reg_num(rc->cc, kind);
        VirtualReg *vrs = rc->vregs[kind];

        /* Each vreg may still own a distances stack. */
        if (vrs)
            for (n = 0; n < num; n++)
                uint_stack_delete(&vrs[n].distances);
        jit_free(rc->vregs[kind]);
        jit_free(rc->hregs[kind]);
    }
    jit_free(rc->spill_slots);
}
/**
 * Initialize the regalloc context for a compilation context.
 *
 * @param rc the regalloc context (fully overwritten)
 * @param cc the compilation context
 *
 * @return true if succeeds, false on allocation failure (rc is
 *         destroyed before returning)
 */
static bool
rc_init(RegallocContext *rc, JitCompContext *cc)
{
    unsigned i, j;
    memset(rc, 0, sizeof(*rc));
    rc->cc = cc;
    for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) {
        const unsigned vreg_num = jit_cc_reg_num(cc, i);
        const unsigned hreg_num = jit_cc_hreg_num(cc, i);
        if (vreg_num > 0
            && !(rc->vregs[i] = jit_calloc(sizeof(VirtualReg) * vreg_num)))
            goto fail;
        if (hreg_num > 0
            && !(rc->hregs[i] = jit_calloc(sizeof(HardReg) * hreg_num)))
            goto fail;
        /* Hard registers can only be allocated to themselves. */
        /* NOTE(review): this assumes hreg_num <= vreg_num (hard
           registers occupy the first register numbers of each kind),
           otherwise rc->vregs[i] could be NULL or too small — confirm. */
        for (j = 0; j < hreg_num; j++)
            rc->vregs[i][j].global_hreg = jit_reg_new(i, j);
    }
    return true;
fail:
    rc_destroy(rc);
    return false;
}
/**
 * Check whether the given register is an allocation candidate, which
 * must be a variable register that is not a fixed hard register.
 *
 * @param cc the compilation context
 * @param reg the register
 *
 * @return true if the register is an allocation candidate
 */
static bool
is_alloc_candidate(JitCompContext *cc, JitReg reg)
{
    if (!jit_reg_is_variable(reg))
        return false;
    /* Fixed hard registers are never re-allocated. */
    return !(jit_cc_is_hreg(cc, reg) && jit_cc_is_hreg_fixed(cc, reg));
}
#ifdef VREG_DEF_SANITIZER
/**
 * Debug-only sanitizer (BH_DEBUG != 0): assert that every virtual
 * register used by @p insn has recorded occurrence distances, i.e. it
 * was seen (defined) before this use during the reverse collection.
 *
 * Operands with index < jit_insn_opnd_first_use(insn) are definitions;
 * the remaining operands are uses.
 */
static void
check_vreg_definition(RegallocContext *rc, JitInsn *insn)
{
    JitRegVec regvec = jit_insn_opnd_regs(insn);
    JitReg *regp, reg_defined = 0;
    unsigned i, first_use = jit_insn_opnd_first_use(insn);
    /* check if there is the definition of an vr before its references */
    JIT_REG_VEC_FOREACH(regvec, i, regp)
    {
        VirtualReg *vr = NULL;
        if (!is_alloc_candidate(rc->cc, *regp))
            continue;
        /* a strong assumption that there is only one defined reg */
        if (i < first_use) {
            reg_defined = *regp;
            continue;
        }
        /**
         * both definition and references are in one instruction,
         * like MOV i3, i3
         */
        if (reg_defined == *regp)
            continue;
        vr = rc_get_vr(rc, *regp);
        /* A used vreg must have pending distances recorded. */
        bh_assert(vr->distances);
    }
}
#endif
/**
 * Collect distances from the beginning of basic block of all occurrences of
 * each virtual register.
 *
 * Distances start at 1 and increase by 1 per instruction; they are
 * pushed onto each candidate register's UintStack so the allocator can
 * later pop them in reverse instruction order.
 *
 * @param rc the regalloc context
 * @param basic_block the basic block
 *
 * @return distance of the end instruction if succeeds, -1 otherwise
 */
static int
collect_distances(RegallocContext *rc, JitBasicBlock *basic_block)
{
    JitInsn *insn;
    int distance = 1;
    JIT_FOREACH_INSN(basic_block, insn)
    {
        JitRegVec regvec = jit_insn_opnd_regs(insn);
        unsigned i;
        JitReg *regp;
#ifdef VREG_DEF_SANITIZER
        check_vreg_definition(rc, insn);
#endif
        /* NOTE: the distance may be pushed more than once if the
           virtual register occurs multiple times in the
           instruction. */
        JIT_REG_VEC_FOREACH(regvec, i, regp)
            if (is_alloc_candidate(rc->cc, *regp))
                if (!uint_stack_push(&(rc_get_vr(rc, *regp))->distances, distance))
                    return -1;
        /* Integer overflow check, normally it won't happen, but
           we had better add the check here */
        if (distance >= INT32_MAX)
            return -1;
        distance++;
    }
    return distance;
}
/* Convert a slot-index constant into a frame byte-offset constant:
   4 bytes per slot, based at the frame's spill cache area. */
static JitReg
offset_of_spill_slot(JitCompContext *cc, JitReg slot)
{
    return jit_cc_new_const_I32(
        cc, cc->spill_cache_offset + jit_cc_get_const_I32(cc, slot) * 4);
}
/**
 * Reload the virtual register from memory. Reload instruction will
 * be inserted after the given instruction.
 *
 * Also unbinds the vreg from its hard register (hr->vreg and vr->hreg
 * are cleared), so the hard register becomes free afterwards.
 *
 * @param rc the regalloc context
 * @param vreg the virtual register to be reloaded
 * @param cur_insn the current instruction after which the reload
 * insertion will be inserted
 *
 * @return the reload instruction if succeeds, NULL otherwise
 */
static JitInsn *
reload_vreg(RegallocContext *rc, JitReg vreg, JitInsn *cur_insn)
{
    VirtualReg *vr = rc_get_vr(rc, vreg);
    HardReg *hr = rc_get_hr(rc, vr->hreg);
    JitInsn *insn = NULL;
    if (vreg == rc->cc->exec_env_reg)
        /* Reload exec_env_reg with LDEXECENV. */
        insn = jit_cc_new_insn(rc->cc, LDEXECENV, vr->hreg);
    else
    /* Allocate spill slot if not yet and reload from there. */
    {
        JitReg fp_reg = rc->cc->fp_reg, offset;
        if (!vr->slot && !(vr->slot = rc_alloc_spill_slot(rc, vreg)))
            /* Cannot allocate spill slot (due to OOM or frame size limit). */
            return NULL;
        offset = offset_of_spill_slot(rc->cc, vr->slot);
        /* Pick the load opcode matching the register kind. */
        switch (jit_reg_kind(vreg)) {
            case JIT_REG_KIND_I32:
                insn = jit_cc_new_insn(rc->cc, LDI32, vr->hreg, fp_reg, offset);
                break;
            case JIT_REG_KIND_I64:
                insn = jit_cc_new_insn(rc->cc, LDI64, vr->hreg, fp_reg, offset);
                break;
            case JIT_REG_KIND_F32:
                insn = jit_cc_new_insn(rc->cc, LDF32, vr->hreg, fp_reg, offset);
                break;
            case JIT_REG_KIND_F64:
                insn = jit_cc_new_insn(rc->cc, LDF64, vr->hreg, fp_reg, offset);
                break;
            case JIT_REG_KIND_V64:
                insn = jit_cc_new_insn(rc->cc, LDV64, vr->hreg, fp_reg, offset);
                break;
            case JIT_REG_KIND_V128:
                insn =
                    jit_cc_new_insn(rc->cc, LDV128, vr->hreg, fp_reg, offset);
                break;
            case JIT_REG_KIND_V256:
                insn =
                    jit_cc_new_insn(rc->cc, LDV256, vr->hreg, fp_reg, offset);
                break;
            default:
                bh_assert(0);
        }
    }
    if (insn)
        jit_insn_insert_after(cur_insn, insn);
    /* Unbind the pair in both directions. */
    bh_assert(hr->vreg == vreg);
    hr->vreg = vr->hreg = 0;
    return insn;
}
/**
 * Spill the virtual register (which cannot be exec_env_reg) to memory.
 * Spill instruction will be inserted after the given instruction.
 *
 * @param rc the regalloc context
 * @param vreg the virtual register to be spilled (must already have a
 * hard register and a spill slot)
 * @param cur_insn the current instruction after which the spill
 * insertion will be inserted
 *
 * @return the spill instruction if succeeds, NULL otherwise
 */
static JitInsn *
spill_vreg(RegallocContext *rc, JitReg vreg, JitInsn *cur_insn)
{
    VirtualReg *vr = rc_get_vr(rc, vreg);
    JitReg fp_reg = rc->cc->fp_reg, offset;
    JitInsn *insn;
    /* There is no chance to spill exec_env_reg. */
    bh_assert(vreg != rc->cc->exec_env_reg);
    bh_assert(vr->hreg && vr->slot);
    offset = offset_of_spill_slot(rc->cc, vr->slot);
    /* Pick the store opcode matching the register kind. */
    switch (jit_reg_kind(vreg)) {
        case JIT_REG_KIND_I32:
            insn = jit_cc_new_insn(rc->cc, STI32, vr->hreg, fp_reg, offset);
            break;
        case JIT_REG_KIND_I64:
            insn = jit_cc_new_insn(rc->cc, STI64, vr->hreg, fp_reg, offset);
            break;
        case JIT_REG_KIND_F32:
            insn = jit_cc_new_insn(rc->cc, STF32, vr->hreg, fp_reg, offset);
            break;
        case JIT_REG_KIND_F64:
            insn = jit_cc_new_insn(rc->cc, STF64, vr->hreg, fp_reg, offset);
            break;
        case JIT_REG_KIND_V64:
            insn = jit_cc_new_insn(rc->cc, STV64, vr->hreg, fp_reg, offset);
            break;
        case JIT_REG_KIND_V128:
            insn = jit_cc_new_insn(rc->cc, STV128, vr->hreg, fp_reg, offset);
            break;
        case JIT_REG_KIND_V256:
            insn = jit_cc_new_insn(rc->cc, STV256, vr->hreg, fp_reg, offset);
            break;
        default:
            bh_assert(0);
            return NULL;
    }
    if (insn)
        jit_insn_insert_after(cur_insn, insn);
    return insn;
}
/**
 * Allocate a hard register for the virtual register. Necessary
 * reload instruction will be inserted after the given instruction.
 *
 * Selection order: (1) the globally assigned hard register, if any;
 * (2) the last define-released hard register if the kind matches and
 * it is free; (3) any free non-fixed hard register; (4) otherwise the
 * live register whose next use is farthest away is evicted
 * (reloaded) and reused.
 *
 * @param rc the regalloc context
 * @param vreg the virtual register
 * @param insn the instruction after which the reload insertion will
 * be inserted
 * @param distance the distance of the current instruction
 *
 * @return the hard register allocated if succeeds, 0 otherwise
 */
static JitReg
allocate_hreg(RegallocContext *rc, JitReg vreg, JitInsn *insn, int distance)
{
    const int kind = jit_reg_kind(vreg);
    const HardReg *hregs = rc->hregs[kind];
    const unsigned hreg_num = jit_cc_hreg_num(rc->cc, kind);
    JitReg hreg, vreg_to_reload = 0;
    int min_distance = distance, vr_distance;
    VirtualReg *vr = rc_get_vr(rc, vreg);
    unsigned i;
    if (hreg_num == 0)
    /* Unsupported hard register kind. */
    {
        jit_set_last_error(rc->cc, "unsupported hard register kind");
        return 0;
    }
    if (vr->global_hreg)
    /* It has globally allocated register, we can only use it. */
    {
        /* Evict whatever vreg currently occupies the global hreg. */
        if ((vreg_to_reload = (rc_get_hr(rc, vr->global_hreg))->vreg))
            if (!reload_vreg(rc, vreg_to_reload, insn))
                return 0;
        return vr->global_hreg;
    }
    /* Use the last define-released register if its kind is correct and
       it's free so as to optimize for two-operand instructions. */
    if (jit_reg_kind(rc->last_def_released_hreg) == kind
        && (rc_get_hr(rc, rc->last_def_released_hreg))->vreg == 0)
        return rc->last_def_released_hreg;
    /* No hint given, just try to pick any free register. */
    for (i = 0; i < hreg_num; i++) {
        hreg = jit_reg_new(kind, i);
        if (jit_cc_is_hreg_fixed(rc->cc, hreg))
            continue;
        if (hregs[i].vreg == 0)
            /* Found a free one, return it. */
            return hreg;
    }
    /* No free registers, need to spill and reload one.
       NOTE(review): relies on at least one non-fixed candidate having a
       next-use distance < distance, otherwise hreg stays unset — the
       bh_assert below guards this in debug builds only. */
    for (i = 0; i < hreg_num; i++) {
        if (jit_cc_is_hreg_fixed(rc->cc, jit_reg_new(kind, i)))
            continue;
        vr = rc_get_vr(rc, hregs[i].vreg);
        /* TODO: since the hregs[i] is in use, its distances should be valid */
        vr_distance = vr->distances ? uint_stack_top(vr->distances) : 0;
        /* Prefer the register whose next use is farthest away. */
        if (vr_distance < min_distance) {
            min_distance = vr_distance;
            vreg_to_reload = hregs[i].vreg;
            hreg = jit_reg_new(kind, i);
        }
    }
    bh_assert(min_distance < distance);
    if (!reload_vreg(rc, vreg_to_reload, insn))
        return 0;
    return hreg;
}
/**
 * Allocate a hard register for the virtual register if not allocated
 * yet. Necessary spill and reload instructions will be inserted
 * before/after and after the given instruction. This operation will
 * convert the virtual register's state from 1 or 3 to 2.
 *
 * @param rc the regalloc context
 * @param vreg the virtual register
 * @param insn the instruction after which the spill and reload
 * insertions will be inserted
 * @param distance the distance of the current instruction
 *
 * @return the hard register allocated to the virtual register if
 * succeeds, 0 otherwise
 */
static JitReg
allocate_for_vreg(RegallocContext *rc, JitReg vreg, JitInsn *insn, int distance)
{
    VirtualReg *vr = rc_get_vr(rc, vreg);

    /* Already bound to a hard register: reuse it. */
    if (!vr->hreg) {
        /* Otherwise allocate one and record the binding both ways. */
        vr->hreg = allocate_hreg(rc, vreg, insn, distance);
        if (vr->hreg)
            rc_get_hr(rc, vr->hreg)->vreg = vreg;
    }
    return vr->hreg;
}
/**
 * Clobber live registers: evict (reload) every vreg that currently
 * occupies a caller-saved hard register of the given calling convention.
 *
 * @param rc the regalloc context
 * @param is_native whether it's native ABI or JITed ABI
 * @param insn the instruction after which the reload insertion will
 * be inserted
 *
 * @return true if succeeds, false otherwise
 */
static bool
clobber_live_regs(RegallocContext *rc, bool is_native, JitInsn *insn)
{
    unsigned kind, no;

    for (kind = JIT_REG_KIND_VOID; kind < JIT_REG_KIND_L32; kind++) {
        const unsigned num = jit_cc_hreg_num(rc->cc, kind);

        for (no = 0; no < num; no++) {
            JitReg hreg = jit_reg_new(kind, no);
            bool must_save;

            if (is_native)
                must_save = jit_cc_is_hreg_caller_saved_native(rc->cc, hreg);
            else
                must_save = jit_cc_is_hreg_caller_saved_jitted(rc->cc, hreg);

            if (must_save && rc->hregs[kind][no].vreg
                && !reload_vreg(rc, rc->hregs[kind][no].vreg, insn))
                return false;
        }
    }
    return true;
}
/**
 * Do local register allocation for the given basic block
 *
 * The block is scanned in reverse instruction order; @p distance must
 * be the value returned by collect_distances for the same block so the
 * per-vreg distance stacks can be popped in sync.
 *
 * @param rc the regalloc context
 * @param basic_block the basic block
 * @param distance the distance of the last instruction of the basic block
 *
 * @return true if succeeds, false otherwise
 */
static bool
allocate_for_basic_block(RegallocContext *rc, JitBasicBlock *basic_block,
                         int distance)
{
    JitInsn *insn;
    JIT_FOREACH_INSN_REVERSE(basic_block, insn)
    {
        JitRegVec regvec = jit_insn_opnd_regs(insn);
        unsigned first_use = jit_insn_opnd_first_use(insn);
        unsigned i;
        JitReg *regp;
        distance--;
        /* Handle defined operands first: allocate, spill if the value
           is live across this definition, then release the binding. */
        JIT_REG_VEC_FOREACH_DEF(regvec, i, regp, first_use)
            if (is_alloc_candidate(rc->cc, *regp)) {
                const JitReg vreg = *regp;
                VirtualReg *vr = rc_get_vr(rc, vreg);
                if (!(*regp = allocate_for_vreg(rc, vreg, insn, distance)))
                    return false;
                /* Spill the register if required. */
                if (vr->slot && !spill_vreg(rc, vreg, insn))
                    return false;
                bh_assert(uint_stack_top(vr->distances) == distance);
                uint_stack_pop(&vr->distances);
                /* Record the define-released hard register. */
                rc->last_def_released_hreg = vr->hreg;
                /* Release the hreg and spill slot. */
                rc_free_spill_slot(rc, vr->slot);
                (rc_get_hr(rc, vr->hreg))->vreg = 0;
                vr->hreg = vr->slot = 0;
            }
        if (insn->opcode == JIT_OP_CALLBC) {
            /* JITed ABI call: evict jitted caller-saved registers. */
            if (!clobber_live_regs(rc, false, insn))
                return false;
            /* The exec_env_reg is implicitly used by the callee. */
            if (!allocate_for_vreg(rc, rc->cc->exec_env_reg, insn, distance))
                return false;
        }
        else if (insn->opcode == JIT_OP_CALLNATIVE) {
            /* Native ABI call: evict native caller-saved registers. */
            if (!clobber_live_regs(rc, true, insn))
                return false;
        }
        /* First pass over uses: make sure every used vreg has a hard
           register (may itself trigger evictions). */
        JIT_REG_VEC_FOREACH_USE(regvec, i, regp, first_use)
            if (is_alloc_candidate(rc->cc, *regp)) {
                if (!allocate_for_vreg(rc, *regp, insn, distance))
                    return false;
            }
        /* Second pass: pop distances and rewrite operands, done only
           after all uses are allocated so none is evicted in between. */
        JIT_REG_VEC_FOREACH_USE(regvec, i, regp, first_use)
            if (is_alloc_candidate(rc->cc, *regp)) {
                VirtualReg *vr = rc_get_vr(rc, *regp);
                bh_assert(uint_stack_top(vr->distances) == distance);
                uint_stack_pop(&vr->distances);
                /* be sure that the hreg exists and hasn't been spilled out */
                bh_assert(vr->hreg != 0);
                *regp = vr->hreg;
            }
    }
    return true;
}
/**
 * Register allocation pass entry point: run local register allocation
 * over every basic block of the compilation context.
 *
 * @param cc the compilation context
 *
 * @return true if succeeds, false otherwise
 */
bool
jit_pass_regalloc(JitCompContext *cc)
{
    RegallocContext rc = { 0 };
    unsigned label_index, end_label_index;
    JitBasicBlock *basic_block;
    VirtualReg *self_vr;
    bool retval = false;
    if (!rc_init(&rc, cc))
        return false;
    /* NOTE: don't allocate new virtual registers during allocation
       because the rc->vregs array is fixed size. */
    /* TODO: allocate hard registers for global virtual registers here.
       Currently, exec_env_reg is the only global virtual register. */
    self_vr = rc_get_vr(&rc, cc->exec_env_reg);
    JIT_FOREACH_BLOCK_ENTRY_EXIT(cc, label_index, end_label_index, basic_block)
    {
        int distance;
        /* TODO: initialize hreg for live-out registers. */
        /* Pin exec_env_reg to its global hard register for each block. */
        self_vr->hreg = self_vr->global_hreg;
        (rc_get_hr(&rc, cc->exec_env_reg))->vreg = cc->exec_env_reg;
        /**
         * TODO: the allocation of a basic block keeps using vregs[]
         * and hregs[] from previous basic block
         */
        if ((distance = collect_distances(&rc, basic_block)) < 0)
            goto cleanup_and_return;
        if (!allocate_for_basic_block(&rc, basic_block, distance))
            goto cleanup_and_return;
        /* TODO: generate necessary spills for live-in registers. */
    }
    retval = true;
cleanup_and_return:
    rc_destroy(&rc);
    return retval;
}

View File

@ -0,0 +1,19 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_utils.h"
/* Allocate a zero-initialized bitmap covering bit indexes
   [begin_index, begin_index + bitnum); returns NULL on OOM. */
JitBitmap *
jit_bitmap_new(uintptr_t begin_index, unsigned bitnum)
{
    /* One byte stores 8 bits; round the byte count up. */
    unsigned map_bytes = (bitnum + 7) / 8;
    JitBitmap *bitmap = jit_calloc(offsetof(JitBitmap, map) + map_bytes);

    if (bitmap) {
        bitmap->begin_index = begin_index;
        bitmap->end_index = begin_index + bitnum;
    }
    return bitmap;
}

View File

@ -0,0 +1,136 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_UTILS_H_
#define _JIT_UTILS_H_
#include "bh_platform.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * A simple fixed size bitmap.
 */
typedef struct JitBitmap {
    /* The first valid bit index. */
    uintptr_t begin_index;
    /* The last valid bit index plus one. */
    uintptr_t end_index;
    /* The bitmap storage; allocated as a trailing flexible array in
       jit_bitmap_new, one bit per index in [begin_index, end_index). */
    uint8 map[1];
} JitBitmap;
/* Allocate uninitialized memory via the runtime allocator.
   Returns NULL on failure, mirroring wasm_runtime_malloc. */
static inline void *
jit_malloc(unsigned int size)
{
    return wasm_runtime_malloc(size);
}
/* Allocate zero-initialized memory via the runtime allocator.
   Returns NULL on failure. */
static inline void *
jit_calloc(unsigned int size)
{
    void *mem = wasm_runtime_malloc(size);

    if (mem)
        memset(mem, 0, size);
    return mem;
}
/* Free memory from jit_malloc/jit_calloc; NULL is a safe no-op. */
static inline void
jit_free(void *ptr)
{
    if (!ptr)
        return;
    wasm_runtime_free(ptr);
}
/**
* Create a new bitmap.
*
* @param begin_index the first valid bit index
* @param bitnum maximal bit number of the bitmap.
*
* @return the new bitmap if succeeds, NULL otherwise.
*/
JitBitmap *
jit_bitmap_new(uintptr_t begin_index, unsigned bitnum);
/**
 * Delete a bitmap.
 *
 * @param bitmap the bitmap to be deleted (NULL is a safe no-op, since
 * jit_free tolerates NULL)
 */
static inline void
jit_bitmap_delete(JitBitmap *bitmap)
{
    jit_free(bitmap);
}
/**
 * Check whether the given index is in the range of the bitmap.
 *
 * @param bitmap the bitmap
 * @param n the bit index
 *
 * @return true if the index is in range, false otherwise
 */
static inline bool
jit_bitmap_is_in_range(JitBitmap *bitmap, unsigned n)
{
    return bitmap->begin_index <= n && n < bitmap->end_index;
}
/**
 * Get a bit in the bitmap.
 *
 * @param bitmap the bitmap
 * @param n the n-th bit to be read (must be in range)
 *
 * @return value of the bit (0 or 1)
 */
static inline int
jit_bitmap_get_bit(JitBitmap *bitmap, unsigned n)
{
    unsigned off;

    bh_assert(n >= bitmap->begin_index && n < bitmap->end_index);
    off = n - bitmap->begin_index;
    return (bitmap->map[off >> 3] >> (off & 7)) & 1;
}
/**
 * Set a bit in the bitmap.
 *
 * @param bitmap the bitmap
 * @param n the n-th bit to be set (must be in range)
 */
static inline void
jit_bitmap_set_bit(JitBitmap *bitmap, unsigned n)
{
    unsigned off;

    bh_assert(n >= bitmap->begin_index && n < bitmap->end_index);
    off = n - bitmap->begin_index;
    bitmap->map[off >> 3] |= 1 << (off & 7);
}
/**
 * Clear a bit in the bitmap.
 *
 * @param bitmap the bitmap
 * @param n the n-th bit to be cleared (must be in range)
 */
static inline void
jit_bitmap_clear_bit(JitBitmap *bitmap, unsigned n)
{
    unsigned off;

    bh_assert(n >= bitmap->begin_index && n < bitmap->end_index);
    off = n - bitmap->begin_index;
    bitmap->map[off >> 3] &= ~(1 << (off & 7));
}
#ifdef __cplusplus
}
#endif
#endif

View File

@ -139,6 +139,9 @@ typedef struct RuntimeInitArgs {
char ip_addr[128];
int platform_port;
int instance_port;
/* Fast JIT code cache size */
uint32_t fast_jit_code_cache_size;
} RuntimeInitArgs;
#ifndef WASM_VALKIND_T_DEFINED

View File

@ -204,6 +204,10 @@ typedef struct WASMGlobalImport {
WASMModule *import_module;
WASMGlobal *import_global_linked;
#endif
#if WASM_ENABLE_FAST_JIT != 0
/* The data offset of current global in global data */
uint32 data_offset;
#endif
} WASMGlobalImport;
typedef struct WASMImport {
@ -254,12 +258,19 @@ struct WASMFunction {
uint8 *consts;
uint32 const_cell_num;
#endif
#if WASM_ENABLE_FAST_JIT != 0
void *fast_jit_jitted_code;
#endif
};
struct WASMGlobal {
uint8 type;
bool is_mutable;
InitializerExpression init_expr;
#if WASM_ENABLE_FAST_JIT != 0
/* The data offset of current global in global data */
uint32 data_offset;
#endif
};
typedef struct WASMExport {
@ -443,9 +454,12 @@ struct WASMModule {
#if WASM_ENABLE_DEBUG_INTERP != 0 || WASM_ENABLE_DEBUG_AOT != 0
bh_list fast_opcode_list;
uint8 *buf_code;
uint64 buf_code_size;
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0 || WASM_ENABLE_DEBUG_AOT != 0 \
|| WASM_ENABLE_FAST_JIT != 0
uint8 *load_addr;
uint64 load_size;
uint64 buf_code_size;
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0
@ -470,6 +484,11 @@ struct WASMModule {
#if WASM_ENABLE_LOAD_CUSTOM_SECTION != 0
WASMCustomSection *custom_section_list;
#endif
#if WASM_ENABLE_FAST_JIT != 0
/* point to JITed functions */
void **fast_jit_func_ptrs;
#endif
};
typedef struct BlockType {
@ -612,6 +631,7 @@ wasm_get_smallest_type_idx(WASMType **types, uint32 type_count,
if (wasm_type_equal(types[cur_type_idx], types[i]))
return i;
}
(void)type_count;
return cur_type_idx;
}

View File

@ -26,6 +26,10 @@ typedef struct WASMInterpFrame {
/* Instruction pointer of the bytecode array. */
uint8 *ip;
#if WASM_ENABLE_FAST_JIT != 0
uint8 *jitted_return_addr;
#endif
#if WASM_ENABLE_PERF_PROFILING != 0
uint64 time_started;
#endif
@ -47,12 +51,13 @@ typedef struct WASMInterpFrame {
WASMBranchBlock *csp_boundary;
WASMBranchBlock *csp;
/* Frame data, the layout is:
lp: param_cell_count + local_cell_count
sp_bottom to sp_boundary: stack of data
csp_bottom to csp_boundary: stack of block
ref to frame end: data types of local vairables and stack data
*/
/**
* Frame data, the layout is:
* lp: parameters and local variables
* sp_bottom to sp_boundary: wasm operand stack
* csp_bottom to csp_boundary: wasm label stack
* jit spill cache: only available for fast jit
*/
uint32 lp[1];
#endif
} WASMInterpFrame;

View File

@ -16,6 +16,9 @@
#include "../libraries/thread-mgr/thread_manager.h"
#include "../libraries/debug-engine/debug_engine.h"
#endif
#if WASM_ENABLE_FAST_JIT != 0
#include "../fast-jit/jit_compiler.h"
#endif
typedef int32 CellType_I32;
typedef int64 CellType_I64;
@ -855,6 +858,20 @@ wasm_interp_call_func_native(WASMModuleInstance *module_inst,
wasm_exec_env_set_cur_frame(exec_env, prev_frame);
}
#if WASM_ENABLE_FAST_JIT != 0
bool
jit_invoke_native(WASMExecEnv *exec_env, uint32 func_idx,
WASMInterpFrame *prev_frame)
{
WASMModuleInstance *module_inst =
(WASMModuleInstance *)exec_env->module_inst;
WASMFunctionInstance *cur_func = module_inst->functions + func_idx;
wasm_interp_call_func_native(module_inst, exec_env, cur_func, prev_frame);
return wasm_get_exception(module_inst) ? false : true;
}
#endif
#if WASM_ENABLE_MULTI_MODULE != 0
static void
wasm_interp_call_func_bytecode(WASMModuleInstance *module,
@ -3897,7 +3914,56 @@ wasm_interp_call_wasm(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
}
}
else {
#if WASM_ENABLE_FAST_JIT == 0
wasm_interp_call_func_bytecode(module_inst, exec_env, function, frame);
#else
JitGlobals *jit_globals = jit_compiler_get_jit_globals();
JitInterpSwitchInfo info;
WASMType *func_type = function->u.func->func_type;
uint8 type = func_type->result_count
? func_type->types[func_type->param_count]
: VALUE_TYPE_VOID;
#if WASM_ENABLE_REF_TYPES != 0
if (type == VALUE_TYPE_EXTERNREF || type == VALUE_TYPE_FUNCREF)
type = VALUE_TYPE_I32;
#endif
info.out.ret.last_return_type = type;
info.frame = frame;
frame->jitted_return_addr =
(uint8 *)jit_globals->return_to_interp_from_jitted;
jit_interp_switch_to_jitted(exec_env, &info,
function->u.func->fast_jit_jitted_code);
if (func_type->result_count) {
switch (type) {
case VALUE_TYPE_I32:
*(frame->sp - function->ret_cell_num) =
info.out.ret.ival[0];
break;
case VALUE_TYPE_I64:
*(frame->sp - function->ret_cell_num) =
info.out.ret.ival[0];
*(frame->sp - function->ret_cell_num + 1) =
info.out.ret.ival[1];
break;
case VALUE_TYPE_F32:
*(frame->sp - function->ret_cell_num) =
info.out.ret.fval[0];
break;
case VALUE_TYPE_F64:
*(frame->sp - function->ret_cell_num) =
info.out.ret.fval[0];
*(frame->sp - function->ret_cell_num + 1) =
info.out.ret.fval[1];
break;
default:
bh_assert(0);
break;
}
}
(void)wasm_interp_call_func_bytecode;
#endif
}
/* Output the return value to the caller */

View File

@ -14,6 +14,10 @@
#if WASM_ENABLE_DEBUG_INTERP != 0
#include "../libraries/debug-engine/debug_engine.h"
#endif
#if WASM_ENABLE_FAST_JIT != 0
#include "../fast-jit/jit_compiler.h"
#include "../fast-jit/jit_codecache.h"
#endif
/* Read a value of given type from the address pointed to by the given
pointer and increase the pointer to the position just after the
@ -2890,6 +2894,28 @@ fail:
return false;
}
#if WASM_ENABLE_FAST_JIT != 0
static void
calculate_global_data_offset(WASMModule *module)
{
uint32 i, data_offset;
data_offset = 0;
for (i = 0; i < module->import_global_count; i++) {
WASMGlobalImport *import_global =
&((module->import_globals + i)->u.global);
import_global->data_offset = data_offset;
data_offset += wasm_value_type_size(import_global->type);
}
for (i = 0; i < module->global_count; i++) {
WASMGlobal *global = module->globals + i;
global->data_offset = data_offset;
data_offset += wasm_value_type_size(global->type);
}
}
#endif
static bool
wasm_loader_prepare_bytecode(WASMModule *module, WASMFunction *func,
uint32 cur_func_idx, char *error_buf,
@ -3277,6 +3303,21 @@ load_from_sections(WASMModule *module, WASMSection *sections,
#endif
}
#if WASM_ENABLE_FAST_JIT != 0
calculate_global_data_offset(module);
if (module->function_count
&& !(module->fast_jit_func_ptrs =
loader_malloc(sizeof(void *) * module->function_count,
error_buf, error_buf_size))) {
return false;
}
if (!jit_compiler_compile_all(module)) {
set_error_buf(error_buf, error_buf_size, "fast jit compilation failed");
return false;
}
#endif
#if WASM_ENABLE_MEMORY_TRACING != 0
wasm_runtime_dump_module_mem_consumption((WASMModuleCommon *)module);
#endif
@ -3652,7 +3693,7 @@ wasm_loader_load(uint8 *buf, uint32 size,
return NULL;
}
#if WASM_ENABLE_DEBUG_INTERP != 0
#if WASM_ENABLE_DEBUG_INTERP != 0 || WASM_ENABLE_FAST_JIT != 0
module->load_addr = (uint8 *)buf;
module->load_size = size;
#endif
@ -3800,6 +3841,16 @@ wasm_loader_unload(WASMModule *module)
wasm_runtime_destroy_custom_sections(module->custom_section_list);
#endif
#if WASM_ENABLE_FAST_JIT != 0
if (module->fast_jit_func_ptrs) {
for (i = 0; i < module->function_count; i++) {
if (module->fast_jit_func_ptrs[i])
jit_code_cache_free(module->fast_jit_func_ptrs[i]);
}
wasm_runtime_free(module->fast_jit_func_ptrs);
}
#endif
wasm_runtime_free(module);
}
@ -7584,7 +7635,7 @@ re_scan:
PUSH_OFFSET_TYPE(local_type);
#else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_DEBUG_INTERP == 0)
&& (WASM_ENABLE_FAST_JIT == 0) && (WASM_ENABLE_DEBUG_INTERP == 0)
if (local_offset < 0x80) {
*p_org++ = EXT_OP_GET_LOCAL_FAST;
if (is_32bit_type(local_type)) {
@ -7648,7 +7699,7 @@ re_scan:
}
#else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_DEBUG_INTERP == 0)
&& (WASM_ENABLE_FAST_JIT == 0) && (WASM_ENABLE_DEBUG_INTERP == 0)
if (local_offset < 0x80) {
*p_org++ = EXT_OP_SET_LOCAL_FAST;
if (is_32bit_type(local_type)) {
@ -7708,7 +7759,7 @@ re_scan:
- wasm_value_type_cell_num(local_type)));
#else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_DEBUG_INTERP == 0)
&& (WASM_ENABLE_FAST_JIT == 0) && (WASM_ENABLE_DEBUG_INTERP == 0)
if (local_offset < 0x80) {
*p_org++ = EXT_OP_TEE_LOCAL_FAST;
if (is_32bit_type(local_type)) {

View File

@ -11,6 +11,10 @@
#include "wasm_runtime.h"
#include "../common/wasm_native.h"
#include "../common/wasm_memory.h"
#if WASM_ENABLE_FAST_JIT != 0
#include "../fast-jit/jit_compiler.h"
#include "../fast-jit/jit_codecache.h"
#endif
/* Read a value of given type from the address pointed to by the given
pointer and increase the pointer to the position just after the
@ -2139,6 +2143,18 @@ load_from_sections(WASMModule *module, WASMSection *sections,
}
}
#if WASM_ENABLE_FAST_JIT != 0
if (!(module->fast_jit_func_ptrs =
loader_malloc(sizeof(void *) * module->function_count, error_buf,
error_buf_size))) {
return false;
}
if (!jit_compiler_compile_all(module)) {
set_error_buf(error_buf, error_buf_size, "fast jit compilation failed");
return false;
}
#endif
#if WASM_ENABLE_MEMORY_TRACING != 0
wasm_runtime_dump_module_mem_consumption(module);
#endif
@ -2356,6 +2372,11 @@ wasm_loader_load(uint8 *buf, uint32 size, char *error_buf,
return NULL;
}
#if WASM_ENABLE_FAST_JIT != 0
module->load_addr = (uint8 *)buf;
module->load_size = size;
#endif
if (!load(buf, size, module, error_buf, error_buf_size)) {
goto fail;
}
@ -2453,6 +2474,16 @@ wasm_loader_unload(WASMModule *module)
}
#endif
#if WASM_ENABLE_FAST_JIT != 0
if (module->fast_jit_func_ptrs) {
for (i = 0; i < module->function_count; i++) {
if (module->fast_jit_func_ptrs[i])
jit_code_cache_free(module->fast_jit_func_ptrs[i]);
}
wasm_runtime_free(module->fast_jit_func_ptrs);
}
#endif
wasm_runtime_free(module);
}
@ -5778,7 +5809,8 @@ re_scan:
operand_offset = local_offset;
PUSH_OFFSET_TYPE(local_type);
#else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_FAST_JIT == 0)
if (local_offset < 0x80) {
*p_org++ = EXT_OP_GET_LOCAL_FAST;
if (is_32bit_type(local_type))
@ -5838,7 +5870,8 @@ re_scan:
POP_OFFSET_TYPE(local_type);
}
#else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_FAST_JIT == 0)
if (local_offset < 0x80) {
*p_org++ = EXT_OP_SET_LOCAL_FAST;
if (is_32bit_type(local_type))
@ -5894,7 +5927,8 @@ re_scan:
*(loader_ctx->frame_offset
- wasm_value_type_cell_num(local_type)));
#else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_FAST_JIT == 0)
if (local_offset < 0x80) {
*p_org++ = EXT_OP_TEE_LOCAL_FAST;
if (is_32bit_type(local_type))

View File

@ -56,7 +56,7 @@ wasm_load(uint8 *buf, uint32 size, char *error_buf, uint32 error_buf_size)
WASMModule *
wasm_load_from_sections(WASMSection *section_list, char *error_buf,
uint32_t error_buf_size)
uint32 error_buf_size)
{
return wasm_loader_load_from_sections(section_list, error_buf,
error_buf_size);
@ -349,6 +349,24 @@ memory_instantiate(WASMModuleInstance *module_inst, uint32 num_bytes_per_page,
}
}
#if WASM_ENABLE_FAST_JIT != 0
if (memory_data_size > 0) {
#if UINTPTR_MAX == UINT64_MAX
memory->mem_bound_check_1byte = memory_data_size - 1;
memory->mem_bound_check_2bytes = memory_data_size - 2;
memory->mem_bound_check_4bytes = memory_data_size - 4;
memory->mem_bound_check_8bytes = memory_data_size - 8;
memory->mem_bound_check_16bytes = memory_data_size - 16;
#else
memory->mem_bound_check_1byte = (uint32)memory_data_size - 1;
memory->mem_bound_check_2bytes = (uint32)memory_data_size - 2;
memory->mem_bound_check_4bytes = (uint32)memory_data_size - 4;
memory->mem_bound_check_8bytes = (uint32)memory_data_size - 8;
memory->mem_bound_check_16bytes = (uint32)memory_data_size - 16;
#endif
}
#endif
#if WASM_ENABLE_SHARED_MEMORY != 0
if (0 != os_mutex_init(&memory->mem_lock)) {
set_error_buf(error_buf, error_buf_size, "init mutex failed");
@ -693,6 +711,10 @@ functions_instantiate(const WASMModule *module, WASMModuleInstance *module_inst,
function++;
}
#if WASM_ENABLE_FAST_JIT != 0
module_inst->fast_jit_func_ptrs = module->fast_jit_func_ptrs;
#endif
bh_assert((uint32)(function - functions) == function_count);
(void)module_inst;
return functions;
@ -2470,6 +2492,22 @@ wasm_enlarge_memory(WASMModuleInstance *module, uint32 inc_page_count)
memory->memory_data_end =
memory->memory_data + memory->num_bytes_per_page * total_page_count;
#if WASM_ENABLE_FAST_JIT != 0
#if UINTPTR_MAX == UINT64_MAX
memory->mem_bound_check_1byte = total_size - 1;
memory->mem_bound_check_2bytes = total_size - 2;
memory->mem_bound_check_4bytes = total_size - 4;
memory->mem_bound_check_8bytes = total_size - 8;
memory->mem_bound_check_16bytes = total_size - 16;
#else
memory->mem_bound_check_1byte = (uint32)total_size - 1;
memory->mem_bound_check_2bytes = (uint32)total_size - 2;
memory->mem_bound_check_4bytes = (uint32)total_size - 4;
memory->mem_bound_check_8bytes = (uint32)total_size - 8;
memory->mem_bound_check_16bytes = (uint32)total_size - 16;
#endif
#endif
return ret;
}
#else
@ -2564,14 +2602,14 @@ wasm_enlarge_table(WASMModuleInstance *module_inst, uint32 table_idx,
}
#endif /* WASM_ENABLE_REF_TYPES != 0 */
bool
wasm_call_indirect(WASMExecEnv *exec_env, uint32_t tbl_idx,
uint32_t element_indices, uint32_t argc, uint32_t argv[])
static bool
call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx, uint32 elem_idx,
uint32 argc, uint32 argv[], bool check_type_idx, uint32 type_idx)
{
WASMModuleInstance *module_inst = NULL;
WASMTableInstance *table_inst = NULL;
uint32_t function_indices = 0;
WASMFunctionInstance *function_inst = NULL;
uint32 func_idx = 0;
WASMFunctionInstance *func_inst = NULL;
module_inst = (WASMModuleInstance *)exec_env->module_inst;
bh_assert(module_inst);
@ -2582,7 +2620,7 @@ wasm_call_indirect(WASMExecEnv *exec_env, uint32_t tbl_idx,
goto got_exception;
}
if (element_indices >= table_inst->cur_size) {
if (elem_idx >= table_inst->cur_size) {
wasm_set_exception(module_inst, "undefined element");
goto got_exception;
}
@ -2591,8 +2629,8 @@ wasm_call_indirect(WASMExecEnv *exec_env, uint32_t tbl_idx,
* please be aware that table_inst->base_addr may point
* to another module's table
**/
function_indices = ((uint32_t *)table_inst->base_addr)[element_indices];
if (function_indices == NULL_REF) {
func_idx = ((uint32 *)table_inst->base_addr)[elem_idx];
if (func_idx == NULL_REF) {
wasm_set_exception(module_inst, "uninitialized element");
goto got_exception;
}
@ -2600,14 +2638,29 @@ wasm_call_indirect(WASMExecEnv *exec_env, uint32_t tbl_idx,
/**
* we insist to call functions owned by the module itself
**/
if (function_indices >= module_inst->function_count) {
if (func_idx >= module_inst->function_count) {
wasm_set_exception(module_inst, "unknown function");
goto got_exception;
}
function_inst = module_inst->functions + function_indices;
func_inst = module_inst->functions + func_idx;
interp_call_wasm(module_inst, exec_env, function_inst, argc, argv);
if (check_type_idx) {
WASMType *cur_type = module_inst->module->types[type_idx];
WASMType *cur_func_type;
if (func_inst->is_import_func)
cur_func_type = func_inst->u.func_import->func_type;
else
cur_func_type = func_inst->u.func->func_type;
if (!wasm_type_equal(cur_type, cur_func_type)) {
wasm_set_exception(module_inst, "indirect call type mismatch");
goto got_exception;
}
}
interp_call_wasm(module_inst, exec_env, func_inst, argc, argv);
(void)clear_wasi_proc_exit_exception(module_inst);
return !wasm_get_exception(module_inst) ? true : false;
@ -2616,6 +2669,23 @@ got_exception:
return false;
}
/* Public call_indirect entry: invoke the function referenced by element
   `elem_idx` of table `tbl_idx` in the exec_env's module instance.
   Delegates to the shared call_indirect helper with check_type_idx=false,
   i.e. no function-type signature check is performed on this path.
   Returns true on success, false if an exception was set. */
bool
wasm_call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx, uint32 elem_idx,
uint32 argc, uint32 argv[])
{
return call_indirect(exec_env, tbl_idx, elem_idx, argc, argv, false, 0);
}
#if WASM_ENABLE_FAST_JIT != 0
/* Fast-JIT variant of call_indirect (only compiled when Fast JIT is
   enabled): same as wasm_call_indirect, but additionally passes
   check_type_idx=true so the callee's function type is verified against
   `type_idx` before the call ("indirect call type mismatch" otherwise). */
bool
jit_call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx, uint32 elem_idx,
uint32 type_idx, uint32 argc, uint32 argv[])
{
return call_indirect(exec_env, tbl_idx, elem_idx, argc, argv, true,
type_idx);
}
#endif
#if WASM_ENABLE_THREAD_MGR != 0
bool
wasm_set_aux_stack(WASMExecEnv *exec_env, uint32 start_offset, uint32 size)

View File

@ -52,6 +52,22 @@ struct WASMMemoryInstance {
Note: when memory is re-allocated, the heap data and memory data
must be copied to new memory also. */
uint8 *memory_data;
#if WASM_ENABLE_FAST_JIT != 0
#if UINTPTR_MAX == UINT64_MAX
uint64 mem_bound_check_1byte;
uint64 mem_bound_check_2bytes;
uint64 mem_bound_check_4bytes;
uint64 mem_bound_check_8bytes;
uint64 mem_bound_check_16bytes;
#else
uint32 mem_bound_check_1byte;
uint32 mem_bound_check_2bytes;
uint32 mem_bound_check_4bytes;
uint32 mem_bound_check_8bytes;
uint32 mem_bound_check_16bytes;
#endif
#endif
};
struct WASMTableInstance {
@ -167,6 +183,10 @@ struct WASMModuleInstance {
/* Array of function pointers to import functions */
void **import_func_ptrs;
#if WASM_ENABLE_FAST_JIT != 0
/* point to JITed functions */
void **fast_jit_func_ptrs;
#endif
WASMMemoryInstance **memories;
WASMTableInstance **tables;
@ -280,7 +300,7 @@ wasm_load(uint8 *buf, uint32 size, char *error_buf, uint32 error_buf_size);
WASMModule *
wasm_load_from_sections(WASMSection *section_list, char *error_buf,
uint32_t error_buf_size);
uint32 error_buf_size);
void
wasm_unload(WASMModule *module);
@ -366,16 +386,22 @@ wasm_get_app_addr_range(WASMModuleInstance *module_inst, uint32 app_offset,
uint32 *p_app_start_offset, uint32 *p_app_end_offset);
bool
wasm_get_native_addr_range(WASMModuleInstance *module_inst, uint8_t *native_ptr,
uint8_t **p_native_start_addr,
uint8_t **p_native_end_addr);
wasm_get_native_addr_range(WASMModuleInstance *module_inst, uint8 *native_ptr,
uint8 **p_native_start_addr,
uint8 **p_native_end_addr);
bool
wasm_enlarge_memory(WASMModuleInstance *module, uint32 inc_page_count);
bool
wasm_call_indirect(WASMExecEnv *exec_env, uint32_t tbl_idx,
uint32_t element_indices, uint32_t argc, uint32_t argv[]);
wasm_call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx, uint32 elem_idx,
uint32 argc, uint32 argv[]);
#if WASM_ENABLE_FAST_JIT != 0
bool
jit_call_indirect(WASMExecEnv *exec_env, uint32 tbl_idx, uint32 elem_idx,
uint32 type_idx, uint32 argc, uint32 argv[]);
#endif
#if WASM_ENABLE_THREAD_MGR != 0
bool

View File

@ -29,19 +29,20 @@ The script `runtime_lib.cmake` defines a number of variables for configuring the
cmake -DWAMR_BUILD_PLATFORM=linux -DWAMR_BUILD_TARGET=ARM
```
#### **Configure interpreter**
#### **Configure interpreters**
- **WAMR_BUILD_INTERP**=1/0: enable or disable WASM interpreter
- **WAMR_BUILD_FAST_INTERP**=1/0: build fast (default) or classic WASM interpreter.
NOTE: the fast interpreter runs ~2X faster than classic interpreter, but consumes about 2X memory to hold the WASM bytecode code.
NOTE: the fast interpreter runs ~2X faster than classic interpreter, but consumes about 2X memory to hold the pre-compiled code.
#### **Configure AOT and JIT**
#### **Configure AOT and JITs**
- **WAMR_BUILD_AOT**=1/0, default to enable if not set
- **WAMR_BUILD_JIT**=1/0, default to disable if not set
- **WAMR_BUILD_AOT**=1/0, enable AOT or not, default to enable if not set
- **WAMR_BUILD_JIT**=1/0, enable LLVM JIT or not, default to disable if not set
- **WAMR_BUILD_LAZY_JIT**=1/0, whether to use Lazy JIT mode or not when *WAMR_BUILD_JIT* is set, default to enable if not set
- **WAMR_BUILD_FAST_JIT**=1/0, enable Fast JIT or not, default to disable if not set
#### **Configure LIBC**
@ -206,8 +207,7 @@ sudo dnf install glibc-devel.i686
After installing dependencies, build the source code:
``` Bash
cd product-mini/platforms/linux/
mkdir build
cd build
mkdir build && cd build
cmake ..
make
# iwasm is generated under current directory
@ -216,30 +216,49 @@ make
By default in Linux, the `fast interpreter`, `AOT` and `Libc WASI` are enabled, and JIT is disabled.
And the build target is set to X86_64 or X86_32 depending on the platform's bitwidth.
To run a wasm file with interpreter mode:
There are in total 6 running modes supported: fast interpreter, classic interpreter, AOT, LLVM Lazy JIT, LLVM MC JIT and Fast JIT.
(1) To run a wasm file with `fast interpreter` mode - build iwasm with default build and then:
```Bash
iwasm <wasm file>
```
To run an AOT file, firstly please refer to [Build wamrc AOT compiler](../README.md#build-wamrc-aot-compiler) to build wamrc, and then:
Or
```Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_INTERP=1
make
```
(2) To disable `fast interpreter` and enable `classic interpreter` instead:
``` Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_FAST_INTERP=0
make
```
(3) To run an AOT file, firstly please refer to [Build wamrc AOT compiler](../README.md#build-wamrc-aot-compiler) to build wamrc, and then:
```Bash
wamrc -o <AOT file> <WASM file>
iwasm <AOT file>
```
To enable the `JIT` mode, firstly we should build LLVM:
(4) To enable the `LLVM Lazy JIT` mode, firstly we should build LLVM library:
``` Bash
cd product-mini/platforms/linux/
./build_llvm.sh (The llvm source code is cloned under <wamr_root_dir>/core/deps/llvm and auto built)
```
Then pass argument `-DWAMR_BUILD_JIT=1` to cmake to enable WASM JIT:
Then pass argument `-DWAMR_BUILD_JIT=1` to cmake to enable LLVM Lazy JIT:
``` Bash
mkdir build
cd build
mkdir build && cd build
cmake .. -DWAMR_BUILD_JIT=1
# or "cmake .. -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=0" to disable LLVM Lazy JIT and enable LLVM MC JIT
make
```
(5) Or disable `LLVM Lazy JIT` and enable `LLVM MC JIT` instead:
```Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_JIT=1 -DWAMR_BUILD_LAZY_JIT=0
make
```
@ -248,13 +267,13 @@ by creating threads to compile the WASM functions parallely, and for the main th
module will not be compiled until they are firstly called and haven't been compiled by the compilation threads.
To disable it and enable LLVM MC JIT instead, please pass argument `-DWAMR_BUILD_LAZY_JIT=0` to cmake.
To disable `fast interpreter` and enable `classic interpreter` instead:
(6) To enable the `Fast JIT` mode:
``` Bash
mkdir build
cd build
cmake .. -DWAMR_BUILD_FAST_INTERP=0
mkdir build && cd build
cmake .. -DWAMR_BUILD_FAST_JIT=1
make
```
The Fast JIT is a lightweight JIT engine with quick startup, small footprint and good portability, and achieves ~50% of the performance of AOT.
Linux SGX (Intel Software Guard Extension)
-------------------------

View File

@ -11,12 +11,18 @@ After installing the dependencies, build the source code:
``` Bash
source <SGX_SDK dir>/environment
cd product-mini/platforms/linux-sgx/
mkdir build
cd build
mkdir build && cd build
cmake ..
make
```
By default the `fast interpreter` and `AOT` are enabled. To enable `Fast JIT`, run:
```Bash
mkdir build && cd build
cmake .. -DWAMR_BUILD_FAST_JIT=1
make
```
This builds two libraries required by SGX application:
- libvmlib.a for Enclave part
- libvmlib_untrusted.a for App part

View File

@ -51,6 +51,11 @@ if (NOT DEFINED WAMR_BUILD_JIT)
set (WAMR_BUILD_JIT 0)
endif ()
if (NOT DEFINED WAMR_BUILD_FAST_JIT)
# Fast JIT is opt-in: default to disabled when the user has not set it
set (WAMR_BUILD_FAST_JIT 0)
endif ()
if (NOT DEFINED WAMR_BUILD_LIBC_BUILTIN)
# Enable libc builtin support by default
set (WAMR_BUILD_LIBC_BUILTIN 1)

View File

@ -44,6 +44,11 @@ if (NOT DEFINED WAMR_BUILD_JIT)
set (WAMR_BUILD_JIT 0)
endif ()
if (NOT DEFINED WAMR_BUILD_FAST_JIT)
# Fast JIT is opt-in: default to disabled when the user has not set it
set (WAMR_BUILD_FAST_JIT 0)
endif ()
if (NOT DEFINED WAMR_BUILD_LIBC_BUILTIN)
# Enable libc builtin support by default
set (WAMR_BUILD_LIBC_BUILTIN 1)

View File

@ -52,6 +52,11 @@ if (NOT DEFINED WAMR_BUILD_JIT)
set (WAMR_BUILD_JIT 0)
endif ()
if (NOT DEFINED WAMR_BUILD_FAST_JIT)
# Fast JIT is opt-in: default to disabled when the user has not set it
set (WAMR_BUILD_FAST_JIT 0)
endif ()
if (NOT DEFINED WAMR_BUILD_LIBC_BUILTIN)
# Enable libc builtin support by default
set (WAMR_BUILD_LIBC_BUILTIN 1)
@ -117,14 +122,20 @@ set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gc-sections -pie -f
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra -Wformat -Wformat-security -Wshadow")
# set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wconversion -Wsign-conversion")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wall -Wextra -Wformat -Wformat-security")
if (WAMR_BUILD_TARGET MATCHES "X86_.*" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
if (NOT (CMAKE_C_COMPILER MATCHES ".*clang.*" OR CMAKE_C_COMPILER_ID MATCHES ".*Clang"))
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mindirect-branch-register")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mindirect-branch-register")
# UNDEFINED BEHAVIOR, refer to https://en.cppreference.com/w/cpp/language/ub
if(CMAKE_BUILD_TYPE STREQUAL "Debug" AND NOT WAMR_BUILD_JIT EQUAL 1)
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined \
-fno-sanitize=bounds,bounds-strict,alignment \
-fno-sanitize-recover")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined \
-fno-sanitize=bounds,bounds-strict,alignment \
-fno-sanitize-recover")
endif()
else ()
# UNDEFINED BEHAVIOR, refer to https://en.cppreference.com/w/cpp/language/ub
@ -132,6 +143,9 @@ if (WAMR_BUILD_TARGET MATCHES "X86_.*" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fsanitize=undefined \
-fno-sanitize=bounds,alignment \
-fno-sanitize-recover")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=undefined \
-fno-sanitize=bounds,alignment \
-fno-sanitize-recover")
endif()
endif ()
endif ()

View File

@ -34,6 +34,10 @@ print_help()
#endif
printf(" --stack-size=n Set maximum stack size in bytes, default is 16 KB\n");
printf(" --heap-size=n Set maximum heap size in bytes, default is 16 KB\n");
#if WASM_ENABLE_FAST_JIT != 0
printf(" --jit-codecache-size=n Set fast jit maximum code cache size in bytes,\n");
printf(" default is %u KB\n", FAST_JIT_DEFAULT_CODE_CACHE_SIZE / 1024);
#endif
printf(" --repl Start a very simple REPL (read-eval-print-loop) mode\n"
" that runs commands in the form of \"FUNC ARG...\"\n");
#if WASM_ENABLE_LIBC_WASI != 0
@ -295,6 +299,9 @@ main(int argc, char *argv[])
uint8 *wasm_file_buf = NULL;
uint32 wasm_file_size;
uint32 stack_size = 16 * 1024, heap_size = 16 * 1024;
#if WASM_ENABLE_FAST_JIT != 0
uint32 jit_code_cache_size = FAST_JIT_DEFAULT_CODE_CACHE_SIZE;
#endif
wasm_module_t wasm_module = NULL;
wasm_module_inst_t wasm_module_inst = NULL;
RuntimeInitArgs init_args;
@ -354,6 +361,13 @@ main(int argc, char *argv[])
return print_help();
heap_size = atoi(argv[0] + 12);
}
#if WASM_ENABLE_FAST_JIT != 0
else if (!strncmp(argv[0], "--jit-codecache-size=", 21)) {
if (argv[0][21] == '\0')
return print_help();
jit_code_cache_size = atoi(argv[0] + 21);
}
#endif
#if WASM_ENABLE_LIBC_WASI != 0
else if (!strncmp(argv[0], "--dir=", 6)) {
if (argv[0][6] == '\0')
@ -471,6 +485,10 @@ main(int argc, char *argv[])
init_args.mem_alloc_option.allocator.free_func = free;
#endif
#if WASM_ENABLE_FAST_JIT != 0
init_args.fast_jit_code_cache_size = jit_code_cache_size;
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0
init_args.platform_port = 0;
init_args.instance_port = instance_port;

View File

@ -8,7 +8,7 @@
function DEBUG() {
[[ -n $(env | grep "\<DEBUG\>") ]] && $@
}
DEBUG set -xEevuo pipefail
DEBUG set -xv pipefail
function help()
{
@ -16,7 +16,7 @@ function help()
echo "-c clean previous test results, not start test"
echo "-s {suite_name} test only one suite (spec)"
echo "-m set compile target of iwasm(x86_64\x86_32\armv7_vfp\thumbv7_vfp\riscv64_lp64d\riscv64_lp64)"
echo "-t set compile type of iwasm(classic-interp\fast-interp\jit\aot)"
echo "-t set compile type of iwasm(classic-interp\fast-interp\jit\aot\fast-jit)"
echo "-M enable multi module feature"
echo "-p enable multi thread feature"
echo "-S enable SIMD feature"
@ -29,7 +29,7 @@ function help()
OPT_PARSED=""
WABT_BINARY_RELEASE="NO"
#default type
TYPE=("classic-interp" "fast-interp" "jit" "aot")
TYPE=("classic-interp" "fast-interp" "jit" "aot" "fast-jit")
#default target
TARGET="X86_64"
ENABLE_MULTI_MODULE=0
@ -80,7 +80,7 @@ do
t)
echo "set compile type of wamr " ${OPTARG}
if [[ ${OPTARG} != "classic-interp" && ${OPTARG} != "fast-interp" \
&& ${OPTARG} != "jit" && ${OPTARG} != "aot" ]]; then
&& ${OPTARG} != "jit" && ${OPTARG} != "aot" && ${OPTARG} != "fast-jit" ]]; then
echo "*----- please varify a type of compile when using -t! -----*"
help
exit 1
@ -186,11 +186,19 @@ readonly AOT_COMPILE_FLAGS="\
-DWAMR_BUILD_SPEC_TEST=1 \
-DCOLLECT_CODE_COVERAGE=${COLLECT_CODE_COVERAGE}"
readonly FAST_JIT_COMPILE_FLAGS="\
-DWAMR_BUILD_TARGET=${TARGET} \
-DWAMR_BUILD_INTERP=1 -DWAMR_BUILD_FAST_INTERP=0 \
-DWAMR_BUILD_JIT=0 -DWAMR_BUILD_AOT=0 \
-DWAMR_BUILD_FAST_JIT=1 \
-DWAMR_BUILD_SPEC_TEST=1"
readonly COMPILE_FLAGS=(
"${CLASSIC_INTERP_COMPILE_FLAGS}"
"${FAST_INTERP_COMPILE_FLAGS}"
"${JIT_COMPILE_FLAGS}"
"${AOT_COMPILE_FLAGS}"
"${FAST_JIT_COMPILE_FLAGS}"
)
# TODO: with libiwasm.so only
@ -609,6 +617,16 @@ function trigger()
collect_coverage aot
;;
"fast-jit")
echo "work in fast-jit mode"
# jit
BUILD_FLAGS="$FAST_JIT_COMPILE_FLAGS $EXTRA_COMPILE_FLAGS"
build_iwasm_with_cfg $BUILD_FLAGS
for suite in "${TEST_CASE_ARR[@]}"; do
$suite"_test" fast-jit
done
;;
*)
echo "unexpected mode, do nothing"
;;
@ -627,6 +645,6 @@ else
fi
echo -e "\033[32mTest finish. Reports are under ${REPORT_DIR} \033[0m"
DEBUG set +xEevuo pipefail
DEBUG set +xv pipefail
echo "TEST SUCCESSFUL"
exit 0