Import Fast JIT framework (#1016)

Import Fast JIT framework and translate some opcodes in the frontend.
This commit is contained in:
Wenyong Huang 2022-03-09 12:34:56 +08:00 committed by GitHub
parent 3c39054317
commit 24aae4f0d6
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
51 changed files with 10491 additions and 8 deletions

View File

@ -138,6 +138,8 @@ if (WAMR_BUILD_JIT EQUAL 1)
else () else ()
message (" WAMR LLVM MC JIT enabled") message (" WAMR LLVM MC JIT enabled")
endif () endif ()
elseif (WAMR_BUILD_FAST_JIT EQUAL 1)
message (" WAMR Fast JIT enabled")
else () else ()
message (" WAMR JIT disabled") message (" WAMR JIT disabled")
endif () endif ()

View File

@ -48,7 +48,11 @@ if (NOT DEFINED WAMR_BUILD_TARGET)
endif () endif ()
################ optional according to settings ################ ################ optional according to settings ################
if (WAMR_BUILD_INTERP EQUAL 1 OR WAMR_BUILD_JIT EQUAL 1) if (WAMR_BUILD_INTERP EQUAL 1 OR WAMR_BUILD_JIT EQUAL 1
OR WAMR_BUILD_FAST_JIT EQUAL 1)
if (WAMR_BUILD_FAST_JIT EQUAL 1)
set (WAMR_BUILD_FAST_INTERP 0)
endif ()
include (${IWASM_DIR}/interpreter/iwasm_interp.cmake) include (${IWASM_DIR}/interpreter/iwasm_interp.cmake)
endif () endif ()
@ -59,6 +63,10 @@ if (WAMR_BUILD_AOT EQUAL 1)
endif () endif ()
endif () endif ()
if (NOT WAMR_BUILD_JIT EQUAL 1 AND WAMR_BUILD_FAST_JIT EQUAL 1)
include (${IWASM_DIR}/fast-jit/iwasm_fast_jit.cmake)
endif ()
if (WAMR_BUILD_APP_FRAMEWORK EQUAL 1) if (WAMR_BUILD_APP_FRAMEWORK EQUAL 1)
include (${APP_FRAMEWORK_DIR}/app_framework.cmake) include (${APP_FRAMEWORK_DIR}/app_framework.cmake)
include (${SHARED_DIR}/coap/lib_coap.cmake) include (${SHARED_DIR}/coap/lib_coap.cmake)
@ -137,6 +145,7 @@ set (source_all
${IWASM_INTERP_SOURCE} ${IWASM_INTERP_SOURCE}
${IWASM_AOT_SOURCE} ${IWASM_AOT_SOURCE}
${IWASM_COMPL_SOURCE} ${IWASM_COMPL_SOURCE}
${IWASM_FAST_JIT_SOURCE}
${WASM_APP_LIB_SOURCE_ALL} ${WASM_APP_LIB_SOURCE_ALL}
${NATIVE_INTERFACE_SOURCE} ${NATIVE_INTERFACE_SOURCE}
${APP_MGR_SOURCE} ${APP_MGR_SOURCE}

View File

@ -84,6 +84,12 @@ typedef struct WASMExecEnv {
void **native_symbol; void **native_symbol;
#endif #endif
#if WASM_ENABLE_FAST_JIT != 0
/* Cache for jit native operations, mainly for operations of float,
double and long, such as F64TOI64, F32TOI64, I64 MUL/REM, and so on. */
uint64 jit_cache[2];
#endif
#if WASM_ENABLE_THREAD_MGR != 0 #if WASM_ENABLE_THREAD_MGR != 0
/* thread return value */ /* thread return value */
void *thread_ret_value; void *thread_ret_value;

View File

@ -27,6 +27,9 @@
#if WASM_ENABLE_SHARED_MEMORY != 0 #if WASM_ENABLE_SHARED_MEMORY != 0
#include "wasm_shared_memory.h" #include "wasm_shared_memory.h"
#endif #endif
#if WASM_ENABLE_FAST_JIT != 0
#include "../fast-jit/jit_compiler.h"
#endif
#include "../common/wasm_c_api_internal.h" #include "../common/wasm_c_api_internal.h"
#if WASM_ENABLE_MULTI_MODULE != 0 #if WASM_ENABLE_MULTI_MODULE != 0
@ -146,8 +149,20 @@ wasm_runtime_env_init()
} }
#endif #endif
#if WASM_ENABLE_FAST_JIT != 0
if (!jit_compiler_init()) {
goto fail9;
}
#endif
return true; return true;
#if WASM_ENABLE_FAST_JIT != 0
fail9:
#if WASM_ENABLE_REF_TYPES != 0
wasm_externref_map_destroy();
#endif
#endif
#if WASM_ENABLE_REF_TYPES != 0 #if WASM_ENABLE_REF_TYPES != 0
fail8: fail8:
#endif #endif
@ -208,6 +223,10 @@ wasm_runtime_init()
void void
wasm_runtime_destroy() wasm_runtime_destroy()
{ {
#if WASM_ENABLE_FAST_JIT != 0
jit_compiler_destroy();
#endif
#if WASM_ENABLE_REF_TYPES != 0 #if WASM_ENABLE_REF_TYPES != 0
wasm_externref_map_destroy(); wasm_externref_map_destroy();
#endif #endif

View File

@ -489,6 +489,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index)
break; break;
case WASM_OP_GET_GLOBAL: case WASM_OP_GET_GLOBAL:
case WASM_OP_GET_GLOBAL_64:
read_leb_uint32(frame_ip, frame_ip_end, global_idx); read_leb_uint32(frame_ip, frame_ip_end, global_idx);
if (!aot_compile_op_get_global(comp_ctx, func_ctx, global_idx)) if (!aot_compile_op_get_global(comp_ctx, func_ctx, global_idx))
return false; return false;

View File

@ -0,0 +1,120 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_codegen.h"
/* Initialize the (stub) native code generator backend.
   Nothing to set up yet, so this always succeeds. */
bool
jit_codegen_init()
{
    return true;
}

/* Tear down the code generator backend; no-op for the stub. */
void
jit_codegen_destroy()
{}
/* clang-format off */
/* Hard-register descriptions consumed by the register allocator.
   For each register class: row 0 marks fixed registers (not available
   to the allocator), row 1 marks registers that are caller-saved in
   the native ABI, row 2 marks registers that are caller-saved in
   jitted code. */
static const uint8 hreg_info_I4[3][7] = {
    /* ebp, eax, ebx, ecx, edx, edi, esi */
    { 1, 0, 0, 0, 0, 0, 1 }, /* fixed, esi is freely used */
    { 0, 1, 0, 1, 1, 0, 0 }, /* caller_saved_native */
    { 0, 1, 0, 1, 1, 1, 0 }  /* caller_saved_jitted */
};

static const uint8 hreg_info_I8[3][16] = {
    /* rbp, rax, rbx, rcx, rdx, rdi, rsi, rsp,
       r8,  r9,  r10, r11, r12, r13, r14, r15 */
    { 1, 1, 1, 1, 1, 1, 1, 1,
      0, 0, 0, 0, 0, 0, 0, 1 }, /* fixed, rsi is freely used */
    { 0, 0, 0, 0, 0, 0, 0, 0,
      1, 1, 0, 0, 0, 0, 0, 0 }, /* caller_saved_native */
    { 0, 0, 0, 0, 0, 0, 0, 0,
      1, 1, 0, 0, 0, 0, 0, 0 }, /* caller_saved_jitted */
};

/* Declared const for consistency with hreg_info_I4/I8: all of these
   tables are read-only.  NOTE(review): the original "rsi is freely
   used" comments on the float tables looked copy-pasted from the
   integer tables and were dropped; presumably the 16 columns are
   xmm0-xmm15 -- confirm. */
static const uint8 hreg_info_F4[3][16] = {
    { 0, 0, 0, 0, 0, 0, 0, 0,
      1, 1, 1, 1, 1, 1, 1, 1 }, /* fixed */
    { 1, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1, 1, 1, 1 }, /* caller_saved_native */
    { 1, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1, 1, 1, 1 }, /* caller_saved_jitted */
};

static const uint8 hreg_info_F8[3][16] = {
    { 1, 1, 1, 1, 1, 1, 1, 1,
      0, 0, 0, 0, 0, 0, 0, 0 }, /* fixed */
    { 1, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1, 1, 1, 1 }, /* caller_saved_native */
    { 1, 1, 1, 1, 1, 1, 1, 1,
      1, 1, 1, 1, 1, 1, 1, 1 }, /* caller_saved_jitted */
};

/* Aggregated register description, indexed by JIT value kind.  Each
   per-kind entry is { register count, fixed[], caller_saved_native[],
   caller_saved_jitted[] }. */
static const JitHardRegInfo hreg_info = {
    {
        { 0, NULL, NULL, NULL }, /* VOID */

        { sizeof(hreg_info_I4[0]), /* I4 */
          hreg_info_I4[0],
          hreg_info_I4[1],
          hreg_info_I4[2] },

        { sizeof(hreg_info_I8[0]), /* I8 */
          hreg_info_I8[0],
          hreg_info_I8[1],
          hreg_info_I8[2] },

        { sizeof(hreg_info_F4[0]), /* F4 */
          hreg_info_F4[0],
          hreg_info_F4[1],
          hreg_info_F4[2] },

        { sizeof(hreg_info_F8[0]), /* F8 */
          hreg_info_F8[0],
          hreg_info_F8[1],
          hreg_info_F8[2] },

        { 0, NULL, NULL, NULL }, /* V8 */
        { 0, NULL, NULL, NULL }, /* V16 */
        { 0, NULL, NULL, NULL }  /* V32 */
    },
    /* frame pointer hreg index: rbp */
    0,
    /* exec_env hreg index: r15 */
    15,
    /* cmp hreg index: esi */
    6
};
/* clang-format on */
/* Return the static hard-register description used by the register
   allocator; the returned pointer has static storage duration. */
const JitHardRegInfo *
jit_codegen_get_hreg_info()
{
    return &hreg_info;
}
/* Generate native code from the lowered IR.  Not implemented yet:
   records an error on the compile context and reports failure. */
bool
jit_codegen_gen_native(JitCompContext *cc)
{
    jit_set_last_error(cc, "jit_codegen_gen_native failed");
    return false;
}

/* Lower the IR toward the target; currently a no-op that succeeds. */
bool
jit_codegen_lower(JitCompContext *cc)
{
    return true;
}

/* Dump generated native code in [begin_addr, end_addr) for debugging;
   no-op placeholder. */
void
jit_codegen_dump_native(void *begin_addr, void *end_addr)
{}

/* Trampoline to enter jitted code.  NOTE(review): returns false without
   setting a last error -- confirm callers surface a useful message. */
bool
jit_codegen_call_func_jitted(void *exec_env, void *frame, void *func_inst,
                             void *target)
{
    return false;
}

View File

@ -0,0 +1,31 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_compare.h"
#include "../jit_frontend.h"
/* Comparison opcode translation.  All four entry points are
   placeholders that report failure until compare lowering is
   implemented; the frontend treats false as a compile error. */
bool
jit_compile_op_i32_compare(JitCompContext *cc, IntCond cond)
{
    return false;
}

bool
jit_compile_op_i64_compare(JitCompContext *cc, IntCond cond)
{
    return false;
}

bool
jit_compile_op_f32_compare(JitCompContext *cc, FloatCond cond)
{
    return false;
}

bool
jit_compile_op_f64_compare(JitCompContext *cc, FloatCond cond)
{
    return false;
}

View File

@ -0,0 +1,32 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_COMPARE_H_
#define _JIT_EMIT_COMPARE_H_

/* Declarations for comparison-opcode translation; the implementations
   in jit_emit_compare.c are currently unimplemented stubs. */

#include "../jit_compiler.h"
#include "../jit_frontend.h"

#ifdef __cplusplus
extern "C" {
#endif

bool
jit_compile_op_i32_compare(JitCompContext *cc, IntCond cond);

bool
jit_compile_op_i64_compare(JitCompContext *cc, IntCond cond);

bool
jit_compile_op_f32_compare(JitCompContext *cc, FloatCond cond);

bool
jit_compile_op_f64_compare(JitCompContext *cc, FloatCond cond);

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_COMPARE_H_ */

View File

@ -0,0 +1,47 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_const.h"
#include "../jit_frontend.h"
/* Constant opcodes: wrap the literal in a JIT constant register and
   push it onto the operand stack being compiled.  The PUSH_* macros
   (from jit_frontend.h) jump to the "fail" label on error, which is why
   each label is reachable despite the preceding return. */
bool
jit_compile_op_i32_const(JitCompContext *cc, int32 i32_const)
{
    JitReg value = NEW_CONST(I32, i32_const);
    PUSH_I32(value);
    return true;
fail:
    return false;
}

bool
jit_compile_op_i64_const(JitCompContext *cc, int64 i64_const)
{
    JitReg value = NEW_CONST(I64, i64_const);
    PUSH_I64(value);
    return true;
fail:
    return false;
}

bool
jit_compile_op_f32_const(JitCompContext *cc, float32 f32_const)
{
    JitReg value = NEW_CONST(F32, f32_const);
    PUSH_F32(value);
    return true;
fail:
    return false;
}

bool
jit_compile_op_f64_const(JitCompContext *cc, float64 f64_const)
{
    JitReg value = NEW_CONST(F64, f64_const);
    PUSH_F64(value);
    return true;
fail:
    return false;
}

View File

@ -0,0 +1,31 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_CONST_H_
#define _JIT_EMIT_CONST_H_

/* Declarations for constant-opcode translation (i32/i64/f32/f64.const);
   implemented in jit_emit_const.c. */

#include "../jit_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

bool
jit_compile_op_i32_const(JitCompContext *cc, int32 i32_const);

bool
jit_compile_op_i64_const(JitCompContext *cc, int64 i64_const);

bool
jit_compile_op_f32_const(JitCompContext *cc, float32 f32_const);

bool
jit_compile_op_f64_const(JitCompContext *cc, float64 f64_const);

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_CONST_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,55 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_CONTROL_H_
#define _JIT_EMIT_CONTROL_H_

/* Declarations for control-flow opcode translation (block/if/loop,
   branches, return, unreachable).  The *p_frame_ip parameters are
   in/out cursors into the WASM bytecode being compiled. */

#include "../jit_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

bool
jit_compile_op_block(JitCompContext *cc, uint8 **p_frame_ip,
                     uint8 *frame_ip_end, uint32 label_type, uint32 param_count,
                     uint8 *param_types, uint32 result_count,
                     uint8 *result_types);

bool
jit_compile_op_else(JitCompContext *cc, uint8 **p_frame_ip);

bool
jit_compile_op_end(JitCompContext *cc, uint8 **p_frame_ip);

bool
jit_compile_op_br(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip);

bool
jit_compile_op_br_if(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip);

bool
jit_compile_op_br_table(JitCompContext *cc, uint32 *br_depths, uint32 br_count,
                        uint8 **p_frame_ip);

bool
jit_compile_op_return(JitCompContext *cc, uint8 **p_frame_ip);

bool
jit_compile_op_unreachable(JitCompContext *cc, uint8 **p_frame_ip);

/* Skip unreachable bytecode and resume at the next reachable block. */
bool
jit_handle_next_reachable_block(JitCompContext *cc, uint8 **p_frame_ip);

#if WASM_ENABLE_THREAD_MGR != 0
bool
jit_check_suspend_flags(JitCompContext *cc);
#endif

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_CONTROL_H_ */

View File

@ -0,0 +1,115 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_conversion.h"
#include "../jit_frontend.h"
/* Numeric conversion opcodes (wrap/trunc/extend/convert/demote/promote/
   reinterpret).  All are unimplemented placeholders that report failure.
   NOTE(review): parameter naming is inconsistent (cc vs comp_ctx) --
   consider unifying on cc when these are implemented. */
bool
jit_compile_op_i32_wrap_i64(JitCompContext *cc)
{
    return false;
}

bool
jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool saturating)
{
    return false;
}

bool
jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool saturating)
{
    return false;
}

bool
jit_compile_op_i64_extend_i32(JitCompContext *comp_ctx, bool sign)
{
    return false;
}

bool
jit_compile_op_i64_extend_i64(JitCompContext *comp_ctx, int8 bitwidth)
{
    return false;
}

bool
jit_compile_op_i32_extend_i32(JitCompContext *comp_ctx, int8 bitwidth)
{
    return false;
}

bool
jit_compile_op_i64_trunc_f32(JitCompContext *cc, bool sign, bool saturating)
{
    return false;
}

bool
jit_compile_op_i64_trunc_f64(JitCompContext *cc, bool sign, bool saturating)
{
    return false;
}

bool
jit_compile_op_f32_convert_i32(JitCompContext *comp_ctx, bool sign)
{
    return false;
}

bool
jit_compile_op_f32_convert_i64(JitCompContext *comp_ctx, bool sign)
{
    return false;
}

bool
jit_compile_op_f32_demote_f64(JitCompContext *comp_ctx)
{
    return false;
}

bool
jit_compile_op_f64_convert_i32(JitCompContext *comp_ctx, bool sign)
{
    return false;
}

bool
jit_compile_op_f64_convert_i64(JitCompContext *comp_ctx, bool sign)
{
    return false;
}

bool
jit_compile_op_f64_promote_f32(JitCompContext *comp_ctx)
{
    return false;
}

bool
jit_compile_op_i64_reinterpret_f64(JitCompContext *comp_ctx)
{
    return false;
}

bool
jit_compile_op_i32_reinterpret_f32(JitCompContext *comp_ctx)
{
    return false;
}

bool
jit_compile_op_f64_reinterpret_i64(JitCompContext *comp_ctx)
{
    return false;
}

bool
jit_compile_op_f32_reinterpret_i32(JitCompContext *comp_ctx)
{
    return false;
}

View File

@ -0,0 +1,73 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_CONVERSION_H_
#define _JIT_EMIT_CONVERSION_H_

/* Declarations for numeric conversion opcode translation; the
   implementations in jit_emit_conversion.c are currently stubs. */

#include "../jit_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

bool
jit_compile_op_i32_wrap_i64(JitCompContext *cc);

bool
jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool saturating);

bool
jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool saturating);

bool
jit_compile_op_i64_extend_i32(JitCompContext *comp_ctx, bool sign);

bool
jit_compile_op_i64_extend_i64(JitCompContext *comp_ctx, int8 bitwidth);

bool
jit_compile_op_i32_extend_i32(JitCompContext *comp_ctx, int8 bitwidth);

bool
jit_compile_op_i64_trunc_f32(JitCompContext *cc, bool sign, bool saturating);

bool
jit_compile_op_i64_trunc_f64(JitCompContext *cc, bool sign, bool saturating);

bool
jit_compile_op_f32_convert_i32(JitCompContext *comp_ctx, bool sign);

bool
jit_compile_op_f32_convert_i64(JitCompContext *comp_ctx, bool sign);

bool
jit_compile_op_f32_demote_f64(JitCompContext *comp_ctx);

bool
jit_compile_op_f64_convert_i32(JitCompContext *comp_ctx, bool sign);

bool
jit_compile_op_f64_convert_i64(JitCompContext *comp_ctx, bool sign);

bool
jit_compile_op_f64_promote_f32(JitCompContext *comp_ctx);

bool
jit_compile_op_i64_reinterpret_f64(JitCompContext *comp_ctx);

bool
jit_compile_op_i32_reinterpret_f32(JitCompContext *comp_ctx);

bool
jit_compile_op_f64_reinterpret_i64(JitCompContext *comp_ctx);

bool
jit_compile_op_f32_reinterpret_i32(JitCompContext *comp_ctx);

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_CONVERSION_H_ */

View File

@ -0,0 +1,14 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_exception.h"
#include "../jit_frontend.h"
/* Emit code that raises runtime exception `exception_id`, either
   unconditionally or (when is_cond_br) guarded by cond_br_if with
   fall-through to cond_br_else_block.  Unimplemented placeholder. */
bool
jit_emit_exception(JitCompContext *cc, int32 exception_id, bool is_cond_br,
                   JitReg cond_br_if, JitBasicBlock *cond_br_else_block)
{
    return false;
}

View File

@ -0,0 +1,23 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_EXCEPTION_H_
#define _JIT_EMIT_EXCEPTION_H_

/* Declaration for runtime-exception emission (jit_emit_exception.c). */

#include "../jit_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

bool
jit_emit_exception(JitCompContext *cc, int32 exception_id, bool is_cond_br,
                   JitReg cond_br_if, JitBasicBlock *cond_br_else_block);

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_EXCEPTION_H_ */

View File

@ -0,0 +1,38 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_function.h"
#include "../jit_frontend.h"
/* Call and reference opcode translation (call, call_indirect, ref.null,
   ref.is_null, ref.func).  All are unimplemented placeholders that
   report failure. */
bool
jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call)
{
    return false;
}

bool
jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
                             uint32 tbl_idx)
{
    return false;
}

bool
jit_compile_op_ref_null(JitCompContext *cc)
{
    return false;
}

bool
jit_compile_op_ref_is_null(JitCompContext *cc)
{
    return false;
}

bool
jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx)
{
    return false;
}

View File

@ -0,0 +1,35 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_FUNCTION_H_
#define _JIT_EMIT_FUNCTION_H_

/* Declarations for call/reference opcode translation
   (jit_emit_function.c). */

#include "../jit_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

bool
jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call);

bool
jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx,
                             uint32 tbl_idx);

bool
jit_compile_op_ref_null(JitCompContext *cc);

bool
jit_compile_op_ref_is_null(JitCompContext *cc);

bool
jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx);

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_FUNCTION_H_ */

View File

@ -0,0 +1,127 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_memory.h"
#include "../jit_frontend.h"
/* Memory opcode translation (loads, stores, size/grow, bulk-memory and
   shared-memory atomics).  All are unimplemented placeholders that
   report failure; align/offset/bytes mirror the WASM memarg fields. */
bool
jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
                        uint32 bytes, bool sign, bool atomic)
{
    return false;
}

bool
jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
                        uint32 bytes, bool sign, bool atomic)
{
    return false;
}

bool
jit_compile_op_f32_load(JitCompContext *cc, uint32 align, uint32 offset)
{
    return false;
}

bool
jit_compile_op_f64_load(JitCompContext *cc, uint32 align, uint32 offset)
{
    return false;
}

bool
jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
                         uint32 bytes, bool atomic)
{
    return false;
}

bool
jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
                         uint32 bytes, bool atomic)
{
    return false;
}

bool
jit_compile_op_f32_store(JitCompContext *cc, uint32 align, uint32 offset)
{
    return false;
}

bool
jit_compile_op_f64_store(JitCompContext *cc, uint32 align, uint32 offset)
{
    return false;
}

bool
jit_compile_op_memory_size(JitCompContext *cc)
{
    return false;
}

bool
jit_compile_op_memory_grow(JitCompContext *cc)
{
    return false;
}

#if WASM_ENABLE_BULK_MEMORY != 0
bool
jit_compile_op_memory_init(JitCompContext *cc, uint32 seg_index)
{
    return false;
}

bool
jit_compile_op_data_drop(JitCompContext *cc, uint32 seg_index)
{
    return false;
}

bool
jit_compile_op_memory_copy(JitCompContext *cc)
{
    return false;
}

bool
jit_compile_op_memory_fill(JitCompContext *cc)
{
    return false;
}
#endif

#if WASM_ENABLE_SHARED_MEMORY != 0
bool
jit_compile_op_atomic_rmw(JitCompContext *cc, uint8 atomic_op, uint8 op_type,
                          uint32 align, uint32 offset, uint32 bytes)
{
    return false;
}

bool
jit_compile_op_atomic_cmpxchg(JitCompContext *cc, uint8 op_type, uint32 align,
                              uint32 offset, uint32 bytes)
{
    return false;
}

bool
jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
                           uint32 offset, uint32 bytes)
{
    return false;
}

/* NOTE(review): named jit_compiler_op_* unlike its jit_compile_op_*
   siblings -- likely a typo, but the header matches, so renaming must
   be done in both places at once. */
bool
jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
                              uint32 bytes)
{
    return false;
}
#endif

View File

@ -0,0 +1,88 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_MEMORY_H_
#define _JIT_EMIT_MEMORY_H_

/* Declarations for memory opcode translation (jit_emit_memory.c). */

#include "../jit_compiler.h"
#if WASM_ENABLE_SHARED_MEMORY != 0
#include "../../common/wasm_shared_memory.h"
#endif

#ifdef __cplusplus
extern "C" {
#endif

bool
jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset,
                        uint32 bytes, bool sign, bool atomic);

bool
jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset,
                        uint32 bytes, bool sign, bool atomic);

bool
jit_compile_op_f32_load(JitCompContext *cc, uint32 align, uint32 offset);

bool
jit_compile_op_f64_load(JitCompContext *cc, uint32 align, uint32 offset);

bool
jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset,
                         uint32 bytes, bool atomic);

bool
jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset,
                         uint32 bytes, bool atomic);

bool
jit_compile_op_f32_store(JitCompContext *cc, uint32 align, uint32 offset);

bool
jit_compile_op_f64_store(JitCompContext *cc, uint32 align, uint32 offset);

bool
jit_compile_op_memory_size(JitCompContext *cc);

bool
jit_compile_op_memory_grow(JitCompContext *cc);

#if WASM_ENABLE_BULK_MEMORY != 0
bool
jit_compile_op_memory_init(JitCompContext *cc, uint32 seg_index);

bool
jit_compile_op_data_drop(JitCompContext *cc, uint32 seg_index);

bool
jit_compile_op_memory_copy(JitCompContext *cc);

bool
jit_compile_op_memory_fill(JitCompContext *cc);
#endif

#if WASM_ENABLE_SHARED_MEMORY != 0
bool
jit_compile_op_atomic_rmw(JitCompContext *cc, uint8 atomic_op, uint8 op_type,
                          uint32 align, uint32 offset, uint32 bytes);

bool
jit_compile_op_atomic_cmpxchg(JitCompContext *cc, uint8 op_type, uint32 align,
                              uint32 offset, uint32 bytes);

bool
jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align,
                           uint32 offset, uint32 bytes);

bool
jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset,
                              uint32 bytes);
#endif

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_MEMORY_H_ */

View File

@ -0,0 +1,291 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_numberic.h"
#include "../jit_frontend.h"
#define PUSH_INT(v) \
do { \
if (is_i32) \
PUSH_I32(v); \
else \
PUSH_I64(v); \
} while (0)
#define POP_INT(v) \
do { \
if (is_i32) \
POP_I32(v); \
else \
POP_I64(v); \
} while (0)
#define PUSH_FLOAT(v) \
do { \
if (is_f32) \
PUSH_F32(v); \
else \
PUSH_F64(v); \
} while (0)
#define POP_FLOAT(v) \
do { \
if (is_f32) \
POP_F32(v); \
else \
POP_F64(v); \
} while (0)
#define DEF_INT_UNARY_OP(op, err) \
do { \
JitReg res, operand; \
POP_INT(operand); \
if (!(res = op)) { \
if (err) \
jit_set_last_error(cc, err); \
goto fail; \
} \
PUSH_INT(res); \
} while (0)
#define DEF_INT_BINARY_OP(op, err) \
do { \
JitReg res, left, right; \
POP_INT(right); \
POP_INT(left); \
if (!(res = op)) { \
if (err) \
jit_set_last_error(cc, err); \
goto fail; \
} \
PUSH_INT(res); \
} while (0)
#define DEF_FP_UNARY_OP(op, err) \
do { \
JitReg res, operand; \
POP_FLOAT(operand); \
if (!(res = op)) { \
if (err) \
jit_set_last_error(cc, err); \
goto fail; \
} \
PUSH_FLOAT(res); \
} while (0)
#define DEF_FP_BINARY_OP(op, err) \
do { \
JitReg res, left, right; \
POP_FLOAT(right); \
POP_FLOAT(left); \
if (!(res = op)) { \
if (err) \
jit_set_last_error(cc, err); \
goto fail; \
} \
PUSH_FLOAT(res); \
} while (0)
/* Bit-count opcodes (clz/ctz/popcnt) -- unimplemented placeholders
   that report failure. */
bool
jit_compile_op_i32_clz(JitCompContext *cc)
{
    return false;
}

bool
jit_compile_op_i32_ctz(JitCompContext *cc)
{
    return false;
}

bool
jit_compile_op_i32_popcnt(JitCompContext *cc)
{
    return false;
}

bool
jit_compile_op_i64_clz(JitCompContext *cc)
{
    return false;
}

bool
jit_compile_op_i64_ctz(JitCompContext *cc)
{
    return false;
}

bool
jit_compile_op_i64_popcnt(JitCompContext *cc)
{
    return false;
}
/* True when `val` is a JIT constant register holding zero; requires a
   local `is_i32` in scope to pick the 32- vs 64-bit constant reader. */
#define IS_CONST_ZERO(val)                                \
    (jit_reg_is_const(val)                                \
     && ((is_i32 && jit_cc_get_const_I32(cc, val) == 0)   \
         || (!is_i32 && jit_cc_get_const_I64(cc, val) == 0)))
/* Build an integer ADD, folding the identity x + 0 == x (and 0 + x)
   so no instruction is emitted when one operand is a constant zero.
   Returns the result register, or 0 on failure. */
static JitReg
compile_int_add(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
{
    JitReg result;

    /* Identity folds: adding a constant zero is a no-op. */
    if (IS_CONST_ZERO(left))
        return right;
    if (IS_CONST_ZERO(right))
        return left;

    if (is_i32)
        result = jit_cc_new_reg_I32(cc);
    else
        result = jit_cc_new_reg_I64(cc);
    GEN_INSN(ADD, result, left, right);
    return result;
}
/* Build an integer SUB, folding the identity x - 0 == x.  Returns the
   result register, or 0 on failure. */
static JitReg
compile_int_sub(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
{
    JitReg result;

    /* Subtracting a constant zero is a no-op. */
    if (IS_CONST_ZERO(right))
        return left;

    if (is_i32)
        result = jit_cc_new_reg_I32(cc);
    else
        result = jit_cc_new_reg_I64(cc);
    GEN_INSN(SUB, result, left, right);
    return result;
}
/* Build an integer MUL, folding x * 0 == 0 to a constant.  Returns the
   result register, or 0 on failure. */
static JitReg
compile_int_mul(JitCompContext *cc, JitReg left, JitReg right, bool is_i32)
{
    JitReg result;

    /* A constant-zero operand makes the product a constant zero. */
    if (IS_CONST_ZERO(left) || IS_CONST_ZERO(right)) {
        if (is_i32)
            return NEW_CONST(I32, 0);
        return NEW_CONST(I64, 0);
    }

    if (is_i32)
        result = jit_cc_new_reg_I32(cc);
    else
        result = jit_cc_new_reg_I64(cc);
    GEN_INSN(MUL, result, left, right);
    return result;
}
/* Division/remainder lowering -- not implemented yet (will need
   divide-by-zero and INT_MIN / -1 overflow traps); asserts in debug
   builds and reports failure. */
static bool
compile_int_div(JitCompContext *cc, IntArithmetic arith_op, bool is_i32,
                uint8 **p_frame_ip)
{
    /* TODO */
    bh_assert(0);
    return false;
}
/* Translate one integer arithmetic opcode.  ADD/SUB/MUL go through
   DEF_INT_BINARY_OP, which pops `right` then `left`, evaluates the
   compile_int_* expression, and pushes the result -- jumping to the
   `fail` label below on any error (that is why the label is reachable).
   Div/rem variants are delegated to compile_int_div (still a TODO). */
static bool
compile_op_int_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
                          bool is_i32, uint8 **p_frame_ip)
{
    switch (arith_op) {
        case INT_ADD:
            DEF_INT_BINARY_OP(compile_int_add(cc, left, right, is_i32),
                              "compile int add fail.");
            return true;
        case INT_SUB:
            DEF_INT_BINARY_OP(compile_int_sub(cc, left, right, is_i32),
                              "compile int sub fail.");
            return true;
        case INT_MUL:
            DEF_INT_BINARY_OP(compile_int_mul(cc, left, right, is_i32),
                              "compile int mul fail.");
            return true;
        case INT_DIV_S:
        case INT_DIV_U:
        case INT_REM_S:
        case INT_REM_U:
            return compile_int_div(cc, arith_op, is_i32, p_frame_ip);
        default:
            bh_assert(0);
            return false;
    }
fail:
    return false;
}
/* Compile an i32 arithmetic opcode (add/sub/mul/div/rem). */
bool
jit_compile_op_i32_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
                              uint8 **p_frame_ip)
{
    return compile_op_int_arithmetic(cc, arith_op, true, p_frame_ip);
}

/* Compile an i64 arithmetic opcode; same dispatch with is_i32 false. */
bool
jit_compile_op_i64_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
                              uint8 **p_frame_ip)
{
    return compile_op_int_arithmetic(cc, arith_op, false, p_frame_ip);
}
/* Bitwise, shift and floating-point opcode translation -- all
   unimplemented placeholders that report failure. */
bool
jit_compile_op_i32_bitwise(JitCompContext *cc, IntBitwise bitwise_op)
{
    return false;
}

bool
jit_compile_op_i64_bitwise(JitCompContext *cc, IntBitwise bitwise_op)
{
    return false;
}

bool
jit_compile_op_i32_shift(JitCompContext *cc, IntShift shift_op)
{
    return false;
}

bool
jit_compile_op_i64_shift(JitCompContext *cc, IntShift shift_op)
{
    return false;
}

bool
jit_compile_op_f32_math(JitCompContext *cc, FloatMath math_op)
{
    return false;
}

bool
jit_compile_op_f64_math(JitCompContext *cc, FloatMath math_op)
{
    return false;
}

bool
jit_compile_op_f32_arithmetic(JitCompContext *cc, FloatArithmetic arith_op)
{
    return false;
}

bool
jit_compile_op_f64_arithmetic(JitCompContext *cc, FloatArithmetic arith_op)
{
    return false;
}

bool
jit_compile_op_f32_copysign(JitCompContext *cc)
{
    return false;
}

bool
jit_compile_op_f64_copysign(JitCompContext *cc)
{
    return false;
}

View File

@ -0,0 +1,76 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_NUMBERIC_H_
#define _JIT_EMIT_NUMBERIC_H_

/* Declarations for numeric opcode translation (jit_emit_numberic.c).
   NOTE(review): "numberic" is a long-standing spelling of "numeric" in
   this module; kept for file-name consistency. */

#include "../jit_compiler.h"
#include "../jit_frontend.h"

#ifdef __cplusplus
extern "C" {
#endif

bool
jit_compile_op_i32_clz(JitCompContext *cc);

bool
jit_compile_op_i32_ctz(JitCompContext *cc);

bool
jit_compile_op_i32_popcnt(JitCompContext *cc);

bool
jit_compile_op_i64_clz(JitCompContext *cc);

bool
jit_compile_op_i64_ctz(JitCompContext *cc);

bool
jit_compile_op_i64_popcnt(JitCompContext *cc);

bool
jit_compile_op_i32_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
                              uint8 **p_frame_ip);

bool
jit_compile_op_i64_arithmetic(JitCompContext *cc, IntArithmetic arith_op,
                              uint8 **p_frame_ip);

bool
jit_compile_op_i32_bitwise(JitCompContext *cc, IntBitwise bitwise_op);

bool
jit_compile_op_i64_bitwise(JitCompContext *cc, IntBitwise bitwise_op);

bool
jit_compile_op_i32_shift(JitCompContext *cc, IntShift shift_op);

bool
jit_compile_op_i64_shift(JitCompContext *cc, IntShift shift_op);

bool
jit_compile_op_f32_math(JitCompContext *cc, FloatMath math_op);

bool
jit_compile_op_f64_math(JitCompContext *cc, FloatMath math_op);

bool
jit_compile_op_f32_arithmetic(JitCompContext *cc, FloatArithmetic arith_op);

bool
jit_compile_op_f64_arithmetic(JitCompContext *cc, FloatArithmetic arith_op);

bool
jit_compile_op_f32_copysign(JitCompContext *cc);

bool
jit_compile_op_f64_copysign(JitCompContext *cc);

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_NUMBERIC_H_ */

View File

@ -0,0 +1,112 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_parametric.h"
#include "../jit_frontend.h"
/* Pop the top entry of the current block's compile-time value stack,
   optionally returning its register (*p_value) and type (*p_type),
   validating that the entry's width matches is_32bit, and popping the
   corresponding slot(s) from the jit frame.  Frees the popped JitValue.
   Returns false (with last error set) on underflow or type mismatch. */
static bool
pop_value_from_wasm_stack(JitCompContext *cc, bool is_32bit, JitReg *p_value,
                          uint8 *p_type)
{
    JitValue *jit_value;
    uint8 type;

    if (!cc->block_stack.block_list_end) {
        jit_set_last_error(cc, "WASM block stack underflow.");
        return false;
    }
    if (!cc->block_stack.block_list_end->value_stack.value_list_end) {
        jit_set_last_error(cc, "WASM data stack underflow.");
        return false;
    }

    jit_value =
        jit_value_stack_pop(&cc->block_stack.block_list_end->value_stack);
    type = jit_value->type;
    if (p_type != NULL) {
        *p_type = jit_value->type;
    }
    if (p_value != NULL) {
        *p_value = jit_value->value;
    }
    wasm_runtime_free(jit_value);

    /* is_32: i32, f32, ref.func, ref.extern, v128 */
    /* NOTE(review): V128 being accepted in the 32-bit class looks odd;
       presumably it mirrors the interpreter's drop/select handling --
       confirm. */
    if (is_32bit
        && !(type == VALUE_TYPE_I32 || type == VALUE_TYPE_F32
#if WASM_ENABLE_REF_TYPES != 0
             || type == VALUE_TYPE_FUNCREF || type == VALUE_TYPE_EXTERNREF
#endif
             || type == VALUE_TYPE_V128)) {
        jit_set_last_error(cc, "invalid WASM stack data type.");
        return false;
    }
    /* !is_32: i64, f64 */
    if (!is_32bit && !(type == VALUE_TYPE_I64 || type == VALUE_TYPE_F64)) {
        jit_set_last_error(cc, "invalid WASM stack data type.");
        return false;
    }

    /* NOTE(review): no V128 arm and no default here, so a V128 value
       passes the check above but pops nothing from the jit frame --
       confirm this is intended. */
    switch (type) {
        case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
        case VALUE_TYPE_FUNCREF:
        case VALUE_TYPE_EXTERNREF:
#endif
            pop_i32(cc->jit_frame);
            break;
        case VALUE_TYPE_I64:
            pop_i64(cc->jit_frame);
            break;
        case VALUE_TYPE_F32:
            pop_f32(cc->jit_frame);
            break;
        case VALUE_TYPE_F64:
            pop_f64(cc->jit_frame);
            break;
    }
    return true;
}
/* Translate the drop opcode: discard the top operand-stack value.
   is_drop_32 selects the 32-bit (drop) vs 64-bit (drop_64) variant. */
bool
jit_compile_op_drop(JitCompContext *cc, bool is_drop_32)
{
    return pop_value_from_wasm_stack(cc, is_drop_32, NULL, NULL);
}
/* Translate the select opcode: pop the i32 condition and two values of
   identical type, then emit SELECTNE -- presumably yielding val1 when
   the condition is non-zero and val2 otherwise, matching WASM select
   semantics.  POP_I32/PUSH jump to `fail` on error. */
bool
jit_compile_op_select(JitCompContext *cc, bool is_select_32)
{
    JitReg val1, val2, cond, selected;
    uint8 val1_type, val2_type;

    POP_I32(cond);

    /* Operands were pushed val1 then val2, so val2 is popped first. */
    if (!pop_value_from_wasm_stack(cc, is_select_32, &val2, &val2_type)
        || !pop_value_from_wasm_stack(cc, is_select_32, &val1, &val1_type)) {
        return false;
    }

    if (val1_type != val2_type) {
        jit_set_last_error(cc, "invalid stack values with different type");
        return false;
    }

    if (is_select_32)
        selected = jit_cc_new_reg_I32(cc);
    else
        selected = jit_cc_new_reg_I64(cc);

    GEN_INSN(SELECTNE, selected, cond, val1, val2);
    PUSH(selected, val1_type);
    return true;
fail:
    return false;
}

View File

@ -0,0 +1,25 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_PARAMETRIC_H_
#define _JIT_EMIT_PARAMETRIC_H_

/* Declarations for parametric opcode translation (drop/select);
   implemented in jit_emit_parametric.c. */

#include "../jit_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

bool
jit_compile_op_drop(JitCompContext *cc, bool is_drop_32);

bool
jit_compile_op_select(JitCompContext *cc, bool is_select_32);

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_PARAMETRIC_H_ */

View File

@ -0,0 +1,57 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_table.h"
#include "../jit_frontend.h"
/* Table opcode translation (elem.drop, table.get/set/init/copy/size/
   grow/fill) -- all unimplemented placeholders that report failure. */
bool
jit_compile_op_elem_drop(JitCompContext *cc, uint32 tbl_seg_idx)
{
    return false;
}

bool
jit_compile_op_table_get(JitCompContext *cc, uint32 tbl_idx)
{
    return false;
}

bool
jit_compile_op_table_set(JitCompContext *cc, uint32 tbl_idx)
{
    return false;
}

bool
jit_compile_op_table_init(JitCompContext *cc, uint32 tbl_idx,
                          uint32 tbl_seg_idx)
{
    return false;
}

bool
jit_compile_op_table_copy(JitCompContext *cc, uint32 src_tbl_idx,
                          uint32 dst_tbl_idx)
{
    return false;
}

bool
jit_compile_op_table_size(JitCompContext *cc, uint32 tbl_idx)
{
    return false;
}

bool
jit_compile_op_table_grow(JitCompContext *cc, uint32 tbl_idx)
{
    return false;
}

bool
jit_compile_op_table_fill(JitCompContext *cc, uint32 tbl_idx)
{
    return false;
}

View File

@ -0,0 +1,45 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_TABLE_H_
#define _JIT_EMIT_TABLE_H_

/* Declarations for table opcode translation (jit_emit_table.c). */

#include "../jit_compiler.h"

#ifdef __cplusplus
extern "C" {
#endif

bool
jit_compile_op_elem_drop(JitCompContext *cc, uint32 tbl_seg_idx);

bool
jit_compile_op_table_get(JitCompContext *cc, uint32 tbl_idx);

bool
jit_compile_op_table_set(JitCompContext *cc, uint32 tbl_idx);

bool
jit_compile_op_table_init(JitCompContext *cc, uint32 tbl_idx,
                          uint32 tbl_seg_idx);

bool
jit_compile_op_table_copy(JitCompContext *cc, uint32 src_tbl_idx,
                          uint32 dst_tbl_idx);

bool
jit_compile_op_table_size(JitCompContext *cc, uint32 tbl_idx);

bool
jit_compile_op_table_grow(JitCompContext *cc, uint32 tbl_idx);

bool
jit_compile_op_table_fill(JitCompContext *cc, uint32 tbl_idx);

#ifdef __cplusplus
} /* end of extern "C" */
#endif

#endif /* end of _JIT_EMIT_TABLE_H_ */

View File

@ -0,0 +1,175 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_emit_variable.h"
#include "../jit_frontend.h"
/* Validate a local index against params + declared locals; on failure
   record an error on cc and jump to the enclosing function's "fail"
   label (requires `wasm_func`, `cc` and a `fail:` label in scope). */
#define CHECK_LOCAL(idx)                                                     \
    do {                                                                     \
        if (idx                                                              \
            >= wasm_func->func_type->param_count + wasm_func->local_count) { \
            jit_set_last_error(cc, "local index out of range");              \
            goto fail;                                                       \
        }                                                                    \
    } while (0)
/* Return the wasm value type of a local: parameters come first in the
   local index space, followed by the function's declared locals. */
static uint8
get_local_type(const WASMFunction *wasm_func, uint32 local_idx)
{
    uint32 num_params = wasm_func->func_type->param_count;

    if (local_idx < num_params)
        return wasm_func->func_type->types[local_idx];
    return wasm_func->local_types[local_idx - num_params];
}
/* Translate local.get: load the local's current value (from register
   cache or interpreter frame) and push it on the value stack. */
bool
jit_compile_op_get_local(JitCompContext *cc, uint32 local_idx)
{
    WASMFunction *wasm_func = cc->cur_wasm_func;
    uint16 *local_offsets = wasm_func->local_offsets;
    uint16 local_offset;
    uint8 local_type;
    JitReg value;
    CHECK_LOCAL(local_idx);
    local_offset = local_offsets[local_idx];
    local_type = get_local_type(wasm_func, local_idx);
    switch (local_type) {
        case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
        /* reference types are stored as 32-bit values */
        case VALUE_TYPE_EXTERNREF:
        case VALUE_TYPE_FUNCREF:
#endif
            value = local_i32(cc->jit_frame, local_offset);
            break;
        case VALUE_TYPE_I64:
            value = local_i64(cc->jit_frame, local_offset);
            break;
        case VALUE_TYPE_F32:
            value = local_f32(cc->jit_frame, local_offset);
            break;
        case VALUE_TYPE_F64:
            value = local_f64(cc->jit_frame, local_offset);
            break;
        default:
            /* NOTE(review): in release builds bh_assert is a no-op and
               `value` would be pushed uninitialized -- verify all local
               types are covered above. */
            bh_assert(0);
            break;
    }
    PUSH(value, local_type);
    return true;
fail:
    return false;
}
/* Translate local.set: pop a value of the local's type and record it
   as the local's new (dirty) value in the JIT frame. */
bool
jit_compile_op_set_local(JitCompContext *cc, uint32 local_idx)
{
    WASMFunction *wasm_func = cc->cur_wasm_func;
    uint16 *local_offsets = wasm_func->local_offsets;
    uint16 local_offset;
    uint8 local_type;
    JitReg value;
    CHECK_LOCAL(local_idx);
    local_offset = local_offsets[local_idx];
    local_type = get_local_type(wasm_func, local_idx);
    switch (local_type) {
        case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
        /* reference types are stored as 32-bit values */
        case VALUE_TYPE_EXTERNREF:
        case VALUE_TYPE_FUNCREF:
#endif
            POP_I32(value);
            set_local_i32(cc->jit_frame, local_offset, value);
            break;
        case VALUE_TYPE_I64:
            POP_I64(value);
            set_local_i64(cc->jit_frame, local_offset, value);
            break;
        case VALUE_TYPE_F32:
            POP_F32(value);
            set_local_f32(cc->jit_frame, local_offset, value);
            break;
        case VALUE_TYPE_F64:
            POP_F64(value);
            set_local_f64(cc->jit_frame, local_offset, value);
            break;
        default:
            bh_assert(0);
            break;
    }
    return true;
fail:
    return false;
}
/* Translate local.tee: like local.set but the value remains on the
   stack (pop, store to local, push back). */
bool
jit_compile_op_tee_local(JitCompContext *cc, uint32 local_idx)
{
    WASMFunction *wasm_func = cc->cur_wasm_func;
    uint16 *local_offsets = wasm_func->local_offsets;
    uint16 local_offset;
    uint8 local_type;
    JitReg value;
    CHECK_LOCAL(local_idx);
    local_offset = local_offsets[local_idx];
    local_type = get_local_type(wasm_func, local_idx);
    switch (local_type) {
        case VALUE_TYPE_I32:
#if WASM_ENABLE_REF_TYPES != 0
        /* reference types are stored as 32-bit values */
        case VALUE_TYPE_EXTERNREF:
        case VALUE_TYPE_FUNCREF:
#endif
            POP_I32(value);
            set_local_i32(cc->jit_frame, local_offset, value);
            PUSH_I32(value);
            break;
        case VALUE_TYPE_I64:
            POP_I64(value);
            set_local_i64(cc->jit_frame, local_offset, value);
            PUSH_I64(value);
            break;
        case VALUE_TYPE_F32:
            POP_F32(value);
            set_local_f32(cc->jit_frame, local_offset, value);
            PUSH_F32(value);
            break;
        case VALUE_TYPE_F64:
            POP_F64(value);
            set_local_f64(cc->jit_frame, local_offset, value);
            PUSH_F64(value);
            break;
        default:
            bh_assert(0);
            break;
    }
    return true;
fail:
    return false;
}
/* Stub: global.get is not yet translated; returning false aborts
   compilation of functions that use it. */
bool
jit_compile_op_get_global(JitCompContext *cc, uint32 global_idx)
{
    /* TODO: translate global.get */
    return false;
}
/* Stub: global.set (is_aux_stack marks the auxiliary stack-pointer
   global) is not yet translated. */
bool
jit_compile_op_set_global(JitCompContext *cc, uint32 global_idx,
                          bool is_aux_stack)
{
    /* TODO: translate global.set */
    return false;
}

View File

@ -0,0 +1,35 @@
/*
* Copyright (C) 2019 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_EMIT_VARIABLE_H_
#define _JIT_EMIT_VARIABLE_H_
#include "../jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
jit_compile_op_get_local(JitCompContext *cc, uint32 local_idx);
bool
jit_compile_op_set_local(JitCompContext *cc, uint32 local_idx);
bool
jit_compile_op_tee_local(JitCompContext *cc, uint32 local_idx);
bool
jit_compile_op_get_global(JitCompContext *cc, uint32 global_idx);
bool
jit_compile_op_set_global(JitCompContext *cc, uint32 global_idx,
bool is_aux_stack);
#ifdef __cplusplus
} /* end of extern "C" */
#endif
#endif /* end of _JIT_EMIT_VARIABLE_H_ */

View File

@ -0,0 +1,18 @@
# Copyright (C) 2019 Intel Corporation. All rights reserved.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
set (IWASM_FAST_JIT_DIR ${CMAKE_CURRENT_LIST_DIR})
add_definitions (-DWASM_ENABLE_FAST_JIT=1)
include_directories (${IWASM_FAST_JIT_DIR})
file (GLOB c_source_jit ${IWASM_FAST_JIT_DIR}/*.c ${IWASM_FAST_JIT_DIR}/fe/*.c)
if (WAMR_BUILD_TARGET STREQUAL "X86_64" OR WAMR_BUILD_TARGET STREQUAL "AMD_64")
file (GLOB_RECURSE c_source_jit_cg ${IWASM_FAST_JIT_DIR}/cg/x86-64/*.c)
else ()
message (FATAL_ERROR "Fast JIT codegen for target ${WAMR_BUILD_TARGET} isn't implemented")
endif ()
set (IWASM_FAST_JIT_SOURCE ${c_source_jit} ${c_source_jit_cg})

View File

@ -0,0 +1,61 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_codecache.h"
#include "mem_alloc.h"
#include "jit_compiler.h"
static void *code_cache_pool = NULL;
static uint32 code_cache_pool_size = 0;
static mem_allocator_t code_cache_pool_allocator = NULL;
/* Create the executable code cache: one RWX mapping managed by a
   mem_allocator. Returns true on success; on failure all globals are
   left NULL/zero. */
bool
jit_code_cache_init(uint32 code_cache_size)
{
    /* The cache stores generated machine code, so the mapping must be
       readable, writable and executable. */
    void *pool = os_mmap(NULL, code_cache_size,
                         MMAP_PROT_READ | MMAP_PROT_WRITE | MMAP_PROT_EXEC,
                         MMAP_MAP_NONE);

    if (!pool)
        return false;

    code_cache_pool_allocator = mem_allocator_create(pool, code_cache_size);
    if (!code_cache_pool_allocator) {
        os_munmap(pool, code_cache_size);
        return false;
    }

    code_cache_pool = pool;
    code_cache_pool_size = code_cache_size;
    return true;
}
/* Tear down the code cache: destroy the allocator, then unmap the
   RWX pool (reverse order of jit_code_cache_init). */
void
jit_code_cache_destroy()
{
    mem_allocator_destroy(code_cache_pool_allocator);
    os_munmap(code_cache_pool, code_cache_pool_size);
}
/* Allocate a chunk of executable memory from the code cache pool. */
void *
jit_code_cache_malloc(uint32 size)
{
    return mem_allocator_malloc(code_cache_pool_allocator, size);
}
/* Return a chunk previously obtained from jit_code_cache_malloc;
   NULL is accepted as a no-op. */
void
jit_code_cache_free(void *ptr)
{
    if (ptr)
        mem_allocator_free(code_cache_pool_allocator, ptr);
}
/* Compiler pass: publish the generated native code for execution. */
bool
jit_pass_register_jitted_code(JitCompContext *cc)
{
    /* TODO */
    return false;
}

View File

@ -0,0 +1,31 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_CODE_CACHE_H_
#define _JIT_CODE_CACHE_H_
#include "bh_platform.h"
#ifdef __cplusplus
extern "C" {
#endif
/* Initialize the executable code cache with the given size. */
bool
jit_code_cache_init(uint32 code_cache_size);
/* Destroy the code cache created by jit_code_cache_init. */
void
jit_code_cache_destroy();
/* Allocate executable memory from the code cache.
   Fixed: was declared as jit_code_cache_alloc, but the implementation
   in jit_codecache.c defines jit_code_cache_malloc. */
void *
jit_code_cache_malloc(uint32 size);
/* Free memory obtained from jit_code_cache_malloc (NULL is a no-op). */
void
jit_code_cache_free(void *ptr);
#ifdef __cplusplus
}
#endif
#endif /* end of _JIT_CODE_CACHE_H_ */

View File

@ -0,0 +1,26 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_compiler.h"
#include "jit_codegen.h"
/* Pass: lower target-unsupported IR operations via the codegen
   backend. */
bool
jit_pass_lower_cg(JitCompContext *cc)
{
    return jit_codegen_lower(cc);
}
/* Pass: emit native code for the compilation context. */
bool
jit_pass_codegen(JitCompContext *cc)
{
#if 0
    bh_assert(jit_annl_is_enabled_next_label(cc));
    if (!jit_annl_enable_jitted_addr(cc))
        return false;
#endif
    return jit_codegen_gen_native(cc);
}

View File

@ -0,0 +1,80 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_CODEGEN_H_
#define _JIT_CODEGEN_H_
#include "bh_platform.h"
#include "jit_ir.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Initialize codegen module, such as instruction encoder.
*
* @return true if succeeded; false if failed.
*/
bool
jit_codegen_init();
/**
* Destroy codegen module, such as instruction encoder.
*/
void
jit_codegen_destroy();
/**
* Get hard register information of each kind.
*
* @return the JitHardRegInfo array of each kind
*/
const JitHardRegInfo *
jit_codegen_get_hreg_info();
/**
* Generate native code for the given compilation context
*
* @param cc the compilation context that is ready to do codegen
*
* @return true if succeeds, false otherwise
*/
bool
jit_codegen_gen_native(JitCompContext *cc);
/**
* lower unsupported operations to supported ones for the target.
*
* @param cc the compilation context that is ready to do codegen
*
* @return true if succeeds, false otherwise
*/
bool
jit_codegen_lower(JitCompContext *cc);
/**
* Dump native code in the given range to assembly.
*
* @param begin_addr begin address of the native code
* @param end_addr end address of the native code
*/
void
jit_codegen_dump_native(void *begin_addr, void *end_addr);
/**
* Call jitted code
*
* @param exec_env the current exec_env
*/
bool
jit_codegen_call_func_jitted(void *exec_env, void *frame, void *func_inst,
void *target);
#ifdef __cplusplus
}
#endif
#endif /* end of _JIT_CODEGEN_H_ */

View File

@ -0,0 +1,196 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_compiler.h"
#include "jit_ir.h"
#include "jit_codegen.h"
#include "jit_codecache.h"
#include "../interpreter/wasm.h"
/* Global configuration of the Fast JIT compiler. */
typedef struct JitGlobals {
    /* Compiler pass sequence. The last element must be 0. */
    const uint8 *passes;
    /* Code cache size. */
    uint32 code_cache_size;
} JitGlobals;
/* One entry in the compiler pass registry. */
typedef struct JitCompilerPass {
    /* Name of the pass. */
    const char *name;
    /* The entry of the compiler pass. */
    bool (*run)(JitCompContext *cc);
} JitCompilerPass;
/* clang-format off */
/* Pass registry; index 0 is reserved so that pass numbers in the
   pass-sequence arrays below start at 1. */
static JitCompilerPass compiler_passes[] = {
    { NULL, NULL },
#define REG_PASS(name) { #name, jit_pass_##name }
    REG_PASS(dump),
    REG_PASS(update_cfg),
    REG_PASS(frontend),
    REG_PASS(lower_fe),
    REG_PASS(lower_cg),
    REG_PASS(regalloc),
    REG_PASS(codegen),
    REG_PASS(register_jitted_code)
#undef REG_PASS
};
/* Number of compiler passes. */
#define COMPILER_PASS_NUM (sizeof(compiler_passes) / sizeof(compiler_passes[0]))
/* Fixed: the unconditional #define silently overrode (or collided
   with) a -DWASM_ENABLE_FAST_JIT_DUMP=0 supplied by the build system;
   guard it so the build flag wins and dumping stays the default. */
#ifndef WASM_ENABLE_FAST_JIT_DUMP
#define WASM_ENABLE_FAST_JIT_DUMP 1
#endif
#if WASM_ENABLE_FAST_JIT_DUMP == 0
/* Pass sequence: frontend(3), lower_fe(4), lower_cg(5), regalloc(6),
   codegen(7), register_jitted_code(8); 0 terminates. */
static const uint8 compiler_passes_without_dump[] = {
    3, 4, 5, 6, 7, 8, 0
};
#else
/* Same sequence interleaved with update_cfg(2)/dump(1) for readable
   IR dumps between passes. */
static const uint8 compiler_passes_with_dump[] = {
    3, 2, 1, 4, 1, 5, 1, 6, 1, 7, 1, 8, 0
};
#endif
/* The exported global data of JIT compiler. */
JitGlobals jit_globals = {
#if WASM_ENABLE_FAST_JIT_DUMP == 0
    .passes = compiler_passes_without_dump,
#else
    .passes = compiler_passes_with_dump,
#endif
    .code_cache_size = 10 * 1024 * 1024
};
/* clang-format on */
/* Run the configured pass sequence over the compilation context.
   Stops at the first failing pass and returns false; true if all
   passes succeed. */
static bool
apply_compiler_passes(JitCompContext *cc)
{
    const uint8 *p = jit_globals.passes;
    for (; *p; p++) {
        /* Set the pass NO (position in the pass sequence). */
        cc->cur_pass_no = (int)(p - jit_globals.passes);
        bh_assert(*p < COMPILER_PASS_NUM);
        if (!compiler_passes[*p].run(cc)) {
            /* Fixed: "%d" was fed a ptrdiff_t, which is undefined
               behavior in a variadic call on LP64 targets; cast to
               int to match the format specifier. */
            LOG_VERBOSE("JIT: compilation failed at pass[%d] = %s\n",
                        (int)(p - jit_globals.passes),
                        compiler_passes[*p].name);
            return false;
        }
    }
    return true;
}
/* Initialize the Fast JIT compiler: code cache first, then the
   codegen backend. On partial failure, already-initialized parts are
   torn down. */
bool
jit_compiler_init()
{
    /* TODO: get code cache size with global configs */
    if (jit_code_cache_init(jit_globals.code_cache_size)) {
        if (jit_codegen_init())
            return true;
        /* Codegen failed: release the code cache again. */
        jit_code_cache_destroy();
    }
    return false;
}
/* Shut down the compiler in reverse order of jit_compiler_init. */
void
jit_compiler_destroy()
{
    jit_codegen_destroy();
    jit_code_cache_destroy();
}
/* Look up a pass name by its index into compiler_passes; NULL for an
   out-of-range index. */
const char *
jit_compiler_get_pass_name(unsigned i)
{
    if (i >= COMPILER_PASS_NUM)
        return NULL;
    return compiler_passes[i].name;
}
/* Compile a single wasm function to native code.
   Creates a fresh compilation context, runs the configured passes and
   deletes the context again. Returns false on any failure. */
bool
jit_compiler_compile(WASMModule *module, uint32 func_idx)
{
    JitCompContext *cc;
    bool ret = true;
    /* Initialize compilation context. */
    if (!(cc = jit_calloc(sizeof(*cc))))
        return false;
    if (!jit_cc_init(cc, 64)) {
        jit_free(cc);
        return false;
    }
    cc->cur_wasm_module = module;
    cc->cur_wasm_func = module->functions[func_idx];
    cc->cur_wasm_func_idx = func_idx;
    /* Memory layout is stable if the function neither grows memory nor
       calls out (callees could grow it), or the module can never grow
       memory at all -- enables caching of memory base/bound. */
    cc->mem_space_unchanged = (!cc->cur_wasm_func->has_op_memory_grow
                               && !cc->cur_wasm_func->has_op_func_call)
                              || (!module->possible_memory_grow);
    /* Apply compiler passes. */
    if (!apply_compiler_passes(cc)) {
        os_printf("fast jit compilation failed: %s\n", jit_get_last_error(cc));
        ret = false;
    }
    /* Delete the compilation context. */
    jit_cc_delete(cc);
    return ret;
}
/* Compile every function of the module, reusing one compilation
   context. Stops at the first failure. */
bool
jit_compiler_compile_all(WASMModule *module)
{
    JitCompContext *cc;
    /* Fixed: was initialized to false, so a run in which every
       function compiled successfully still reported failure
       (jit_compiler_compile initializes ret to true). */
    bool ret = true;
    uint32 i;
    /* Initialize compilation context. */
    if (!(cc = jit_calloc(sizeof(*cc))))
        return false;
    if (!jit_cc_init(cc, 64)) {
        jit_free(cc);
        return false;
    }
    for (i = 0; i < module->function_count; i++) {
        cc->cur_wasm_module = module;
        cc->cur_wasm_func = module->functions[i];
        cc->cur_wasm_func_idx = i;
        /* Memory layout is stable if the function neither grows memory
           nor calls out, or the module can never grow memory. */
        cc->mem_space_unchanged = (!cc->cur_wasm_func->has_op_memory_grow
                                   && !cc->cur_wasm_func->has_op_func_call)
                                  || (!module->possible_memory_grow);
        /* Apply compiler passes. */
        if (!apply_compiler_passes(cc)) {
            os_printf("fast jit compilation failed: %s\n",
                      jit_get_last_error(cc));
            ret = false;
            break;
        }
    }
    /* Delete the compilation context. */
    jit_cc_delete(cc);
    return ret;
}
/* Transfer control from the interpreter to jitted code.
   Fixed: arguments were passed as (exec_env, func_inst, frame, target)
   while jit_codegen_call_func_jitted is declared as
   (exec_env, frame, func_inst, target) -- frame and func_inst were
   swapped. */
bool
jit_interp_switch_to_jitted(void *exec_env, void *frame,
                            WASMFunctionInstance *func_inst, void *target)
{
    return jit_codegen_call_func_jitted(exec_env, frame, func_inst, target);
}

View File

@ -0,0 +1,92 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_COMPILER_H_
#define _JIT_COMPILER_H_
#include "bh_platform.h"
#include "../interpreter/wasm_runtime.h"
#include "jit_ir.h"
#ifdef __cplusplus
extern "C" {
#endif
bool
jit_compiler_init();
void
jit_compiler_destroy();
const char *
jit_compiler_get_pass_name(unsigned i);
bool
jit_compiler_compile(WASMModule *module, uint32 func_idx);
bool
jit_compiler_compile_all(WASMModule *module);
bool
jit_interp_switch_to_jitted(void *exec_env, void *frame,
WASMFunctionInstance *func_inst, void *target);
/*
* Pass declarations:
*/
/**
* Dump the compilation context.
*/
bool
jit_pass_dump(JitCompContext *cc);
/**
* Update CFG (usually before dump for better readability).
*/
bool
jit_pass_update_cfg(JitCompContext *cc);
/**
* Translate profiling result into MIR.
*/
bool
jit_pass_frontend(JitCompContext *cc);
/**
* Convert MIR to LIR.
*/
bool
jit_pass_lower_fe(JitCompContext *cc);
/**
* Lower unsupported operations into supported ones.
*/
bool
jit_pass_lower_cg(JitCompContext *cc);
/**
* Register allocation.
*/
bool
jit_pass_regalloc(JitCompContext *cc);
/**
* Native code generation.
*/
bool
jit_pass_codegen(JitCompContext *cc);
/**
* Register the jitted code so that it can be executed.
*/
bool
jit_pass_register_jitted_code(JitCompContext *cc);
#ifdef __cplusplus
}
#endif
#endif /* end of _JIT_COMPILER_H_ */

View File

@ -0,0 +1,322 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_dump.h"
#include "jit_compiler.h"
#include "jit_codegen.h"
/* Print a register in the textual IR: constants print their value,
   virtual registers print a kind prefix (i/I/f/D/L) plus their
   number. */
void
jit_dump_reg(JitCompContext *cc, JitReg reg)
{
    unsigned kind = jit_reg_kind(reg);
    unsigned no = jit_reg_no(reg);
    switch (kind) {
        case JIT_REG_KIND_VOID:
            os_printf("VOID");
            break;
        case JIT_REG_KIND_I32:
            if (jit_reg_is_const(reg)) {
                /* rel marks a relocatable constant (nonzero only when
                   the constant carries a relocation). */
                unsigned rel = jit_cc_get_const_I32_rel(cc, reg);
                os_printf("0x%x", jit_cc_get_const_I32(cc, reg));
                if (rel)
                    os_printf("(rel: 0x%x)", rel);
            }
            else
                os_printf("i%d", no);
            break;
        case JIT_REG_KIND_I64:
            /* NOTE(review): "%llx" assumes jit_cc_get_const_I64
               returns (unsigned) long long; confirm or add an explicit
               cast to avoid a format/type mismatch on LP64. */
            if (jit_reg_is_const(reg))
                os_printf("0x%llxL", jit_cc_get_const_I64(cc, reg));
            else
                os_printf("I%d", no);
            break;
        case JIT_REG_KIND_F32:
            if (jit_reg_is_const(reg))
                os_printf("%f", jit_cc_get_const_F32(cc, reg));
            else
                os_printf("f%d", no);
            break;
        case JIT_REG_KIND_F64:
            if (jit_reg_is_const(reg))
                os_printf("%fL", jit_cc_get_const_F64(cc, reg));
            else
                os_printf("D%d", no);
            break;
        case JIT_REG_KIND_L32:
            /* Labels have no constant form. */
            os_printf("L%d", no);
            break;
        default:
            bh_assert(!"Unsupported register kind.");
    }
}
/* Print a fixed-length register operand list, comma separated, then a
   trailing newline. */
static void
jit_dump_insn_Reg(JitCompContext *cc, JitInsn *insn, unsigned opnd_num)
{
    unsigned idx;

    for (idx = 0; idx < opnd_num; idx++) {
        os_printf(idx > 0 ? ", " : " ");
        jit_dump_reg(cc, *(jit_insn_opnd(insn, idx)));
    }
    os_printf("\n");
}
/* Print a variable-length register operand list; the fixed opnd_num
   from the INSN table is replaced by the instruction's actual operand
   count. */
static void
jit_dump_insn_VReg(JitCompContext *cc, JitInsn *insn, unsigned opnd_num)
{
    unsigned i;
    opnd_num = jit_insn_opndv_num(insn);
    for (i = 0; i < opnd_num; i++) {
        os_printf(i == 0 ? " " : ", ");
        jit_dump_reg(cc, *(jit_insn_opndv(insn, i)));
    }
    os_printf("\n");
}
/* Print a TABLESWITCH: the switched value, the default target, then
   one target per integer in [low_value, high_value]. */
static void
jit_dump_insn_TableSwitch(JitCompContext *cc, JitInsn *insn, unsigned opnd_num)
{
    int i;
    JitOpndTableSwitch *opnd = jit_insn_opndts(insn);
    os_printf(" ");
    jit_dump_reg(cc, opnd->value);
    os_printf("\n%16s: ", "default");
    jit_dump_reg(cc, opnd->default_target);
    os_printf("\n");
    for (i = opnd->low_value; i <= opnd->high_value; i++) {
        os_printf("%18d: ", i);
        jit_dump_reg(cc, opnd->targets[i - opnd->low_value]);
        os_printf("\n");
    }
}
/* Print a LOOKUPSWITCH: the switched value, the default target, then
   each value/target match pair. */
static void
jit_dump_insn_LookupSwitch(JitCompContext *cc, JitInsn *insn, unsigned opnd_num)
{
    unsigned i;
    JitOpndLookupSwitch *opnd = jit_insn_opndls(insn);
    os_printf(" ");
    jit_dump_reg(cc, opnd->value);
    os_printf("\n%16s: ", "default");
    jit_dump_reg(cc, opnd->default_target);
    os_printf("\n");
    for (i = 0; i < opnd->match_pairs_num; i++) {
        os_printf("%18d: ", opnd->match_pairs[i].value);
        jit_dump_reg(cc, opnd->match_pairs[i].target);
        os_printf("\n");
    }
}
/* Print one instruction, dispatching to the operand-kind-specific
   dumper generated from jit_ir.def. */
void
jit_dump_insn(JitCompContext *cc, JitInsn *insn)
{
    switch (insn->opcode) {
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE)     \
    case JIT_OP_##NAME:                                \
        os_printf(" %-15s", #NAME);                    \
        jit_dump_insn_##OPND_KIND(cc, insn, OPND_NUM); \
        break;
#include "jit_ir.def"
#undef INSN
    }
}
/* Print one basic block: its label, predecessor list, optional bcip
   annotations, then either the native code (after codegen) or the IR
   instructions, and finally the successor list. */
void
jit_dump_basic_block(JitCompContext *cc, JitBasicBlock *block)
{
    unsigned i;
    JitInsn *insn;
    JitRegVec preds = jit_basic_block_preds(block);
    JitRegVec succs = jit_basic_block_succs(block);
    JitReg label = jit_basic_block_label(block);
    JitReg *reg;
    jit_dump_reg(cc, label);
    os_printf(":\n ; PREDS(");
    JIT_REG_VEC_FOREACH(preds, i, reg)
    {
        if (i > 0)
            os_printf(" ");
        jit_dump_reg(cc, *reg);
    }
    os_printf(")\n ;");
    /* NOTE(review): "%04x" is fed a pointer difference here -- confirm
       the annotation values fit, or cast to a matching type. */
    if (jit_annl_is_enabled_begin_bcip(cc))
        os_printf(" BEGIN_BCIP=0x%04x",
                  *(jit_annl_begin_bcip(cc, label))
                      - (uint8 *)cc->cur_wasm_module->load_addr);
    if (jit_annl_is_enabled_end_bcip(cc))
        os_printf(" END_BCIP=0x%04x",
                  *(jit_annl_end_bcip(cc, label))
                      - (uint8 *)cc->cur_wasm_module->load_addr);
    os_printf("\n");
    if (jit_annl_is_enabled_jitted_addr(cc))
        /* Dump assembly: the block's native code runs up to the next
           block's jitted address (or the cc's end for the exit). */
        jit_codegen_dump_native(
            *(jit_annl_jitted_addr(cc, label)),
            label != cc->exit_label
                ? *(jit_annl_jitted_addr(cc, *(jit_annl_next_label(cc, label))))
                : cc->jitted_addr_end);
    else
        /* Dump IR. */
        JIT_FOREACH_INSN(block, insn)
            jit_dump_insn(cc, insn);
    os_printf(" ; SUCCS(");
    JIT_REG_VEC_FOREACH(succs, i, reg)
    {
        if (i > 0)
            os_printf(" ");
        jit_dump_reg(cc, *reg);
    }
    os_printf(")\n\n");
}
/* Print a human-readable name for the function being compiled: the
   custom name section entry if available, else a matching export
   name, else "$f<idx>". */
static void
dump_func_name(JitCompContext *cc)
{
    const char *func_name = NULL;
    WASMModule *module = cc->cur_wasm_module;
#if WASM_ENABLE_CUSTOM_NAME_SECTION != 0
    /* Fixed: referenced an undeclared identifier "func" (compile error
       when the custom name section is enabled); the function being
       compiled is cc->cur_wasm_func. */
    func_name = cc->cur_wasm_func->field_name;
#endif
    /* if custom name section is not generated,
       search symbols from export table */
    if (!func_name) {
        uint32 i;
        for (i = 0; i < module->export_count; i++) {
            if (module->exports[i].kind == EXPORT_KIND_FUNC
                && module->exports[i].index == cc->cur_wasm_func_idx) {
                func_name = module->exports[i].name;
                break;
            }
        }
    }
    /* function name not exported, print number instead */
    if (func_name == NULL) {
        os_printf("$f%d", cc->cur_wasm_func_idx);
    }
    else {
        os_printf("%s", func_name);
    }
}
/* Print the whole compilation context: header statistics, enabled
   annotations, then every basic block (in reordered or default
   order). */
static void
dump_cc_ir(JitCompContext *cc)
{
    unsigned i, end;
    JitBasicBlock *block;
    JitReg label;
    const char *kind_names[] = { "VOID", "I32", "I64", "F32",
                                 "F64", "V64", "V128", "V256" };
    os_printf("; Function: ");
    dump_func_name(cc);
    os_printf("\n");
    os_printf("; Constant table sizes:");
    for (i = 0; i < JIT_REG_KIND_L32; i++)
        os_printf(" %s=%d", kind_names[i], cc->_const_val._num[i]);
    os_printf("\n; Label number: %d", jit_cc_label_num(cc));
    os_printf("\n; Instruction number: %d", jit_cc_insn_num(cc));
    os_printf("\n; Register numbers:");
    for (i = 0; i < JIT_REG_KIND_L32; i++)
        os_printf(" %s=%d", kind_names[i], jit_cc_reg_num(cc, i));
    os_printf("\n; Label annotations:");
/* Each enabled annotation is listed by expanding jit_ir.def once per
   annotation category. */
#define ANN_LABEL(TYPE, NAME)           \
    if (jit_annl_is_enabled_##NAME(cc)) \
        os_printf(" %s", #NAME);
#include "jit_ir.def"
#undef ANN_LABEL
    os_printf("\n; Instruction annotations:");
#define ANN_INSN(TYPE, NAME)            \
    if (jit_anni_is_enabled_##NAME(cc)) \
        os_printf(" %s", #NAME);
#include "jit_ir.def"
#undef ANN_INSN
    os_printf("\n; Register annotations:");
#define ANN_REG(TYPE, NAME)             \
    if (jit_annr_is_enabled_##NAME(cc)) \
        os_printf(" %s", #NAME);
#include "jit_ir.def"
#undef ANN_REG
    os_printf("\n\n");
    if (jit_annl_is_enabled_next_label(cc))
        /* Blocks have been reordered, use that order to dump. */
        for (label = cc->entry_label; label;
             label = *(jit_annl_next_label(cc, label)))
            jit_dump_basic_block(cc, *(jit_annl_basic_block(cc, label)));
    else
        /* Otherwise, use the default order. */
        {
            jit_dump_basic_block(cc, jit_cc_entry_basic_block(cc));
            JIT_FOREACH_BLOCK(cc, i, end, block)
                jit_dump_basic_block(cc, block);
            jit_dump_basic_block(cc, jit_cc_exit_basic_block(cc));
        }
}
/* Dump the compilation context unless it is trivial (entry and exit
   labels only). */
void
jit_dump_cc(JitCompContext *cc)
{
    if (jit_cc_label_num(cc) <= 2)
        return;
    dump_cc_ir(cc);
}
/* Pass: print the IR state between passes for debugging. Always
   succeeds. */
bool
jit_pass_dump(JitCompContext *cc)
{
    /* NOTE(review): PREV_PASS looks up cur_pass_no, which indexes the
       pass *sequence*, while jit_compiler_get_pass_name expects an
       index into compiler_passes -- verify the mapping is intended. */
    os_printf("JIT.COMPILER.DUMP: PASS_NO=%d PREV_PASS=%s\n\n", cc->cur_pass_no,
              (cc->cur_pass_no > 0 ? jit_compiler_get_pass_name(cc->cur_pass_no)
                                   : "NULL"));
    jit_dump_cc(cc);
    os_printf("\n");
    return true;
}
/* Pass: recompute the control flow graph (pred/succ lists). */
bool
jit_pass_update_cfg(JitCompContext *cc)
{
    return jit_cc_update_cfg(cc);
}

View File

@ -0,0 +1,54 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_DUMP_H_
#define _JIT_DUMP_H_
#include "jit_compiler.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* Dump a register.
*
* @param cc compilation context of the register
* @param reg register to be dumped
*/
void
jit_dump_reg(JitCompContext *cc, JitReg reg);
/**
* Dump an instruction.
*
* @param cc compilation context of the instruction
* @param insn instruction to be dumped
*/
void
jit_dump_insn(JitCompContext *cc, JitInsn *insn);
/**
* Dump a block.
*
* @param cc compilation context of the block
* @param block block to be dumped
*/
void
jit_dump_block(JitCompContext *cc, JitBlock *block);
/**
* Dump a compilation context.
*
* @param cc compilation context to be dumped
*/
void
jit_dump_cc(JitCompContext *cc);
#ifdef __cplusplus
}
#endif
#endif /* end of _JIT_DUMP_H_ */

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,477 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_FRONTEND_H_
#define _JIT_FRONTEND_H_
#include "jit_utils.h"
#include "jit_ir.h"
#include "../interpreter/wasm_interp.h"
typedef enum IntCond {
INT_EQZ = 0,
INT_EQ,
INT_NE,
INT_LT_S,
INT_LT_U,
INT_GT_S,
INT_GT_U,
INT_LE_S,
INT_LE_U,
INT_GE_S,
INT_GE_U
} IntCond;
typedef enum FloatCond {
FLOAT_EQ = 0,
FLOAT_NE,
FLOAT_LT,
FLOAT_GT,
FLOAT_LE,
FLOAT_GE,
FLOAT_UNO
} FloatCond;
typedef enum IntArithmetic {
INT_ADD = 0,
INT_SUB,
INT_MUL,
INT_DIV_S,
INT_DIV_U,
INT_REM_S,
INT_REM_U
} IntArithmetic;
typedef enum V128Arithmetic {
V128_ADD = 0,
V128_SUB,
V128_MUL,
V128_DIV,
V128_NEG,
V128_MIN,
V128_MAX,
} V128Arithmetic;
typedef enum IntBitwise {
INT_AND = 0,
INT_OR,
INT_XOR,
} IntBitwise;
typedef enum V128Bitwise {
V128_NOT,
V128_AND,
V128_ANDNOT,
V128_OR,
V128_XOR,
V128_BITSELECT,
} V128Bitwise;
typedef enum IntShift {
INT_SHL = 0,
INT_SHR_S,
INT_SHR_U,
INT_ROTL,
INT_ROTR
} IntShift;
typedef enum FloatMath {
FLOAT_ABS = 0,
FLOAT_NEG,
FLOAT_CEIL,
FLOAT_FLOOR,
FLOAT_TRUNC,
FLOAT_NEAREST,
FLOAT_SQRT
} FloatMath;
typedef enum FloatArithmetic {
FLOAT_ADD = 0,
FLOAT_SUB,
FLOAT_MUL,
FLOAT_DIV,
FLOAT_MIN,
FLOAT_MAX,
} FloatArithmetic;
typedef enum JitExceptionID {
EXCE_UNREACHABLE = 0,
EXCE_OUT_OF_MEMORY,
EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
EXCE_INTEGER_OVERFLOW,
EXCE_INTEGER_DIVIDE_BY_ZERO,
EXCE_INVALID_CONVERSION_TO_INTEGER,
EXCE_INVALID_FUNCTION_TYPE_INDEX,
EXCE_INVALID_FUNCTION_INDEX,
EXCE_UNDEFINED_ELEMENT,
EXCE_UNINITIALIZED_ELEMENT,
EXCE_CALL_UNLINKED_IMPORT_FUNC,
EXCE_NATIVE_STACK_OVERFLOW,
EXCE_UNALIGNED_ATOMIC,
EXCE_AUX_STACK_OVERFLOW,
EXCE_AUX_STACK_UNDERFLOW,
EXCE_OUT_OF_BOUNDS_TABLE_ACCESS,
EXCE_OPERAND_STACK_OVERFLOW,
EXCE_NUM,
} JitExceptionID;
/**
* Translate instructions in a function. The translated block must
* end with a branch instruction whose targets are offsets relating to
* the end bcip of the translated block, which are integral constants.
* If a target of a branch is really a constant value (which should be
* rare), put it into a register and then jump to the register instead
* of using the constant value directly in the target. In the
* translation process, don't create any new labels. The code bcip of
* the begin and end of the translated block is stored in the
* jit_annl_begin_bcip and jit_annl_end_bcip annotations of the label
* of the block, which must be the same as the bcips used in
* profiling.
*
* NOTE: the function must explicitly set SP to correct value when the
* entry's bcip is the function's entry address.
*
* @param cc containing compilation context of generated IR
* @param entry entry of the basic block to be translated. If its
* value is NULL, the function will clean up any pass local data that
* might be created previously.
* @param is_reached a bitmap recording which bytecode has been
* reached as a block entry
*
* @return IR block containing translated instructions if succeeds,
* NULL otherwise
*/
JitBasicBlock *
jit_frontend_translate_func(JitCompContext *cc);
/**
* Generate a block leaving the compiled code, which must store the
* target bcip and other necessary information for switching to
* interpreter or other compiled code and then jump to the exit of the
* cc.
*
* @param cc the compilation context
* @param bcip the target bytecode instruction pointer
* @param sp_offset stack pointer offset at the beginning of the block
*
* @return the leaving block if succeeds, NULL otherwise
*/
JitBlock *
jit_frontend_gen_leaving_block(JitCompContext *cc, void *bcip,
unsigned sp_offset);
#if 0
/**
* Print the qualified name of the given function.
*
* @param function the function whose name to be printed
*/
void
jit_frontend_print_function_name(void *function);
/**
* Get the full name of the function. If the input buffer lengh
* is less than the actual function name length, the function will
* simply return the actuall length and won't write to the buffer.
*
* @param function pointer to a function
* @param buf buffer for the returned name
* @param buf_len lengh of the buffer
*
* @return actual length of the name
*/
unsigned
jit_frontend_get_function_name(void *function, char *buf, unsigned buf_len);
/**
* Convert the bcip in the given function to an internal offset.
*
* @param function function containing the bcip
* @param bcip bytecode instruction pointer
*
* @return converted offset of the bcip
*/
unsigned
jit_frontend_bcip_to_offset(void *function, void *bcip);
#endif
/**
* Lower the IR of the given compilation context.
*
* @param cc the compilation context
*
* @return true if succeeds, false otherwise
*/
bool
jit_frontend_lower(JitCompContext *cc);
/**
* Get the offset from frame pointer to the n-th local variable slot.
*
* @param n the index to the local variable array
*
* @return the offset from frame pointer to the local variable slot
*/
static inline unsigned
offset_of_local(unsigned n)
{
return offsetof(WASMInterpFrame, lp) + n * 4;
}
/**
* Generate instruction to load an integer from the frame.
*
* This and the below gen_load_X functions generate instructions to
* load values from the frame into registers if the values have not
* been loaded yet.
*
* @param frame the frame information
* @param n slot index to the local variable array
*
* @return register holding the loaded value
*/
JitReg
gen_load_i32(JitFrame *frame, unsigned n);
/**
* Generate instruction to load a i64 integer from the frame.
*
* @param frame the frame information
* @param n slot index to the local variable array
*
* @return register holding the loaded value
*/
JitReg
gen_load_i64(JitFrame *frame, unsigned n);
/**
* Generate instruction to load a floating point value from the frame.
*
* @param frame the frame information
* @param n slot index to the local variable array
*
* @return register holding the loaded value
*/
JitReg
gen_load_f32(JitFrame *frame, unsigned n);
/**
* Generate instruction to load a double value from the frame.
*
* @param frame the frame information
* @param n slot index to the local variable array
*
* @return register holding the loaded value
*/
JitReg
gen_load_f64(JitFrame *frame, unsigned n);
/**
* Generate instructions to commit computation result to the frame.
* The general principle is to only commit values that will be used
* through the frame.
*
* @param frame the frame information
* @param begin the begin value slot to commit
* @param end the end value slot to commit
*/
void
gen_commit_values(JitFrame *frame, JitValueSlot *begin, JitValueSlot *end);
/**
* Generate instructions to commit SP and IP pointers to the frame.
*
* @param frame the frame information
*/
void
gen_commit_sp_ip(JitFrame *frame);
/**
* Generate commit instructions for the block end.
*
* @param frame the frame information
*/
static inline void
gen_commit_for_branch(JitFrame *frame)
{
gen_commit_values(frame, frame->lp, frame->sp);
}
/**
* Generate commit instructions for exception checks.
*
* @param frame the frame information
*/
static inline void
gen_commit_for_exception(JitFrame *frame)
{
gen_commit_values(frame, frame->lp, frame->lp + frame->max_locals);
}
/**
* Generate commit instructions to commit all status.
*
* @param frame the frame information
*/
static inline void
gen_commit_for_all(JitFrame *frame)
{
gen_commit_values(frame, frame->lp, frame->sp);
gen_commit_sp_ip(frame);
}
/* Reset the register/dirty tracking for all locals and stack slots. */
static inline void
clear_values(JitFrame *frame)
{
    size_t total_size =
        sizeof(JitValueSlot) * (frame->max_locals + frame->max_stacks);
    memset(frame->lp, 0, total_size);
}
/* Push a 32-bit value: one slot, marked dirty (not yet committed to
   the interpreter frame). */
static inline void
push_i32(JitFrame *frame, JitReg value)
{
    frame->sp->reg = value;
    frame->sp->dirty = 1;
    frame->sp++;
}
/* Push a 64-bit value: occupies two consecutive slots, both holding
   the same register and both dirty. */
static inline void
push_i64(JitFrame *frame, JitReg value)
{
    frame->sp->reg = value;
    frame->sp->dirty = 1;
    frame->sp++;
    frame->sp->reg = value;
    frame->sp->dirty = 1;
    frame->sp++;
}
/* f32 shares the single-slot layout of i32. */
static inline void
push_f32(JitFrame *frame, JitReg value)
{
    push_i32(frame, value);
}
/* f64 shares the two-slot layout of i64. */
static inline void
push_f64(JitFrame *frame, JitReg value)
{
    push_i64(frame, value);
}
/* Pop a 32-bit value, generating a frame load if it is not already in
   a register. */
static inline JitReg
pop_i32(JitFrame *frame)
{
    frame->sp--;
    return gen_load_i32(frame, frame->sp - frame->lp);
}
/* Pop a 64-bit value (two slots). */
static inline JitReg
pop_i64(JitFrame *frame)
{
    frame->sp -= 2;
    return gen_load_i64(frame, frame->sp - frame->lp);
}
static inline JitReg
pop_f32(JitFrame *frame)
{
    frame->sp--;
    return gen_load_f32(frame, frame->sp - frame->lp);
}
static inline JitReg
pop_f64(JitFrame *frame)
{
    frame->sp -= 2;
    return gen_load_f64(frame, frame->sp - frame->lp);
}
/* Drop n slots without generating loads, clearing their tracking. */
static inline void
pop(JitFrame *frame, int n)
{
    frame->sp -= n;
    memset(frame->sp, 0, n * sizeof(*frame->sp));
}
/* Read local slot n as the given type (n is a frame slot offset, not
   a wasm local index). */
static inline JitReg
local_i32(JitFrame *frame, int n)
{
    return gen_load_i32(frame, n);
}
static inline JitReg
local_i64(JitFrame *frame, int n)
{
    return gen_load_i64(frame, n);
}
static inline JitReg
local_f32(JitFrame *frame, int n)
{
    return gen_load_f32(frame, n);
}
static inline JitReg
local_f64(JitFrame *frame, int n)
{
    return gen_load_f64(frame, n);
}
/* Record val as the current register of 32-bit local slot n and mark
   it dirty so it is committed back to the interpreter frame.
   Fixed: declared "static inline" like every sibling helper in this
   header; plain "static" in a header triggers unused-function
   warnings and emits a private copy in each translation unit. */
static inline void
set_local_i32(JitFrame *frame, int n, JitReg val)
{
    frame->lp[n].reg = val;
    frame->lp[n].dirty = 1;
}
/* 64-bit locals span two slots; both record val and are marked
   dirty. */
static inline void
set_local_i64(JitFrame *frame, int n, JitReg val)
{
    frame->lp[n].reg = val;
    frame->lp[n].dirty = 1;
    frame->lp[n + 1].reg = val;
    frame->lp[n + 1].dirty = 1;
}
/* f32/f64 locals share the i32/i64 slot layout. */
static inline void
set_local_f32(JitFrame *frame, int n, JitReg val)
{
    set_local_i32(frame, n, val);
}
static inline void
set_local_f64(JitFrame *frame, int n, JitReg val)
{
    set_local_i64(frame, n, val);
}
/* Pop a value of the given wasm type from the compilation context's
   value stack; jumps to the enclosing function's "fail" label on
   failure (type mismatch / underflow). */
#define POP(jit_value, value_type)                         \
    do {                                                   \
        if (!jit_cc_pop_value(cc, value_type, &jit_value)) \
            goto fail;                                     \
    } while (0)
#define POP_I32(v) POP(v, VALUE_TYPE_I32)
#define POP_I64(v) POP(v, VALUE_TYPE_I64)
#define POP_F32(v) POP(v, VALUE_TYPE_F32)
#define POP_F64(v) POP(v, VALUE_TYPE_F64)
#define POP_FUNCREF(v) POP(v, VALUE_TYPE_FUNCREF)
#define POP_EXTERNREF(v) POP(v, VALUE_TYPE_EXTERNREF)
/* Push a value of the given wasm type; jumps to "fail" on overflow or
   allocation failure. */
#define PUSH(jit_value, value_type)                       \
    do {                                                  \
        if (!jit_cc_push_value(cc, value_type, jit_value)) \
            goto fail;                                    \
    } while (0)
#define PUSH_I32(v) PUSH(v, VALUE_TYPE_I32)
#define PUSH_I64(v) PUSH(v, VALUE_TYPE_I64)
#define PUSH_F32(v) PUSH(v, VALUE_TYPE_F32)
#define PUSH_F64(v) PUSH(v, VALUE_TYPE_F64)
#define PUSH_FUNCREF(v) PUSH(v, VALUE_TYPE_FUNCREF)
#define PUSH_EXTERNREF(v) PUSH(v, VALUE_TYPE_EXTERNREF)
#endif

1500
core/iwasm/fast-jit/jit_ir.c Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,457 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
/**
* @file jit-ir.def
*
* @brief Definition of JIT IR instructions and annotations.
*/
/**
* @def INSN (NAME, OPND_KIND, OPND_NUM, FIRST_USE)
*
* Definition of IR instructions
*
* @param NAME name of the opcode
* @param OPND_KIND kind of the operand(s)
* @param OPND_NUM number of the operand(s)
* @param FIRST_USE index of the first use register
*
* @p OPND_KIND and @p OPND_NUM together determine the format of an
* instruction. There are four kinds of formats:
*
* 1) Reg: fixed-number register operands, @p OPND_NUM specifies the
* number of operands;
*
* 2) VReg: variable-number register operands, @p OPND_NUM specifies
* the number of fixed register operands;
*
* 3) TableSwitch: tableswitch instruction's format, @p OPND_NUM must
* be 1;
*
* 4) LookupSwitch: lookupswitch instruction's format, @p OPND_NUM
* must be 1.
*
* Instruction operands are all registers and they are organized in an
* order that all registers defined by the instruction, if any, appear
* before the registers used by the instruction. The @p FIRST_USE is
* the index of the first use register in the register vector sorted
* in this order. Use @c jit_insn_opnd_regs to get the register
* vector in this order and use @c jit_insn_opnd_first_use to get the
* index of the first use register.
*
* Every instruction with name @p NAME has the following definitions:
*
* @c JEFF_OP_NAME: the enum opcode of insn NAME
* @c jit_insn_new_NAME (...): creates a new instance of insn NAME
*
* An instruction is deleted by function:
*
* @c jit_insn_delete (@p insn)
*
* In the scope of this IR's terminology, operand and argument have
* different meanings. The operand is a general notation, which
* denotes every raw operand of an instruction, while the argument
* only denotes the variable part of operands of instructions of VReg
* kind. For example, a VReg instruction phi node "r0 = phi(r1, r2)"
* has three operands opnd[0]: r0, opnd[1]: r1 and opnd[2]: r2, but
* only two arguments arg[0]: r1 and arg[1]: r2. Operands or
 * arguments of instructions with various formats can be accessed
* through the following APIs:
*
* @c jit_insn_opnd (@p insn, @p n): for Reg_N formats
* @c jit_insn_opndv (@p insn, @p n): for VReg_N formats
* @c jit_insn_opndv_num (@p insn): for VReg_N formats
* @c jit_insn_opndts (@p insn): for TableSwitch_1 format
* @c jit_insn_opndls (@p insn): for LookupSwitch_1 format
*/
#ifndef INSN
#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE)
#endif
/* Comparison instructions */
INSN(I32_EQZ, Reg, 3, 1)
INSN(I32_EQ, Reg, 3, 1)
INSN(I32_NE, Reg, 3, 1)
INSN(I32_LT_S, Reg, 3, 1)
INSN(I32_LT_U, Reg, 3, 1)
INSN(I32_GT_S, Reg, 3, 1)
INSN(I32_GT_U, Reg, 3, 1)
INSN(I32_LE_S, Reg, 3, 1)
INSN(I32_LE_U, Reg, 3, 1)
INSN(I32_GE_S, Reg, 3, 1)
INSN(I32_GE_U, Reg, 3, 1)
INSN(I64_EQZ, Reg, 3, 1)
INSN(I64_EQ, Reg, 3, 1)
INSN(I64_NE, Reg, 3, 1)
INSN(I64_LT_S, Reg, 3, 1)
INSN(I64_LT_U, Reg, 3, 1)
INSN(I64_GT_S, Reg, 3, 1)
INSN(I64_GT_U, Reg, 3, 1)
INSN(I64_LE_S, Reg, 3, 1)
INSN(I64_LE_U, Reg, 3, 1)
INSN(I64_GE_S, Reg, 3, 1)
INSN(I64_GE_U, Reg, 3, 1)
INSN(F32_EQ, Reg, 3, 1)
INSN(F32_NE, Reg, 3, 1)
INSN(F32_LT, Reg, 3, 1)
INSN(F32_GT, Reg, 3, 1)
INSN(F32_LE, Reg, 3, 1)
INSN(F32_GE, Reg, 3, 1)
INSN(F64_EQ, Reg, 3, 1)
INSN(F64_NE, Reg, 3, 1)
INSN(F64_LT, Reg, 3, 1)
INSN(F64_GT, Reg, 3, 1)
INSN(F64_LE, Reg, 3, 1)
INSN(F64_GE, Reg, 3, 1)
/* Select instruction */
INSN(SELECT, Reg, 4, 1)
/* Control instructions */
INSN(JMP, Reg, 1, 0)
INSN(BEQ, Reg, 3, 0)
INSN(BNE, Reg, 3, 0)
INSN(BGTS, Reg, 3, 0)
INSN(BGES, Reg, 3, 0)
INSN(BLTS, Reg, 3, 0)
INSN(BLES, Reg, 3, 0)
INSN(BGTU, Reg, 3, 0)
INSN(BGEU, Reg, 3, 0)
INSN(BLTU, Reg, 3, 0)
INSN(BLEU, Reg, 3, 0)
INSN(TABLE_SWITCH, TableSwitch, 1, 0)
INSN(LOOKUP_SWITCH, LookupSwitch, 1, 0)
/* check zero divisor */
INSN(CHECK_DIV_ZERO, Reg, 3, 0)
/* check stack overflow */
INSN(CHECK_SOE, Reg, 3, 0)
/* Call and return instructions */
INSN(CALLNATIVE, VReg, 2, 1)
INSN(CALLBC, Reg, 3, 0)
INSN(RETURN, Reg, 1, 0)
/* Move and conversion instructions that transfer values among
registers of the same kind (move) or different kinds (convert) */
INSN(MOV, Reg, 2, 1)
INSN(PHI, VReg, 1, 1)
INSN(I32TOI8, Reg, 2, 1)
INSN(I32TOU8, Reg, 2, 1)
INSN(I32TOI16, Reg, 2, 1)
INSN(I32TOU16, Reg, 2, 1)
INSN(I32TOI64, Reg, 2, 1)
INSN(I32TOF32, Reg, 2, 1)
INSN(I32TOF64, Reg, 2, 1)
INSN(U32TOI64, Reg, 2, 1)
INSN(U32TOF32, Reg, 2, 1)
INSN(U32TOF64, Reg, 2, 1)
INSN(I64TOI32, Reg, 2, 1)
INSN(I64TOF32, Reg, 2, 1)
INSN(I64TOF64, Reg, 2, 1)
INSN(F32TOI32, Reg, 2, 1)
INSN(F32TOI64, Reg, 2, 1)
INSN(F32TOF64, Reg, 2, 1)
INSN(F64TOI32, Reg, 2, 1)
INSN(F64TOI64, Reg, 2, 1)
INSN(F64TOF32, Reg, 2, 1)
/* Arithmetic and bitwise instructions: */
INSN(NEG, Reg, 2, 1)
INSN(NOT, Reg, 2, 1)
INSN(ADD, Reg, 3, 1)
INSN(SUB, Reg, 3, 1)
INSN(MUL, Reg, 3, 1)
INSN(DIV, Reg, 3, 1)
INSN(REM, Reg, 3, 1)
INSN(SHL, Reg, 3, 1)
INSN(SHRS, Reg, 3, 1)
INSN(SHRU, Reg, 3, 1)
INSN(OR, Reg, 3, 1)
INSN(XOR, Reg, 3, 1)
INSN(AND, Reg, 3, 1)
INSN(CMP, Reg, 3, 1)
/* Select instruction: */
INSN(SELECTEQ, Reg, 4, 1)
INSN(SELECTNE, Reg, 4, 1)
INSN(SELECTGTS, Reg, 4, 1)
INSN(SELECTGES, Reg, 4, 1)
INSN(SELECTLTS, Reg, 4, 1)
INSN(SELECTLES, Reg, 4, 1)
INSN(SELECTGTU, Reg, 4, 1)
INSN(SELECTGEU, Reg, 4, 1)
INSN(SELECTLTU, Reg, 4, 1)
INSN(SELECTLEU, Reg, 4, 1)
/* Memory access instructions: */
INSN(LDSELF, Reg, 1, 1)
INSN(LDJITINFO, Reg, 1, 1)
INSN(LDI8, Reg, 3, 1)
INSN(LDU8, Reg, 3, 1)
INSN(LDI16, Reg, 3, 1)
INSN(LDU16, Reg, 3, 1)
INSN(LDI32, Reg, 3, 1)
INSN(LDU32, Reg, 3, 1)
INSN(LDI64, Reg, 3, 1)
INSN(LDU64, Reg, 3, 1)
INSN(LDF32, Reg, 3, 1)
INSN(LDF64, Reg, 3, 1)
INSN(LDV64, Reg, 3, 1)
INSN(LDV128, Reg, 3, 1)
INSN(LDV256, Reg, 3, 1)
INSN(STI8, Reg, 3, 0)
INSN(STI16, Reg, 3, 0)
INSN(STI32, Reg, 3, 0)
INSN(STI64, Reg, 3, 0)
INSN(STF32, Reg, 3, 0)
INSN(STF64, Reg, 3, 0)
INSN(STV64, Reg, 3, 1)
INSN(STV128, Reg, 3, 1)
INSN(STV256, Reg, 3, 1)
#if 0
/* Memory instructions */
INSN(I32_LOAD, Reg, 2, 1)
INSN(I64_LOAD, Reg, 2, 1)
INSN(F32_LOAD, Reg, 2, 1)
INSN(F64_LOAD, Reg, 2, 1)
INSN(I32_LOAD8_S, Reg, 2, 1)
INSN(I32_LOAD8_U, Reg, 2, 1)
INSN(I32_LOAD16_S, Reg, 2, 1)
INSN(I32_LOAD16_U, Reg, 2, 1)
INSN(I64_LOAD8_S, Reg, 2, 1)
INSN(I64_LOAD8_U, Reg, 2, 1)
INSN(I64_LOAD16_S, Reg, 2, 1)
INSN(I64_LOAD16_U, Reg, 2, 1)
INSN(I64_LOAD32_S, Reg, 2, 1)
INSN(I64_LOAD32_U, Reg, 2, 1)
INSN(I32_STORE, Reg, 2, 0)
INSN(I64_STORE, Reg, 2, 0)
INSN(F32_STORE, Reg, 2, 0)
INSN(F64_STORE, Reg, 2, 0)
INSN(I32_STORE8, Reg, 2, 0)
INSN(I32_STORE16, Reg, 2, 0)
INSN(I64_STORE8, Reg, 2, 0)
INSN(I64_STORE16, Reg, 2, 0)
INSN(I64_STORE32, Reg, 2, 0)
/* Numeric operators */
INSN(I32_CLZ, Reg, 2, 1)
INSN(I32_CTZ, Reg, 2, 1)
INSN(I32_POPCNT, Reg, 2, 1)
INSN(I32_ADD, Reg, 3, 1)
INSN(I32_SUB, Reg, 3, 1)
INSN(I32_MUL, Reg, 3, 1)
INSN(I32_DIV_S, Reg, 3, 1)
INSN(I32_DIV_U, Reg, 3, 1)
INSN(I32_REM_S, Reg, 3, 1)
INSN(I32_REM_U, Reg, 3, 1)
INSN(I32_AND, Reg, 3, 1)
INSN(I32_OR, Reg, 3, 1)
INSN(I32_XOR, Reg, 3, 1)
INSN(I32_SHL, Reg, 3, 1)
INSN(I32_SHR_S, Reg, 3, 1)
INSN(I32_SHR_U, Reg, 3, 1)
INSN(I32_ROTL, Reg, 3, 1)
INSN(I32_ROTR, Reg, 3, 1)
INSN(I64_CLZ, Reg, 2, 1)
INSN(I64_CTZ, Reg, 2, 1)
INSN(I64_POPCNT, Reg, 2, 1)
INSN(I64_ADD, Reg, 3, 1)
INSN(I64_SUB, Reg, 3, 1)
INSN(I64_MUL, Reg, 3, 1)
INSN(I64_DIV_S, Reg, 3, 1)
INSN(I64_DIV_U, Reg, 3, 1)
INSN(I64_REM_S, Reg, 3, 1)
INSN(I64_REM_U, Reg, 3, 1)
INSN(I64_AND, Reg, 3, 1)
INSN(I64_OR, Reg, 3, 1)
INSN(I64_XOR, Reg, 3, 1)
INSN(I64_SHL, Reg, 3, 1)
INSN(I64_SHR_S, Reg, 3, 1)
INSN(I64_SHR_U, Reg, 3, 1)
INSN(I64_ROTL, Reg, 3, 1)
INSN(I64_ROTR, Reg, 3, 1)
INSN(F32_ABS, Reg, 2, 1)
INSN(F32_NEG, Reg, 2, 1)
INSN(F32_CEIL, Reg, 2, 1)
INSN(F32_FLOOR, Reg, 2, 1)
INSN(F32_TRUNC, Reg, 2, 1)
INSN(F32_NEAREST, Reg, 2, 1)
INSN(F32_SQRT, Reg, 2, 1)
INSN(F32_ADD, Reg, 3, 1)
INSN(F32_SUB, Reg, 3, 1)
INSN(F32_MUL, Reg, 3, 1)
INSN(F32_DIV, Reg, 3, 1)
INSN(F32_MIN, Reg, 3, 1)
INSN(F32_MAX, Reg, 3, 1)
INSN(F32_COPYSIGN, Reg, 3, 1)
INSN(F64_ABS, Reg, 2, 1)
INSN(F64_NEG, Reg, 2, 1)
INSN(F64_CEIL, Reg, 2, 1)
INSN(F64_FLOOR, Reg, 2, 1)
INSN(F64_TRUNC, Reg, 2, 1)
INSN(F64_NEAREST, Reg, 2, 1)
INSN(F64_SQRT, Reg, 2, 1)
INSN(F64_ADD, Reg, 3, 1)
INSN(F64_SUB, Reg, 3, 1)
INSN(F64_MUL, Reg, 3, 1)
INSN(F64_DIV, Reg, 3, 1)
INSN(F64_MIN, Reg, 3, 1)
INSN(F64_MAX, Reg, 3, 1)
INSN(F64_COPYSIGN, Reg, 3, 1)
/* Convert instructions */
INSN(I32_WRAP_I64, Reg, 2, 1)
INSN(I32_TRUNC_S_F32, Reg, 2, 1)
INSN(I32_TRUNC_U_F32, Reg, 2, 1)
INSN(I32_TRUNC_S_F64, Reg, 2, 1)
INSN(I32_TRUNC_U_F64, Reg, 2, 1)
INSN(I64_EXTEND_S_I32, Reg, 2, 1)
INSN(I64_EXTEND_U_I32, Reg, 2, 1)
INSN(I64_TRUNC_S_F32, Reg, 2, 1)
INSN(I64_TRUNC_U_F32, Reg, 2, 1)
INSN(I64_TRUNC_S_F64, Reg, 2, 1)
INSN(I64_TRUNC_U_F64, Reg, 2, 1)
INSN(F32_CONVERT_S_I32, Reg, 2, 1)
INSN(F32_CONVERT_U_I32, Reg, 2, 1)
INSN(F32_CONVERT_S_I64, Reg, 2, 1)
INSN(F32_CONVERT_U_I64, Reg, 2, 1)
INSN(F32_DEMOTE_F64, Reg, 2, 1)
INSN(F64_CONVERT_S_I32, Reg, 2, 1)
INSN(F64_CONVERT_U_I32, Reg, 2, 1)
INSN(F64_CONVERT_S_I64, Reg, 2, 1)
INSN(F64_CONVERT_U_I64, Reg, 2, 1)
INSN(F64_PROMOTE_F32, Reg, 2, 1)
INSN(I32_EXTEND8_S, Reg, 2, 1)
INSN(I32_EXTEND16_S, Reg, 2, 1)
INSN(I64_EXTEND8_S, Reg, 2, 1)
INSN(I64_EXTEND16_S, Reg, 2, 1)
INSN(I64_EXTEND32_S, Reg, 2, 1)
INSN(I32_TRUNC_SAT_S_F32, Reg, 2, 1)
INSN(I32_TRUNC_SAT_U_F32, Reg, 2, 1)
INSN(I32_TRUNC_SAT_S_F64, Reg, 2, 1)
INSN(I32_TRUNC_SAT_U_F64, Reg, 2, 1)
INSN(I64_TRUNC_SAT_S_F32, Reg, 2, 1)
INSN(I64_TRUNC_SAT_U_F32, Reg, 2, 1)
INSN(I64_TRUNC_SAT_S_F64, Reg, 2, 1)
INSN(I64_TRUNC_SAT_U_F64, Reg, 2, 1)
#endif
#undef INSN
/**
* @def ANN_LABEL (TYPE, NAME)
*
* Definition of label annotations.
*
* @param TYPE type of the annotation
* @param NAME name of the annotation
*
* Each defined annotation with name NAME has the following APIs:
*
* @c jit_annl_NAME (cc, label): accesses the annotation NAME of
* label @p label
* @c jit_annl_enable_NAME (cc): enables the annotation NAME
* @c jit_annl_disable_NAME (cc): disables the annotation NAME
* @c jit_annl_is_enabled_NAME (cc): check whether the annotation NAME
* is enabled
*/
#ifndef ANN_LABEL
#define ANN_LABEL(TYPE, NAME)
#endif
/* Basic Block of a label. */
ANN_LABEL(JitBasicBlock *, basic_block)
/* Predecessor number of the block that is only used in
jit_cc_update_cfg for updating the CFG. */
ANN_LABEL(uint16, pred_num)
/* Execution frequency of a block. We can split critical edges with
empty blocks so we don't need to store frequencies of edges. */
ANN_LABEL(uint16, freq)
/* Begin bytecode instruction pointer of the block. */
ANN_LABEL(uint8 *, begin_bcip)
/* End bytecode instruction pointer of the block. */
ANN_LABEL(uint8 *, end_bcip)
/* Stack pointer offset at the end of the block. */
ANN_LABEL(uint16, end_sp)
/* The label of the next physically adjacent block. */
ANN_LABEL(JitReg, next_label)
/* Compiled code address of the block. */
ANN_LABEL(void *, jitted_addr)
#undef ANN_LABEL
/**
* @def ANN_INSN (TYPE, NAME)
*
* Definition of instruction annotations.
*
* @param TYPE type of the annotation
* @param NAME name of the annotation
*
* Each defined annotation with name NAME has the following APIs:
*
* @c jit_anni_NAME (cc, insn): accesses the annotation NAME of
* instruction @p insn
* @c jit_anni_enable_NAME (cc): enables the annotation NAME
* @c jit_anni_disable_NAME (cc): disables the annotation NAME
* @c jit_anni_is_enabled_NAME (cc): check whether the annotation NAME
* is enabled
*/
#ifndef ANN_INSN
#define ANN_INSN(TYPE, NAME)
#endif
/* A private annotation for linking instructions with the same hash
value, which is only used by the compilation context's hash table
of instructions. */
ANN_INSN(JitInsn *, _hash_link)
#undef ANN_INSN
/**
* @def ANN_REG (TYPE, NAME)
*
* Definition of register annotations.
*
* @param TYPE type of the annotation
* @param NAME name of the annotation
*
* Each defined annotation with name NAME has the following APIs:
*
* @c jit_annr_NAME (cc, reg): accesses the annotation NAME of
* register @p reg
* @c jit_annr_enable_NAME (cc): enables the annotation NAME
* @c jit_annr_disable_NAME (cc): disables the annotation NAME
* @c jit_annr_is_enabled_NAME (cc): check whether the annotation NAME
* is enabled
*/
#ifndef ANN_REG
#define ANN_REG(TYPE, NAME)
#endif
/* Defining instruction of registers satisfying SSA property. */
ANN_REG(JitInsn *, def_insn)
#undef ANN_REG

1840
core/iwasm/fast-jit/jit_ir.h Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,781 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_utils.h"
#include "jit_compiler.h"
/**
* A uint16 stack for storing distances of occurrences of virtual
* registers.
*/
typedef struct UintStack {
    /* Capacity of the stack. */
    uint32 capacity;
    /* Top index of the stack (number of elements currently stored). */
    uint32 top;
    /* Elements of the vector; real storage is allocated past the end
       of the struct (see uint_stack_push). */
    uint16 elem[1];
} UintStack;
/* Push val onto the stack, lazily allocating (4 elements) or growing
   (~1.5x) the storage when full. Returns false on OOM, leaving the
   original stack intact.
   NOTE(review): elements are uint16, so val is silently truncated if
   it exceeds 65535 — callers push per-basic-block instruction
   distances; confirm blocks stay below that bound. */
static bool
uint_stack_push(UintStack **stack, unsigned val)
{
    unsigned capacity = *stack ? (*stack)->capacity : 0;
    unsigned top = *stack ? (*stack)->top : 0;
    bh_assert(top <= capacity);
    if (top == capacity) {
        const unsigned elem_size = sizeof((*stack)->elem[0]);
        unsigned new_capacity = capacity ? capacity + capacity / 2 : 4;
        UintStack *new_stack =
            jit_malloc(offsetof(UintStack, elem) + elem_size * new_capacity);
        if (!new_stack)
            return false;
        new_stack->capacity = new_capacity;
        new_stack->top = top;
        /* Copy existing elements, then free the old storage. */
        if (*stack)
            memcpy(new_stack->elem, (*stack)->elem, elem_size * top);
        jit_free(*stack);
        *stack = new_stack;
    }
    (*stack)->elem[(*stack)->top++] = val;
    return true;
}
/* Peek the most recently pushed value; the stack must be non-empty. */
static int
uint_stack_top(UintStack *stack)
{
    return stack->elem[stack->top - 1];
}
/* Free the stack storage and reset the caller's pointer, so an empty
   stack is always represented by NULL. */
static void
uint_stack_delete(UintStack **stack)
{
    jit_free(*stack);
    *stack = NULL;
}
/* Remove the top element. When the stack becomes empty its storage is
   released entirely, so callers can test emptiness by checking the
   pointer against NULL (see allocate_hreg). */
static void
uint_stack_pop(UintStack **stack)
{
    bh_assert((*stack)->top > 0);
    (*stack)->top -= 1;
    if ((*stack)->top == 0)
        uint_stack_delete(stack);
}
/**
* Information of a virtual register.
*/
typedef struct VirtualReg {
    /* The hard register allocated to this virtual register. */
    JitReg hreg;
    /* The spill slot allocated to this virtual register. */
    JitReg slot;
    /* The hard register allocated to global virtual registers. It is 0
       for local registers, whose lifetime is within one basic block. */
    JitReg global_hreg;
    /* Distances from the beginning of basic block of all occurrences of the
       virtual register in the basic block. */
    UintStack *distances;
} VirtualReg;
/**
 * Information of a hard register.
 */
typedef struct HardReg {
    /* The virtual register this hard register is allocated to. */
    JitReg vreg;
} HardReg;
/**
 * Information of a spill slot.
 */
typedef struct SpillSlot {
    /* The virtual register this spill slot is allocated to. */
    JitReg vreg;
} SpillSlot;
/* Per-pass state of the local register allocator. */
typedef struct RegallocContext {
    /* The compiler context. */
    JitCompContext *cc;
    /* Information of virtual registers. The register allocation must
       not increase the virtual register number during the allocation
       process. */
    VirtualReg *vregs[JIT_REG_KIND_L32]; /* indexed by [kind][reg_no] */
    /* Information of hard registers. */
    HardReg *hregs[JIT_REG_KIND_L32]; /* indexed by [kind][reg_no] */
    /* Number of elements in the spill_slots array. */
    uint32 spill_slot_num;
    /* Information of spill slots. */
    SpillSlot *spill_slots;
    /* The last define-released hard register. */
    JitReg last_def_released_hreg;
} RegallocContext;
/**
* Get the VirtualReg structure of the given virtual register.
*
* @param rc the regalloc context
* @param vreg the virtual register
*
* @return the VirtualReg structure of the given virtual register
*/
static VirtualReg *
rc_get_vr(RegallocContext *rc, JitReg vreg)
{
    /* Only variable registers have VirtualReg records. */
    bh_assert(jit_reg_is_variable(vreg));
    return &rc->vregs[jit_reg_kind(vreg)][jit_reg_no(vreg)];
}
/**
* Get the HardReg structure of the given hard register.
*
* @param rc the regalloc context
* @param hreg the hard register
*
* @return the HardReg structure of the given hard register
*/
static HardReg *
rc_get_hr(RegallocContext *rc, JitReg hreg)
{
    /* Must be a variable register that is also a hard register. */
    bh_assert(jit_reg_is_variable(hreg) && jit_cc_is_hreg(rc->cc, hreg));
    return &rc->hregs[jit_reg_kind(hreg)][jit_reg_no(hreg)];
}
/**
* Get the SpillSlot structure of the given slot.
*
* @param rc the regalloc context
* @param slot the constant register representing the slot index
*
* @return the SpillSlot of the given slot
*/
static SpillSlot *
rc_get_spill_slot(RegallocContext *rc, JitReg slot)
{
    /* A slot is encoded as an I32 constant register whose value is the
       index into the spill_slots array. */
    unsigned idx = jit_cc_get_const_I32(rc->cc, slot);
    bh_assert(idx < rc->spill_slot_num);
    return rc->spill_slots + idx;
}
/**
* Get the stride in the spill slots of the register.
*
* @param reg a virtual register
*
* @return stride in the spill slots
*/
static unsigned
get_reg_stride(JitReg reg)
{
    /* Spill slots consumed per register kind. NOTE(review): the table
       is indexed by jit_reg_kind() and must stay in sync with the
       JIT_REG_KIND_* enum order — presumably VOID, I32, I64, F32,
       F64, V64, V128, V256; confirm against the enum definition. */
    static const uint8 strides[] = { 0, 1, 2, 1, 2, 2, 4, 8, 0 };
    return strides[jit_reg_kind(reg)];
}
/**
* Allocate a spill slot for the given virtual register.
*
* @param rc the regalloc context
* @param vreg the virtual register
*
* @return the spill slot encoded in a consant register
*/
static JitReg
rc_alloc_spill_slot(RegallocContext *rc, JitReg vreg)
{
    const unsigned stride = get_reg_stride(vreg);
    unsigned mask, new_num, i, j;
    SpillSlot *slots;
    bh_assert(stride > 0);
    /* First-fit scan for `stride` consecutive free slots, stepping by
       stride so allocations stay naturally aligned. */
    for (i = 0; i < rc->spill_slot_num; i += stride)
        for (j = i;; j++) {
            if (j == i + stride)
                /* Found a free slot for vreg. */
                goto found;
            if (rc->spill_slots[j].vreg)
                break;
        }
    /* No free slot, increase the slot number. */
    mask = stride - 1;
    /* Align the slot index. */
    i = (rc->spill_slot_num + mask) & ~mask;
    /* Grow to 32 slots initially, then by ~1.5x.
       NOTE(review): i + stride must not exceed new_num; this holds
       because the count starts at 32 and stride is at most 8 —
       re-check if wider register kinds are ever added. */
    new_num = i == 0 ? 32 : i + i / 2;
    if (!(slots = jit_calloc(sizeof(*slots) * new_num)))
        return 0;
    if (rc->spill_slots)
        memcpy(slots, rc->spill_slots, sizeof(*slots) * rc->spill_slot_num);
    jit_free(rc->spill_slots);
    rc->spill_slots = slots;
    rc->spill_slot_num = new_num;
found:
    /* Now, i is the first slot for vreg. */
#if 0 /* TODO: check the spill */
    if (rc->cc->interp_frame_size + (i + stride) * 4
        > rc->cc->total_frame_size)
        /* No frame space for the spill area. */
        return 0;
#endif
    /* Allocate the slot(s) to vreg. */
    for (j = i; j < i + stride; j++)
        rc->spill_slots[j].vreg = vreg;
    return jit_cc_new_const_I32(rc->cc, i);
}
/**
* Free a spill slot.
*
* @param rc the regalloc context
* @param slot_reg the constant register representing the slot index
*/
static void
rc_free_spill_slot(RegallocContext *rc, JitReg slot_reg)
{
    SpillSlot *slot;
    JitReg vreg;
    unsigned stride, k;
    /* A zero slot register means no slot was allocated: nothing to do. */
    if (!slot_reg)
        return;
    slot = rc_get_spill_slot(rc, slot_reg);
    vreg = slot->vreg;
    stride = get_reg_stride(vreg);
    /* A wide register owns `stride` consecutive slots; clear them all. */
    for (k = 0; k < stride; k++)
        slot[k].vreg = 0;
}
/* Release all regalloc resources. Safe on a partially initialized
   context: rc_init zero-fills the struct first and jit_free is a no-op
   on NULL. */
static void
rc_destroy(RegallocContext *rc)
{
    unsigned i, j;
    for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) {
        const unsigned vreg_num = jit_cc_reg_num(rc->cc, i);
        /* Each VirtualReg may still own a distances stack. */
        if (rc->vregs[i])
            for (j = 0; j < vreg_num; j++)
                uint_stack_delete(&rc->vregs[i][j].distances);
        jit_free(rc->vregs[i]);
        jit_free(rc->hregs[i]);
    }
    jit_free(rc->spill_slots);
}
/* Initialize the regalloc context for compilation context cc: allocate
   per-kind VirtualReg/HardReg tables sized to the current register
   counts. On any failure, everything allocated so far is released via
   rc_destroy and false is returned. */
static bool
rc_init(RegallocContext *rc, JitCompContext *cc)
{
    unsigned i, j;
    memset(rc, 0, sizeof(*rc));
    rc->cc = cc;
    for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) {
        const unsigned vreg_num = jit_cc_reg_num(cc, i);
        const unsigned hreg_num = jit_cc_hreg_num(cc, i);
        /* NOTE(review): if vreg_num or hreg_num is 0 this relies on
           jit_calloc(0) returning non-NULL — confirm the runtime
           allocator's behavior for zero-sized requests. */
        if (!(rc->vregs[i] = jit_calloc(sizeof(VirtualReg) * vreg_num))
            || !(rc->hregs[i] = jit_calloc(sizeof(HardReg) * hreg_num)))
            goto fail;
        /* Hard registers can only be allocated to themselves. */
        for (j = 0; j < hreg_num; j++)
            rc->vregs[i][j].global_hreg = jit_reg_new(i, j);
    }
    return true;
fail:
    rc_destroy(rc);
    return false;
}
/**
 * Check whether the given register is an allocation candidate, which
* must be a variable register that is not fixed hard register.
*
* @param cc the compilation context
* @param reg the register
*
* @return true if the register is an allocation candidate
*/
static bool
is_alloc_candidate(JitCompContext *cc, JitReg reg)
{
    /* Non-variable registers never take part in allocation. */
    if (!jit_reg_is_variable(reg))
        return false;
    /* Fixed hard registers are pinned and must not be reassigned. */
    if (jit_cc_is_hreg(cc, reg) && jit_cc_is_hreg_fixed(cc, reg))
        return false;
    return true;
}
/**
* Collect distances from the beginning of basic block of all occurrences of
* each virtual register.
*
* @param rc the regalloc context
* @param basic_block the basic block
*
* @return distance of the end instruction if succeeds, -1 otherwise
*/
static int
collect_distances(RegallocContext *rc, JitBasicBlock *basic_block)
{
    JitInsn *insn;
    /* Distances start at 1 so that 0 can serve as "no occurrence"
       (see the vr_distance fallback in allocate_hreg). */
    int distance = 1;
    JIT_FOREACH_INSN(basic_block, insn)
    {
        JitRegVec regvec = jit_insn_opnd_regs(insn);
        unsigned i;
        JitReg *regp;
        /* NOTE: the distance may be pushed more than once if the
           virtual register occurs multiple times in the
           instruction. */
        JIT_REG_VEC_FOREACH(regvec, i, regp)
        if (is_alloc_candidate(rc->cc, *regp))
            if (!uint_stack_push(&(rc_get_vr(rc, *regp))->distances, distance))
                return -1;
        distance++;
    }
    return distance;
}
/* Frame offset (as an I32 constant register) of the given spill slot.
   Placeholder: currently always 0; the intended computation
   (interp_frame_size + slot_index * 4) is kept in the inline comment
   until the frame layout is finalized. */
static JitReg
offset_of_spill_slot(JitCompContext *cc, JitReg slot)
{
    /* TODO: check the spill */
    return jit_cc_new_const_I32(
        cc, /*cc->interp_frame_size + jit_cc_get_const_I32 (cc, slot) * 4*/ 0);
}
/**
* Reload the virtual register from memory. Reload instruction will
* be inserted after the given instruction.
*
* @param rc the regalloc context
* @param vreg the virtual register to be reloaded
* @param cur_insn the current instruction after which the reload
* insertion will be inserted
*
* @return the reload instruction if succeeds, NULL otherwise
*/
static JitInsn *
reload_vreg(RegallocContext *rc, JitReg vreg, JitInsn *cur_insn)
{
    VirtualReg *vr = rc_get_vr(rc, vreg);
    HardReg *hr = rc_get_hr(rc, vr->hreg);
    JitInsn *insn = NULL;
    if (vreg == rc->cc->exec_env_reg)
        /* Reload exec_env_reg with LDSELF. */
        insn = jit_cc_new_insn(rc->cc, LDSELF, vr->hreg);
    else
    /* Allocate spill slot if not yet and reload from there. */
    {
        JitReg fp_reg = rc->cc->fp_reg, offset;
        if (!vr->slot && !(vr->slot = rc_alloc_spill_slot(rc, vreg)))
            /* Cannot allocate spill slot (due to OOM or frame size limit). */
            return NULL;
        offset = offset_of_spill_slot(rc->cc, vr->slot);
        /* Emit the kind-appropriate load from fp_reg + offset into the
           hard register. */
        switch (jit_reg_kind(vreg)) {
            case JIT_REG_KIND_I32:
                insn = jit_cc_new_insn(rc->cc, LDI32, vr->hreg, fp_reg, offset);
                break;
            case JIT_REG_KIND_I64:
                insn = jit_cc_new_insn(rc->cc, LDI64, vr->hreg, fp_reg, offset);
                break;
            case JIT_REG_KIND_F32:
                insn = jit_cc_new_insn(rc->cc, LDF32, vr->hreg, fp_reg, offset);
                break;
            case JIT_REG_KIND_F64:
                insn = jit_cc_new_insn(rc->cc, LDF64, vr->hreg, fp_reg, offset);
                break;
            case JIT_REG_KIND_V64:
                insn = jit_cc_new_insn(rc->cc, LDV64, vr->hreg, fp_reg, offset);
                break;
            case JIT_REG_KIND_V128:
                insn =
                    jit_cc_new_insn(rc->cc, LDV128, vr->hreg, fp_reg, offset);
                break;
            case JIT_REG_KIND_V256:
                insn =
                    jit_cc_new_insn(rc->cc, LDV256, vr->hreg, fp_reg, offset);
                break;
            default:
                bh_assert(0);
        }
    }
    /* The pass scans instructions in reverse (see
       allocate_for_basic_block), so inserting the load after cur_insn
       re-fetches the value for the later uses; the binding is then
       dropped so the hard register is free earlier in the block. */
    if (insn)
        jit_insn_insert_after(cur_insn, insn);
    bh_assert(hr->vreg == vreg);
    hr->vreg = vr->hreg = 0;
    return insn;
}
/**
* Spill the virtual register (which cannot be exec_env_reg) to memory.
* Spill instruction will be inserted after the given instruction.
*
* @param rc the regalloc context
* @param vreg the virtual register to be reloaded
* @param cur_insn the current instruction after which the reload
* insertion will be inserted
*
* @return the spill instruction if succeeds, NULL otherwise
*/
static JitInsn *
spill_vreg(RegallocContext *rc, JitReg vreg, JitInsn *cur_insn)
{
    VirtualReg *vr = rc_get_vr(rc, vreg);
    JitReg fp_reg = rc->cc->fp_reg, offset;
    /* Fix: initialize to NULL. The default switch case below only
       asserts, so with assertions compiled out the `if (insn)` check
       would otherwise read an indeterminate pointer (undefined
       behavior). The sibling reload_vreg already initializes insn. */
    JitInsn *insn = NULL;
    /* There is no chance to spill exec_env_reg. */
    bh_assert(vreg != rc->cc->exec_env_reg);
    bh_assert(vr->hreg && vr->slot);
    offset = offset_of_spill_slot(rc->cc, vr->slot);
    /* Emit the kind-appropriate store of the hard register into the
       spill slot addressed by fp_reg + offset. */
    switch (jit_reg_kind(vreg)) {
        case JIT_REG_KIND_I32:
            insn = jit_cc_new_insn(rc->cc, STI32, vr->hreg, fp_reg, offset);
            break;
        case JIT_REG_KIND_I64:
            insn = jit_cc_new_insn(rc->cc, STI64, vr->hreg, fp_reg, offset);
            break;
        case JIT_REG_KIND_F32:
            insn = jit_cc_new_insn(rc->cc, STF32, vr->hreg, fp_reg, offset);
            break;
        case JIT_REG_KIND_F64:
            insn = jit_cc_new_insn(rc->cc, STF64, vr->hreg, fp_reg, offset);
            break;
        case JIT_REG_KIND_V64:
            insn = jit_cc_new_insn(rc->cc, STV64, vr->hreg, fp_reg, offset);
            break;
        case JIT_REG_KIND_V128:
            insn = jit_cc_new_insn(rc->cc, STV128, vr->hreg, fp_reg, offset);
            break;
        case JIT_REG_KIND_V256:
            insn = jit_cc_new_insn(rc->cc, STV256, vr->hreg, fp_reg, offset);
            break;
        default:
            bh_assert(0);
    }
    if (insn)
        jit_insn_insert_after(cur_insn, insn);
    return insn;
}
/**
* Allocate a hard register for the virtual register. Necessary
 * reload instruction will be inserted after the given instruction.
*
* @param rc the regalloc context
* @param vreg the virtual register
* @param insn the instruction after which the reload insertion will
* be inserted
* @param distance the distance of the current instruction
*
* @return the hard register allocated if succeeds, 0 otherwise
*/
static JitReg
allocate_hreg(RegallocContext *rc, JitReg vreg, JitInsn *insn, int distance)
{
    const int kind = jit_reg_kind(vreg);
    const HardReg *hregs = rc->hregs[kind];
    const unsigned hreg_num = jit_cc_hreg_num(rc->cc, kind);
    JitReg hreg, vreg_to_reload = 0;
    int min_distance = distance, vr_distance;
    VirtualReg *vr = rc_get_vr(rc, vreg);
    unsigned i;
    if (hreg_num == 0)
    /* Unsupported hard register kind. */
    {
        /* TODO: how to set error */
        /*jit_set_error (JIT_ERROR_UNSUPPORTED_HREG);*/
        return 0;
    }
    if (vr->global_hreg)
    /* It has globally allocated register, we can only use it. */
    {
        /* Evict whichever vreg currently occupies the global register. */
        if ((vreg_to_reload = (rc_get_hr(rc, vr->global_hreg))->vreg))
            if (!reload_vreg(rc, vreg_to_reload, insn))
                return 0;
        return vr->global_hreg;
    }
    /* Use the last define-released register if its kind is correct and
       it's free so as to optimize for two-operand instructions. */
    if (jit_reg_kind(rc->last_def_released_hreg) == kind
        && (rc_get_hr(rc, rc->last_def_released_hreg))->vreg == 0)
        return rc->last_def_released_hreg;
    /* No hint given, just try to pick any free register. */
    for (i = 0; i < hreg_num; i++) {
        hreg = jit_reg_new(kind, i);
        if (jit_cc_is_hreg_fixed(rc->cc, hreg))
            continue;
        if (hregs[i].vreg == 0)
            /* Found a free one, return it. */
            return hreg;
    }
    /* No free registers: evict the occupant whose recorded occurrence
       is smallest, i.e. farthest away when continuing the backward
       scan (0 meaning no remaining occurrence in this block). */
    for (i = 0; i < hreg_num; i++) {
        if (jit_cc_is_hreg_fixed(rc->cc, jit_reg_new(kind, i)))
            continue;
        vr = rc_get_vr(rc, hregs[i].vreg);
        vr_distance = vr->distances ? uint_stack_top(vr->distances) : 0;
        if (vr_distance < min_distance) {
            min_distance = vr_distance;
            vreg_to_reload = hregs[i].vreg;
            hreg = jit_reg_new(kind, i);
        }
    }
    /* NOTE(review): hreg was last assigned by the free-register scan
       above (hreg_num > 0 guarantees at least one assignment); the
       assert below is what guarantees the eviction loop also set it —
       keep that in mind if the scans are ever restructured. */
    bh_assert(min_distance < distance);
    if (!reload_vreg(rc, vreg_to_reload, insn))
        return 0;
    return hreg;
}
/**
* Allocate a hard register for the virtual register if not allocated
 * yet. Necessary spill and reload instructions will be inserted
* before/after and after the given instruction. This operation will
* convert the virtual register's state from 1 or 3 to 2.
*
* @param rc the regalloc context
* @param vreg the virtual register
* @param insn the instruction after which the spill and reload
* insertions will be inserted
* @param distance the distance of the current instruction
*
* @return the hard register allocated to the virtual register if
* succeeds, 0 otherwise
*/
static JitReg
allocate_for_vreg(RegallocContext *rc, JitReg vreg, JitInsn *insn, int distance)
{
    VirtualReg *vr = rc_get_vr(rc, vreg);
    /* Reuse the existing assignment when one is already in place;
       otherwise allocate a hard register and record the binding in
       both directions. */
    if (!vr->hreg) {
        vr->hreg = allocate_hreg(rc, vreg, insn, distance);
        if (vr->hreg)
            (rc_get_hr(rc, vr->hreg))->vreg = vreg;
    }
    return vr->hreg;
}
/**
* Clobber live registers.
*
* @param rc the regalloc context
* @param is_native whether it's native ABI or JITed ABI
* @param insn the instruction after which the reload insertion will
* be inserted
*
* @return true if succeeds, false otherwise
*/
static bool
clobber_live_regs(RegallocContext *rc, bool is_native, JitInsn *insn)
{
    unsigned kind, no;
    for (kind = JIT_REG_KIND_VOID; kind < JIT_REG_KIND_L32; kind++) {
        const unsigned hreg_num = jit_cc_hreg_num(rc->cc, kind);
        for (no = 0; no < hreg_num; no++) {
            JitReg hreg = jit_reg_new(kind, no);
            bool caller_saved;
            /* The caller-saved set depends on which ABI the call uses. */
            if (is_native)
                caller_saved =
                    jit_cc_is_hreg_caller_saved_native(rc->cc, hreg);
            else
                caller_saved =
                    jit_cc_is_hreg_caller_saved_jitted(rc->cc, hreg);
            if (!caller_saved || !rc->hregs[kind][no].vreg)
                continue;
            /* A live value in a caller-saved register must be released;
               reload_vreg inserts the instruction that re-fetches it
               after the call. */
            if (!reload_vreg(rc, rc->hregs[kind][no].vreg, insn))
                return false;
        }
    }
    return true;
}
/**
* Do local register allocation for the given basic block
*
* @param rc the regalloc context
* @param basic_block the basic block
* @param distance the distance of the last instruction of the basic block
*
* @return true if succeeds, false otherwise
*/
static bool
allocate_for_basic_block(RegallocContext *rc, JitBasicBlock *basic_block,
                         int distance)
{
    JitInsn *insn;
    /* Scan instructions backwards; `distance` counts down in step with
       the forward numbering produced by collect_distances. */
    JIT_FOREACH_INSN_REVERSE(basic_block, insn)
    {
        JitRegVec regvec = jit_insn_opnd_regs(insn);
        unsigned first_use = jit_insn_opnd_first_use(insn);
        unsigned i;
        JitReg *regp;
        distance--;
        /* First, registers DEFINED by this instruction: bind a hard
           register, emit a spill if the value also lives in a slot,
           then release both (scanning backwards, the definition is the
           last event for this value). */
        JIT_REG_VEC_FOREACH_DEF(regvec, i, regp, first_use)
        if (is_alloc_candidate(rc->cc, *regp)) {
            const JitReg vreg = *regp;
            VirtualReg *vr = rc_get_vr(rc, vreg);
            if (!(*regp = allocate_for_vreg(rc, vreg, insn, distance)))
                return false;
            /* Spill the register if required. */
            if (vr->slot && !spill_vreg(rc, vreg, insn))
                return false;
            bh_assert(uint_stack_top(vr->distances) == distance);
            uint_stack_pop(&vr->distances);
            /* Record the define-released hard register. */
            rc->last_def_released_hreg = vr->hreg;
            /* Release the hreg and spill slot. */
            rc_free_spill_slot(rc, vr->slot);
            (rc_get_hr(rc, vr->hreg))->vreg = 0;
            vr->hreg = vr->slot = 0;
        }
        /* Calls clobber caller-saved registers under their ABI. */
        if (insn->opcode == JIT_OP_CALLBC) {
            if (!clobber_live_regs(rc, false, insn))
                return false;
            /* The exec_env_reg is implicitly used by the callee. */
            if (!allocate_for_vreg(rc, rc->cc->exec_env_reg, insn, distance))
                return false;
        }
        else if (insn->opcode == JIT_OP_CALLNATIVE) {
            if (!clobber_live_regs(rc, true, insn))
                return false;
        }
        /* Then registers USED by this instruction: allocate all of
           them first... */
        JIT_REG_VEC_FOREACH_USE(regvec, i, regp, first_use)
        if (is_alloc_candidate(rc->cc, *regp)) {
            if (!allocate_for_vreg(rc, *regp, insn, distance))
                return false;
        }
        /* ...and only then pop their distances and rewrite the
           operands, so a register used multiple times in the same
           instruction is handled consistently. */
        JIT_REG_VEC_FOREACH_USE(regvec, i, regp, first_use)
        if (is_alloc_candidate(rc->cc, *regp)) {
            VirtualReg *vr = rc_get_vr(rc, *regp);
            bh_assert(uint_stack_top(vr->distances) == distance);
            uint_stack_pop(&vr->distances);
            *regp = vr->hreg;
        }
    }
    return true;
}
/* Register allocation pass entry point: run local (per-basic-block)
   register allocation over every block of the compilation context.
   Returns true on success; all temporary state is released either way. */
bool
jit_pass_regalloc(JitCompContext *cc)
{
    RegallocContext rc;
    unsigned label_index, end_label_index;
    JitBasicBlock *basic_block;
    VirtualReg *self_vr;
    bool retval = false;
    if (!rc_init(&rc, cc))
        return false;
    /* NOTE: don't allocate new virtual registers during allocation
       because the rc->vregs array is fixed size. */
    /* TODO: allocate hard registers for global virtual registers here.
       Currently, exec_env_reg is the only global virtual register. */
    self_vr = rc_get_vr(&rc, cc->exec_env_reg);
    JIT_FOREACH_BLOCK_ENTRY_EXIT(cc, label_index, end_label_index, basic_block)
    {
        int distance;
        /* TODO: initialize hreg for live-out registers. */
        /* Pre-bind exec_env_reg to its globally assigned hard register
           at the start of every block. */
        self_vr->hreg = self_vr->global_hreg;
        (rc_get_hr(&rc, cc->exec_env_reg))->vreg = cc->exec_env_reg;
        if ((distance = collect_distances(&rc, basic_block)) < 0)
            goto cleanup_and_return;
        if (!allocate_for_basic_block(&rc, basic_block, distance))
            goto cleanup_and_return;
        /* TODO: generate necessary spills for live-in registers. */
    }
    retval = true;
cleanup_and_return:
    rc_destroy(&rc);
    return retval;
}

View File

@ -0,0 +1,19 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#include "jit_utils.h"
JitBitmap *
jit_bitmap_new(uintptr_t begin_index, unsigned bitnum)
{
    /* One byte stores 8 bits, so round the byte count up; jit_calloc
       zero-fills, leaving every bit initially cleared. */
    JitBitmap *bitmap =
        jit_calloc(offsetof(JitBitmap, map) + (bitnum + 7) / 8);
    if (!bitmap)
        return NULL;
    bitmap->begin_index = begin_index;
    bitmap->end_index = begin_index + bitnum;
    return bitmap;
}

View File

@ -0,0 +1,136 @@
/*
* Copyright (C) 2021 Intel Corporation. All rights reserved.
* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
*/
#ifndef _JIT_UTILS_H_
#define _JIT_UTILS_H_
#include "bh_platform.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
 * A simple fixed size bitmap.
 *
 * Instances are created by jit_bitmap_new(), which allocates extra
 * trailing bytes so that "map" actually holds
 * (end_index - begin_index) bits.
 */
typedef struct JitBitmap {
    /* The first valid bit index. */
    uintptr_t begin_index;
    /* The last valid bit index plus one. */
    uintptr_t end_index;
    /* The bitmap storage; declared with one element, but extended at
       allocation time to cover the whole [begin_index, end_index)
       bit range. */
    uint8 map[1];
} JitBitmap;
/**
 * Allocate a memory block from the runtime heap.
 *
 * @param size size in bytes of the block to allocate
 *
 * @return pointer to the allocated (uninitialized) block, NULL on failure
 */
static inline void *
jit_malloc(unsigned int size)
{
    return wasm_runtime_malloc(size);
}
/**
 * Allocate a zero-initialized memory block from the runtime heap.
 *
 * @param size size in bytes of the block to allocate
 *
 * @return pointer to the zeroed block, NULL on allocation failure
 */
static inline void *
jit_calloc(unsigned int size)
{
    void *mem = wasm_runtime_malloc(size);

    if (mem != NULL) {
        memset(mem, 0, size);
    }
    return mem;
}
static inline void
jit_free(void *ptr)
{
if (ptr)
wasm_runtime_free(ptr);
}
/**
* Create a new bitmap.
*
* @param begin_index the first valid bit index
* @param bitnum maximal bit number of the bitmap.
*
* @return the new bitmap if succeeds, NULL otherwise.
*/
JitBitmap *
jit_bitmap_new(uintptr_t begin_index, unsigned bitnum);
/**
 * Delete a bitmap.
 *
 * @param bitmap the bitmap to be deleted; NULL is allowed (jit_free
 *        ignores NULL pointers)
 */
static inline void
jit_bitmap_delete(JitBitmap *bitmap)
{
    jit_free(bitmap);
}
/**
 * Check whether the given index is in the range of the bitmap.
 *
 * @param bitmap the bitmap
 * @param n the bit index
 *
 * @return true if the index is in range, false otherwise
 */
static inline bool
jit_bitmap_is_in_range(JitBitmap *bitmap, unsigned n)
{
    if (n < bitmap->begin_index)
        return false;
    return n < bitmap->end_index;
}
/**
 * Get a bit in the bitmap.
 *
 * @param bitmap the bitmap
 * @param n the n-th bit to be get
 *
 * @return value of the bit (0 or 1)
 */
static inline int
jit_bitmap_get_bit(JitBitmap *bitmap, unsigned n)
{
    /* Bit position relative to the first valid index. */
    unsigned offset = n - bitmap->begin_index;

    bh_assert(n >= bitmap->begin_index && n < bitmap->end_index);
    return (bitmap->map[offset >> 3] >> (offset & 7)) & 1;
}
/**
 * Set a bit in the bitmap.
 *
 * @param bitmap the bitmap
 * @param n the n-th bit to be set
 */
static inline void
jit_bitmap_set_bit(JitBitmap *bitmap, unsigned n)
{
    /* Bit position relative to the first valid index. */
    unsigned offset = n - bitmap->begin_index;

    bh_assert(n >= bitmap->begin_index && n < bitmap->end_index);
    bitmap->map[offset >> 3] |= 1u << (offset & 7);
}
/**
 * Clear a bit in the bitmap.
 *
 * @param bitmap the bitmap
 * @param n the n-th bit to be cleared
 */
static inline void
jit_bitmap_clear_bit(JitBitmap *bitmap, unsigned n)
{
    /* Bit position relative to the first valid index. */
    unsigned offset = n - bitmap->begin_index;

    bh_assert(n >= bitmap->begin_index && n < bitmap->end_index);
    bitmap->map[offset >> 3] &= ~(1u << (offset & 7));
}
#ifdef __cplusplus
}
#endif
#endif

View File

@ -254,6 +254,9 @@ struct WASMFunction {
uint8 *consts; uint8 *consts;
uint32 const_cell_num; uint32 const_cell_num;
#endif #endif
#if WASM_ENABLE_FAST_JIT != 0
void *jitted_code;
#endif
}; };
struct WASMGlobal { struct WASMGlobal {
@ -323,6 +326,7 @@ typedef struct WASMFastOPCodeNode {
uint8 orig_op; uint8 orig_op;
} WASMFastOPCodeNode; } WASMFastOPCodeNode;
#endif #endif
struct WASMModule { struct WASMModule {
/* Module type, for module loaded from WASM bytecode binary, /* Module type, for module loaded from WASM bytecode binary,
this field is Wasm_Module_Bytecode; this field is Wasm_Module_Bytecode;
@ -414,9 +418,12 @@ struct WASMModule {
#if WASM_ENABLE_DEBUG_INTERP != 0 || WASM_ENABLE_DEBUG_AOT != 0 #if WASM_ENABLE_DEBUG_INTERP != 0 || WASM_ENABLE_DEBUG_AOT != 0
bh_list fast_opcode_list; bh_list fast_opcode_list;
uint8 *buf_code; uint8 *buf_code;
uint64 buf_code_size;
#endif
#if WASM_ENABLE_DEBUG_INTERP != 0 || WASM_ENABLE_DEBUG_AOT != 0 \
|| WASM_ENABLE_FAST_JIT != 0
uint8 *load_addr; uint8 *load_addr;
uint64 load_size; uint64 load_size;
uint64 buf_code_size;
#endif #endif
#if WASM_ENABLE_DEBUG_INTERP != 0 #if WASM_ENABLE_DEBUG_INTERP != 0
@ -437,6 +444,11 @@ struct WASMModule {
const uint8 *name_section_buf; const uint8 *name_section_buf;
const uint8 *name_section_buf_end; const uint8 *name_section_buf_end;
#endif #endif
#if WASM_ENABLE_FAST_JIT != 0
/* point to JITed functions */
void **func_ptrs;
#endif
}; };
typedef struct BlockType { typedef struct BlockType {

View File

@ -26,6 +26,11 @@ typedef struct WASMInterpFrame {
/* Instruction pointer of the bytecode array. */ /* Instruction pointer of the bytecode array. */
uint8 *ip; uint8 *ip;
#if WASM_ENABLE_FAST_JIT != 0
uint8 *jitted_return_addr;
uint32 spill_cache[16];
#endif
#if WASM_ENABLE_PERF_PROFILING != 0 #if WASM_ENABLE_PERF_PROFILING != 0
uint64 time_started; uint64 time_started;
#endif #endif

View File

@ -15,6 +15,9 @@
#if WASM_ENABLE_THREAD_MGR != 0 && WASM_ENABLE_DEBUG_INTERP != 0 #if WASM_ENABLE_THREAD_MGR != 0 && WASM_ENABLE_DEBUG_INTERP != 0
#include "../libraries/thread-mgr/thread_manager.h" #include "../libraries/thread-mgr/thread_manager.h"
#endif #endif
#if WASM_ENABLE_FAST_JIT != 0
#include "../fast-jit/jit_compiler.h"
#endif
typedef int32 CellType_I32; typedef int32 CellType_I32;
typedef int64 CellType_I64; typedef int64 CellType_I64;
@ -3761,7 +3764,12 @@ wasm_interp_call_wasm(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
} }
} }
else { else {
#if WASM_ENABLE_FAST_JIT == 0
wasm_interp_call_func_bytecode(module_inst, exec_env, function, frame); wasm_interp_call_func_bytecode(module_inst, exec_env, function, frame);
#else
jit_interp_switch_to_jitted(exec_env, frame, function,
function->u.func->jitted_code);
#endif
} }
/* Output the return value to the caller */ /* Output the return value to the caller */

View File

@ -13,6 +13,9 @@
#if WASM_ENABLE_DEBUG_INTERP != 0 #if WASM_ENABLE_DEBUG_INTERP != 0
#include "../libraries/debug-engine/debug_engine.h" #include "../libraries/debug-engine/debug_engine.h"
#endif #endif
#if WASM_ENABLE_FAST_JIT != 0
#include "../fast-jit/jit_compiler.h"
#endif
/* Read a value of given type from the address pointed to by the given /* Read a value of given type from the address pointed to by the given
pointer and increase the pointer to the position just after the pointer and increase the pointer to the position just after the
@ -3226,6 +3229,13 @@ load_from_sections(WASMModule *module, WASMSection *sections,
#endif #endif
} }
#if WASM_ENABLE_FAST_JIT != 0
if (!jit_compiler_compile_all(module)) {
set_error_buf(error_buf, error_buf_size, "fast jit compilation failed");
return false;
}
#endif
#if WASM_ENABLE_MEMORY_TRACING != 0 #if WASM_ENABLE_MEMORY_TRACING != 0
wasm_runtime_dump_module_mem_consumption((WASMModuleCommon *)module); wasm_runtime_dump_module_mem_consumption((WASMModuleCommon *)module);
#endif #endif
@ -3589,7 +3599,7 @@ wasm_loader_load(const uint8 *buf, uint32 size,
return NULL; return NULL;
} }
#if WASM_ENABLE_DEBUG_INTERP != 0 #if WASM_ENABLE_DEBUG_INTERP != 0 || WASM_ENABLE_FAST_JIT != 0
module->load_addr = (uint8 *)buf; module->load_addr = (uint8 *)buf;
module->load_size = size; module->load_size = size;
#endif #endif
@ -7309,7 +7319,8 @@ re_scan:
operand_offset = local_offset; operand_offset = local_offset;
PUSH_OFFSET_TYPE(local_type); PUSH_OFFSET_TYPE(local_type);
#else #else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) #if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_FAST_JIT == 0)
if (local_offset < 0x80) { if (local_offset < 0x80) {
#if WASM_ENABLE_DEBUG_INTERP != 0 #if WASM_ENABLE_DEBUG_INTERP != 0
record_fast_op(module, p_org, *p_org); record_fast_op(module, p_org, *p_org);
@ -7384,7 +7395,8 @@ re_scan:
POP_OFFSET_TYPE(local_type); POP_OFFSET_TYPE(local_type);
} }
#else #else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) #if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_FAST_JIT == 0)
if (local_offset < 0x80) { if (local_offset < 0x80) {
#if WASM_ENABLE_DEBUG_INTERP != 0 #if WASM_ENABLE_DEBUG_INTERP != 0
record_fast_op(module, p_org, *p_org); record_fast_op(module, p_org, *p_org);
@ -7455,7 +7467,8 @@ re_scan:
*(loader_ctx->frame_offset *(loader_ctx->frame_offset
- wasm_value_type_cell_num(local_type))); - wasm_value_type_cell_num(local_type)));
#else #else
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) #if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
&& (WASM_ENABLE_FAST_JIT == 0)
if (local_offset < 0x80) { if (local_offset < 0x80) {
#if WASM_ENABLE_DEBUG_INTERP != 0 #if WASM_ENABLE_DEBUG_INTERP != 0
record_fast_op(module, p_org, *p_org); record_fast_op(module, p_org, *p_org);
@ -7505,7 +7518,6 @@ re_scan:
PUSH_TYPE(global_type); PUSH_TYPE(global_type);
#if WASM_ENABLE_FAST_INTERP == 0 #if WASM_ENABLE_FAST_INTERP == 0
#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
if (global_type == VALUE_TYPE_I64 if (global_type == VALUE_TYPE_I64
|| global_type == VALUE_TYPE_F64) { || global_type == VALUE_TYPE_F64) {
#if WASM_ENABLE_DEBUG_INTERP != 0 #if WASM_ENABLE_DEBUG_INTERP != 0
@ -7513,7 +7525,6 @@ re_scan:
#endif #endif
*p_org = WASM_OP_GET_GLOBAL_64; *p_org = WASM_OP_GET_GLOBAL_64;
} }
#endif
#else /* else of WASM_ENABLE_FAST_INTERP */ #else /* else of WASM_ENABLE_FAST_INTERP */
if (global_type == VALUE_TYPE_I64 if (global_type == VALUE_TYPE_I64
|| global_type == VALUE_TYPE_F64) { || global_type == VALUE_TYPE_F64) {

View File

@ -10,6 +10,9 @@
#include "wasm_opcode.h" #include "wasm_opcode.h"
#include "wasm_runtime.h" #include "wasm_runtime.h"
#include "../common/wasm_native.h" #include "../common/wasm_native.h"
#if WASM_ENABLE_FAST_JIT != 0
#include "../fast-jit/jit_compiler.h"
#endif
/* Read a value of given type from the address pointed to by the given /* Read a value of given type from the address pointed to by the given
pointer and increase the pointer to the position just after the pointer and increase the pointer to the position just after the
@ -2137,6 +2140,13 @@ load_from_sections(WASMModule *module, WASMSection *sections,
} }
} }
#if WASM_ENABLE_FAST_JIT != 0
if (!jit_compiler_compile_all(module)) {
set_error_buf(error_buf, error_buf_size, "fast jit compilation failed");
return false;
}
#endif
#if WASM_ENABLE_MEMORY_TRACING != 0 #if WASM_ENABLE_MEMORY_TRACING != 0
wasm_runtime_dump_module_mem_consumption(module); wasm_runtime_dump_module_mem_consumption(module);
#endif #endif

View File

@ -50,6 +50,11 @@ if (NOT DEFINED WAMR_BUILD_JIT)
set (WAMR_BUILD_JIT 0) set (WAMR_BUILD_JIT 0)
endif () endif ()
if (NOT DEFINED WAMR_BUILD_FAST_JIT)
# Disable Fast JIT by default
set (WAMR_BUILD_FAST_JIT 0)
endif ()
if (NOT DEFINED WAMR_BUILD_LIBC_BUILTIN) if (NOT DEFINED WAMR_BUILD_LIBC_BUILTIN)
# Enable libc builtin support by default # Enable libc builtin support by default
set (WAMR_BUILD_LIBC_BUILTIN 1) set (WAMR_BUILD_LIBC_BUILTIN 1)