diff --git a/build-scripts/config_common.cmake b/build-scripts/config_common.cmake index 432acf581..191487bf9 100644 --- a/build-scripts/config_common.cmake +++ b/build-scripts/config_common.cmake @@ -138,6 +138,8 @@ if (WAMR_BUILD_JIT EQUAL 1) else () message (" WAMR LLVM MC JIT enabled") endif () +elseif (WAMR_BUILD_FAST_JIT EQUAL 1) + message (" WAMR Fast JIT enabled") else () message (" WAMR JIT disabled") endif () diff --git a/build-scripts/runtime_lib.cmake b/build-scripts/runtime_lib.cmake index e7f7860cf..e713ed1d2 100644 --- a/build-scripts/runtime_lib.cmake +++ b/build-scripts/runtime_lib.cmake @@ -48,7 +48,11 @@ if (NOT DEFINED WAMR_BUILD_TARGET) endif () ################ optional according to settings ################ -if (WAMR_BUILD_INTERP EQUAL 1 OR WAMR_BUILD_JIT EQUAL 1) +if (WAMR_BUILD_INTERP EQUAL 1 OR WAMR_BUILD_JIT EQUAL 1 + OR WAMR_BUILD_FAST_JIT EQUAL 1) + if (WAMR_BUILD_FAST_JIT EQUAL 1) + set (WAMR_BUILD_FAST_INTERP 0) + endif () include (${IWASM_DIR}/interpreter/iwasm_interp.cmake) endif () @@ -59,6 +63,10 @@ if (WAMR_BUILD_AOT EQUAL 1) endif () endif () +if (NOT WAMR_BUILD_JIT EQUAL 1 AND WAMR_BUILD_FAST_JIT EQUAL 1) + include (${IWASM_DIR}/fast-jit/iwasm_fast_jit.cmake) +endif () + if (WAMR_BUILD_APP_FRAMEWORK EQUAL 1) include (${APP_FRAMEWORK_DIR}/app_framework.cmake) include (${SHARED_DIR}/coap/lib_coap.cmake) @@ -137,6 +145,7 @@ set (source_all ${IWASM_INTERP_SOURCE} ${IWASM_AOT_SOURCE} ${IWASM_COMPL_SOURCE} + ${IWASM_FAST_JIT_SOURCE} ${WASM_APP_LIB_SOURCE_ALL} ${NATIVE_INTERFACE_SOURCE} ${APP_MGR_SOURCE} diff --git a/core/iwasm/common/wasm_exec_env.h b/core/iwasm/common/wasm_exec_env.h index b3e2a0524..250fe8270 100644 --- a/core/iwasm/common/wasm_exec_env.h +++ b/core/iwasm/common/wasm_exec_env.h @@ -84,6 +84,12 @@ typedef struct WASMExecEnv { void **native_symbol; #endif +#if WASM_ENABLE_FAST_JIT != 0 + /* Cache for jit native operations, mainly for operations of float, + double and long, such as F64TOI64, F32TOI64, I64 MUL/REM, and so on. 
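A JIT-ed operation can stage a 64-bit intermediate value here instead of keeping it live in a register across the operation.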
*/ + uint64 jit_cache[2]; +#endif + #if WASM_ENABLE_THREAD_MGR != 0 /* thread return value */ void *thread_ret_value; diff --git a/core/iwasm/common/wasm_runtime_common.c b/core/iwasm/common/wasm_runtime_common.c index eebbf27c8..e8558777d 100644 --- a/core/iwasm/common/wasm_runtime_common.c +++ b/core/iwasm/common/wasm_runtime_common.c @@ -27,6 +27,9 @@ #if WASM_ENABLE_SHARED_MEMORY != 0 #include "wasm_shared_memory.h" #endif +#if WASM_ENABLE_FAST_JIT != 0 +#include "../fast-jit/jit_compiler.h" +#endif #include "../common/wasm_c_api_internal.h" #if WASM_ENABLE_MULTI_MODULE != 0 @@ -146,8 +149,20 @@ wasm_runtime_env_init() } #endif +#if WASM_ENABLE_FAST_JIT != 0 + if (!jit_compiler_init()) { + goto fail9; + } +#endif + return true; +#if WASM_ENABLE_FAST_JIT != 0 +fail9: +#if WASM_ENABLE_REF_TYPES != 0 + wasm_externref_map_destroy(); +#endif +#endif #if WASM_ENABLE_REF_TYPES != 0 fail8: #endif @@ -208,6 +223,10 @@ wasm_runtime_init() void wasm_runtime_destroy() { +#if WASM_ENABLE_FAST_JIT != 0 + jit_compiler_destroy(); +#endif + #if WASM_ENABLE_REF_TYPES != 0 wasm_externref_map_destroy(); #endif diff --git a/core/iwasm/compilation/aot_compiler.c b/core/iwasm/compilation/aot_compiler.c index c4dac553a..e1516eebe 100644 --- a/core/iwasm/compilation/aot_compiler.c +++ b/core/iwasm/compilation/aot_compiler.c @@ -489,6 +489,7 @@ aot_compile_func(AOTCompContext *comp_ctx, uint32 func_index) break; case WASM_OP_GET_GLOBAL: + case WASM_OP_GET_GLOBAL_64: read_leb_uint32(frame_ip, frame_ip_end, global_idx); if (!aot_compile_op_get_global(comp_ctx, func_ctx, global_idx)) return false; diff --git a/core/iwasm/fast-jit/cg/x86-64/jit_codegen_x86_64.c b/core/iwasm/fast-jit/cg/x86-64/jit_codegen_x86_64.c new file mode 100644 index 000000000..274e329e0 --- /dev/null +++ b/core/iwasm/fast-jit/cg/x86-64/jit_codegen_x86_64.c @@ -0,0 +1,120 @@ +/* + * Copyright (C) 2021 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_codegen.h" + +bool +jit_codegen_init() +{ + return true; +} + +void +jit_codegen_destroy() +{} + +/* clang-format off */ +static const uint8 hreg_info_I4[3][7] = { + /* ebp, eax, ebx, ecx, edx, edi, esi */ + { 1, 0, 0, 0, 0, 0, 1 }, /* fixed, esi is freely used */ + { 0, 1, 0, 1, 1, 0, 0 }, /* caller_saved_native */ + { 0, 1, 0, 1, 1, 1, 0 } /* caller_saved_jitted */ +}; + +static const uint8 hreg_info_I8[3][16] = { + /* rbp, rax, rbx, rcx, rdx, rdi, rsi, rsp, + r8, r9, r10, r11, r12, r13, r14, r15 */ + { 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 1 }, /* fixed, rsi is freely used */ + { 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 0, 0, 0, 0, 0, 0 }, /* caller_saved_native */ + { 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 0, 0, 0, 0, 0, 0 }, /* caller_saved_jitted */ +}; + +static uint8 hreg_info_F4[3][16] = { + { 0, 0, 0, 0, 0, 0, 0, 0, + 1, 1, 1, 1, 1, 1, 1, 1 }, /* fixed, rsi is freely used */ + { 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1 }, /* caller_saved_native */ + { 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1 }, /* caller_saved_jitted */ +}; + +static uint8 hreg_info_F8[3][16] = { + { 1, 1, 1, 1, 1, 1, 1, 1, + 0, 0, 0, 0, 0, 0, 0, 0 }, /* fixed, rsi is freely used */ + { 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1 }, /* caller_saved_native */ + { 1, 1, 1, 1, 1, 1, 1, 1, + 1, 1, 1, 1, 1, 1, 1, 1 }, /* caller_saved_jitted */ +}; + +static const JitHardRegInfo hreg_info = { + { + { 0, NULL, NULL, NULL }, /* VOID */ + + { sizeof(hreg_info_I4[0]), /* I4 */ + hreg_info_I4[0], + hreg_info_I4[1], + hreg_info_I4[2] }, + + { sizeof(hreg_info_I8[0]), /* I8 */ + hreg_info_I8[0], + hreg_info_I8[1], + hreg_info_I8[2] }, + + { sizeof(hreg_info_F4[0]), /* F4 */ + hreg_info_F4[0], + hreg_info_F4[1], + hreg_info_F4[2] }, + + { sizeof(hreg_info_F8[0]), /* F8 */ + hreg_info_F8[0], + hreg_info_F8[1], + hreg_info_F8[2] }, + + { 0, NULL, NULL, NULL }, /* V8 */ + { 0, NULL, NULL, NULL }, /* V16 */ + { 0, NULL, NULL, NULL } /* V32 */ + }, + /* frame pointer hreg index: rbp */ + 0, + /* exec_env hreg index: r15 */ + 15, + /* cmp hreg index: esi */ + 6 +}; +/* clang-format on */ + +const JitHardRegInfo * +jit_codegen_get_hreg_info() +{ + return &hreg_info; +} + +bool +jit_codegen_gen_native(JitCompContext *cc) +{ + jit_set_last_error(cc, "jit_codegen_gen_native failed"); + return false; +} + +bool +jit_codegen_lower(JitCompContext *cc) +{ + return true; +} + +void +jit_codegen_dump_native(void *begin_addr, void *end_addr) +{} + +bool +jit_codegen_call_func_jitted(void *exec_env, void *frame, void *func_inst, + void *target) +{ + return false; +} diff --git a/core/iwasm/fast-jit/fe/jit_emit_compare.c b/core/iwasm/fast-jit/fe/jit_emit_compare.c new file mode 100644 index 000000000..ec54e4e9b --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_compare.c @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_emit_compare.h" +#include "../jit_frontend.h" + +bool +jit_compile_op_i32_compare(JitCompContext *cc, IntCond cond) +{ + return false; +} + +bool +jit_compile_op_i64_compare(JitCompContext *cc, IntCond cond) +{ + return false; +} + +bool +jit_compile_op_f32_compare(JitCompContext *cc, FloatCond cond) +{ + return false; +} + +bool +jit_compile_op_f64_compare(JitCompContext *cc, FloatCond cond) +{ + return false; +} diff --git a/core/iwasm/fast-jit/fe/jit_emit_compare.h b/core/iwasm/fast-jit/fe/jit_emit_compare.h new file mode 100644 index 000000000..db905b550 --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_compare.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef _JIT_EMIT_COMPARE_H_ +#define _JIT_EMIT_COMPARE_H_ + +#include "../jit_compiler.h" +#include "../jit_frontend.h" + +#ifdef __cplusplus +extern "C" { +#endif + +bool +jit_compile_op_i32_compare(JitCompContext *cc, IntCond cond); + +bool +jit_compile_op_i64_compare(JitCompContext *cc, IntCond cond); + +bool +jit_compile_op_f32_compare(JitCompContext *cc, FloatCond cond); + +bool +jit_compile_op_f64_compare(JitCompContext *cc, FloatCond cond); + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif + +#endif /* end of _JIT_EMIT_COMPARE_H_ */ diff --git a/core/iwasm/fast-jit/fe/jit_emit_const.c b/core/iwasm/fast-jit/fe/jit_emit_const.c new file mode 100644 index 000000000..1bbc83c2f --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_const.c @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_emit_const.h" +#include "../jit_frontend.h" + +bool +jit_compile_op_i32_const(JitCompContext *cc, int32 i32_const) +{ + JitReg value = NEW_CONST(I32, i32_const); + PUSH_I32(value); + return true; +fail: + return false; +} + +bool +jit_compile_op_i64_const(JitCompContext *cc, int64 i64_const) +{ + JitReg value = NEW_CONST(I64, i64_const); + PUSH_I64(value); + return true; +fail: + return false; +} + +bool +jit_compile_op_f32_const(JitCompContext *cc, float32 f32_const) +{ + JitReg value = NEW_CONST(F32, f32_const); + PUSH_F32(value); + return true; +fail: + return false; +} + +bool +jit_compile_op_f64_const(JitCompContext *cc, float64 f64_const) +{ + JitReg value = NEW_CONST(F64, f64_const); + PUSH_F64(value); + return true; +fail: + return false; +} diff --git a/core/iwasm/fast-jit/fe/jit_emit_const.h b/core/iwasm/fast-jit/fe/jit_emit_const.h new file mode 100644 index 000000000..b75314117 --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_const.h @@ -0,0 +1,31 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef _JIT_EMIT_CONST_H_ +#define _JIT_EMIT_CONST_H_ + +#include "../jit_compiler.h" + +#ifdef __cplusplus +extern "C" { +#endif + +bool +jit_compile_op_i32_const(JitCompContext *cc, int32 i32_const); + +bool +jit_compile_op_i64_const(JitCompContext *cc, int64 i64_const); + +bool +jit_compile_op_f32_const(JitCompContext *cc, float32 f32_const); + +bool +jit_compile_op_f64_const(JitCompContext *cc, float64 f64_const); + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif + +#endif /* end of _JIT_EMIT_CONST_H_ */ diff --git a/core/iwasm/fast-jit/fe/jit_emit_control.c b/core/iwasm/fast-jit/fe/jit_emit_control.c new file mode 100644 index 000000000..e1a28db4b --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_control.c @@ -0,0 +1,1080 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_emit_control.h" +#include "jit_emit_exception.h" +#include "../jit_frontend.h" +#include "../interpreter/wasm_loader.h" + +#define CREATE_BASIC_BLOCK(new_basic_block) \ + do { \ + bh_assert(!new_basic_block); \ + if (!(new_basic_block = jit_cc_new_basic_block(cc, 0))) { \ + jit_set_last_error(cc, "create basic block failed"); \ + goto fail; \ + } \ + } while (0) + +#define CURR_BASIC_BLOCK() cc->cur_basic_block + +#define BUILD_BR(target_block) \ + do { \ + if (!GEN_INSN(JMP, jit_basic_block_label(target_block))) { \ + jit_set_last_error(cc, "generate jmp insn failed"); \ + goto fail; \ + } \ + } while (0) + +#define BUILD_COND_BR(value_if, block_then, block_else) \ + do { \ + if (!GEN_INSN(BNE, value_if, jit_basic_block_label(block_then), \ + jit_basic_block_label(block_else))) { \ + jit_set_last_error(cc, "generate bne insn failed"); \ + goto fail; \ + } \ + } while (0) + +#define SET_BUILDER_POS(basic_block) \ + do { \ + cc->cur_basic_block = basic_block; \ + } while (0) + +#define SET_BB_BEGIN_BCIP(basic_block, bcip) \ + do { \ + *(jit_annl_begin_bcip(cc, jit_basic_block_label(basic_block))) = bcip; \ + } while (0) + +#define SET_BB_END_BCIP(basic_block, bcip) \ + do { \ + *(jit_annl_end_bcip(cc, jit_basic_block_label(basic_block))) = bcip; \ + } while (0) + +static JitBlock * +get_target_block(JitCompContext *cc, uint32 br_depth) +{ + uint32 i = br_depth; + JitBlock *block = cc->block_stack.block_list_end; + + while (i-- > 0 && block) { + block = block->prev; + } + + if (!block) { + jit_set_last_error(cc, "WASM block stack underflow"); + return NULL; + } + return block; +} + +static bool +load_block_params(JitCompContext *cc, JitBlock *block) +{ + JitFrame *jit_frame = cc->jit_frame; + uint32 offset, i; + JitReg value; + + /* Clear jit frame's locals and stacks */ + clear_values(jit_frame); + + /* Restore jit frame's sp to block's sp begin */ + jit_frame->sp = block->frame_sp_begin; + + /* Load params to new block */ + offset = (uint32)(jit_frame->sp - jit_frame->lp); + for (i = 0; i < block->param_count; i++) { + switch (block->param_types[i]) { + case VALUE_TYPE_I32: +#if WASM_ENABLE_REF_TYPES != 0 + case VALUE_TYPE_EXTERNREF: + case VALUE_TYPE_FUNCREF: +#endif + value = gen_load_i32(jit_frame, offset); + offset++; + break; + case VALUE_TYPE_I64: + value = gen_load_i64(jit_frame, offset); + offset += 2; + break; + case VALUE_TYPE_F32: + value = gen_load_f32(jit_frame, offset); + offset++; + break; + case VALUE_TYPE_F64: + value = gen_load_f64(jit_frame, offset); + offset += 2; + break; + default: + bh_assert(0); + break; + } + 
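/* Push the loaded value onto the new block's value stack */ +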
PUSH(value, block->param_types[i]); + } + + return true; +fail: + return false; +} + +static bool +push_jit_block_to_stack_and_pass_params(JitCompContext *cc, JitBlock *block, + JitBasicBlock *basic_block, JitReg cond) +{ + JitFrame *jit_frame = cc->jit_frame; + JitValue *value_list_head = NULL, *value_list_end = NULL, *jit_value; + JitInsn *insn; + JitReg value; + uint32 i, param_index, cell_num; + + if (block->label_type == LABEL_TYPE_BLOCK + || (block->label_type == LABEL_TYPE_IF && !cond)) { + /* Reuse the current basic block and no need to commit values, + we just move param values from current block's value stack to + the new block's value stack */ + for (i = 0; i < block->param_count; i++) { + param_index = block->param_count - 1 - i; + jit_value = jit_value_stack_pop( + &cc->block_stack.block_list_end->value_stack); + if (!value_list_head) { + value_list_head = value_list_end = jit_value; + jit_value->prev = jit_value->next = NULL; + } + else { + jit_value->prev = NULL; + jit_value->next = value_list_head; + value_list_head->prev = jit_value; + value_list_head = jit_value; + } + } + block->value_stack.value_list_head = value_list_head; + block->value_stack.value_list_end = value_list_end; + + /* Save block's begin frame sp */ + cell_num = wasm_get_cell_num(block->param_types, block->param_count); + block->frame_sp_begin = jit_frame->sp - cell_num; + + /* Push the new block to block stack */ + jit_block_stack_push(&cc->block_stack, block); + + /* Continue to translate current block */ + } + else { + /* Commit register values to locals and stacks */ + gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp); + + /* Pop param values from current block's value stack */ + for (i = 0; i < block->param_count; i++) { + param_index = block->param_count - 1 - i; + POP(value, block->param_types[param_index]); + } + + /* Clear frame values */ + clear_values(jit_frame); + /* Save block's begin frame sp */ + block->frame_sp_begin = jit_frame->sp; + + /* Push the new block to block stack */ + jit_block_stack_push(&cc->block_stack, block); + + if (!cond) { /* LOOP block */ + BUILD_BR(basic_block); + } + else { /* IF block with condition br insn */ + if (!(insn = GEN_INSN(BNE, cond, jit_basic_block_label(basic_block), + 0))) { + jit_set_last_error(cc, "generate cond br failed"); + goto fail; + } + + /* Don't create else basic block or end basic block now, just + save its incoming BNE insn, and patch the insn's else label + when the basic block is lazily created */ + if (basic_block == block->basic_block_entry) { + block->incoming_insn_for_else_bb = insn; + } + else { + if (!jit_block_add_incoming_insn(block, insn)) { + jit_set_last_error(cc, "add incoming insn failed"); + goto fail; + } + } + } + + /* Start to translate the block */ + SET_BUILDER_POS(basic_block); + + /* Push the block parameters */ + if (!load_block_params(cc, block)) { + goto fail; + } + } + return true; +fail: + return false; +} + +static void +copy_block_arities(JitCompContext *cc, JitReg dst_frame_sp, uint8 *dst_types, + uint32 dst_type_count) +{ + JitFrame *jit_frame; + uint32 offset_src, offset_dst, i; + JitReg value; + + jit_frame = cc->jit_frame; + offset_src = (uint32)(jit_frame->sp - jit_frame->lp) + - wasm_get_cell_num(dst_types, dst_type_count); + offset_dst = 0; + + /* pop values from stack and store to dest frame */ + for (i = 0; i < dst_type_count; i++) { + switch (dst_types[i]) { + case VALUE_TYPE_I32: +#if WASM_ENABLE_REF_TYPES != 0 + case VALUE_TYPE_EXTERNREF: + case VALUE_TYPE_FUNCREF: +#endif + value = 
gen_load_i32(jit_frame, offset_src); + GEN_INSN(STI32, value, dst_frame_sp, + NEW_CONST(I32, offset_dst * 4)); + offset_src++; + offset_dst++; + break; + case VALUE_TYPE_I64: + value = gen_load_i64(jit_frame, offset_src); + GEN_INSN(STI64, value, dst_frame_sp, + NEW_CONST(I32, offset_dst * 4)); + offset_src += 2; + offset_dst += 2; + break; + case VALUE_TYPE_F32: + value = gen_load_f32(jit_frame, offset_src); + GEN_INSN(STF32, value, dst_frame_sp, + NEW_CONST(I32, offset_dst * 4)); + offset_src++; + offset_dst++; + break; + case VALUE_TYPE_F64: + value = gen_load_f64(jit_frame, offset_src); + GEN_INSN(STI64, value, dst_frame_sp, + NEW_CONST(I32, offset_dst * 4)); + offset_src += 2; + offset_dst += 2; + break; + default: + bh_assert(0); + break; + } + } +} + +static void +handle_func_return(JitCompContext *cc, JitBlock *block) +{ + JitReg prev_frame, prev_frame_sp; + +#if UINTPTR_MAX == UINT64_MAX + prev_frame = jit_cc_new_reg_I64(cc); + prev_frame_sp = jit_cc_new_reg_I64(cc); + + /* prev_frame = cur_frame->prev_frame */ + GEN_INSN(LDI64, prev_frame, cc->fp_reg, + NEW_CONST(I32, offsetof(WASMInterpFrame, prev_frame))); + GEN_INSN(LDI64, prev_frame_sp, prev_frame, + NEW_CONST(I32, offsetof(WASMInterpFrame, sp))); +#else + prev_frame = jit_cc_new_reg_I32(cc); + prev_frame_sp = jit_cc_new_reg_I32(cc); + + /* prev_frame = cur_frame->prev_frame */ + GEN_INSN(LDI32, prev_frame, cc->fp_reg, + NEW_CONST(I32, offsetof(WASMInterpFrame, prev_frame))); + GEN_INSN(LDI32, prev_frame_sp, prev_frame, + NEW_CONST(I32, offsetof(WASMInterpFrame, sp))); +#endif + + copy_block_arities(cc, prev_frame_sp, block->result_types, + block->result_count); + + /* Free stack space of the current frame: + exec_env->wasm_stack.s.top = cur_frame */ +#if UINTPTR_MAX == UINT64_MAX + GEN_INSN(STI64, cc->fp_reg, cc->exec_env_reg, + NEW_CONST(I32, offsetof(WASMExecEnv, wasm_stack.s.top))); +#else + GEN_INSN(STI32, cc->fp_reg, cc->exec_env_reg, + NEW_CONST(I32, offsetof(WASMExecEnv, wasm_stack.s.top))); +#endif + /* Set the prev_frame as the current frame: + exec_env->cur_frame = prev_frame */ +#if UINTPTR_MAX == UINT64_MAX + GEN_INSN(STI64, prev_frame, cc->exec_env_reg, + NEW_CONST(I32, offsetof(WASMExecEnv, cur_frame))); +#else + GEN_INSN(STI32, prev_frame, cc->exec_env_reg, + NEW_CONST(I32, offsetof(WASMExecEnv, cur_frame))); +#endif + /* fp_reg = prev_frame */ + GEN_INSN(MOV, cc->fp_reg, prev_frame); + /* return 0 */ + GEN_INSN(RETURN, NEW_CONST(I32, 0)); +} + +static bool +handle_op_end(JitCompContext *cc, uint8 **p_frame_ip) +{ + JitFrame *jit_frame = cc->jit_frame; + JitBlock *block; + JitIncomingInsn *incoming_insn; + JitInsn *insn; + + /* Check block stack */ + if (!(block = cc->block_stack.block_list_end)) { + jit_set_last_error(cc, "WASM block stack underflow"); + return false; + } + + if (!block->incoming_insns_for_end_bb) { + /* No other basic blocks jumping to this end, no need to + create the end basic block, just continue to translate + the following opcodes */ + if (block->label_type == LABEL_TYPE_FUNCTION) { + handle_func_return(cc, block); + SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1); + } + + /* Pop block and destroy the block */ + block = jit_block_stack_pop(&cc->block_stack); + jit_block_destroy(block); + return true; + } + else { + /* Commit register values to locals and stacks */ + gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp); + /* Clear frame values */ + clear_values(jit_frame); + + /* Create the end basic block */ + CREATE_BASIC_BLOCK(block->basic_block_end); + 
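/* The block's bytecode ends at this END opcode and the end basic block begins right after it */ +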
SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1); + SET_BB_BEGIN_BCIP(block->basic_block_end, *p_frame_ip); + /* Jump to the end basic block */ + BUILD_BR(block->basic_block_end); + + /* Patch the INSNs which jump to this basic block */ + incoming_insn = block->incoming_insns_for_end_bb; + while (incoming_insn) { + insn = incoming_insn->insn; + if (insn->opcode == JIT_OP_JMP) { + *(jit_insn_opnd(insn, 0)) = + jit_basic_block_label(block->basic_block_end); + } + else if (insn->opcode == JIT_OP_BNE) { + *(jit_insn_opnd(insn, 1)) = + jit_basic_block_label(block->basic_block_end); + } + else { + bh_assert(0); + } + incoming_insn = incoming_insn->next; + } + + SET_BUILDER_POS(block->basic_block_end); + + if (block->label_type == LABEL_TYPE_FUNCTION) { + handle_func_return(cc, block); + SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1); + } + + /* Pop block and destroy the block */ + block = jit_block_stack_pop(&cc->block_stack); + jit_block_destroy(block); + return true; + } + return true; +fail: + return false; +} + +static bool +handle_op_else(JitCompContext *cc, uint8 **p_frame_ip) +{ + JitBlock *block = cc->block_stack.block_list_end; + JitFrame *jit_frame = cc->jit_frame; + JitInsn *insn; + + /* Check block */ + if (!block) { + jit_set_last_error(cc, "WASM block stack underflow"); + return false; + } + if (block->label_type != LABEL_TYPE_IF) { + jit_set_last_error(cc, "Invalid WASM block type"); + return false; + } + + if (!block->incoming_insn_for_else_bb) { + /* The if branch is handled like OP_BLOCK (cond is const and != 0), + just skip the else branch and handle OP_END */ + *p_frame_ip = block->wasm_code_end + 1; + return handle_op_end(cc, p_frame_ip); + } + else { + /* Has else branch and need to translate else branch */ + + /* Commit register values to locals and stacks */ + gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp); + /* Clear frame values */ + clear_values(jit_frame); + + /* Jump to end basic block */ + if (!(insn = GEN_INSN(JMP, 0))) { + jit_set_last_error(cc, "generate jmp insn failed"); + return false; + } + if (!jit_block_add_incoming_insn(block, insn)) { + jit_set_last_error(cc, "add incoming insn failed"); + return false; + } + + /* Clear value stack, restore param values and + start to translate the else branch. 
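Results pushed by the if branch belong to that path only, so the stack is reset to the block's parameter state before entering the else branch.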
*/ + jit_value_stack_destroy(&block->value_stack); + + /* Lazily create else basic block */ + CREATE_BASIC_BLOCK(block->basic_block_else); + SET_BB_END_BCIP(block->basic_block_entry, *p_frame_ip - 1); + SET_BB_BEGIN_BCIP(block->basic_block_else, *p_frame_ip); + + /* Patch the insn which conditionly jumps to the else basic block */ + insn = block->incoming_insn_for_else_bb; + *(jit_insn_opnd(insn, 2)) = + jit_basic_block_label(block->basic_block_else); + + SET_BUILDER_POS(block->basic_block_else); + + /* Reload block parameters */ + if (!load_block_params(cc, block)) { + return false; + } + + return true; + } + return true; +fail: + return false; +} + +static bool +handle_next_reachable_block(JitCompContext *cc, uint8 **p_frame_ip) +{ + JitBlock *block = cc->block_stack.block_list_end, *block_prev; + + bh_assert(block); + + do { + block_prev = block->prev; + + if (block->label_type == LABEL_TYPE_IF + && block->incoming_insn_for_else_bb + && *p_frame_ip <= block->wasm_code_else) { + /* Else branch hasn't been translated, + start to translate the else branch */ + *p_frame_ip = block->wasm_code_else + 1; + return handle_op_else(cc, p_frame_ip); + } + else if (block->incoming_insns_for_end_bb) { + *p_frame_ip = block->wasm_code_end + 1; + return handle_op_end(cc, p_frame_ip); + } + else { + jit_block_stack_pop(&cc->block_stack); + jit_block_destroy(block); + block = block_prev; + } + } while (block != NULL); + + return true; +} + +bool +jit_compile_op_block(JitCompContext *cc, uint8 **p_frame_ip, + uint8 *frame_ip_end, uint32 label_type, uint32 param_count, + uint8 *param_types, uint32 result_count, + uint8 *result_types) +{ + BlockAddr block_addr_cache[BLOCK_ADDR_CACHE_SIZE][BLOCK_ADDR_CONFLICT_SIZE]; + JitBlock *block; + JitReg value; + uint8 *else_addr, *end_addr; + + /* Check block stack */ + if (!cc->block_stack.block_list_end) { + jit_set_last_error(cc, "WASM block stack underflow"); + return false; + } + + memset(block_addr_cache, 0, sizeof(block_addr_cache)); + + /* Get block info */ + if (!(wasm_loader_find_block_addr( + NULL, (BlockAddr *)block_addr_cache, *p_frame_ip, frame_ip_end, + (uint8)label_type, &else_addr, &end_addr))) { + jit_set_last_error(cc, "find block end addr failed"); + return false; + } + + /* Allocate memory */ + if (!(block = jit_calloc(sizeof(JitBlock)))) { + jit_set_last_error(cc, "allocate memory failed"); + return false; + } + + if (param_count && !(block->param_types = jit_calloc(param_count))) { + jit_set_last_error(cc, "allocate memory failed"); + goto fail; + } + if (result_count && !(block->result_types = jit_calloc(result_count))) { + jit_set_last_error(cc, "allocate memory failed"); + goto fail; + } + + /* Initialize block data */ + block->label_type = label_type; + block->param_count = param_count; + if (param_count) { + bh_memcpy_s(block->param_types, param_count, param_types, param_count); + } + block->result_count = result_count; + if (result_count) { + bh_memcpy_s(block->result_types, result_count, result_types, + result_count); + } + block->wasm_code_else = else_addr; + block->wasm_code_end = end_addr; + + if (label_type == LABEL_TYPE_BLOCK) { + /* Push the new jit block to block stack and continue to + translate current basic block */ + if (!push_jit_block_to_stack_and_pass_params(cc, block, + cc->cur_basic_block, 0)) + goto fail; + } + else if (label_type == LABEL_TYPE_LOOP) { + CREATE_BASIC_BLOCK(block->basic_block_entry); + SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1); + SET_BB_BEGIN_BCIP(block->basic_block_entry, *p_frame_ip); + /* 
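A loop's entry basic block is created eagerly because back edges (br instructions targeting the loop) must be able to jump to its label.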
Push the new jit block to block stack and continue to + translate the new basic block */ + if (!push_jit_block_to_stack_and_pass_params( + cc, block, block->basic_block_entry, 0)) + goto fail; + } + else if (label_type == LABEL_TYPE_IF) { + POP_I32(value); + + if (!jit_reg_is_const_val(value)) { + /* Compare value is not constant, create condition br IR */ + + /* Create entry block */ + CREATE_BASIC_BLOCK(block->basic_block_entry); + SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1); + SET_BB_BEGIN_BCIP(block->basic_block_entry, *p_frame_ip); + + if (else_addr) { + if (!push_jit_block_to_stack_and_pass_params( + cc, block, block->basic_block_entry, value)) + goto fail; + } + else { + if (!push_jit_block_to_stack_and_pass_params( + cc, block, block->basic_block_else, value)) + goto fail; + } + } + else { + if (jit_cc_get_const_I32(cc, value) != 0) { + /* Compare value is not 0, condition is true, else branch of + BASIC_BLOCK if cannot be reached, we treat it same as + LABEL_TYPE_BLOCK and start to translate if branch */ + if (!push_jit_block_to_stack_and_pass_params( + cc, block, block->basic_block_entry, 0)) + goto fail; + } + else { + if (else_addr) { + /* Compare value is not 0, condition is false, if branch of + BASIC_BLOCK if cannot be reached, we treat it same as + LABEL_TYPE_BLOCK and start to translate else branch */ + if (!push_jit_block_to_stack_and_pass_params( + cc, block, block->basic_block_else, 0)) + goto fail; + *p_frame_ip = else_addr + 1; + } + else { + /* The whole if block cannot be reached, skip it */ + jit_block_destroy(block); + *p_frame_ip = end_addr + 1; + } + } + } + } + else { + jit_set_last_error(cc, "Invalid block type"); + goto fail; + } + + return true; +fail: + jit_block_destroy(block); + return false; +} + +bool +jit_compile_op_else(JitCompContext *cc, uint8 **p_frame_ip) +{ + return handle_op_else(cc, p_frame_ip); +} + +bool +jit_compile_op_end(JitCompContext *cc, uint8 **p_frame_ip) +{ + return handle_op_end(cc, p_frame_ip); +} + +#if 0 +#if WASM_ENABLE_THREAD_MGR != 0 +bool +check_suspend_flags(JitCompContext *cc, JITFuncContext *func_ctx) +{ + LLVMValueRef terminate_addr, terminate_flags, flag, offset, res; + JitBasicBlock *terminate_check_block, non_terminate_block; + JITFuncType *jit_func_type = func_ctx->jit_func->func_type; + JitBasicBlock *terminate_block; + + /* Offset of suspend_flags */ + offset = I32_FIVE; + + if (!(terminate_addr = LLVMBuildInBoundsGEP( + cc->builder, func_ctx->exec_env, &offset, 1, "terminate_addr"))) { + jit_set_last_error("llvm build in bounds gep failed"); + return false; + } + if (!(terminate_addr = + LLVMBuildBitCast(cc->builder, terminate_addr, INT32_PTR_TYPE, + "terminate_addr_ptr"))) { + jit_set_last_error("llvm build bit cast failed"); + return false; + } + + if (!(terminate_flags = + LLVMBuildLoad(cc->builder, terminate_addr, "terminate_flags"))) { + jit_set_last_error("llvm build bit cast failed"); + return false; + } + /* Set terminate_flags memory accecc to volatile, so that the value + will always be loaded from memory rather than register */ + LLVMSetVolatile(terminate_flags, true); + + CREATE_BASIC_BLOCK(terminate_check_block, "terminate_check"); + MOVE_BASIC_BLOCK_AFTER_CURR(terminate_check_block); + + CREATE_BASIC_BLOCK(non_terminate_block, "non_terminate"); + MOVE_BASIC_BLOCK_AFTER_CURR(non_terminate_block); + + BUILD_ICMP(LLVMIntSGT, terminate_flags, I32_ZERO, res, "need_terminate"); + BUILD_COND_BR(res, terminate_check_block, non_terminate_block); + + /* Move builder to terminate check block */ + 
SET_BUILDER_POS(terminate_check_block); + + CREATE_BASIC_BLOCK(terminate_block, "terminate"); + MOVE_BASIC_BLOCK_AFTER_CURR(terminate_block); + + if (!(flag = LLVMBuildAnd(cc->builder, terminate_flags, I32_ONE, + "termination_flag"))) { + jit_set_last_error("llvm build AND failed"); + return false; + } + + BUILD_ICMP(LLVMIntSGT, flag, I32_ZERO, res, "need_terminate"); + BUILD_COND_BR(res, terminate_block, non_terminate_block); + + /* Move builder to terminate block */ + SET_BUILDER_POS(terminate_block); + if (!jit_build_zero_function_ret(cc, func_ctx, jit_func_type)) { + goto fail; + } + + /* Move builder to terminate block */ + SET_BUILDER_POS(non_terminate_block); + return true; + +fail: + return false; +} +#endif /* End of WASM_ENABLE_THREAD_MGR */ +#endif + +bool +jit_compile_op_br(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip) +{ + JitFrame *jit_frame; + JitBlock *block_dst; + JitReg frame_sp_dst; + JitValueSlot *frame_sp_src = NULL; + JitInsn *insn; + bool copy_arities; + uint32 offset; + +#if 0 /* TODO */ +#if WASM_ENABLE_THREAD_MGR != 0 + /* Insert suspend check point */ + if (cc->enable_thread_mgr) { + if (!check_suspend_flags(cc, func_ctx)) + return false; + } +#endif +#endif + + if (!(block_dst = get_target_block(cc, br_depth))) { + return false; + } + + jit_frame = cc->jit_frame; + + if (block_dst->label_type == LABEL_TYPE_LOOP) { + frame_sp_src = + jit_frame->sp + - wasm_get_cell_num(block_dst->param_types, block_dst->param_count); + } + else { + frame_sp_src = jit_frame->sp + - wasm_get_cell_num(block_dst->result_types, + block_dst->result_count); + } + + /* Only copy parameters or results when the src/dst addr are different */ + copy_arities = (block_dst->frame_sp_begin != frame_sp_src) ? true : false; + + if (copy_arities) { +#if UINTPTR_MAX == UINT64_MAX + frame_sp_dst = jit_cc_new_reg_I64(cc); +#else + frame_sp_dst = jit_cc_new_reg_I32(cc); +#endif + offset = offsetof(WASMInterpFrame, lp) + + (block_dst->frame_sp_begin - jit_frame->lp) * 4; + GEN_INSN(ADD, frame_sp_dst, cc->fp_reg, NEW_CONST(I32, offset)); + } + + if (block_dst->label_type == LABEL_TYPE_LOOP) { + if (copy_arities) { + /* Dest block is Loop block, copy loop parameters */ + copy_block_arities(cc, frame_sp_dst, block_dst->param_types, + block_dst->param_count); + } + /* Jump to the begin basic block */ + BUILD_BR(block_dst->basic_block_entry); + SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1); + } + else { + if (copy_arities) { + /* Dest block is Block/If/Function block, copy block results */ + copy_block_arities(cc, frame_sp_dst, block_dst->result_types, + block_dst->result_count); + } + /* Jump to the end basic block */ + if (!(insn = GEN_INSN(JMP, 0))) { + jit_set_last_error(cc, "generate jmp insn failed"); + goto fail; + } + if (!jit_block_add_incoming_insn(block_dst, insn)) { + jit_set_last_error(cc, "add incoming insn failed"); + goto fail; + } + SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1); + } + + return handle_next_reachable_block(cc, p_frame_ip); +fail: + return false; +} + +bool +jit_compile_op_br_if(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip) +{ + JitFrame *jit_frame; + JitBlock *block_dst; + JitReg frame_sp_dst, cond; + JitBasicBlock *cur_basic_block, *if_basic_block = NULL; + JitValueSlot *frame_sp_src = NULL; + JitInsn *insn; + bool copy_arities; + uint32 offset; + +#if 0 /* TODO */ +#if WASM_ENABLE_THREAD_MGR != 0 + /* Insert suspend check point */ + if (cc->enable_thread_mgr) { + if (!check_suspend_flags(cc, func_ctx)) + return false; + } +#endif +#endif 
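+ /* br_if lowering: pop the condition, branch to a new if_basic_block + when it is non-zero, and fall through in the current basic block + otherwise; the taken path copies arities if needed and then jumps + to the target block just like an unconditional br. */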
+ + if (!(block_dst = get_target_block(cc, br_depth))) { + return false; + } + + POP_I32(cond); + + jit_frame = cc->jit_frame; + cur_basic_block = cc->cur_basic_block; + gen_commit_values(jit_frame, jit_frame->lp, jit_frame->sp); + clear_values(jit_frame); + + CREATE_BASIC_BLOCK(if_basic_block); + if (!GEN_INSN(BNE, cond, jit_basic_block_label(if_basic_block), 0)) { + jit_set_last_error(cc, "generate bne insn failed"); + goto fail; + } + + SET_BUILDER_POS(if_basic_block); + SET_BB_BEGIN_BCIP(if_basic_block, *p_frame_ip - 1); + + if (block_dst->label_type == LABEL_TYPE_LOOP) { + frame_sp_src = + jit_frame->sp + - wasm_get_cell_num(block_dst->param_types, block_dst->param_count); + } + else { + frame_sp_src = jit_frame->sp + - wasm_get_cell_num(block_dst->result_types, + block_dst->result_count); + } + + /* Only copy parameters or results when the src/dst addr are different */ + copy_arities = (block_dst->frame_sp_begin != frame_sp_src) ? true : false; + + if (copy_arities) { +#if UINTPTR_MAX == UINT64_MAX + frame_sp_dst = jit_cc_new_reg_I64(cc); +#else + frame_sp_dst = jit_cc_new_reg_I32(cc); +#endif + offset = offsetof(WASMInterpFrame, lp) + + (block_dst->frame_sp_begin - jit_frame->lp) * 4; + GEN_INSN(ADD, frame_sp_dst, cc->fp_reg, NEW_CONST(I32, offset)); + } + + if (block_dst->label_type == LABEL_TYPE_LOOP) { + if (copy_arities) { + /* Dest block is Loop block, copy loop parameters */ + copy_block_arities(cc, frame_sp_dst, block_dst->param_types, + block_dst->param_count); + } + /* Jump to the begin basic block */ + BUILD_BR(block_dst->basic_block_entry); + SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1); + } + else { + if (copy_arities) { + /* Dest block is Block/If/Function block, copy block results */ + copy_block_arities(cc, frame_sp_dst, block_dst->result_types, + block_dst->result_count); + } + /* Jump to the end basic block */ + if (!(insn = GEN_INSN(JMP, 0))) { + jit_set_last_error(cc, "generate jmp insn failed"); + goto fail; + } + if (!jit_block_add_incoming_insn(block_dst, insn)) { + jit_set_last_error(cc, "add incoming insn failed"); + goto fail; + } + SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1); + } + + SET_BUILDER_POS(cur_basic_block); + + return true; +fail: + return false; +} + +bool +jit_compile_op_br_table(JitCompContext *cc, uint32 *br_depths, uint32 br_count, + uint8 **p_frame_ip) +{ + return false; +#if 0 + uint32 i, j; + LLVMValueRef value_switch, value_cmp, value_case, value, *values = NULL; + JitBasicBlock *default_basic_block = NULL, target_basic_block; + JitBasicBlock *next_basic_block_end; + JitBlock *target_block; + uint32 br_depth, depth_idx; + uint32 param_index, result_index; + uint64 size; + char name[32]; + +#if WASM_ENABLE_THREAD_MGR != 0 + /* Insert suspend check point */ + if (cc->enable_thread_mgr) { + if (!check_suspend_flags(cc, func_ctx)) + return false; + } +#endif + + POP_I32(value_cmp); + + if (LLVMIsUndef(value_cmp) +#if LLVM_VERSION_NUMBER >= 12 + || LLVMIsPoison(value_cmp) +#endif + ) { + if (!(jit_emit_exception(cc, func_ctx, EXCE_INTEGER_OVERFLOW, false, + NULL, NULL))) { + goto fail; + } + return jit_handle_next_reachable_block(cc, func_ctx, p_frame_ip); + } + + if (!LLVMIsConstant(value_cmp)) { + /* Compare value is not constant, create switch IR */ + for (i = 0; i <= br_count; i++) { + target_block = get_target_block(func_ctx, br_depths[i]); + if (!target_block) + return false; + + if (target_block->label_type != LABEL_TYPE_LOOP) { + /* Dest block is Block/If/Function block */ + /* Create the end block */ + if 
(!target_block->basic_block_end) { + format_block_name(name, sizeof(name), + target_block->block_index, + target_block->label_type, LABEL_END); + CREATE_BASIC_BLOCK(target_block->basic_block_end, name); + if ((next_basic_block_end = + find_next_basic_block_end(target_block))) + MOVE_BASIC_BLOCK_BEFORE(target_block->basic_block_end, + next_basic_block_end); + } + /* Handle result values */ + if (target_block->result_count) { + size = sizeof(LLVMValueRef) + * (uint64)target_block->result_count; + if (size >= UINT32_MAX + || !(values = jit_calloc((uint32)size))) { + jit_set_last_error(cc, "allocate memory failed"); + goto fail; + } + CREATE_RESULT_VALUE_PHIS(target_block); + for (j = 0; j < target_block->result_count; j++) { + result_index = target_block->result_count - 1 - j; + POP(value, target_block->result_types[result_index]); + values[result_index] = value; + ADD_TO_RESULT_PHIS(target_block, value, result_index); + } + for (j = 0; j < target_block->result_count; j++) { + PUSH(values[j], target_block->result_types[j]); + } + jit_free(values); + } + target_block->is_reachable = true; + if (i == br_count) + default_basic_block = target_block->basic_block_end; + } + else { + /* Handle Loop parameters */ + if (target_block->param_count) { + size = sizeof(LLVMValueRef) + * (uint64)target_block->param_count; + if (size >= UINT32_MAX + || !(values = jit_calloc((uint32)size))) { + jit_set_last_error(cc, "allocate memory failed"); + goto fail; + } + for (j = 0; j < target_block->param_count; j++) { + param_index = target_block->param_count - 1 - j; + POP(value, target_block->param_types[param_index]); + values[param_index] = value; + ADD_TO_PARAM_PHIS(target_block, value, param_index); + } + for (j = 0; j < target_block->param_count; j++) { + PUSH(values[j], target_block->param_types[j]); + } + jit_free(values); + } + if (i == br_count) + default_basic_block = target_block->basic_block_entry; + } + } + + /* Create switch IR */ + if (!(value_switch = LLVMBuildSwitch(cc->builder, value_cmp, + default_basic_block, br_count))) { + jit_set_last_error(cc, "llvm build switch failed"); + return false; + } + + /* Add each case for switch IR */ + for (i = 0; i < br_count; i++) { + value_case = I32_CONST(i); + CHECK_LLVM_CONST(value_case); + target_block = get_target_block(func_ctx, br_depths[i]); + if (!target_block) + return false; + target_basic_block = target_block->label_type != LABEL_TYPE_LOOP + ? 
target_block->basic_block_end + : target_block->basic_block_entry; + LLVMAddCase(value_switch, value_case, target_basic_block); + } + + return handle_next_reachable_block(cc, func_ctx, p_frame_ip); + } + else { + /* Compare value is constant, create br IR */ + depth_idx = (uint32)LLVMConstIntGetZExtValue(value_cmp); + br_depth = br_depths[br_count]; + if (depth_idx < br_count) { + br_depth = br_depths[depth_idx]; + } + return jit_compile_op_br(cc, func_ctx, br_depth, p_frame_ip); + } +fail: + if (values) + jit_free(values); + return false; +#endif +} + +bool +jit_compile_op_return(JitCompContext *cc, uint8 **p_frame_ip) +{ + JitBlock *block_func = cc->block_stack.block_list_head; + + bh_assert(block_func); + + handle_func_return(cc, block_func); + SET_BB_END_BCIP(cc->cur_basic_block, *p_frame_ip - 1); + + return handle_next_reachable_block(cc, p_frame_ip); +} + +bool +jit_compile_op_unreachable(JitCompContext *cc, uint8 **p_frame_ip) +{ + if (!jit_emit_exception(cc, EXCE_UNREACHABLE, false, 0, NULL)) + return false; + + return handle_next_reachable_block(cc, p_frame_ip); +} + +bool +jit_handle_next_reachable_block(JitCompContext *cc, uint8 **p_frame_ip) +{ + return handle_next_reachable_block(cc, p_frame_ip); +} diff --git a/core/iwasm/fast-jit/fe/jit_emit_control.h b/core/iwasm/fast-jit/fe/jit_emit_control.h new file mode 100644 index 000000000..f72a2e924 --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_control.h @@ -0,0 +1,55 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef _JIT_EMIT_CONTROL_H_ +#define _JIT_EMIT_CONTROL_H_ + +#include "../jit_compiler.h" + +#ifdef __cplusplus +extern "C" { +#endif + +bool +jit_compile_op_block(JitCompContext *cc, uint8 **p_frame_ip, + uint8 *frame_ip_end, uint32 label_type, uint32 param_count, + uint8 *param_types, uint32 result_count, + uint8 *result_types); + +bool +jit_compile_op_else(JitCompContext *cc, uint8 **p_frame_ip); + +bool +jit_compile_op_end(JitCompContext *cc, uint8 **p_frame_ip); + +bool +jit_compile_op_br(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip); + +bool +jit_compile_op_br_if(JitCompContext *cc, uint32 br_depth, uint8 **p_frame_ip); + +bool +jit_compile_op_br_table(JitCompContext *cc, uint32 *br_depths, uint32 br_count, + uint8 **p_frame_ip); + +bool +jit_compile_op_return(JitCompContext *cc, uint8 **p_frame_ip); + +bool +jit_compile_op_unreachable(JitCompContext *cc, uint8 **p_frame_ip); + +bool +jit_handle_next_reachable_block(JitCompContext *cc, uint8 **p_frame_ip); + +#if WASM_ENABLE_THREAD_MGR != 0 +bool +jit_check_suspend_flags(JitCompContext *cc); +#endif + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif + +#endif /* end of _JIT_EMIT_CONTROL_H_ */ diff --git a/core/iwasm/fast-jit/fe/jit_emit_conversion.c b/core/iwasm/fast-jit/fe/jit_emit_conversion.c new file mode 100644 index 000000000..cb6f2a748 --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_conversion.c @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_emit_conversion.h" +#include "../jit_frontend.h" + +bool +jit_compile_op_i32_wrap_i64(JitCompContext *cc) +{ + return false; +} + +bool +jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool saturating) +{ + return false; +} + +bool +jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool saturating) +{ + return false; +} + +bool +jit_compile_op_i64_extend_i32(JitCompContext *comp_ctx, bool sign) +{ + return false; +} + +bool +jit_compile_op_i64_extend_i64(JitCompContext *comp_ctx, int8 bitwidth) +{ + return false; +} + +bool +jit_compile_op_i32_extend_i32(JitCompContext *comp_ctx, int8 bitwidth) +{ + return false; +} + +bool +jit_compile_op_i64_trunc_f32(JitCompContext *cc, bool sign, bool saturating) +{ + return false; +} + +bool +jit_compile_op_i64_trunc_f64(JitCompContext *cc, bool sign, bool saturating) +{ + return false; +} + +bool +jit_compile_op_f32_convert_i32(JitCompContext *comp_ctx, bool sign) +{ + return false; +} + +bool +jit_compile_op_f32_convert_i64(JitCompContext *comp_ctx, bool sign) +{ + return false; +} + +bool +jit_compile_op_f32_demote_f64(JitCompContext *comp_ctx) +{ + return false; +} + +bool +jit_compile_op_f64_convert_i32(JitCompContext *comp_ctx, bool sign) +{ + return false; +} + +bool +jit_compile_op_f64_convert_i64(JitCompContext *comp_ctx, bool sign) +{ + return false; +} + +bool +jit_compile_op_f64_promote_f32(JitCompContext *comp_ctx) +{ + return false; +} + +bool +jit_compile_op_i64_reinterpret_f64(JitCompContext *comp_ctx) +{ + return false; +} + +bool +jit_compile_op_i32_reinterpret_f32(JitCompContext *comp_ctx) +{ + return false; +} + +bool +jit_compile_op_f64_reinterpret_i64(JitCompContext *comp_ctx) +{ + return false; +} + +bool +jit_compile_op_f32_reinterpret_i32(JitCompContext *comp_ctx) +{ + return false; +} diff --git a/core/iwasm/fast-jit/fe/jit_emit_conversion.h b/core/iwasm/fast-jit/fe/jit_emit_conversion.h new file mode 100644 index 000000000..b90aa7064 --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_conversion.h @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef _JIT_EMIT_CONVERSION_H_ +#define _JIT_EMIT_CONVERSION_H_ + +#include "../jit_compiler.h" + +#ifdef __cplusplus +extern "C" { +#endif + +bool +jit_compile_op_i32_wrap_i64(JitCompContext *cc); + +bool +jit_compile_op_i32_trunc_f32(JitCompContext *cc, bool sign, bool saturating); + +bool +jit_compile_op_i32_trunc_f64(JitCompContext *cc, bool sign, bool saturating); + +bool +jit_compile_op_i64_extend_i32(JitCompContext *comp_ctx, bool sign); + +bool +jit_compile_op_i64_extend_i64(JitCompContext *comp_ctx, int8 bitwidth); + +bool +jit_compile_op_i32_extend_i32(JitCompContext *comp_ctx, int8 bitwidth); + +bool +jit_compile_op_i64_trunc_f32(JitCompContext *cc, bool sign, bool saturating); + +bool +jit_compile_op_i64_trunc_f64(JitCompContext *cc, bool sign, bool saturating); + +bool +jit_compile_op_f32_convert_i32(JitCompContext *comp_ctx, bool sign); + +bool +jit_compile_op_f32_convert_i64(JitCompContext *comp_ctx, bool sign); + +bool +jit_compile_op_f32_demote_f64(JitCompContext *comp_ctx); + +bool +jit_compile_op_f64_convert_i32(JitCompContext *comp_ctx, bool sign); + +bool +jit_compile_op_f64_convert_i64(JitCompContext *comp_ctx, bool sign); + +bool +jit_compile_op_f64_promote_f32(JitCompContext *comp_ctx); + +bool +jit_compile_op_i64_reinterpret_f64(JitCompContext *comp_ctx); + +bool +jit_compile_op_i32_reinterpret_f32(JitCompContext *comp_ctx); + +bool +jit_compile_op_f64_reinterpret_i64(JitCompContext *comp_ctx); + +bool +jit_compile_op_f32_reinterpret_i32(JitCompContext *comp_ctx); + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif + +#endif /* end of _JIT_EMIT_CONVERSION_H_ */ diff --git a/core/iwasm/fast-jit/fe/jit_emit_exception.c b/core/iwasm/fast-jit/fe/jit_emit_exception.c new file mode 100644 index 000000000..dda45ad7a --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_exception.c @@ -0,0 +1,14 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_emit_exception.h" +#include "../jit_frontend.h" + +bool +jit_emit_exception(JitCompContext *cc, int32 exception_id, bool is_cond_br, + JitReg cond_br_if, JitBasicBlock *cond_br_else_block) +{ + return false; +} diff --git a/core/iwasm/fast-jit/fe/jit_emit_exception.h b/core/iwasm/fast-jit/fe/jit_emit_exception.h new file mode 100644 index 000000000..a5739b523 --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_exception.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef _JIT_EMIT_EXCEPTION_H_ +#define _JIT_EMIT_EXCEPTION_H_ + +#include "../jit_compiler.h" + +#ifdef __cplusplus +extern "C" { +#endif + +bool +jit_emit_exception(JitCompContext *cc, int32 exception_id, bool is_cond_br, + JitReg cond_br_if, JitBasicBlock *cond_br_else_block); + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif + +#endif /* end of _JIT_EMIT_EXCEPTION_H_ */ diff --git a/core/iwasm/fast-jit/fe/jit_emit_function.c b/core/iwasm/fast-jit/fe/jit_emit_function.c new file mode 100644 index 000000000..d029d642f --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_function.c @@ -0,0 +1,38 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_emit_function.h" +#include "../jit_frontend.h" + +bool +jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call) +{ + return false; +} + +bool +jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx, + uint32 tbl_idx) +{ + return false; +} + +bool +jit_compile_op_ref_null(JitCompContext *cc) +{ + return false; +} + +bool +jit_compile_op_ref_is_null(JitCompContext *cc) +{ + return false; +} + +bool +jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx) +{ + return false; +} diff --git a/core/iwasm/fast-jit/fe/jit_emit_function.h b/core/iwasm/fast-jit/fe/jit_emit_function.h new file mode 100644 index 000000000..4b475bed0 --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_function.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef _JIT_EMIT_FUNCTION_H_ +#define _JIT_EMIT_FUNCTION_H_ + +#include "../jit_compiler.h" + +#ifdef __cplusplus +extern "C" { +#endif + +bool +jit_compile_op_call(JitCompContext *cc, uint32 func_idx, bool tail_call); + +bool +jit_compile_op_call_indirect(JitCompContext *cc, uint32 type_idx, + uint32 tbl_idx); + +bool +jit_compile_op_ref_null(JitCompContext *cc); + +bool +jit_compile_op_ref_is_null(JitCompContext *cc); + +bool +jit_compile_op_ref_func(JitCompContext *cc, uint32 func_idx); + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif + +#endif /* end of _JIT_EMIT_FUNCTION_H_ */ diff --git a/core/iwasm/fast-jit/fe/jit_emit_memory.c b/core/iwasm/fast-jit/fe/jit_emit_memory.c new file mode 100644 index 000000000..34ce25cc5 --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_memory.c @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_emit_memory.h" +#include "../jit_frontend.h" + +bool +jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset, + uint32 bytes, bool sign, bool atomic) +{ + return false; +} + +bool +jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset, + uint32 bytes, bool sign, bool atomic) +{ + return false; +} + +bool +jit_compile_op_f32_load(JitCompContext *cc, uint32 align, uint32 offset) +{ + return false; +} + +bool +jit_compile_op_f64_load(JitCompContext *cc, uint32 align, uint32 offset) +{ + return false; +} + +bool +jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset, + uint32 bytes, bool atomic) +{ + return false; +} + +bool +jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset, + uint32 bytes, bool atomic) +{ + return false; +} + +bool +jit_compile_op_f32_store(JitCompContext *cc, uint32 align, uint32 offset) +{ + return false; +} + +bool +jit_compile_op_f64_store(JitCompContext *cc, uint32 align, uint32 offset) +{ + return false; +} + +bool +jit_compile_op_memory_size(JitCompContext *cc) +{ + return false; +} + +bool +jit_compile_op_memory_grow(JitCompContext *cc) +{ + return false; +} + +#if WASM_ENABLE_BULK_MEMORY != 0 +bool +jit_compile_op_memory_init(JitCompContext *cc, uint32 seg_index) +{ + return false; +} + +bool +jit_compile_op_data_drop(JitCompContext *cc, uint32 seg_index) +{ + return false; +} + +bool +jit_compile_op_memory_copy(JitCompContext *cc) +{ + return false; +} + +bool +jit_compile_op_memory_fill(JitCompContext *cc) +{ + return false; +} +#endif + +#if WASM_ENABLE_SHARED_MEMORY != 0 +bool +jit_compile_op_atomic_rmw(JitCompContext *cc, uint8 atomic_op, uint8 op_type, + uint32 align, uint32 offset, uint32 bytes) +{ + return false; +} + +bool +jit_compile_op_atomic_cmpxchg(JitCompContext *cc, uint8 op_type, uint32 align, + uint32 offset, uint32 bytes) +{ + return false; +} + +bool +jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align, + uint32 offset, uint32 bytes) +{ + return false; +} + +bool +jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset, + uint32 bytes) +{ + return false; +} +#endif diff --git a/core/iwasm/fast-jit/fe/jit_emit_memory.h b/core/iwasm/fast-jit/fe/jit_emit_memory.h new file mode 100644 index 000000000..442e08670 --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_memory.h @@ -0,0 +1,88 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef _JIT_EMIT_MEMORY_H_ +#define _JIT_EMIT_MEMORY_H_ + +#include "../jit_compiler.h" +#if WASM_ENABLE_SHARED_MEMORY != 0 +#include "../../common/wasm_shared_memory.h" +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +bool +jit_compile_op_i32_load(JitCompContext *cc, uint32 align, uint32 offset, + uint32 bytes, bool sign, bool atomic); + +bool +jit_compile_op_i64_load(JitCompContext *cc, uint32 align, uint32 offset, + uint32 bytes, bool sign, bool atomic); + +bool +jit_compile_op_f32_load(JitCompContext *cc, uint32 align, uint32 offset); + +bool +jit_compile_op_f64_load(JitCompContext *cc, uint32 align, uint32 offset); + +bool +jit_compile_op_i32_store(JitCompContext *cc, uint32 align, uint32 offset, + uint32 bytes, bool atomic); + +bool +jit_compile_op_i64_store(JitCompContext *cc, uint32 align, uint32 offset, + uint32 bytes, bool atomic); + +bool +jit_compile_op_f32_store(JitCompContext *cc, uint32 align, uint32 offset); + +bool +jit_compile_op_f64_store(JitCompContext *cc, uint32 align, uint32 offset); + +bool +jit_compile_op_memory_size(JitCompContext *cc); + +bool +jit_compile_op_memory_grow(JitCompContext *cc); + +#if WASM_ENABLE_BULK_MEMORY != 0 +bool +jit_compile_op_memory_init(JitCompContext *cc, uint32 seg_index); + +bool +jit_compile_op_data_drop(JitCompContext *cc, uint32 seg_index); + +bool +jit_compile_op_memory_copy(JitCompContext *cc); + +bool +jit_compile_op_memory_fill(JitCompContext *cc); +#endif + +#if WASM_ENABLE_SHARED_MEMORY != 0 +bool +jit_compile_op_atomic_rmw(JitCompContext *cc, uint8 atomic_op, uint8 op_type, + uint32 align, uint32 offset, uint32 bytes); + +bool +jit_compile_op_atomic_cmpxchg(JitCompContext *cc, uint8 op_type, uint32 align, + uint32 offset, uint32 bytes); + +bool +jit_compile_op_atomic_wait(JitCompContext *cc, uint8 op_type, uint32 align, + uint32 offset, uint32 bytes); + +bool +jit_compiler_op_atomic_notify(JitCompContext *cc, uint32 align, uint32 offset, + uint32 bytes); +#endif + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif + +#endif /* end of _JIT_EMIT_MEMORY_H_ */ diff --git a/core/iwasm/fast-jit/fe/jit_emit_numberic.c b/core/iwasm/fast-jit/fe/jit_emit_numberic.c new file mode 100644 index 000000000..75cfea645 --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_numberic.c @@ -0,0 +1,291 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_emit_numberic.h" +#include "../jit_frontend.h" + +#define PUSH_INT(v) \ + do { \ + if (is_i32) \ + PUSH_I32(v); \ + else \ + PUSH_I64(v); \ + } while (0) + +#define POP_INT(v) \ + do { \ + if (is_i32) \ + POP_I32(v); \ + else \ + POP_I64(v); \ + } while (0) + +#define PUSH_FLOAT(v) \ + do { \ + if (is_f32) \ + PUSH_F32(v); \ + else \ + PUSH_F64(v); \ + } while (0) + +#define POP_FLOAT(v) \ + do { \ + if (is_f32) \ + POP_F32(v); \ + else \ + POP_F64(v); \ + } while (0) + +#define DEF_INT_UNARY_OP(op, err) \ + do { \ + JitReg res, operand; \ + POP_INT(operand); \ + if (!(res = op)) { \ + if (err) \ + jit_set_last_error(cc, err); \ + goto fail; \ + } \ + PUSH_INT(res); \ + } while (0) + +#define DEF_INT_BINARY_OP(op, err) \ + do { \ + JitReg res, left, right; \ + POP_INT(right); \ + POP_INT(left); \ + if (!(res = op)) { \ + if (err) \ + jit_set_last_error(cc, err); \ + goto fail; \ + } \ + PUSH_INT(res); \ + } while (0) + +#define DEF_FP_UNARY_OP(op, err) \ + do { \ + JitReg res, operand; \ + POP_FLOAT(operand); \ + if (!(res = op)) { \ + if (err) \ + jit_set_last_error(cc, err); \ + goto fail; \ + } \ + PUSH_FLOAT(res); \ + } while (0) + +#define DEF_FP_BINARY_OP(op, err) \ + do { \ + JitReg res, left, right; \ + POP_FLOAT(right); \ + POP_FLOAT(left); \ + if (!(res = op)) { \ + if (err) \ + jit_set_last_error(cc, err); \ + goto fail; \ + } \ + PUSH_FLOAT(res); \ + } while (0) + +bool +jit_compile_op_i32_clz(JitCompContext *cc) +{ + return false; +} + +bool +jit_compile_op_i32_ctz(JitCompContext *cc) +{ + return false; +} + +bool +jit_compile_op_i32_popcnt(JitCompContext *cc) +{ + return false; +} + +bool +jit_compile_op_i64_clz(JitCompContext *cc) +{ + return false; +} + +bool +jit_compile_op_i64_ctz(JitCompContext *cc) +{ + return false; +} + +bool +jit_compile_op_i64_popcnt(JitCompContext *cc) +{ + return false; +} + +#define IS_CONST_ZERO(val) \ + (jit_reg_is_const(val) \ + && ((is_i32 && jit_cc_get_const_I32(cc, val) == 0) \ + || (!is_i32 && jit_cc_get_const_I64(cc, val) == 0))) + +static JitReg +compile_int_add(JitCompContext *cc, JitReg left, JitReg right, bool is_i32) +{ + JitReg res; + + /* If one of the operands is 0, just return the other */ + if (IS_CONST_ZERO(left)) + return right; + if (IS_CONST_ZERO(right)) + return left; + + /* Build add */ + res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc); + GEN_INSN(ADD, res, left, right); + return res; +} + +static JitReg +compile_int_sub(JitCompContext *cc, JitReg left, JitReg right, bool is_i32) +{ + JitReg res; + + /* If the right operand is 0, just return the left */ + if (IS_CONST_ZERO(right)) + return left; + + /* Build sub */ + res = is_i32 ? jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc); + GEN_INSN(SUB, res, left, right); + return res; +} + +static JitReg +compile_int_mul(JitCompContext *cc, JitReg left, JitReg right, bool is_i32) +{ + JitReg res; + + /* If one of the operands is 0, just return constant 0 */ + if (IS_CONST_ZERO(left) || IS_CONST_ZERO(right)) + return is_i32 ? NEW_CONST(I32, 0) : NEW_CONST(I64, 0); + + /* Build mul */ + res = is_i32 ? 
jit_cc_new_reg_I32(cc) : jit_cc_new_reg_I64(cc); + GEN_INSN(MUL, res, left, right); + return res; +} + +static bool +compile_int_div(JitCompContext *cc, IntArithmetic arith_op, bool is_i32, + uint8 **p_frame_ip) +{ + /* TODO */ + bh_assert(0); + return false; +} + +static bool +compile_op_int_arithmetic(JitCompContext *cc, IntArithmetic arith_op, + bool is_i32, uint8 **p_frame_ip) +{ + switch (arith_op) { + case INT_ADD: + DEF_INT_BINARY_OP(compile_int_add(cc, left, right, is_i32), + "compile int add fail."); + return true; + case INT_SUB: + DEF_INT_BINARY_OP(compile_int_sub(cc, left, right, is_i32), + "compile int sub fail."); + return true; + case INT_MUL: + DEF_INT_BINARY_OP(compile_int_mul(cc, left, right, is_i32), + "compile int mul fail."); + return true; + case INT_DIV_S: + case INT_DIV_U: + case INT_REM_S: + case INT_REM_U: + return compile_int_div(cc, arith_op, is_i32, p_frame_ip); + default: + bh_assert(0); + return false; + } + +fail: + return false; +} + +bool +jit_compile_op_i32_arithmetic(JitCompContext *cc, IntArithmetic arith_op, + uint8 **p_frame_ip) +{ + return compile_op_int_arithmetic(cc, arith_op, true, p_frame_ip); +} + +bool +jit_compile_op_i64_arithmetic(JitCompContext *cc, IntArithmetic arith_op, + uint8 **p_frame_ip) +{ + return compile_op_int_arithmetic(cc, arith_op, false, p_frame_ip); +} + +bool +jit_compile_op_i32_bitwise(JitCompContext *cc, IntBitwise bitwise_op) +{ + return false; +} + +bool +jit_compile_op_i64_bitwise(JitCompContext *cc, IntBitwise bitwise_op) +{ + return false; +} + +bool +jit_compile_op_i32_shift(JitCompContext *cc, IntShift shift_op) +{ + return false; +} + +bool +jit_compile_op_i64_shift(JitCompContext *cc, IntShift shift_op) +{ + return false; +} + +bool +jit_compile_op_f32_math(JitCompContext *cc, FloatMath math_op) +{ + return false; +} + +bool +jit_compile_op_f64_math(JitCompContext *cc, FloatMath math_op) +{ + return false; +} + +bool +jit_compile_op_f32_arithmetic(JitCompContext *cc, FloatArithmetic arith_op) +{ + return false; +} + +bool +jit_compile_op_f64_arithmetic(JitCompContext *cc, FloatArithmetic arith_op) +{ + return false; +} + +bool +jit_compile_op_f32_copysign(JitCompContext *cc) +{ + return false; +} + +bool +jit_compile_op_f64_copysign(JitCompContext *cc) +{ + return false; +} diff --git a/core/iwasm/fast-jit/fe/jit_emit_numberic.h b/core/iwasm/fast-jit/fe/jit_emit_numberic.h new file mode 100644 index 000000000..e73c3ebad --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_numberic.h @@ -0,0 +1,76 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef _JIT_EMIT_NUMBERIC_H_ +#define _JIT_EMIT_NUMBERIC_H_ + +#include "../jit_compiler.h" +#include "../jit_frontend.h" + +#ifdef __cplusplus +extern "C" { +#endif + +bool +jit_compile_op_i32_clz(JitCompContext *cc); + +bool +jit_compile_op_i32_ctz(JitCompContext *cc); + +bool +jit_compile_op_i32_popcnt(JitCompContext *cc); + +bool +jit_compile_op_i64_clz(JitCompContext *cc); + +bool +jit_compile_op_i64_ctz(JitCompContext *cc); + +bool +jit_compile_op_i64_popcnt(JitCompContext *cc); + +bool +jit_compile_op_i32_arithmetic(JitCompContext *cc, IntArithmetic arith_op, + uint8 **p_frame_ip); + +bool +jit_compile_op_i64_arithmetic(JitCompContext *cc, IntArithmetic arith_op, + uint8 **p_frame_ip); + +bool +jit_compile_op_i32_bitwise(JitCompContext *cc, IntBitwise bitwise_op); + +bool +jit_compile_op_i64_bitwise(JitCompContext *cc, IntBitwise bitwise_op); + +bool +jit_compile_op_i32_shift(JitCompContext *cc, IntShift shift_op); + +bool +jit_compile_op_i64_shift(JitCompContext *cc, IntShift shift_op); + +bool +jit_compile_op_f32_math(JitCompContext *cc, FloatMath math_op); + +bool +jit_compile_op_f64_math(JitCompContext *cc, FloatMath math_op); + +bool +jit_compile_op_f32_arithmetic(JitCompContext *cc, FloatArithmetic arith_op); + +bool +jit_compile_op_f64_arithmetic(JitCompContext *cc, FloatArithmetic arith_op); + +bool +jit_compile_op_f32_copysign(JitCompContext *cc); + +bool +jit_compile_op_f64_copysign(JitCompContext *cc); + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif + +#endif /* end of _JIT_EMIT_NUMBERIC_H_ */ diff --git a/core/iwasm/fast-jit/fe/jit_emit_parametric.c b/core/iwasm/fast-jit/fe/jit_emit_parametric.c new file mode 100644 index 000000000..f8b3dfbce --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_parametric.c @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_emit_parametric.h" +#include "../jit_frontend.h" + +static bool +pop_value_from_wasm_stack(JitCompContext *cc, bool is_32bit, JitReg *p_value, + uint8 *p_type) +{ + JitValue *jit_value; + uint8 type; + + if (!cc->block_stack.block_list_end) { + jit_set_last_error(cc, "WASM block stack underflow."); + return false; + } + if (!cc->block_stack.block_list_end->value_stack.value_list_end) { + jit_set_last_error(cc, "WASM data stack underflow."); + return false; + } + + jit_value = + jit_value_stack_pop(&cc->block_stack.block_list_end->value_stack); + type = jit_value->type; + + if (p_type != NULL) { + *p_type = jit_value->type; + } + if (p_value != NULL) { + *p_value = jit_value->value; + } + + wasm_runtime_free(jit_value); + + /* is_32: i32, f32, ref.func, ref.extern, v128 */ + if (is_32bit + && !(type == VALUE_TYPE_I32 || type == VALUE_TYPE_F32 +#if WASM_ENABLE_REF_TYPES != 0 + || type == VALUE_TYPE_FUNCREF || type == VALUE_TYPE_EXTERNREF +#endif + || type == VALUE_TYPE_V128)) { + jit_set_last_error(cc, "invalid WASM stack data type."); + return false; + } + /* !is_32: i64, f64 */ + if (!is_32bit && !(type == VALUE_TYPE_I64 || type == VALUE_TYPE_F64)) { + jit_set_last_error(cc, "invalid WASM stack data type."); + return false; + } + + switch (type) { + case VALUE_TYPE_I32: +#if WASM_ENABLE_REF_TYPES != 0 + case VALUE_TYPE_FUNCREF: + case VALUE_TYPE_EXTERNREF: +#endif + pop_i32(cc->jit_frame); + break; + case VALUE_TYPE_I64: + pop_i64(cc->jit_frame); + break; + case VALUE_TYPE_F32: + pop_f32(cc->jit_frame); + break; + case VALUE_TYPE_F64: + pop_f64(cc->jit_frame); + break; + } + + return true; +} + +bool +jit_compile_op_drop(JitCompContext *cc, bool is_drop_32) +{ + if (!pop_value_from_wasm_stack(cc, is_drop_32, NULL, NULL)) + return false; + return true; +} + +bool +jit_compile_op_select(JitCompContext *cc, bool is_select_32) +{ + JitReg val1, val2, cond, selected; + uint8 val1_type, val2_type; + + POP_I32(cond); + + if (!pop_value_from_wasm_stack(cc, is_select_32, &val2, &val2_type) + || !pop_value_from_wasm_stack(cc, is_select_32, &val1, &val1_type)) { + return false; + } + + if (val1_type != val2_type) { + jit_set_last_error(cc, "invalid stack values with different type"); + return false; + } + + if (is_select_32) + selected = jit_cc_new_reg_I32(cc); + else + selected = jit_cc_new_reg_I64(cc); + + GEN_INSN(SELECTNE, selected, cond, val1, val2); + PUSH(selected, val1_type); + return true; +fail: + return false; +} diff --git a/core/iwasm/fast-jit/fe/jit_emit_parametric.h b/core/iwasm/fast-jit/fe/jit_emit_parametric.h new file mode 100644 index 000000000..40025ed21 --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_parametric.h @@ -0,0 +1,25 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef _JIT_EMIT_PARAMETRIC_H_ +#define _JIT_EMIT_PARAMETRIC_H_ + +#include "../jit_compiler.h" + +#ifdef __cplusplus +extern "C" { +#endif + +bool +jit_compile_op_drop(JitCompContext *cc, bool is_drop_32); + +bool +jit_compile_op_select(JitCompContext *cc, bool is_select_32); + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif + +#endif /* end of _JIT_EMIT_PARAMETRIC_H_ */ diff --git a/core/iwasm/fast-jit/fe/jit_emit_table.c b/core/iwasm/fast-jit/fe/jit_emit_table.c new file mode 100644 index 000000000..3985a526c --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_table.c @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_emit_table.h" +#include "../jit_frontend.h" + +bool +jit_compile_op_elem_drop(JitCompContext *cc, uint32 tbl_seg_idx) +{ + return false; +} + +bool +jit_compile_op_table_get(JitCompContext *cc, uint32 tbl_idx) +{ + return false; +} + +bool +jit_compile_op_table_set(JitCompContext *cc, uint32 tbl_idx) +{ + return false; +} + +bool +jit_compile_op_table_init(JitCompContext *cc, uint32 tbl_idx, + uint32 tbl_seg_idx) +{ + return false; +} + +bool +jit_compile_op_table_copy(JitCompContext *cc, uint32 src_tbl_idx, + uint32 dst_tbl_idx) +{ + return false; +} + +bool +jit_compile_op_table_size(JitCompContext *cc, uint32 tbl_idx) +{ + return false; +} + +bool +jit_compile_op_table_grow(JitCompContext *cc, uint32 tbl_idx) +{ + return false; +} + +bool +jit_compile_op_table_fill(JitCompContext *cc, uint32 tbl_idx) +{ + return false; +} diff --git a/core/iwasm/fast-jit/fe/jit_emit_table.h b/core/iwasm/fast-jit/fe/jit_emit_table.h new file mode 100644 index 000000000..eaa5893a7 --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_table.h @@ -0,0 +1,45 @@ + +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef _JIT_EMIT_TABLE_H_ +#define _JIT_EMIT_TABLE_H_ + +#include "../jit_compiler.h" + +#ifdef __cplusplus +extern "C" { +#endif + +bool +jit_compile_op_elem_drop(JitCompContext *cc, uint32 tbl_seg_idx); + +bool +jit_compile_op_table_get(JitCompContext *cc, uint32 tbl_idx); + +bool +jit_compile_op_table_set(JitCompContext *cc, uint32 tbl_idx); + +bool +jit_compile_op_table_init(JitCompContext *cc, uint32 tbl_idx, + uint32 tbl_seg_idx); + +bool +jit_compile_op_table_copy(JitCompContext *cc, uint32 src_tbl_idx, + uint32 dst_tbl_idx); + +bool +jit_compile_op_table_size(JitCompContext *cc, uint32 tbl_idx); + +bool +jit_compile_op_table_grow(JitCompContext *cc, uint32 tbl_idx); + +bool +jit_compile_op_table_fill(JitCompContext *cc, uint32 tbl_idx); + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif +#endif diff --git a/core/iwasm/fast-jit/fe/jit_emit_variable.c b/core/iwasm/fast-jit/fe/jit_emit_variable.c new file mode 100644 index 000000000..7d3763763 --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_variable.c @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_emit_variable.h" +#include "../jit_frontend.h" + +#define CHECK_LOCAL(idx) \ + do { \ + if (idx \ + >= wasm_func->func_type->param_count + wasm_func->local_count) { \ + jit_set_last_error(cc, "local index out of range"); \ + goto fail; \ + } \ + } while (0) + +static uint8 +get_local_type(const WASMFunction *wasm_func, uint32 local_idx) +{ + uint32 param_count = wasm_func->func_type->param_count; + return local_idx < param_count + ? wasm_func->func_type->types[local_idx] + : wasm_func->local_types[local_idx - param_count]; +} + +bool +jit_compile_op_get_local(JitCompContext *cc, uint32 local_idx) +{ + WASMFunction *wasm_func = cc->cur_wasm_func; + uint16 *local_offsets = wasm_func->local_offsets; + uint16 local_offset; + uint8 local_type; + JitReg value; + + CHECK_LOCAL(local_idx); + + local_offset = local_offsets[local_idx]; + local_type = get_local_type(wasm_func, local_idx); + + switch (local_type) { + case VALUE_TYPE_I32: +#if WASM_ENABLE_REF_TYPES != 0 + case VALUE_TYPE_EXTERNREF: + case VALUE_TYPE_FUNCREF: +#endif + value = local_i32(cc->jit_frame, local_offset); + + break; + case VALUE_TYPE_I64: + value = local_i64(cc->jit_frame, local_offset); + break; + case VALUE_TYPE_F32: + value = local_f32(cc->jit_frame, local_offset); + break; + case VALUE_TYPE_F64: + value = local_f64(cc->jit_frame, local_offset); + break; + default: + bh_assert(0); + break; + } + + PUSH(value, local_type); + return true; +fail: + return false; +} + +bool +jit_compile_op_set_local(JitCompContext *cc, uint32 local_idx) +{ + WASMFunction *wasm_func = cc->cur_wasm_func; + uint16 *local_offsets = wasm_func->local_offsets; + uint16 local_offset; + uint8 local_type; + JitReg value; + + CHECK_LOCAL(local_idx); + + local_offset = local_offsets[local_idx]; + local_type = get_local_type(wasm_func, local_idx); + + switch (local_type) { + case VALUE_TYPE_I32: +#if WASM_ENABLE_REF_TYPES != 0 + case VALUE_TYPE_EXTERNREF: + case VALUE_TYPE_FUNCREF: +#endif + POP_I32(value); + set_local_i32(cc->jit_frame, local_offset, value); + break; + case VALUE_TYPE_I64: + POP_I64(value); + set_local_i64(cc->jit_frame, local_offset, value); + break; + case VALUE_TYPE_F32: + POP_F32(value); + set_local_f32(cc->jit_frame, local_offset, value); + break; + case VALUE_TYPE_F64: + POP_F64(value); + set_local_f64(cc->jit_frame, local_offset, value); + break; + default: + bh_assert(0); + break; + } + + return true; +fail: + return false; +} + +bool +jit_compile_op_tee_local(JitCompContext *cc, uint32 local_idx) +{ + WASMFunction *wasm_func = cc->cur_wasm_func; + uint16 *local_offsets = wasm_func->local_offsets; + uint16 local_offset; + uint8 local_type; + JitReg value; + + CHECK_LOCAL(local_idx); + + local_offset = local_offsets[local_idx]; + local_type = get_local_type(wasm_func, local_idx); + + switch (local_type) { + case VALUE_TYPE_I32: +#if WASM_ENABLE_REF_TYPES != 0 + case VALUE_TYPE_EXTERNREF: + case VALUE_TYPE_FUNCREF: +#endif + POP_I32(value); + set_local_i32(cc->jit_frame, local_offset, value); + PUSH_I32(value); + break; + case VALUE_TYPE_I64: + POP_I64(value); + set_local_i64(cc->jit_frame, local_offset, value); + PUSH_I64(value); + break; + case VALUE_TYPE_F32: + POP_F32(value); + set_local_f32(cc->jit_frame, local_offset, value); + PUSH_F32(value); + break; + case VALUE_TYPE_F64: + POP_F64(value); + set_local_f64(cc->jit_frame, local_offset, value); + PUSH_F64(value); + break; + default: + bh_assert(0); + break; + } + + return true; +fail: + return 
false; +} + +bool +jit_compile_op_get_global(JitCompContext *cc, uint32 global_idx) +{ + return false; +} + +bool +jit_compile_op_set_global(JitCompContext *cc, uint32 global_idx, + bool is_aux_stack) +{ + return false; +} diff --git a/core/iwasm/fast-jit/fe/jit_emit_variable.h b/core/iwasm/fast-jit/fe/jit_emit_variable.h new file mode 100644 index 000000000..80a10511d --- /dev/null +++ b/core/iwasm/fast-jit/fe/jit_emit_variable.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2019 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef _JIT_EMIT_VARIABLE_H_ +#define _JIT_EMIT_VARIABLE_H_ + +#include "../jit_compiler.h" + +#ifdef __cplusplus +extern "C" { +#endif + +bool +jit_compile_op_get_local(JitCompContext *cc, uint32 local_idx); + +bool +jit_compile_op_set_local(JitCompContext *cc, uint32 local_idx); + +bool +jit_compile_op_tee_local(JitCompContext *cc, uint32 local_idx); + +bool +jit_compile_op_get_global(JitCompContext *cc, uint32 global_idx); + +bool +jit_compile_op_set_global(JitCompContext *cc, uint32 global_idx, + bool is_aux_stack); + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif + +#endif /* end of _JIT_EMIT_VARIABLE_H_ */ diff --git a/core/iwasm/fast-jit/iwasm_fast_jit.cmake b/core/iwasm/fast-jit/iwasm_fast_jit.cmake new file mode 100644 index 000000000..42e398726 --- /dev/null +++ b/core/iwasm/fast-jit/iwasm_fast_jit.cmake @@ -0,0 +1,18 @@ +# Copyright (C) 2019 Intel Corporation. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + +set (IWASM_FAST_JIT_DIR ${CMAKE_CURRENT_LIST_DIR}) + +add_definitions (-DWASM_ENABLE_FAST_JIT=1) + +include_directories (${IWASM_FAST_JIT_DIR}) + +file (GLOB c_source_jit ${IWASM_FAST_JIT_DIR}/*.c ${IWASM_FAST_JIT_DIR}/fe/*.c) + +if (WAMR_BUILD_TARGET STREQUAL "X86_64" OR WAMR_BUILD_TARGET STREQUAL "AMD_64") + file (GLOB_RECURSE c_source_jit_cg ${IWASM_FAST_JIT_DIR}/cg/x86-64/*.c) +else () + message (FATAL_ERROR "Fast JIT codegen for target ${WAMR_BUILD_TARGET} isn't implemented") +endif () + +set (IWASM_FAST_JIT_SOURCE ${c_source_jit} ${c_source_jit_cg}) diff --git a/core/iwasm/fast-jit/jit_codecache.c b/core/iwasm/fast-jit/jit_codecache.c new file mode 100644 index 000000000..80525041e --- /dev/null +++ b/core/iwasm/fast-jit/jit_codecache.c @@ -0,0 +1,61 @@ +/* + * Copyright (C) 2021 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_codecache.h"
+#include "mem_alloc.h"
+#include "jit_compiler.h"
+
+static void *code_cache_pool = NULL;
+static uint32 code_cache_pool_size = 0;
+static mem_allocator_t code_cache_pool_allocator = NULL;
+
+bool
+jit_code_cache_init(uint32 code_cache_size)
+{
+    int map_prot = MMAP_PROT_READ | MMAP_PROT_WRITE | MMAP_PROT_EXEC;
+    int map_flags = MMAP_MAP_NONE;
+
+    if (!(code_cache_pool =
+              os_mmap(NULL, code_cache_size, map_prot, map_flags))) {
+        return false;
+    }
+
+    if (!(code_cache_pool_allocator =
+              mem_allocator_create(code_cache_pool, code_cache_size))) {
+        os_munmap(code_cache_pool, code_cache_size);
+        code_cache_pool = NULL;
+        return false;
+    }
+
+    code_cache_pool_size = code_cache_size;
+    return true;
+}
+
+void
+jit_code_cache_destroy()
+{
+    mem_allocator_destroy(code_cache_pool_allocator);
+    os_munmap(code_cache_pool, code_cache_pool_size);
+}
+
+void *
+jit_code_cache_malloc(uint32 size)
+{
+    return mem_allocator_malloc(code_cache_pool_allocator, size);
+}
+
+void
+jit_code_cache_free(void *ptr)
+{
+    if (ptr)
+        mem_allocator_free(code_cache_pool_allocator, ptr);
+}
+
+bool
+jit_pass_register_jitted_code(JitCompContext *cc)
+{
+    /* TODO */
+    return false;
+}
diff --git a/core/iwasm/fast-jit/jit_codecache.h b/core/iwasm/fast-jit/jit_codecache.h
new file mode 100644
index 000000000..953026ad4
--- /dev/null
+++ b/core/iwasm/fast-jit/jit_codecache.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_CODE_CACHE_H_
+#define _JIT_CODE_CACHE_H_
+
+#include "bh_platform.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool
+jit_code_cache_init(uint32 code_cache_size);
+
+void
+jit_code_cache_destroy();
+
+void *
+jit_code_cache_malloc(uint32 size);
+
+void
+jit_code_cache_free(void *ptr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _JIT_CODE_CACHE_H_ */
diff --git a/core/iwasm/fast-jit/jit_codegen.c b/core/iwasm/fast-jit/jit_codegen.c
new file mode 100644
index 000000000..2f0f90b91
--- /dev/null
+++ b/core/iwasm/fast-jit/jit_codegen.c
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_compiler.h"
+#include "jit_codegen.h"
+
+bool
+jit_pass_lower_cg(JitCompContext *cc)
+{
+    return jit_codegen_lower(cc);
+}
+
+bool
+jit_pass_codegen(JitCompContext *cc)
+{
+#if 0
+    bh_assert(jit_annl_is_enabled_next_label(cc));
+
+    if (!jit_annl_enable_jitted_addr(cc))
+        return false;
+#endif
+
+    return jit_codegen_gen_native(cc);
+}
diff --git a/core/iwasm/fast-jit/jit_codegen.h b/core/iwasm/fast-jit/jit_codegen.h
new file mode 100644
index 000000000..f7209e5c9
--- /dev/null
+++ b/core/iwasm/fast-jit/jit_codegen.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_CODEGEN_H_
+#define _JIT_CODEGEN_H_
+
+#include "bh_platform.h"
+#include "jit_ir.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Initialize codegen module, such as instruction encoder.
+ *
+ * @return true if succeeded; false if failed.
+ */
+bool
+jit_codegen_init();
+
+/**
+ * Destroy codegen module, such as instruction encoder.
+ */
+void
+jit_codegen_destroy();
+
+/**
+ * Get hard register information of each kind.
+ * + * @return the JitHardRegInfo array of each kind + */ +const JitHardRegInfo * +jit_codegen_get_hreg_info(); + +/** + * Generate native code for the given compilation context + * + * @param cc the compilation context that is ready to do codegen + * + * @return true if succeeds, false otherwise + */ +bool +jit_codegen_gen_native(JitCompContext *cc); + +/** + * lower unsupported operations to supported ones for the target. + * + * @param cc the compilation context that is ready to do codegen + * + * @return true if succeeds, false otherwise + */ +bool +jit_codegen_lower(JitCompContext *cc); + +/** + * Dump native code in the given range to assembly. + * + * @param begin_addr begin address of the native code + * @param end_addr end address of the native code + */ +void +jit_codegen_dump_native(void *begin_addr, void *end_addr); + +/** + * Call jitted code + * + * @param exec_env the current exec_env + */ +bool +jit_codegen_call_func_jitted(void *exec_env, void *frame, void *func_inst, + void *target); + +#ifdef __cplusplus +} +#endif + +#endif /* end of _JIT_CODEGEN_H_ */ diff --git a/core/iwasm/fast-jit/jit_compiler.c b/core/iwasm/fast-jit/jit_compiler.c new file mode 100644 index 000000000..db0f1952a --- /dev/null +++ b/core/iwasm/fast-jit/jit_compiler.c @@ -0,0 +1,196 @@ +/* + * Copyright (C) 2021 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_compiler.h" +#include "jit_ir.h" +#include "jit_codegen.h" +#include "jit_codecache.h" +#include "../interpreter/wasm.h" + +typedef struct JitGlobals { + /* Compiler pass sequence. The last element must be 0. */ + const uint8 *passes; + /* Code cache size. */ + uint32 code_cache_size; +} JitGlobals; + +typedef struct JitCompilerPass { + /* Name of the pass. */ + const char *name; + /* The entry of the compiler pass. */ + bool (*run)(JitCompContext *cc); +} JitCompilerPass; + +/* clang-format off */ +static JitCompilerPass compiler_passes[] = { + { NULL, NULL }, +#define REG_PASS(name) { #name, jit_pass_##name } + REG_PASS(dump), + REG_PASS(update_cfg), + REG_PASS(frontend), + REG_PASS(lower_fe), + REG_PASS(lower_cg), + REG_PASS(regalloc), + REG_PASS(codegen), + REG_PASS(register_jitted_code) +#undef REG_PASS +}; + +/* Number of compiler passes. */ +#define COMPILER_PASS_NUM (sizeof(compiler_passes) / sizeof(compiler_passes[0])) + +#define WASM_ENABLE_FAST_JIT_DUMP 1 + +#if WASM_ENABLE_FAST_JIT_DUMP == 0 +static const uint8 compiler_passes_without_dump[] = { + 3, 4, 5, 6, 7, 8, 0 +}; +#else +static const uint8 compiler_passes_with_dump[] = { + 3, 2, 1, 4, 1, 5, 1, 6, 1, 7, 1, 8, 0 +}; +#endif + +/* The exported global data of JIT compiler. */ +JitGlobals jit_globals = { +#if WASM_ENABLE_FAST_JIT_DUMP == 0 + .passes = compiler_passes_without_dump, +#else + .passes = compiler_passes_with_dump, +#endif + .code_cache_size = 10 * 1024 * 1024 +}; +/* clang-format on */ + +static bool +apply_compiler_passes(JitCompContext *cc) +{ + const uint8 *p = jit_globals.passes; + + for (; *p; p++) { + /* Set the pass NO. 
*/
+        cc->cur_pass_no = p - jit_globals.passes;
+        bh_assert(*p < COMPILER_PASS_NUM);
+
+        if (!compiler_passes[*p].run(cc)) {
+            LOG_VERBOSE("JIT: compilation failed at pass[%d] = %s\n",
+                        p - jit_globals.passes, compiler_passes[*p].name);
+            return false;
+        }
+    }
+
+    return true;
+}
+
+bool
+jit_compiler_init()
+{
+    /* TODO: get code cache size with global configs */
+    if (!jit_code_cache_init(jit_globals.code_cache_size))
+        return false;
+
+    if (!jit_codegen_init())
+        goto fail1;
+
+    return true;
+
+fail1:
+    jit_code_cache_destroy();
+    return false;
+}
+
+void
+jit_compiler_destroy()
+{
+    jit_codegen_destroy();
+
+    jit_code_cache_destroy();
+}
+
+const char *
+jit_compiler_get_pass_name(unsigned i)
+{
+    return i < COMPILER_PASS_NUM ? compiler_passes[i].name : NULL;
+}
+
+bool
+jit_compiler_compile(WASMModule *module, uint32 func_idx)
+{
+    JitCompContext *cc;
+    bool ret = true;
+
+    /* Initialize compilation context. */
+    if (!(cc = jit_calloc(sizeof(*cc))))
+        return false;
+
+    if (!jit_cc_init(cc, 64)) {
+        jit_free(cc);
+        return false;
+    }
+
+    cc->cur_wasm_module = module;
+    cc->cur_wasm_func = module->functions[func_idx];
+    cc->cur_wasm_func_idx = func_idx;
+    cc->mem_space_unchanged = (!cc->cur_wasm_func->has_op_memory_grow
+                               && !cc->cur_wasm_func->has_op_func_call)
+                              || (!module->possible_memory_grow);
+
+    /* Apply compiler passes. */
+    if (!apply_compiler_passes(cc)) {
+        os_printf("fast jit compilation failed: %s\n", jit_get_last_error(cc));
+        ret = false;
+    }
+
+    /* Delete the compilation context. */
+    jit_cc_delete(cc);
+
+    return ret;
+}
+
+bool
+jit_compiler_compile_all(WASMModule *module)
+{
+    JitCompContext *cc;
+    bool ret = true;
+    uint32 i;
+
+    /* Initialize compilation context. */
+    if (!(cc = jit_calloc(sizeof(*cc))))
+        return false;
+
+    if (!jit_cc_init(cc, 64)) {
+        jit_free(cc);
+        return false;
+    }
+
+    for (i = 0; i < module->function_count; i++) {
+        cc->cur_wasm_module = module;
+        cc->cur_wasm_func = module->functions[i];
+        cc->cur_wasm_func_idx = i;
+        cc->mem_space_unchanged = (!cc->cur_wasm_func->has_op_memory_grow
+                                   && !cc->cur_wasm_func->has_op_func_call)
+                                  || (!module->possible_memory_grow);
+
+        /* Apply compiler passes. */
+        if (!apply_compiler_passes(cc)) {
+            os_printf("fast jit compilation failed: %s\n",
+                      jit_get_last_error(cc));
+            ret = false;
+            break;
+        }
+    }
+
+    /* Delete the compilation context. */
+    jit_cc_delete(cc);
+
+    return ret;
+}
+
+bool
+jit_interp_switch_to_jitted(void *exec_env, void *frame,
+                            WASMFunctionInstance *func_inst, void *target)
+{
+    return jit_codegen_call_func_jitted(exec_env, frame, func_inst, target);
+}
diff --git a/core/iwasm/fast-jit/jit_compiler.h b/core/iwasm/fast-jit/jit_compiler.h
new file mode 100644
index 000000000..d9e2e16d1
--- /dev/null
+++ b/core/iwasm/fast-jit/jit_compiler.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#ifndef _JIT_COMPILER_H_ +#define _JIT_COMPILER_H_ + +#include "bh_platform.h" +#include "../interpreter/wasm_runtime.h" +#include "jit_ir.h" + +#ifdef __cplusplus +extern "C" { +#endif + +bool +jit_compiler_init(); + +void +jit_compiler_destroy(); + +const char * +jit_compiler_get_pass_name(unsigned i); + +bool +jit_compiler_compile(WASMModule *module, uint32 func_idx); + +bool +jit_compiler_compile_all(WASMModule *module); + +bool +jit_interp_switch_to_jitted(void *exec_env, void *frame, + WASMFunctionInstance *func_inst, void *target); + +/* + * Pass declarations: + */ + +/** + * Dump the compilation context. + */ +bool +jit_pass_dump(JitCompContext *cc); + +/** + * Update CFG (usually before dump for better readability). + */ +bool +jit_pass_update_cfg(JitCompContext *cc); + +/** + * Translate profiling result into MIR. + */ +bool +jit_pass_frontend(JitCompContext *cc); + +/** + * Convert MIR to LIR. + */ +bool +jit_pass_lower_fe(JitCompContext *cc); + +/** + * Lower unsupported operations into supported ones. + */ +bool +jit_pass_lower_cg(JitCompContext *cc); + +/** + * Register allocation. + */ +bool +jit_pass_regalloc(JitCompContext *cc); + +/** + * Native code generation. + */ +bool +jit_pass_codegen(JitCompContext *cc); + +/** + * Register the jitted code so that it can be executed. + */ +bool +jit_pass_register_jitted_code(JitCompContext *cc); + +#ifdef __cplusplus +} +#endif + +#endif /* end of _JIT_COMPILER_H_ */ diff --git a/core/iwasm/fast-jit/jit_dump.c b/core/iwasm/fast-jit/jit_dump.c new file mode 100644 index 000000000..884ec8c25 --- /dev/null +++ b/core/iwasm/fast-jit/jit_dump.c @@ -0,0 +1,322 @@ +/* + * Copyright (C) 2021 Intel Corporation. All rights reserved. + * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception + */ + +#include "jit_dump.h" +#include "jit_compiler.h" +#include "jit_codegen.h" + +void +jit_dump_reg(JitCompContext *cc, JitReg reg) +{ + unsigned kind = jit_reg_kind(reg); + unsigned no = jit_reg_no(reg); + + switch (kind) { + case JIT_REG_KIND_VOID: + os_printf("VOID"); + break; + + case JIT_REG_KIND_I32: + if (jit_reg_is_const(reg)) { + unsigned rel = jit_cc_get_const_I32_rel(cc, reg); + + os_printf("0x%x", jit_cc_get_const_I32(cc, reg)); + + if (rel) + os_printf("(rel: 0x%x)", rel); + } + else + os_printf("i%d", no); + break; + + case JIT_REG_KIND_I64: + if (jit_reg_is_const(reg)) + os_printf("0x%llxL", jit_cc_get_const_I64(cc, reg)); + else + os_printf("I%d", no); + break; + + case JIT_REG_KIND_F32: + if (jit_reg_is_const(reg)) + os_printf("%f", jit_cc_get_const_F32(cc, reg)); + else + os_printf("f%d", no); + break; + + case JIT_REG_KIND_F64: + if (jit_reg_is_const(reg)) + os_printf("%fL", jit_cc_get_const_F64(cc, reg)); + else + os_printf("D%d", no); + break; + + case JIT_REG_KIND_L32: + os_printf("L%d", no); + break; + + default: + bh_assert(!"Unsupported register kind."); + } +} + +static void +jit_dump_insn_Reg(JitCompContext *cc, JitInsn *insn, unsigned opnd_num) +{ + unsigned i; + + for (i = 0; i < opnd_num; i++) { + os_printf(i == 0 ? " " : ", "); + jit_dump_reg(cc, *(jit_insn_opnd(insn, i))); + } + + os_printf("\n"); +} + +static void +jit_dump_insn_VReg(JitCompContext *cc, JitInsn *insn, unsigned opnd_num) +{ + unsigned i; + + opnd_num = jit_insn_opndv_num(insn); + + for (i = 0; i < opnd_num; i++) { + os_printf(i == 0 ? 
" " : ", "); + jit_dump_reg(cc, *(jit_insn_opndv(insn, i))); + } + + os_printf("\n"); +} + +static void +jit_dump_insn_TableSwitch(JitCompContext *cc, JitInsn *insn, unsigned opnd_num) +{ + int i; + JitOpndTableSwitch *opnd = jit_insn_opndts(insn); + + os_printf(" "); + jit_dump_reg(cc, opnd->value); + os_printf("\n%16s: ", "default"); + jit_dump_reg(cc, opnd->default_target); + os_printf("\n"); + + for (i = opnd->low_value; i <= opnd->high_value; i++) { + os_printf("%18d: ", i); + jit_dump_reg(cc, opnd->targets[i - opnd->low_value]); + os_printf("\n"); + } +} + +static void +jit_dump_insn_LookupSwitch(JitCompContext *cc, JitInsn *insn, unsigned opnd_num) +{ + unsigned i; + JitOpndLookupSwitch *opnd = jit_insn_opndls(insn); + + os_printf(" "); + jit_dump_reg(cc, opnd->value); + os_printf("\n%16s: ", "default"); + jit_dump_reg(cc, opnd->default_target); + os_printf("\n"); + + for (i = 0; i < opnd->match_pairs_num; i++) { + os_printf("%18d: ", opnd->match_pairs[i].value); + jit_dump_reg(cc, opnd->match_pairs[i].target); + os_printf("\n"); + } +} + +void +jit_dump_insn(JitCompContext *cc, JitInsn *insn) +{ + switch (insn->opcode) { +#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) \ + case JIT_OP_##NAME: \ + os_printf(" %-15s", #NAME); \ + jit_dump_insn_##OPND_KIND(cc, insn, OPND_NUM); \ + break; +#include "jit_ir.def" +#undef INSN + } +} + +void +jit_dump_basic_block(JitCompContext *cc, JitBasicBlock *block) +{ + unsigned i; + JitInsn *insn; + JitRegVec preds = jit_basic_block_preds(block); + JitRegVec succs = jit_basic_block_succs(block); + JitReg label = jit_basic_block_label(block); + JitReg *reg; + + jit_dump_reg(cc, label); + os_printf(":\n ; PREDS("); + + JIT_REG_VEC_FOREACH(preds, i, reg) + { + if (i > 0) + os_printf(" "); + jit_dump_reg(cc, *reg); + } + + os_printf(")\n ;"); + + if (jit_annl_is_enabled_begin_bcip(cc)) + os_printf(" BEGIN_BCIP=0x%04x", + *(jit_annl_begin_bcip(cc, label)) + - (uint8 *)cc->cur_wasm_module->load_addr); + + if (jit_annl_is_enabled_end_bcip(cc)) + os_printf(" END_BCIP=0x%04x", + *(jit_annl_end_bcip(cc, label)) + - (uint8 *)cc->cur_wasm_module->load_addr); + os_printf("\n"); + + if (jit_annl_is_enabled_jitted_addr(cc)) + /* Dump assembly. */ + jit_codegen_dump_native( + *(jit_annl_jitted_addr(cc, label)), + label != cc->exit_label + ? *(jit_annl_jitted_addr(cc, *(jit_annl_next_label(cc, label)))) + : cc->jitted_addr_end); + else + /* Dump IR. 
*/
+        JIT_FOREACH_INSN(block, insn)
+            jit_dump_insn(cc, insn);
+
+    os_printf(" ; SUCCS(");
+
+    JIT_REG_VEC_FOREACH(succs, i, reg)
+    {
+        if (i > 0)
+            os_printf(" ");
+        jit_dump_reg(cc, *reg);
+    }
+
+    os_printf(")\n\n");
+}
+
+static void
+dump_func_name(JitCompContext *cc)
+{
+    const char *func_name = NULL;
+    WASMModule *module = cc->cur_wasm_module;
+
+#if WASM_ENABLE_CUSTOM_NAME_SECTION != 0
+    func_name = cc->cur_wasm_func->field_name;
+#endif
+
+    /* if custom name section is not generated,
+       search symbols from export table */
+    if (!func_name) {
+        uint32 i;
+        for (i = 0; i < module->export_count; i++) {
+            if (module->exports[i].kind == EXPORT_KIND_FUNC
+                && module->exports[i].index == cc->cur_wasm_func_idx) {
+                func_name = module->exports[i].name;
+                break;
+            }
+        }
+    }
+
+    /* function name not exported, print number instead */
+    if (func_name == NULL) {
+        os_printf("$f%d", cc->cur_wasm_func_idx);
+    }
+    else {
+        os_printf("%s", func_name);
+    }
+}
+
+static void
+dump_cc_ir(JitCompContext *cc)
+{
+    unsigned i, end;
+    JitBasicBlock *block;
+    JitReg label;
+    const char *kind_names[] = { "VOID", "I32", "I64", "F32",
+                                 "F64", "V64", "V128", "V256" };
+
+    os_printf("; Function: ");
+    dump_func_name(cc);
+    os_printf("\n");
+
+    os_printf("; Constant table sizes:");
+
+    for (i = 0; i < JIT_REG_KIND_L32; i++)
+        os_printf(" %s=%d", kind_names[i], cc->_const_val._num[i]);
+
+    os_printf("\n; Label number: %d", jit_cc_label_num(cc));
+    os_printf("\n; Instruction number: %d", jit_cc_insn_num(cc));
+    os_printf("\n; Register numbers:");
+
+    for (i = 0; i < JIT_REG_KIND_L32; i++)
+        os_printf(" %s=%d", kind_names[i], jit_cc_reg_num(cc, i));
+
+    os_printf("\n; Label annotations:");
+#define ANN_LABEL(TYPE, NAME)           \
+    if (jit_annl_is_enabled_##NAME(cc)) \
+        os_printf(" %s", #NAME);
+#include "jit_ir.def"
+#undef ANN_LABEL
+
+    os_printf("\n; Instruction annotations:");
+#define ANN_INSN(TYPE, NAME)            \
+    if (jit_anni_is_enabled_##NAME(cc)) \
+        os_printf(" %s", #NAME);
+#include "jit_ir.def"
+#undef ANN_INSN
+
+    os_printf("\n; Register annotations:");
+#define ANN_REG(TYPE, NAME)             \
+    if (jit_annr_is_enabled_##NAME(cc)) \
+        os_printf(" %s", #NAME);
+#include "jit_ir.def"
+#undef ANN_REG
+
+    os_printf("\n\n");
+
+    if (jit_annl_is_enabled_next_label(cc))
+        /* Blocks have been reordered, use that order to dump. */
+        for (label = cc->entry_label; label;
+             label = *(jit_annl_next_label(cc, label)))
+            jit_dump_basic_block(cc, *(jit_annl_basic_block(cc, label)));
+    else
+        /* Otherwise, use the default order. */
+        {
+            jit_dump_basic_block(cc, jit_cc_entry_basic_block(cc));
+
+            JIT_FOREACH_BLOCK(cc, i, end, block)
+                jit_dump_basic_block(cc, block);
+
+            jit_dump_basic_block(cc, jit_cc_exit_basic_block(cc));
+        }
+}
+
+void
+jit_dump_cc(JitCompContext *cc)
+{
+    if (jit_cc_label_num(cc) <= 2)
+        return;
+
+    dump_cc_ir(cc);
+}
+
+bool
+jit_pass_dump(JitCompContext *cc)
+{
+    os_printf("JIT.COMPILER.DUMP: PASS_NO=%d PREV_PASS=%s\n\n", cc->cur_pass_no,
+              (cc->cur_pass_no > 0 ? jit_compiler_get_pass_name(cc->cur_pass_no)
+                                   : "NULL"));
+    jit_dump_cc(cc);
+    os_printf("\n");
+    return true;
+}
+
+bool
+jit_pass_update_cfg(JitCompContext *cc)
+{
+    return jit_cc_update_cfg(cc);
+}
diff --git a/core/iwasm/fast-jit/jit_dump.h b/core/iwasm/fast-jit/jit_dump.h
new file mode 100644
index 000000000..8e572b88d
--- /dev/null
+++ b/core/iwasm/fast-jit/jit_dump.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_DUMP_H_
+#define _JIT_DUMP_H_
+
+#include "jit_compiler.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Dump a register.
+ *
+ * @param cc compilation context of the register
+ * @param reg register to be dumped
+ */
+void
+jit_dump_reg(JitCompContext *cc, JitReg reg);
+
+/**
+ * Dump an instruction.
+ *
+ * @param cc compilation context of the instruction
+ * @param insn instruction to be dumped
+ */
+void
+jit_dump_insn(JitCompContext *cc, JitInsn *insn);
+
+/**
+ * Dump a basic block.
+ *
+ * @param cc compilation context of the basic block
+ * @param block basic block to be dumped
+ */
+void
+jit_dump_basic_block(JitCompContext *cc, JitBasicBlock *block);
+
+/**
+ * Dump a compilation context.
+ *
+ * @param cc compilation context to be dumped
+ */
+void
+jit_dump_cc(JitCompContext *cc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _JIT_DUMP_H_ */
diff --git a/core/iwasm/fast-jit/jit_frontend.c b/core/iwasm/fast-jit/jit_frontend.c
new file mode 100644
index 000000000..0a1379bed
--- /dev/null
+++ b/core/iwasm/fast-jit/jit_frontend.c
@@ -0,0 +1,1580 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_compiler.h"
+#include "jit_frontend.h"
+#include "fe/jit_emit_compare.h"
+#include "fe/jit_emit_const.h"
+#include "fe/jit_emit_control.h"
+#include "fe/jit_emit_conversion.h"
+#include "fe/jit_emit_exception.h"
+#include "fe/jit_emit_function.h"
+#include "fe/jit_emit_memory.h"
+#include "fe/jit_emit_numberic.h"
+#include "fe/jit_emit_parametric.h"
+#include "fe/jit_emit_table.h"
+#include "fe/jit_emit_variable.h"
+#include "../interpreter/wasm_interp.h"
+#include "../interpreter/wasm_opcode.h"
+#include "../common/wasm_exec_env.h"
+
+JitReg
+gen_load_i32(JitFrame *frame, unsigned n)
+{
+    if (!frame->lp[n].reg) {
+        JitCompContext *cc = frame->cc;
+        frame->lp[n].reg = jit_cc_new_reg_I32(cc);
+        GEN_INSN(LDI32, frame->lp[n].reg, cc->fp_reg,
+                 NEW_CONST(I32, offset_of_local(n)));
+    }
+
+    return frame->lp[n].reg;
+}
+
+JitReg
+gen_load_i64(JitFrame *frame, unsigned n)
+{
+    if (!frame->lp[n].reg) {
+        JitCompContext *cc = frame->cc;
+        frame->lp[n].reg = frame->lp[n + 1].reg = jit_cc_new_reg_I64(cc);
+        GEN_INSN(LDI64, frame->lp[n].reg, cc->fp_reg,
+                 NEW_CONST(I32, offset_of_local(n)));
+    }
+
+    return frame->lp[n].reg;
+}
+
+JitReg
+gen_load_f32(JitFrame *frame, unsigned n)
+{
+    if (!frame->lp[n].reg) {
+        JitCompContext *cc = frame->cc;
+        frame->lp[n].reg = jit_cc_new_reg_F32(cc);
+        GEN_INSN(LDF32, frame->lp[n].reg, cc->fp_reg,
+                 NEW_CONST(I32, offset_of_local(n)));
+    }
+
+    return frame->lp[n].reg;
+}
+
+JitReg
+gen_load_f64(JitFrame *frame, unsigned n)
+{
+    if (!frame->lp[n].reg) {
+        JitCompContext *cc = frame->cc;
+        frame->lp[n].reg = frame->lp[n + 1].reg = jit_cc_new_reg_F64(cc);
+        GEN_INSN(LDF64, frame->lp[n].reg, cc->fp_reg,
+                 NEW_CONST(I32, offset_of_local(n)));
+    }
+
+    return frame->lp[n].reg;
+}
+
+void
+gen_commit_values(JitFrame *frame, JitValueSlot *begin, JitValueSlot *end)
+{
+    JitCompContext *cc = frame->cc;
+    JitValueSlot *p;
+    int n;
+
+    for (p = begin; p < end; p++) {
+        if (!p->dirty)
+            continue;
+
+        p->dirty = 0;
+        n = p - frame->lp;
+
+        switch (jit_reg_kind(p->reg)) {
+            case JIT_REG_KIND_I32:
+                GEN_INSN(STI32, p->reg, cc->fp_reg,
+                         NEW_CONST(I32, offset_of_local(n)));
+                break;
+
+            case JIT_REG_KIND_I64:
+                GEN_INSN(STI64, p->reg, cc->fp_reg,
+                         NEW_CONST(I32, offset_of_local(n)));
+                (++p)->dirty = 0;
+                break;
+
case JIT_REG_KIND_F32: + GEN_INSN(STF32, p->reg, cc->fp_reg, + NEW_CONST(I32, offset_of_local(n))); + break; + + case JIT_REG_KIND_F64: + GEN_INSN(STF64, p->reg, cc->fp_reg, + NEW_CONST(I32, offset_of_local(n))); + (++p)->dirty = 0; + break; + } + } +} + +/** + * Generate instructions to commit SP and IP pointers to the frame. + * + * @param frame the frame information + */ +void +gen_commit_sp_ip(JitFrame *frame) +{ + JitCompContext *cc = frame->cc; + JitReg sp; + + if (frame->sp != frame->committed_sp) { +#if UINTPTR_MAX == UINT32_MAX + GEN_INSN_NORM(I32, sp, ADD, 0, cc->fp_reg, + NEW_CONST(I32, offset_of_local(frame->sp - frame->lp))); + GEN_INSN(STI32, sp, cc->fp_reg, + NEW_CONST(I32, offsetof(WASMInterpFrame, sp))); +#else + GEN_INSN_NORM(I64, sp, ADD, 0, cc->fp_reg, + NEW_CONST(I32, offset_of_local(frame->sp - frame->lp))); + GEN_INSN(STI64, sp, cc->fp_reg, + NEW_CONST(I32, offsetof(WASMInterpFrame, sp))); +#endif + frame->committed_sp = frame->sp; + } + +#if 0 + if (frame->ip != frame->committed_ip) { + GEN_INSN (STI32, + NEW_REL (BCIP, NONE, offset_of_addr (frame, frame->ip), frame->ip), + cc->fp_reg, + NEW_CONST (I32, offsetof (WASMInterpFrame, ip))); + frame->committed_ip = frame->ip; + } +#endif +} + +static bool +form_and_translate_func(JitCompContext *cc) +{ + JitBasicBlock *func_entry_basic_block; + JitReg func_entry_label; + JitInsn *jmp_insn; + + if (!(func_entry_basic_block = jit_frontend_translate_func(cc))) + return false; + + jit_cc_reset_insn_hash(cc); + + /* The label of the func entry basic block. */ + func_entry_label = jit_basic_block_label(func_entry_basic_block); + + /* Create a JMP instruction jumping to the func entry. */ + if (!(jmp_insn = jit_cc_new_insn(cc, JMP, func_entry_label))) + return false; + + /* Insert the instruction into the cc entry block. */ + jit_basic_block_append_insn(jit_cc_entry_basic_block(cc), jmp_insn); + + *(jit_annl_begin_bcip(cc, cc->entry_label)) = + *(jit_annl_end_bcip(cc, cc->entry_label)) = + *(jit_annl_begin_bcip(cc, cc->exit_label)) = + *(jit_annl_end_bcip(cc, cc->exit_label)) = + cc->cur_wasm_module->load_addr; + + return true; +} + +bool +jit_pass_frontend(JitCompContext *cc) +{ + /* Enable necessary annotations required at the current stage. */ + if (!jit_annl_enable_begin_bcip(cc) || !jit_annl_enable_end_bcip(cc) + || !jit_annl_enable_end_sp(cc) || !jit_annr_enable_def_insn(cc) + || !jit_cc_enable_insn_hash(cc, 127)) + return false; + + if (!(form_and_translate_func(cc))) + return false; + + /* Release the annotations after local CSE and translation. 
*/ + jit_cc_disable_insn_hash(cc); + jit_annl_disable_end_sp(cc); + + return true; +} + +bool +jit_pass_lower_fe(JitCompContext *cc) +{ + return true; +} + +static JitFrame * +init_func_translation(JitCompContext *cc) +{ + JitFrame *jit_frame; + JitReg top, top_boundary, new_top, frame_boundary, frame_sp; + WASMModule *cur_wasm_module = cc->cur_wasm_module; + WASMFunction *cur_wasm_func = cc->cur_wasm_func; + uint32 cur_wasm_func_idx = cc->cur_wasm_func_idx; + uint32 max_locals = + cur_wasm_func->param_cell_num + cur_wasm_func->local_cell_num; + uint32 max_stacks = cur_wasm_func->max_stack_cell_num; + uint64 total_cell_num = + (uint64)cur_wasm_func->param_cell_num + + (uint64)cur_wasm_func->local_cell_num + + (uint64)cur_wasm_func->max_stack_cell_num + + ((uint64)cur_wasm_func->max_block_num) * sizeof(WASMBranchBlock) / 4; + uint32 frame_size, outs_size, local_size; + + if ((uint64)max_locals + (uint64)max_stacks >= UINT32_MAX + || total_cell_num >= UINT32_MAX + || !(jit_frame = jit_calloc(offsetof(JitFrame, lp) + + sizeof(*jit_frame->lp) + * (max_locals + max_stacks)))) { + os_printf("allocate jit frame failed\n"); + return NULL; + } + + jit_frame->cur_wasm_module = cur_wasm_module; + jit_frame->cur_wasm_func = cur_wasm_func; + jit_frame->cur_wasm_func_idx = cur_wasm_func_idx; + jit_frame->cc = cc; + jit_frame->max_locals = max_locals; + jit_frame->max_stacks = max_stacks; + jit_frame->sp = jit_frame->lp + max_locals; + + cc->jit_frame = jit_frame; + cc->cur_basic_block = jit_cc_entry_basic_block(cc); + cc->total_frame_size = wasm_interp_interp_frame_size(total_cell_num); + cc->jitted_return_address_offset = + offsetof(WASMInterpFrame, jitted_return_addr); + cc->cur_basic_block = jit_cc_entry_basic_block(cc); + + frame_size = outs_size = cc->total_frame_size; + local_size = + (cur_wasm_func->param_cell_num + cur_wasm_func->local_cell_num) * 4; + +#if UINTPTR_MAX == UINT64_MAX + top = jit_cc_new_reg_I64(cc); + top_boundary = jit_cc_new_reg_I64(cc); + new_top = jit_cc_new_reg_I64(cc); + frame_boundary = jit_cc_new_reg_I64(cc); + frame_sp = jit_cc_new_reg_I64(cc); + + /* top = exec_env->wasm_stack.s.top */ + GEN_INSN(LDI64, top, cc->exec_env_reg, + NEW_CONST(I32, offsetof(WASMExecEnv, wasm_stack.s.top))); + /* top_boundary = exec_env->wasm_stack.s.top_boundary */ + GEN_INSN(LDI64, top_boundary, cc->exec_env_reg, + NEW_CONST(I32, offsetof(WASMExecEnv, wasm_stack.s.top_boundary))); + /* frame_boundary = top + frame_size + outs_size */ + GEN_INSN(ADD, frame_boundary, top, NEW_CONST(I32, frame_size + outs_size)); + GEN_INSN(CHECK_SOE, NEW_CONST(I32, 0), frame_boundary, top_boundary); + + /* Add first and then sub to reduce one used register. 
*/ + /* new_top = frame_boundary - outs_size = top + frame_size */ + GEN_INSN(SUB, new_top, frame_boundary, NEW_CONST(I32, outs_size)); + /* exec_env->wasm_stack.s.top = new_top */ + GEN_INSN(STI64, new_top, cc->exec_env_reg, + NEW_CONST(I32, offsetof(WASMExecEnv, wasm_stack.s.top))); + /* frame_sp = frame->lp + local_size */ + GEN_INSN(ADD, frame_sp, top, + NEW_CONST(I32, offsetof(WASMInterpFrame, lp) + local_size)); + /* frame->sp = frame_sp */ + GEN_INSN(STI64, frame_sp, top, + NEW_CONST(I32, offsetof(WASMInterpFrame, sp))); + /* frame->prev_frame = fp_reg */ + GEN_INSN(STI64, cc->fp_reg, top, + NEW_CONST(I32, offsetof(WASMInterpFrame, prev_frame))); + /* + GEN_INSN(STI64, func_inst, top, + NEW_CONST(I32, offsetof(WASMInterpFrame, function))); + */ + /* exec_env->cur_frame = top */ + GEN_INSN(STI64, top, cc->exec_env_reg, + NEW_CONST(I32, offsetof(WASMExecEnv, cur_frame))); + /* fp_reg = top */ + GEN_INSN(MOV, cc->fp_reg, top); +#else +#endif + + return jit_frame; +} + +static void +free_block_memory(JitBlock *block) +{ + if (block->param_types) + jit_free(block->param_types); + if (block->result_types) + jit_free(block->result_types); + jit_free(block); +} + +static JitBlock * +create_func_block(JitCompContext *cc) +{ + JitBlock *jit_block; + WASMFunction *cur_func = cc->cur_wasm_func; + WASMType *func_type = cur_func->func_type; + uint32 param_count = func_type->param_count; + uint32 result_count = func_type->result_count; + + if (!(jit_block = jit_calloc(sizeof(JitBlock)))) { + return NULL; + } + + if (param_count && !(jit_block->param_types = jit_calloc(param_count))) { + goto fail; + } + if (result_count && !(jit_block->result_types = jit_calloc(result_count))) { + goto fail; + } + + /* Set block data */ + jit_block->label_type = LABEL_TYPE_FUNCTION; + jit_block->param_count = param_count; + if (param_count) { + bh_memcpy_s(jit_block->param_types, param_count, func_type->types, + param_count); + } + jit_block->result_count = result_count; + if (result_count) { + bh_memcpy_s(jit_block->result_types, result_count, + func_type->types + param_count, result_count); + } + jit_block->wasm_code_end = cur_func->code + cur_func->code_size; + jit_block->frame_sp_begin = cc->jit_frame->sp; + + /* Add function entry block */ + if (!(jit_block->basic_block_entry = jit_cc_new_basic_block(cc, 0))) { + goto fail; + } + *(jit_annl_begin_bcip( + cc, jit_basic_block_label(jit_block->basic_block_entry))) = + cur_func->code; + jit_block_stack_push(&cc->block_stack, jit_block); + cc->cur_basic_block = jit_block->basic_block_entry; + + return jit_block; + +fail: + free_block_memory(jit_block); + return NULL; +} + +#define CHECK_BUF(buf, buf_end, length) \ + do { \ + if (buf + length > buf_end) { \ + jit_set_last_error(cc, "read leb failed: unexpected end."); \ + return false; \ + } \ + } while (0) + +static bool +read_leb(JitCompContext *cc, const uint8 *buf, const uint8 *buf_end, + uint32 *p_offset, uint32 maxbits, bool sign, uint64 *p_result) +{ + uint64 result = 0; + uint32 shift = 0; + uint32 bcnt = 0; + uint64 byte; + + while (true) { + CHECK_BUF(buf, buf_end, 1); + byte = buf[*p_offset]; + *p_offset += 1; + result |= ((byte & 0x7f) << shift); + shift += 7; + if ((byte & 0x80) == 0) { + break; + } + bcnt += 1; + } + if (bcnt > (maxbits + 6) / 7) { + jit_set_last_error(cc, "read leb failed: " + "integer representation too long"); + return false; + } + if (sign && (shift < maxbits) && (byte & 0x40)) { + /* Sign extend */ + result |= (~((uint64)0)) << shift; + } + *p_result = result; + return true; +} + 
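+/*
+ * Worked example of the unsigned LEB128 decoding done by read_leb above
+ * (illustrative aside only, not used by the runtime): the byte sequence
+ * 0xE5 0x8E 0x26 decodes as
+ *   0xE5 -> result |= 0x65 << 0    (bit 7 set, continue)
+ *   0x8E -> result |= 0x0E << 7    (bit 7 set, continue)
+ *   0x26 -> result |= 0x26 << 14   (bit 7 clear, stop)
+ * i.e. (0x26 << 14) | (0x0E << 7) | 0x65 = 624485.
+ */
+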
+#define read_leb_uint32(p, p_end, res) \ + do { \ + uint32 off = 0; \ + uint64 res64; \ + if (!read_leb(cc, p, p_end, &off, 32, false, &res64)) \ + return false; \ + p += off; \ + res = (uint32)res64; \ + } while (0) + +#define read_leb_int32(p, p_end, res) \ + do { \ + uint32 off = 0; \ + uint64 res64; \ + if (!read_leb(cc, p, p_end, &off, 32, true, &res64)) \ + return false; \ + p += off; \ + res = (int32)res64; \ + } while (0) + +#define read_leb_int64(p, p_end, res) \ + do { \ + uint32 off = 0; \ + uint64 res64; \ + if (!read_leb(cc, p, p_end, &off, 64, true, &res64)) \ + return false; \ + p += off; \ + res = (int64)res64; \ + } while (0) + +static bool +jit_compile_func(JitCompContext *cc) +{ + WASMFunction *cur_func = cc->cur_wasm_func; + WASMType *func_type = NULL; + uint8 *frame_ip = cur_func->code, opcode, *p_f32, *p_f64; + uint8 *frame_ip_end = frame_ip + cur_func->code_size; + uint8 *param_types = NULL, *result_types = NULL, value_type; + uint16 param_count, result_count; + uint32 br_depth, *br_depths, br_count; + uint32 func_idx, type_idx, mem_idx, local_idx, global_idx, i; + uint32 bytes = 4, align, offset; + bool sign = true; + int32 i32_const; + int64 i64_const; + float32 f32_const; + float64 f64_const; + + while (frame_ip < frame_ip_end) { + opcode = *frame_ip++; + + switch (opcode) { + case WASM_OP_UNREACHABLE: + if (!jit_compile_op_unreachable(cc, &frame_ip)) + return false; + break; + + case WASM_OP_NOP: + break; + + case WASM_OP_BLOCK: + case WASM_OP_LOOP: + case WASM_OP_IF: + { + value_type = *frame_ip++; + if (value_type == VALUE_TYPE_I32 || value_type == VALUE_TYPE_I64 + || value_type == VALUE_TYPE_F32 + || value_type == VALUE_TYPE_F64 + || value_type == VALUE_TYPE_V128 + || value_type == VALUE_TYPE_VOID + || value_type == VALUE_TYPE_FUNCREF + || value_type == VALUE_TYPE_EXTERNREF) { + param_count = 0; + param_types = NULL; + if (value_type == VALUE_TYPE_VOID) { + result_count = 0; + result_types = NULL; + } + else { + result_count = 1; + result_types = &value_type; + } + } + else { + jit_set_last_error(cc, "unsupported value type"); + return false; + } + if (!jit_compile_op_block( + cc, &frame_ip, frame_ip_end, + (uint32)(LABEL_TYPE_BLOCK + opcode - WASM_OP_BLOCK), + param_count, param_types, result_count, result_types)) + return false; + break; + } + case EXT_OP_BLOCK: + case EXT_OP_LOOP: + case EXT_OP_IF: + { + read_leb_uint32(frame_ip, frame_ip_end, type_idx); + func_type = cc->cur_wasm_module->types[type_idx]; + param_count = func_type->param_count; + param_types = func_type->types; + result_count = func_type->result_count; + result_types = func_type->types + param_count; + if (!jit_compile_op_block( + cc, &frame_ip, frame_ip_end, + (uint32)(LABEL_TYPE_BLOCK + opcode - EXT_OP_BLOCK), + param_count, param_types, result_count, result_types)) + return false; + break; + } + + case WASM_OP_ELSE: + if (!jit_compile_op_else(cc, &frame_ip)) + return false; + break; + + case WASM_OP_END: + if (!jit_compile_op_end(cc, &frame_ip)) + return false; + break; + + case WASM_OP_BR: + read_leb_uint32(frame_ip, frame_ip_end, br_depth); + if (!jit_compile_op_br(cc, br_depth, &frame_ip)) + return false; + break; + + case WASM_OP_BR_IF: + read_leb_uint32(frame_ip, frame_ip_end, br_depth); + if (!jit_compile_op_br_if(cc, br_depth, &frame_ip)) + return false; + break; + + case WASM_OP_BR_TABLE: + read_leb_uint32(frame_ip, frame_ip_end, br_count); + if (!(br_depths = jit_calloc((uint32)sizeof(uint32) + * (br_count + 1)))) { + jit_set_last_error(cc, "allocate memory failed."); + goto 
fail; + } + for (i = 0; i <= br_count; i++) + read_leb_uint32(frame_ip, frame_ip_end, br_depths[i]); + + if (!jit_compile_op_br_table(cc, br_depths, br_count, + &frame_ip)) { + jit_free(br_depths); + return false; + } + + jit_free(br_depths); + break; + + case WASM_OP_RETURN: + if (!jit_compile_op_return(cc, &frame_ip)) + return false; + break; + + case WASM_OP_CALL_INDIRECT: + { + uint32 tbl_idx; + + read_leb_uint32(frame_ip, frame_ip_end, type_idx); + +#if WASM_ENABLE_REF_TYPES != 0 + if (cc->enable_ref_types) { + read_leb_uint32(frame_ip, frame_ip_end, tbl_idx); + } + else +#endif + { + frame_ip++; + tbl_idx = 0; + } + + if (!jit_compile_op_call_indirect(cc, type_idx, tbl_idx)) + return false; + break; + } + +#if WASM_ENABLE_TAIL_CALL != 0 + case WASM_OP_RETURN_CALL: + if (!cc->enable_tail_call) { + jit_set_last_error(cc, "unsupported opcode"); + return false; + } + read_leb_uint32(frame_ip, frame_ip_end, func_idx); + if (!jit_compile_op_call(cc, func_idx, true)) + return false; + if (!jit_compile_op_return(cc, &frame_ip)) + return false; + break; + + case WASM_OP_RETURN_CALL_INDIRECT: + { + uint32 tbl_idx; + + if (!cc->enable_tail_call) { + jit_set_last_error(cc, "unsupported opcode"); + return false; + } + + read_leb_uint32(frame_ip, frame_ip_end, type_idx); +#if WASM_ENABLE_REF_TYPES != 0 + if (cc->enable_ref_types) { + read_leb_uint32(frame_ip, frame_ip_end, tbl_idx); + } + else +#endif + { + frame_ip++; + tbl_idx = 0; + } + + if (!jit_compile_op_call_indirect(cc, type_idx, tbl_idx)) + return false; + if (!jit_compile_op_return(cc, &frame_ip)) + return false; + break; + } +#endif /* end of WASM_ENABLE_TAIL_CALL */ + + case WASM_OP_DROP: + if (!jit_compile_op_drop(cc, true)) + return false; + break; + + case WASM_OP_DROP_64: + if (!jit_compile_op_drop(cc, false)) + return false; + break; + + case WASM_OP_SELECT: + if (!jit_compile_op_select(cc, true)) + return false; + break; + + case WASM_OP_SELECT_64: + if (!jit_compile_op_select(cc, false)) + return false; + break; + +#if WASM_ENABLE_REF_TYPES != 0 + case WASM_OP_SELECT_T: + { + uint32 vec_len; + + if (!cc->enable_ref_types) { + goto unsupport_ref_types; + } + + read_leb_uint32(frame_ip, frame_ip_end, vec_len); + bh_assert(vec_len == 1); + (void)vec_len; + + type_idx = *frame_ip++; + if (!jit_compile_op_select(cc, + (type_idx != VALUE_TYPE_I64) + && (type_idx != VALUE_TYPE_F64))) + return false; + break; + } + case WASM_OP_TABLE_GET: + { + uint32 tbl_idx; + + if (!cc->enable_ref_types) { + goto unsupport_ref_types; + } + + read_leb_uint32(frame_ip, frame_ip_end, tbl_idx); + if (!jit_compile_op_table_get(cc, tbl_idx)) + return false; + break; + } + case WASM_OP_TABLE_SET: + { + uint32 tbl_idx; + + if (!cc->enable_ref_types) { + goto unsupport_ref_types; + } + + read_leb_uint32(frame_ip, frame_ip_end, tbl_idx); + if (!jit_compile_op_table_set(cc, tbl_idx)) + return false; + break; + } + case WASM_OP_REF_NULL: + { + uint32 type; + + if (!cc->enable_ref_types) { + goto unsupport_ref_types; + } + + read_leb_uint32(frame_ip, frame_ip_end, type); + + if (!jit_compile_op_ref_null(cc)) + return false; + + (void)type; + break; + } + case WASM_OP_REF_IS_NULL: + { + if (!cc->enable_ref_types) { + goto unsupport_ref_types; + } + + if (!jit_compile_op_ref_is_null(cc)) + return false; + break; + } + case WASM_OP_REF_FUNC: + { + if (!cc->enable_ref_types) { + goto unsupport_ref_types; + } + + read_leb_uint32(frame_ip, frame_ip_end, func_idx); + if (!jit_compile_op_ref_func(cc, func_idx)) + return false; + break; + } +#endif + + case 
WASM_OP_GET_LOCAL: + read_leb_uint32(frame_ip, frame_ip_end, local_idx); + if (!jit_compile_op_get_local(cc, local_idx)) + return false; + break; + + case WASM_OP_SET_LOCAL: + read_leb_uint32(frame_ip, frame_ip_end, local_idx); + if (!jit_compile_op_set_local(cc, local_idx)) + return false; + break; + + case WASM_OP_TEE_LOCAL: + read_leb_uint32(frame_ip, frame_ip_end, local_idx); + if (!jit_compile_op_tee_local(cc, local_idx)) + return false; + break; + + case WASM_OP_GET_GLOBAL: + case WASM_OP_GET_GLOBAL_64: + read_leb_uint32(frame_ip, frame_ip_end, global_idx); + if (!jit_compile_op_get_global(cc, global_idx)) + return false; + break; + + case WASM_OP_SET_GLOBAL: + case WASM_OP_SET_GLOBAL_64: + case WASM_OP_SET_GLOBAL_AUX_STACK: + read_leb_uint32(frame_ip, frame_ip_end, global_idx); + if (!jit_compile_op_set_global( + cc, global_idx, + opcode == WASM_OP_SET_GLOBAL_AUX_STACK ? true : false)) + return false; + break; + + case WASM_OP_I32_LOAD: + bytes = 4; + sign = true; + goto op_i32_load; + case WASM_OP_I32_LOAD8_S: + case WASM_OP_I32_LOAD8_U: + bytes = 1; + sign = (opcode == WASM_OP_I32_LOAD8_S) ? true : false; + goto op_i32_load; + case WASM_OP_I32_LOAD16_S: + case WASM_OP_I32_LOAD16_U: + bytes = 2; + sign = (opcode == WASM_OP_I32_LOAD16_S) ? true : false; + op_i32_load: + read_leb_uint32(frame_ip, frame_ip_end, align); + read_leb_uint32(frame_ip, frame_ip_end, offset); + if (!jit_compile_op_i32_load(cc, align, offset, bytes, sign, + false)) + return false; + break; + + case WASM_OP_I64_LOAD: + bytes = 8; + sign = true; + goto op_i64_load; + case WASM_OP_I64_LOAD8_S: + case WASM_OP_I64_LOAD8_U: + bytes = 1; + sign = (opcode == WASM_OP_I64_LOAD8_S) ? true : false; + goto op_i64_load; + case WASM_OP_I64_LOAD16_S: + case WASM_OP_I64_LOAD16_U: + bytes = 2; + sign = (opcode == WASM_OP_I64_LOAD16_S) ? true : false; + goto op_i64_load; + case WASM_OP_I64_LOAD32_S: + case WASM_OP_I64_LOAD32_U: + bytes = 4; + sign = (opcode == WASM_OP_I64_LOAD32_S) ? 
true : false; + op_i64_load: + read_leb_uint32(frame_ip, frame_ip_end, align); + read_leb_uint32(frame_ip, frame_ip_end, offset); + if (!jit_compile_op_i64_load(cc, align, offset, bytes, sign, + false)) + return false; + break; + + case WASM_OP_F32_LOAD: + read_leb_uint32(frame_ip, frame_ip_end, align); + read_leb_uint32(frame_ip, frame_ip_end, offset); + if (!jit_compile_op_f32_load(cc, align, offset)) + return false; + break; + + case WASM_OP_F64_LOAD: + read_leb_uint32(frame_ip, frame_ip_end, align); + read_leb_uint32(frame_ip, frame_ip_end, offset); + if (!jit_compile_op_f64_load(cc, align, offset)) + return false; + break; + + case WASM_OP_I32_STORE: + bytes = 4; + goto op_i32_store; + case WASM_OP_I32_STORE8: + bytes = 1; + goto op_i32_store; + case WASM_OP_I32_STORE16: + bytes = 2; + op_i32_store: + read_leb_uint32(frame_ip, frame_ip_end, align); + read_leb_uint32(frame_ip, frame_ip_end, offset); + if (!jit_compile_op_i32_store(cc, align, offset, bytes, false)) + return false; + break; + + case WASM_OP_I64_STORE: + bytes = 8; + goto op_i64_store; + case WASM_OP_I64_STORE8: + bytes = 1; + goto op_i64_store; + case WASM_OP_I64_STORE16: + bytes = 2; + goto op_i64_store; + case WASM_OP_I64_STORE32: + bytes = 4; + op_i64_store: + read_leb_uint32(frame_ip, frame_ip_end, align); + read_leb_uint32(frame_ip, frame_ip_end, offset); + if (!jit_compile_op_i64_store(cc, align, offset, bytes, false)) + return false; + break; + + case WASM_OP_F32_STORE: + read_leb_uint32(frame_ip, frame_ip_end, align); + read_leb_uint32(frame_ip, frame_ip_end, offset); + if (!jit_compile_op_f32_store(cc, align, offset)) + return false; + break; + + case WASM_OP_F64_STORE: + read_leb_uint32(frame_ip, frame_ip_end, align); + read_leb_uint32(frame_ip, frame_ip_end, offset); + if (!jit_compile_op_f64_store(cc, align, offset)) + return false; + break; + + case WASM_OP_MEMORY_SIZE: + read_leb_uint32(frame_ip, frame_ip_end, mem_idx); + if (!jit_compile_op_memory_size(cc)) + return false; + (void)mem_idx; + break; + + case WASM_OP_MEMORY_GROW: + read_leb_uint32(frame_ip, frame_ip_end, mem_idx); + if (!jit_compile_op_memory_grow(cc)) + return false; + break; + + case WASM_OP_I32_CONST: + read_leb_int32(frame_ip, frame_ip_end, i32_const); + if (!jit_compile_op_i32_const(cc, i32_const)) + return false; + break; + + case WASM_OP_I64_CONST: + read_leb_int64(frame_ip, frame_ip_end, i64_const); + if (!jit_compile_op_i64_const(cc, i64_const)) + return false; + break; + + case WASM_OP_F32_CONST: + p_f32 = (uint8 *)&f32_const; + for (i = 0; i < sizeof(float32); i++) + *p_f32++ = *frame_ip++; + if (!jit_compile_op_f32_const(cc, f32_const)) + return false; + break; + + case WASM_OP_F64_CONST: + p_f64 = (uint8 *)&f64_const; + for (i = 0; i < sizeof(float64); i++) + *p_f64++ = *frame_ip++; + if (!jit_compile_op_f64_const(cc, f64_const)) + return false; + break; + + case WASM_OP_I32_EQZ: + case WASM_OP_I32_EQ: + case WASM_OP_I32_NE: + case WASM_OP_I32_LT_S: + case WASM_OP_I32_LT_U: + case WASM_OP_I32_GT_S: + case WASM_OP_I32_GT_U: + case WASM_OP_I32_LE_S: + case WASM_OP_I32_LE_U: + case WASM_OP_I32_GE_S: + case WASM_OP_I32_GE_U: + if (!jit_compile_op_i32_compare(cc, INT_EQZ + opcode + - WASM_OP_I32_EQZ)) + return false; + break; + + case WASM_OP_I64_EQZ: + case WASM_OP_I64_EQ: + case WASM_OP_I64_NE: + case WASM_OP_I64_LT_S: + case WASM_OP_I64_LT_U: + case WASM_OP_I64_GT_S: + case WASM_OP_I64_GT_U: + case WASM_OP_I64_LE_S: + case WASM_OP_I64_LE_U: + case WASM_OP_I64_GE_S: + case WASM_OP_I64_GE_U: + if (!jit_compile_op_i64_compare(cc, 
INT_EQZ + opcode + - WASM_OP_I64_EQZ)) + return false; + break; + + case WASM_OP_F32_EQ: + case WASM_OP_F32_NE: + case WASM_OP_F32_LT: + case WASM_OP_F32_GT: + case WASM_OP_F32_LE: + case WASM_OP_F32_GE: + if (!jit_compile_op_f32_compare(cc, FLOAT_EQ + opcode + - WASM_OP_F32_EQ)) + return false; + break; + + case WASM_OP_F64_EQ: + case WASM_OP_F64_NE: + case WASM_OP_F64_LT: + case WASM_OP_F64_GT: + case WASM_OP_F64_LE: + case WASM_OP_F64_GE: + if (!jit_compile_op_f64_compare(cc, FLOAT_EQ + opcode + - WASM_OP_F64_EQ)) + return false; + break; + + case WASM_OP_I32_CLZ: + if (!jit_compile_op_i32_clz(cc)) + return false; + break; + + case WASM_OP_I32_CTZ: + if (!jit_compile_op_i32_ctz(cc)) + return false; + break; + + case WASM_OP_I32_POPCNT: + if (!jit_compile_op_i32_popcnt(cc)) + return false; + break; + + case WASM_OP_I32_ADD: + case WASM_OP_I32_SUB: + case WASM_OP_I32_MUL: + case WASM_OP_I32_DIV_S: + case WASM_OP_I32_DIV_U: + case WASM_OP_I32_REM_S: + case WASM_OP_I32_REM_U: + if (!jit_compile_op_i32_arithmetic( + cc, INT_ADD + opcode - WASM_OP_I32_ADD, &frame_ip)) + return false; + break; + + case WASM_OP_I32_AND: + case WASM_OP_I32_OR: + case WASM_OP_I32_XOR: + if (!jit_compile_op_i32_bitwise(cc, INT_SHL + opcode + - WASM_OP_I32_AND)) + return false; + break; + + case WASM_OP_I32_SHL: + case WASM_OP_I32_SHR_S: + case WASM_OP_I32_SHR_U: + case WASM_OP_I32_ROTL: + case WASM_OP_I32_ROTR: + if (!jit_compile_op_i32_shift(cc, INT_SHL + opcode + - WASM_OP_I32_SHL)) + return false; + break; + + case WASM_OP_I64_CLZ: + if (!jit_compile_op_i64_clz(cc)) + return false; + break; + + case WASM_OP_I64_CTZ: + if (!jit_compile_op_i64_ctz(cc)) + return false; + break; + + case WASM_OP_I64_POPCNT: + if (!jit_compile_op_i64_popcnt(cc)) + return false; + break; + + case WASM_OP_I64_ADD: + case WASM_OP_I64_SUB: + case WASM_OP_I64_MUL: + case WASM_OP_I64_DIV_S: + case WASM_OP_I64_DIV_U: + case WASM_OP_I64_REM_S: + case WASM_OP_I64_REM_U: + if (!jit_compile_op_i64_arithmetic( + cc, INT_ADD + opcode - WASM_OP_I64_ADD, &frame_ip)) + return false; + break; + + case WASM_OP_I64_AND: + case WASM_OP_I64_OR: + case WASM_OP_I64_XOR: + if (!jit_compile_op_i64_bitwise(cc, INT_SHL + opcode + - WASM_OP_I64_AND)) + return false; + break; + + case WASM_OP_I64_SHL: + case WASM_OP_I64_SHR_S: + case WASM_OP_I64_SHR_U: + case WASM_OP_I64_ROTL: + case WASM_OP_I64_ROTR: + if (!jit_compile_op_i64_shift(cc, INT_SHL + opcode + - WASM_OP_I64_SHL)) + return false; + break; + + case WASM_OP_F32_ABS: + case WASM_OP_F32_NEG: + case WASM_OP_F32_CEIL: + case WASM_OP_F32_FLOOR: + case WASM_OP_F32_TRUNC: + case WASM_OP_F32_NEAREST: + case WASM_OP_F32_SQRT: + if (!jit_compile_op_f32_math(cc, FLOAT_ABS + opcode + - WASM_OP_F32_ABS)) + return false; + break; + + case WASM_OP_F32_ADD: + case WASM_OP_F32_SUB: + case WASM_OP_F32_MUL: + case WASM_OP_F32_DIV: + case WASM_OP_F32_MIN: + case WASM_OP_F32_MAX: + if (!jit_compile_op_f32_arithmetic(cc, FLOAT_ADD + opcode + - WASM_OP_F32_ADD)) + return false; + break; + + case WASM_OP_F32_COPYSIGN: + if (!jit_compile_op_f32_copysign(cc)) + return false; + break; + + case WASM_OP_F64_ABS: + case WASM_OP_F64_NEG: + case WASM_OP_F64_CEIL: + case WASM_OP_F64_FLOOR: + case WASM_OP_F64_TRUNC: + case WASM_OP_F64_NEAREST: + case WASM_OP_F64_SQRT: + if (!jit_compile_op_f64_math(cc, FLOAT_ABS + opcode + - WASM_OP_F64_ABS)) + return false; + break; + + case WASM_OP_F64_ADD: + case WASM_OP_F64_SUB: + case WASM_OP_F64_MUL: + case WASM_OP_F64_DIV: + case WASM_OP_F64_MIN: + case WASM_OP_F64_MAX: + if 
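+            /*
+             * Same offset trick as in the compare cases. One subtlety in
+             * the bitwise cases above: the base passed there is INT_SHL,
+             * an IntShift value, but both INT_SHL and INT_AND are 0 in
+             * their enums, so the computed IntBitwise operator is still
+             * correct (e.g. i32.xor, opcode 0x73, yields
+             * INT_SHL + 0x73 - 0x71 == INT_XOR).
+             */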
(!jit_compile_op_f64_arithmetic(cc, FLOAT_ADD + opcode + - WASM_OP_F64_ADD)) + return false; + break; + + case WASM_OP_F64_COPYSIGN: + if (!jit_compile_op_f64_copysign(cc)) + return false; + break; + + case WASM_OP_I32_WRAP_I64: + if (!jit_compile_op_i32_wrap_i64(cc)) + return false; + break; + + case WASM_OP_I32_TRUNC_S_F32: + case WASM_OP_I32_TRUNC_U_F32: + sign = (opcode == WASM_OP_I32_TRUNC_S_F32) ? true : false; + if (!jit_compile_op_i32_trunc_f32(cc, sign, false)) + return false; + break; + + case WASM_OP_I32_TRUNC_S_F64: + case WASM_OP_I32_TRUNC_U_F64: + sign = (opcode == WASM_OP_I32_TRUNC_S_F64) ? true : false; + if (!jit_compile_op_i32_trunc_f64(cc, sign, false)) + return false; + break; + + case WASM_OP_I64_EXTEND_S_I32: + case WASM_OP_I64_EXTEND_U_I32: + sign = (opcode == WASM_OP_I64_EXTEND_S_I32) ? true : false; + if (!jit_compile_op_i64_extend_i32(cc, sign)) + return false; + break; + + case WASM_OP_I64_TRUNC_S_F32: + case WASM_OP_I64_TRUNC_U_F32: + sign = (opcode == WASM_OP_I64_TRUNC_S_F32) ? true : false; + if (!jit_compile_op_i64_trunc_f32(cc, sign, false)) + return false; + break; + + case WASM_OP_I64_TRUNC_S_F64: + case WASM_OP_I64_TRUNC_U_F64: + sign = (opcode == WASM_OP_I64_TRUNC_S_F64) ? true : false; + if (!jit_compile_op_i64_trunc_f64(cc, sign, false)) + return false; + break; + + case WASM_OP_F32_CONVERT_S_I32: + case WASM_OP_F32_CONVERT_U_I32: + sign = (opcode == WASM_OP_F32_CONVERT_S_I32) ? true : false; + if (!jit_compile_op_f32_convert_i32(cc, sign)) + return false; + break; + + case WASM_OP_F32_CONVERT_S_I64: + case WASM_OP_F32_CONVERT_U_I64: + sign = (opcode == WASM_OP_F32_CONVERT_S_I64) ? true : false; + if (!jit_compile_op_f32_convert_i64(cc, sign)) + return false; + break; + + case WASM_OP_F32_DEMOTE_F64: + if (!jit_compile_op_f32_demote_f64(cc)) + return false; + break; + + case WASM_OP_F64_CONVERT_S_I32: + case WASM_OP_F64_CONVERT_U_I32: + sign = (opcode == WASM_OP_F64_CONVERT_S_I32) ? true : false; + if (!jit_compile_op_f64_convert_i32(cc, sign)) + return false; + break; + + case WASM_OP_F64_CONVERT_S_I64: + case WASM_OP_F64_CONVERT_U_I64: + sign = (opcode == WASM_OP_F64_CONVERT_S_I64) ? 
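+            /*
+             * For the conversion family only the signedness differs
+             * between the _S and _U opcodes, so a single compile routine
+             * per type pair takes a bool. The `false` passed to the trunc
+             * helpers above selects the trapping semantics of the core
+             * opcodes; the 0xFC trunc_sat forms below pass true to get
+             * saturating behaviour instead.
+             */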
true : false; + if (!jit_compile_op_f64_convert_i64(cc, sign)) + return false; + break; + + case WASM_OP_F64_PROMOTE_F32: + if (!jit_compile_op_f64_promote_f32(cc)) + return false; + break; + + case WASM_OP_I32_REINTERPRET_F32: + if (!jit_compile_op_i32_reinterpret_f32(cc)) + return false; + break; + + case WASM_OP_I64_REINTERPRET_F64: + if (!jit_compile_op_i64_reinterpret_f64(cc)) + return false; + break; + + case WASM_OP_F32_REINTERPRET_I32: + if (!jit_compile_op_f32_reinterpret_i32(cc)) + return false; + break; + + case WASM_OP_F64_REINTERPRET_I64: + if (!jit_compile_op_f64_reinterpret_i64(cc)) + return false; + break; + + case WASM_OP_I32_EXTEND8_S: + if (!jit_compile_op_i32_extend_i32(cc, 8)) + return false; + break; + + case WASM_OP_I32_EXTEND16_S: + if (!jit_compile_op_i32_extend_i32(cc, 16)) + return false; + break; + + case WASM_OP_I64_EXTEND8_S: + if (!jit_compile_op_i64_extend_i64(cc, 8)) + return false; + break; + + case WASM_OP_I64_EXTEND16_S: + if (!jit_compile_op_i64_extend_i64(cc, 16)) + return false; + break; + + case WASM_OP_I64_EXTEND32_S: + if (!jit_compile_op_i64_extend_i64(cc, 32)) + return false; + break; + + case WASM_OP_MISC_PREFIX: + { + uint32 opcode1; + + read_leb_uint32(frame_ip, frame_ip_end, opcode1); + opcode = (uint32)opcode1; + +#if WASM_ENABLE_BULK_MEMORY != 0 + if (WASM_OP_MEMORY_INIT <= opcode + && opcode <= WASM_OP_MEMORY_FILL + && !cc->enable_bulk_memory) { + goto unsupport_bulk_memory; + } +#endif + +#if WASM_ENABLE_REF_TYPES != 0 + if (WASM_OP_TABLE_INIT <= opcode && opcode <= WASM_OP_TABLE_FILL + && !cc->enable_ref_types) { + goto unsupport_ref_types; + } +#endif + + switch (opcode) { + case WASM_OP_I32_TRUNC_SAT_S_F32: + case WASM_OP_I32_TRUNC_SAT_U_F32: + sign = (opcode == WASM_OP_I32_TRUNC_SAT_S_F32) ? true + : false; + if (!jit_compile_op_i32_trunc_f32(cc, sign, true)) + return false; + break; + case WASM_OP_I32_TRUNC_SAT_S_F64: + case WASM_OP_I32_TRUNC_SAT_U_F64: + sign = (opcode == WASM_OP_I32_TRUNC_SAT_S_F64) ? true + : false; + if (!jit_compile_op_i32_trunc_f64(cc, sign, true)) + return false; + break; + case WASM_OP_I64_TRUNC_SAT_S_F32: + case WASM_OP_I64_TRUNC_SAT_U_F32: + sign = (opcode == WASM_OP_I64_TRUNC_SAT_S_F32) ? true + : false; + if (!jit_compile_op_i64_trunc_f32(cc, sign, true)) + return false; + break; + case WASM_OP_I64_TRUNC_SAT_S_F64: + case WASM_OP_I64_TRUNC_SAT_U_F64: + sign = (opcode == WASM_OP_I64_TRUNC_SAT_S_F64) ? 
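+                    /*
+                     * WASM_OP_MISC_PREFIX is the 0xFC prefix byte; the
+                     * actual operation is the LEB-encoded sub-opcode read
+                     * above. Per the spec, sub-opcodes 0x00-0x07 are the
+                     * trunc_sat conversions handled here, 0x08-0x0b the
+                     * bulk-memory operations and 0x0c-0x11 the table
+                     * operations handled below.
+                     */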
true + : false; + if (!jit_compile_op_i64_trunc_f64(cc, sign, true)) + return false; + break; +#if WASM_ENABLE_BULK_MEMORY != 0 + case WASM_OP_MEMORY_INIT: + { + uint32 seg_index; + read_leb_uint32(frame_ip, frame_ip_end, seg_index); + frame_ip++; + if (!jit_compile_op_memory_init(cc, seg_index)) + return false; + break; + } + case WASM_OP_DATA_DROP: + { + uint32 seg_index; + read_leb_uint32(frame_ip, frame_ip_end, seg_index); + if (!jit_compile_op_data_drop(cc, seg_index)) + return false; + break; + } + case WASM_OP_MEMORY_COPY: + { + frame_ip += 2; + if (!jit_compile_op_memory_copy(cc)) + return false; + break; + } + case WASM_OP_MEMORY_FILL: + { + frame_ip++; + if (!jit_compile_op_memory_fill(cc)) + return false; + break; + } +#endif /* WASM_ENABLE_BULK_MEMORY */ +#if WASM_ENABLE_REF_TYPES != 0 + case WASM_OP_TABLE_INIT: + { + uint32 tbl_idx, tbl_seg_idx; + + read_leb_uint32(frame_ip, frame_ip_end, tbl_seg_idx); + read_leb_uint32(frame_ip, frame_ip_end, tbl_idx); + if (!jit_compile_op_table_init(cc, tbl_idx, + tbl_seg_idx)) + return false; + break; + } + case WASM_OP_ELEM_DROP: + { + uint32 tbl_seg_idx; + + read_leb_uint32(frame_ip, frame_ip_end, tbl_seg_idx); + if (!jit_compile_op_elem_drop(cc, tbl_seg_idx)) + return false; + break; + } + case WASM_OP_TABLE_COPY: + { + uint32 src_tbl_idx, dst_tbl_idx; + + read_leb_uint32(frame_ip, frame_ip_end, dst_tbl_idx); + read_leb_uint32(frame_ip, frame_ip_end, src_tbl_idx); + if (!jit_compile_op_table_copy(cc, src_tbl_idx, + dst_tbl_idx)) + return false; + break; + } + case WASM_OP_TABLE_GROW: + { + uint32 tbl_idx; + + read_leb_uint32(frame_ip, frame_ip_end, tbl_idx); + if (!jit_compile_op_table_grow(cc, tbl_idx)) + return false; + break; + } + + case WASM_OP_TABLE_SIZE: + { + uint32 tbl_idx; + + read_leb_uint32(frame_ip, frame_ip_end, tbl_idx); + if (!jit_compile_op_table_size(cc, tbl_idx)) + return false; + break; + } + case WASM_OP_TABLE_FILL: + { + uint32 tbl_idx; + + read_leb_uint32(frame_ip, frame_ip_end, tbl_idx); + if (!jit_compile_op_table_fill(cc, tbl_idx)) + return false; + break; + } +#endif /* WASM_ENABLE_REF_TYPES */ + default: + jit_set_last_error(cc, "unsupported opcode"); + return false; + } + break; + } + +#if WASM_ENABLE_SHARED_MEMORY != 0 + case WASM_OP_ATOMIC_PREFIX: + { + uint8 bin_op, op_type; + + if (frame_ip < frame_ip_end) { + opcode = *frame_ip++; + } + if (opcode != WASM_OP_ATOMIC_FENCE) { + read_leb_uint32(frame_ip, frame_ip_end, align); + read_leb_uint32(frame_ip, frame_ip_end, offset); + } + switch (opcode) { + case WASM_OP_ATOMIC_WAIT32: + if (!jit_compile_op_atomic_wait(cc, VALUE_TYPE_I32, + align, offset, 4)) + return false; + break; + case WASM_OP_ATOMIC_WAIT64: + if (!jit_compile_op_atomic_wait(cc, VALUE_TYPE_I64, + align, offset, 8)) + return false; + break; + case WASM_OP_ATOMIC_NOTIFY: + if (!jit_compiler_op_atomic_notify(cc, align, offset, + bytes)) + return false; + break; + case WASM_OP_ATOMIC_I32_LOAD: + bytes = 4; + goto op_atomic_i32_load; + case WASM_OP_ATOMIC_I32_LOAD8_U: + bytes = 1; + goto op_atomic_i32_load; + case WASM_OP_ATOMIC_I32_LOAD16_U: + bytes = 2; + op_atomic_i32_load: + if (!jit_compile_op_i32_load(cc, align, offset, bytes, + sign, true)) + return false; + break; + + case WASM_OP_ATOMIC_I64_LOAD: + bytes = 8; + goto op_atomic_i64_load; + case WASM_OP_ATOMIC_I64_LOAD8_U: + bytes = 1; + goto op_atomic_i64_load; + case WASM_OP_ATOMIC_I64_LOAD16_U: + bytes = 2; + goto op_atomic_i64_load; + case WASM_OP_ATOMIC_I64_LOAD32_U: + bytes = 4; + op_atomic_i64_load: + if (!jit_compile_op_i64_load(cc, 
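+                        /*
+                         * Atomic accesses carry the same align/offset
+                         * immediates as plain loads and stores (align is
+                         * the log2 of the access size; atomics must be
+                         * naturally aligned, hence the separate
+                         * EXCE_UNALIGNED_ATOMIC exception). The trailing
+                         * `true`, vs `false` in the plain load cases,
+                         * marks the access as atomic.
+                         */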
align, offset, bytes, + sign, true)) + return false; + break; + + case WASM_OP_ATOMIC_I32_STORE: + bytes = 4; + goto op_atomic_i32_store; + case WASM_OP_ATOMIC_I32_STORE8: + bytes = 1; + goto op_atomic_i32_store; + case WASM_OP_ATOMIC_I32_STORE16: + bytes = 2; + op_atomic_i32_store: + if (!jit_compile_op_i32_store(cc, align, offset, bytes, + true)) + return false; + break; + + case WASM_OP_ATOMIC_I64_STORE: + bytes = 8; + goto op_atomic_i64_store; + case WASM_OP_ATOMIC_I64_STORE8: + bytes = 1; + goto op_atomic_i64_store; + case WASM_OP_ATOMIC_I64_STORE16: + bytes = 2; + goto op_atomic_i64_store; + case WASM_OP_ATOMIC_I64_STORE32: + bytes = 4; + op_atomic_i64_store: + if (!jit_compile_op_i64_store(cc, align, offset, bytes, + true)) + return false; + break; + + case WASM_OP_ATOMIC_RMW_I32_CMPXCHG: + bytes = 4; + op_type = VALUE_TYPE_I32; + goto op_atomic_cmpxchg; + case WASM_OP_ATOMIC_RMW_I64_CMPXCHG: + bytes = 8; + op_type = VALUE_TYPE_I64; + goto op_atomic_cmpxchg; + case WASM_OP_ATOMIC_RMW_I32_CMPXCHG8_U: + bytes = 1; + op_type = VALUE_TYPE_I32; + goto op_atomic_cmpxchg; + case WASM_OP_ATOMIC_RMW_I32_CMPXCHG16_U: + bytes = 2; + op_type = VALUE_TYPE_I32; + goto op_atomic_cmpxchg; + case WASM_OP_ATOMIC_RMW_I64_CMPXCHG8_U: + bytes = 1; + op_type = VALUE_TYPE_I64; + goto op_atomic_cmpxchg; + case WASM_OP_ATOMIC_RMW_I64_CMPXCHG16_U: + bytes = 2; + op_type = VALUE_TYPE_I64; + goto op_atomic_cmpxchg; + case WASM_OP_ATOMIC_RMW_I64_CMPXCHG32_U: + bytes = 4; + op_type = VALUE_TYPE_I64; + op_atomic_cmpxchg: + if (!jit_compile_op_atomic_cmpxchg(cc, op_type, align, + offset, bytes)) + return false; + break; + + COMPILE_ATOMIC_RMW(Add, ADD); + COMPILE_ATOMIC_RMW(Sub, SUB); + COMPILE_ATOMIC_RMW(And, AND); + COMPILE_ATOMIC_RMW(Or, OR); + COMPILE_ATOMIC_RMW(Xor, XOR); + COMPILE_ATOMIC_RMW(Xchg, XCHG); + + build_atomic_rmw: + if (!jit_compile_op_atomic_rmw(cc, bin_op, op_type, + align, offset, bytes)) + return false; + break; + + default: + jit_set_last_error(cc, "unsupported opcode"); + return false; + } + break; + } +#endif /* end of WASM_ENABLE_SHARED_MEMORY */ + + default: + jit_set_last_error(cc, "unsupported opcode"); + return false; + } + } + + (void)func_idx; + return true; + +#if WASM_ENABLE_REF_TYPES != 0 +unsupport_ref_types: + jit_set_last_error(cc, "reference type instruction was found, " + "try removing --disable-ref-types option"); + return false; +#endif + +#if WASM_ENABLE_BULK_MEMORY != 0 +unsupport_bulk_memory: + jit_set_last_error(cc, "bulk memory instruction was found, " + "try removing --disable-bulk-memory option"); + return false; +#endif + +fail: + return false; +} + +JitBasicBlock * +jit_frontend_translate_func(JitCompContext *cc) +{ + JitFrame *jit_frame; + JitBlock *jit_block; + + if (!(jit_frame = init_func_translation(cc))) { + return NULL; + } + + if (!(jit_block = create_func_block(cc))) { + return NULL; + } + + if (!jit_compile_func(cc)) { + return NULL; + } + + return jit_block->basic_block_entry; +} diff --git a/core/iwasm/fast-jit/jit_frontend.h b/core/iwasm/fast-jit/jit_frontend.h new file mode 100644 index 000000000..0f8e9d6f0 --- /dev/null +++ b/core/iwasm/fast-jit/jit_frontend.h @@ -0,0 +1,477 @@ +/* + * Copyright (C) 2021 Intel Corporation. All rights reserved. 
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_FRONTEND_H_
+#define _JIT_FRONTEND_H_
+
+#include "jit_utils.h"
+#include "jit_ir.h"
+#include "../interpreter/wasm_interp.h"
+
+typedef enum IntCond {
+    INT_EQZ = 0,
+    INT_EQ,
+    INT_NE,
+    INT_LT_S,
+    INT_LT_U,
+    INT_GT_S,
+    INT_GT_U,
+    INT_LE_S,
+    INT_LE_U,
+    INT_GE_S,
+    INT_GE_U
+} IntCond;
+
+typedef enum FloatCond {
+    FLOAT_EQ = 0,
+    FLOAT_NE,
+    FLOAT_LT,
+    FLOAT_GT,
+    FLOAT_LE,
+    FLOAT_GE,
+    FLOAT_UNO
+} FloatCond;
+
+typedef enum IntArithmetic {
+    INT_ADD = 0,
+    INT_SUB,
+    INT_MUL,
+    INT_DIV_S,
+    INT_DIV_U,
+    INT_REM_S,
+    INT_REM_U
+} IntArithmetic;
+
+typedef enum V128Arithmetic {
+    V128_ADD = 0,
+    V128_SUB,
+    V128_MUL,
+    V128_DIV,
+    V128_NEG,
+    V128_MIN,
+    V128_MAX,
+} V128Arithmetic;
+
+typedef enum IntBitwise {
+    INT_AND = 0,
+    INT_OR,
+    INT_XOR,
+} IntBitwise;
+
+typedef enum V128Bitwise {
+    V128_NOT,
+    V128_AND,
+    V128_ANDNOT,
+    V128_OR,
+    V128_XOR,
+    V128_BITSELECT,
+} V128Bitwise;
+
+typedef enum IntShift {
+    INT_SHL = 0,
+    INT_SHR_S,
+    INT_SHR_U,
+    INT_ROTL,
+    INT_ROTR
+} IntShift;
+
+typedef enum FloatMath {
+    FLOAT_ABS = 0,
+    FLOAT_NEG,
+    FLOAT_CEIL,
+    FLOAT_FLOOR,
+    FLOAT_TRUNC,
+    FLOAT_NEAREST,
+    FLOAT_SQRT
+} FloatMath;
+
+typedef enum FloatArithmetic {
+    FLOAT_ADD = 0,
+    FLOAT_SUB,
+    FLOAT_MUL,
+    FLOAT_DIV,
+    FLOAT_MIN,
+    FLOAT_MAX,
+} FloatArithmetic;
+
+typedef enum JitExceptionID {
+    EXCE_UNREACHABLE = 0,
+    EXCE_OUT_OF_MEMORY,
+    EXCE_OUT_OF_BOUNDS_MEMORY_ACCESS,
+    EXCE_INTEGER_OVERFLOW,
+    EXCE_INTEGER_DIVIDE_BY_ZERO,
+    EXCE_INVALID_CONVERSION_TO_INTEGER,
+    EXCE_INVALID_FUNCTION_TYPE_INDEX,
+    EXCE_INVALID_FUNCTION_INDEX,
+    EXCE_UNDEFINED_ELEMENT,
+    EXCE_UNINITIALIZED_ELEMENT,
+    EXCE_CALL_UNLINKED_IMPORT_FUNC,
+    EXCE_NATIVE_STACK_OVERFLOW,
+    EXCE_UNALIGNED_ATOMIC,
+    EXCE_AUX_STACK_OVERFLOW,
+    EXCE_AUX_STACK_UNDERFLOW,
+    EXCE_OUT_OF_BOUNDS_TABLE_ACCESS,
+    EXCE_OPERAND_STACK_OVERFLOW,
+    EXCE_NUM,
+} JitExceptionID;
+
+/**
+ * Translate the instructions of a function. The translated block must
+ * end with a branch instruction whose targets are offsets relative to
+ * the end bcip of the translated block, which are integral constants.
+ * If a target of a branch is really a constant value (which should be
+ * rare), put it into a register and then jump to the register instead
+ * of using the constant value directly in the target. In the
+ * translation process, don't create any new labels. The code bcip of
+ * the begin and end of the translated block is stored in the
+ * jit_annl_begin_bcip and jit_annl_end_bcip annotations of the label
+ * of the block, which must be the same as the bcips used in
+ * profiling.
+ *
+ * NOTE: the function must explicitly set SP to the correct value when
+ * the entry block's bcip is the function's entry address.
+ *
+ * @param cc the compilation context containing the generated IR
+ *
+ * @return the IR block containing the translated instructions if it
+ * succeeds, NULL otherwise
+ */
+JitBasicBlock *
+jit_frontend_translate_func(JitCompContext *cc);
+
+/**
+ * Generate a block leaving the compiled code, which must store the
+ * target bcip and other necessary information for switching to the
+ * interpreter or other compiled code, and then jump to the exit of
+ * the cc.
+ *
+ * @param cc the compilation context
+ * @param bcip the target bytecode instruction pointer
+ * @param sp_offset stack pointer offset at the beginning of the block
+ *
+ * @return the leaving block if it succeeds, NULL otherwise
+ */
+JitBlock *
+jit_frontend_gen_leaving_block(JitCompContext *cc, void *bcip,
+                               unsigned sp_offset);
+
+#if 0
+/**
+ * Print the qualified name of the given function.
+ *
+ * @param function the function whose name is to be printed
+ */
+void
+jit_frontend_print_function_name(void *function);
+
+/**
+ * Get the full name of the function. If the input buffer length
+ * is less than the actual function name length, the function will
+ * simply return the actual length and won't write to the buffer.
+ *
+ * @param function pointer to a function
+ * @param buf buffer for the returned name
+ * @param buf_len length of the buffer
+ *
+ * @return actual length of the name
+ */
+unsigned
+jit_frontend_get_function_name(void *function, char *buf, unsigned buf_len);
+
+/**
+ * Convert the bcip in the given function to an internal offset.
+ *
+ * @param function function containing the bcip
+ * @param bcip bytecode instruction pointer
+ *
+ * @return converted offset of the bcip
+ */
+unsigned
+jit_frontend_bcip_to_offset(void *function, void *bcip);
+#endif
+
+/**
+ * Lower the IR of the given compilation context.
+ *
+ * @param cc the compilation context
+ *
+ * @return true if it succeeds, false otherwise
+ */
+bool
+jit_frontend_lower(JitCompContext *cc);
+
+/**
+ * Get the offset from the frame pointer to the n-th local variable slot.
+ *
+ * @param n the index into the local variable array
+ *
+ * @return the offset from the frame pointer to the local variable slot
+ */
+static inline unsigned
+offset_of_local(unsigned n)
+{
+    return offsetof(WASMInterpFrame, lp) + n * 4;
+}
+
+/**
+ * Generate an instruction to load an integer from the frame.
+ *
+ * This and the gen_load_X functions below generate instructions to
+ * load values from the frame into registers if the values have not
+ * been loaded yet.
+ *
+ * @param frame the frame information
+ * @param n slot index into the local variable array
+ *
+ * @return register holding the loaded value
+ */
+JitReg
+gen_load_i32(JitFrame *frame, unsigned n);
+
+/**
+ * Generate an instruction to load an i64 integer from the frame.
+ *
+ * @param frame the frame information
+ * @param n slot index into the local variable array
+ *
+ * @return register holding the loaded value
+ */
+JitReg
+gen_load_i64(JitFrame *frame, unsigned n);
+
+/**
+ * Generate an instruction to load a floating point value from the frame.
+ *
+ * @param frame the frame information
+ * @param n slot index into the local variable array
+ *
+ * @return register holding the loaded value
+ */
+JitReg
+gen_load_f32(JitFrame *frame, unsigned n);
+
+/**
+ * Generate an instruction to load a double value from the frame.
+ *
+ * @param frame the frame information
+ * @param n slot index into the local variable array
+ *
+ * @return register holding the loaded value
+ */
+JitReg
+gen_load_f64(JitFrame *frame, unsigned n);
+
+/**
+ * Generate instructions to commit computation results to the frame.
+ * The general principle is to only commit values that will be used
+ * through the frame.
+ *
+ * @param frame the frame information
+ * @param begin the begin value slot to commit
+ * @param end the end value slot to commit
+ */
+void
+gen_commit_values(JitFrame *frame, JitValueSlot *begin, JitValueSlot *end);
+
+/**
+ * Generate instructions to commit the SP and IP pointers to the frame.
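+ *
+ * (A hypothetical usage sketch, together with the commit helpers below:
+ * before an operation that can trap, a translator might emit
+ *
+ *     gen_commit_for_exception(frame);
+ *     gen_commit_sp_ip(frame);
+ *
+ * so that the runtime sees a consistent interpreter-style frame.)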
+ * + * @param frame the frame information + */ +void +gen_commit_sp_ip(JitFrame *frame); + +/** + * Generate commit instructions for the block end. + * + * @param frame the frame information + */ +static inline void +gen_commit_for_branch(JitFrame *frame) +{ + gen_commit_values(frame, frame->lp, frame->sp); +} + +/** + * Generate commit instructions for exception checks. + * + * @param frame the frame information + */ +static inline void +gen_commit_for_exception(JitFrame *frame) +{ + gen_commit_values(frame, frame->lp, frame->lp + frame->max_locals); +} + +/** + * Generate commit instructions to commit all status. + * + * @param frame the frame information + */ +static inline void +gen_commit_for_all(JitFrame *frame) +{ + gen_commit_values(frame, frame->lp, frame->sp); + gen_commit_sp_ip(frame); +} + +static inline void +clear_values(JitFrame *frame) +{ + size_t total_size = + sizeof(JitValueSlot) * (frame->max_locals + frame->max_stacks); + memset(frame->lp, 0, total_size); +} + +static inline void +push_i32(JitFrame *frame, JitReg value) +{ + frame->sp->reg = value; + frame->sp->dirty = 1; + frame->sp++; +} + +static inline void +push_i64(JitFrame *frame, JitReg value) +{ + frame->sp->reg = value; + frame->sp->dirty = 1; + frame->sp++; + frame->sp->reg = value; + frame->sp->dirty = 1; + frame->sp++; +} + +static inline void +push_f32(JitFrame *frame, JitReg value) +{ + push_i32(frame, value); +} + +static inline void +push_f64(JitFrame *frame, JitReg value) +{ + push_i64(frame, value); +} + +static inline JitReg +pop_i32(JitFrame *frame) +{ + frame->sp--; + return gen_load_i32(frame, frame->sp - frame->lp); +} + +static inline JitReg +pop_i64(JitFrame *frame) +{ + frame->sp -= 2; + return gen_load_i64(frame, frame->sp - frame->lp); +} + +static inline JitReg +pop_f32(JitFrame *frame) +{ + frame->sp--; + return gen_load_f32(frame, frame->sp - frame->lp); +} + +static inline JitReg +pop_f64(JitFrame *frame) +{ + frame->sp -= 2; + return gen_load_f64(frame, frame->sp - frame->lp); +} + +static inline void +pop(JitFrame *frame, int n) +{ + frame->sp -= n; + memset(frame->sp, 0, n * sizeof(*frame->sp)); +} + +static inline JitReg +local_i32(JitFrame *frame, int n) +{ + return gen_load_i32(frame, n); +} + +static inline JitReg +local_i64(JitFrame *frame, int n) +{ + return gen_load_i64(frame, n); +} + +static inline JitReg +local_f32(JitFrame *frame, int n) +{ + return gen_load_f32(frame, n); +} + +static inline JitReg +local_f64(JitFrame *frame, int n) +{ + return gen_load_f64(frame, n); +} + +static void +set_local_i32(JitFrame *frame, int n, JitReg val) +{ + frame->lp[n].reg = val; + frame->lp[n].dirty = 1; +} + +static void +set_local_i64(JitFrame *frame, int n, JitReg val) +{ + frame->lp[n].reg = val; + frame->lp[n].dirty = 1; + frame->lp[n + 1].reg = val; + frame->lp[n + 1].dirty = 1; +} + +static inline void +set_local_f32(JitFrame *frame, int n, JitReg val) +{ + set_local_i32(frame, n, val); +} + +static inline void +set_local_f64(JitFrame *frame, int n, JitReg val) +{ + set_local_i64(frame, n, val); +} + +#define POP(jit_value, value_type) \ + do { \ + if (!jit_cc_pop_value(cc, value_type, &jit_value)) \ + goto fail; \ + } while (0) + +#define POP_I32(v) POP(v, VALUE_TYPE_I32) +#define POP_I64(v) POP(v, VALUE_TYPE_I64) +#define POP_F32(v) POP(v, VALUE_TYPE_F32) +#define POP_F64(v) POP(v, VALUE_TYPE_F64) +#define POP_FUNCREF(v) POP(v, VALUE_TYPE_FUNCREF) +#define POP_EXTERNREF(v) POP(v, VALUE_TYPE_EXTERNREF) + +#define PUSH(jit_value, value_type) \ + do { \ + if 
(!jit_cc_push_value(cc, value_type, jit_value)) \
+            goto fail; \
+    } while (0)
+
+#define PUSH_I32(v) PUSH(v, VALUE_TYPE_I32)
+#define PUSH_I64(v) PUSH(v, VALUE_TYPE_I64)
+#define PUSH_F32(v) PUSH(v, VALUE_TYPE_F32)
+#define PUSH_F64(v) PUSH(v, VALUE_TYPE_F64)
+#define PUSH_FUNCREF(v) PUSH(v, VALUE_TYPE_FUNCREF)
+#define PUSH_EXTERNREF(v) PUSH(v, VALUE_TYPE_EXTERNREF)
+
+#endif
diff --git a/core/iwasm/fast-jit/jit_ir.c b/core/iwasm/fast-jit/jit_ir.c
new file mode 100644
index 000000000..986e9cb1b
--- /dev/null
+++ b/core/iwasm/fast-jit/jit_ir.c
@@ -0,0 +1,1500 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_ir.h"
+#include "jit_codegen.h"
+#include "jit_frontend.h"
+
+/**
+ * Operand kinds of instructions.
+ */
+enum {
+    JIT_OPND_KIND_Reg,
+    JIT_OPND_KIND_VReg,
+    JIT_OPND_KIND_TableSwitch,
+    JIT_OPND_KIND_LookupSwitch
+};
+
+/**
+ * Operand kind of each instruction.
+ */
+static const uint8 insn_opnd_kind[] = {
+#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) JIT_OPND_KIND_##OPND_KIND,
+#include "jit_ir.def"
+#undef INSN
+};
+
+/**
+ * Operand number of each instruction.
+ */
+static const uint8 insn_opnd_num[] = {
+#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) OPND_NUM,
+#include "jit_ir.def"
+#undef INSN
+};
+
+/**
+ * Index of the first use register of each instruction.
+ */
+static const uint8 insn_opnd_first_use[] = {
+#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) FIRST_USE,
+#include "jit_ir.def"
+#undef INSN
+};
+
+#define JIT_INSN_NEW_Reg(OPND_NUM) \
+    jit_calloc(offsetof(JitInsn, _opnd) + sizeof(JitReg) * (OPND_NUM))
+#define JIT_INSN_NEW_VReg(OPND_NUM) \
+    jit_calloc(offsetof(JitInsn, _opnd._opnd_VReg._reg) \
+               + sizeof(JitReg) * (OPND_NUM))
+
+JitInsn *
+_jit_insn_new_Reg_1(JitOpcode opc, JitReg r0)
+{
+    JitInsn *insn = JIT_INSN_NEW_Reg(1);
+
+    if (insn) {
+        insn->opcode = opc;
+        *jit_insn_opnd(insn, 0) = r0;
+    }
+
+    return insn;
+}
+
+JitInsn *
+_jit_insn_new_Reg_2(JitOpcode opc, JitReg r0, JitReg r1)
+{
+    JitInsn *insn = JIT_INSN_NEW_Reg(2);
+
+    if (insn) {
+        insn->opcode = opc;
+        *jit_insn_opnd(insn, 0) = r0;
+        *jit_insn_opnd(insn, 1) = r1;
+    }
+
+    return insn;
+}
+
+JitInsn *
+_jit_insn_new_Reg_3(JitOpcode opc, JitReg r0, JitReg r1, JitReg r2)
+{
+    JitInsn *insn = JIT_INSN_NEW_Reg(3);
+
+    if (insn) {
+        insn->opcode = opc;
+        *jit_insn_opnd(insn, 0) = r0;
+        *jit_insn_opnd(insn, 1) = r1;
+        *jit_insn_opnd(insn, 2) = r2;
+    }
+
+    return insn;
+}
+
+JitInsn *
+_jit_insn_new_Reg_4(JitOpcode opc, JitReg r0, JitReg r1, JitReg r2, JitReg r3)
+{
+    JitInsn *insn = JIT_INSN_NEW_Reg(4);
+
+    if (insn) {
+        insn->opcode = opc;
+        *jit_insn_opnd(insn, 0) = r0;
+        *jit_insn_opnd(insn, 1) = r1;
+        *jit_insn_opnd(insn, 2) = r2;
+        *jit_insn_opnd(insn, 3) = r3;
+    }
+
+    return insn;
+}
+
+JitInsn *
+_jit_insn_new_Reg_5(JitOpcode opc, JitReg r0, JitReg r1, JitReg r2, JitReg r3,
+                    JitReg r4)
+{
+    JitInsn *insn = JIT_INSN_NEW_Reg(5);
+
+    if (insn) {
+        insn->opcode = opc;
+        *jit_insn_opnd(insn, 0) = r0;
+        *jit_insn_opnd(insn, 1) = r1;
+        *jit_insn_opnd(insn, 2) = r2;
+        *jit_insn_opnd(insn, 3) = r3;
+        *jit_insn_opnd(insn, 4) = r4;
+    }
+
+    return insn;
+}
+
+JitInsn *
+_jit_insn_new_VReg_1(JitOpcode opc, JitReg r0, int n)
+{
+    JitInsn *insn = JIT_INSN_NEW_VReg(1 + n);
+
+    if (insn) {
+        insn->opcode = opc;
+        insn->_opnd._opnd_VReg._reg_num = 1 + n;
+        *(jit_insn_opndv(insn, 0)) = r0;
+    }
+
+    return insn;
+}
+
+JitInsn *
+_jit_insn_new_VReg_2(JitOpcode opc, JitReg r0, JitReg r1, int n)
+{
+    JitInsn *insn =
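+        /* Like JIT_INSN_NEW_Reg above: the instruction header and its
+           inline operand array are allocated as a single block (offsetof
+           gives the header size, the JitReg vector follows), so
+           instructions are variable-length records. */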
JIT_INSN_NEW_VReg(2 + n); + + if (insn) { + insn->opcode = opc; + insn->_opnd._opnd_VReg._reg_num = 2 + n; + *(jit_insn_opndv(insn, 0)) = r0; + *(jit_insn_opndv(insn, 1)) = r1; + } + + return insn; +} + +JitInsn * +_jit_insn_new_TableSwitch_1(JitOpcode opc, JitReg value, int32 low, int32 high) +{ + JitOpndTableSwitch *opnd = NULL; + JitInsn *insn = + jit_calloc(offsetof(JitInsn, _opnd._opnd_TableSwitch.targets) + + sizeof(opnd->targets[0]) * (high - low + 1)); + + if (insn) { + insn->opcode = opc; + opnd = jit_insn_opndts(insn); + opnd->value = value; + opnd->low_value = low; + opnd->high_value = high; + } + + return insn; +} + +JitInsn * +_jit_insn_new_LookupSwitch_1(JitOpcode opc, JitReg value, uint32 num) +{ + JitOpndLookupSwitch *opnd = NULL; + JitInsn *insn = + jit_calloc(offsetof(JitInsn, _opnd._opnd_LookupSwitch.match_pairs) + + sizeof(opnd->match_pairs[0]) * num); + + if (insn) { + insn->opcode = opc; + opnd = jit_insn_opndls(insn); + opnd->value = value; + opnd->match_pairs_num = num; + } + + return insn; +} + +#undef JIT_INSN_NEW_Reg +#undef JIT_INSN_NEW_VReg + +void +jit_insn_insert_before(JitInsn *insn1, JitInsn *insn2) +{ + bh_assert(insn1->prev); + insn1->prev->next = insn2; + insn2->prev = insn1->prev; + insn2->next = insn1; + insn1->prev = insn2; +} + +void +jit_insn_insert_after(JitInsn *insn1, JitInsn *insn2) +{ + bh_assert(insn1->next); + insn1->next->prev = insn2; + insn2->next = insn1->next; + insn2->prev = insn1; + insn1->next = insn2; +} + +void +jit_insn_unlink(JitInsn *insn) +{ + bh_assert(insn->prev); + insn->prev->next = insn->next; + bh_assert(insn->next); + insn->next->prev = insn->prev; + insn->prev = insn->next = NULL; +} + +unsigned +jit_insn_hash(JitInsn *insn) +{ + const uint8 opcode = insn->opcode; + unsigned hash = opcode, i; + + /* Currently, only instructions with Reg kind operand require + hashing. For others, simply use opcode as the hash value. */ + if (insn_opnd_kind[opcode] != JIT_OPND_KIND_Reg + || insn_opnd_num[opcode] < 1) + return hash; + + /* All the instructions with hashing support must be in the + assignment format, i.e. the first operand is the result (hence + being ignored) and all the others are operands. This is also + true for CHK instructions, whose first operand is the instruction + pointer. 
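+       The loop below folds each operand into the hash as
+       ((hash << 5) - hash) + reg, i.e. the classic hash * 31 + reg
+       rolling hash; it starts at operand 1 so that the defined register
+       does not influence the lookup.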
 */
+    for (i = 1; i < insn_opnd_num[opcode]; i++)
+        hash = ((hash << 5) - hash) + *(jit_insn_opnd(insn, i));
+
+    return hash;
+}
+
+bool
+jit_insn_equal(JitInsn *insn1, JitInsn *insn2)
+{
+    const uint8 opcode = insn1->opcode;
+    unsigned i;
+
+    if (insn2->opcode != opcode)
+        return false;
+
+    if (insn_opnd_kind[opcode] != JIT_OPND_KIND_Reg
+        || insn_opnd_num[opcode] < 1)
+        return false;
+
+    for (i = 1; i < insn_opnd_num[opcode]; i++)
+        if (*(jit_insn_opnd(insn1, i)) != *(jit_insn_opnd(insn2, i)))
+            return false;
+
+    return true;
+}
+
+JitRegVec
+jit_insn_opnd_regs(JitInsn *insn)
+{
+    JitRegVec vec;
+    JitOpndTableSwitch *ts;
+    JitOpndLookupSwitch *ls;
+
+    vec._stride = 1;
+
+    switch (insn_opnd_kind[insn->opcode]) {
+        case JIT_OPND_KIND_Reg:
+            vec.num = insn_opnd_num[insn->opcode];
+            vec._base = jit_insn_opnd(insn, 0);
+            break;
+
+        case JIT_OPND_KIND_VReg:
+            vec.num = jit_insn_opndv_num(insn);
+            vec._base = jit_insn_opndv(insn, 0);
+            break;
+
+        case JIT_OPND_KIND_TableSwitch:
+            ts = jit_insn_opndts(insn);
+            vec.num = ts->high_value - ts->low_value + 3;
+            vec._base = &ts->value;
+            break;
+
+        case JIT_OPND_KIND_LookupSwitch:
+            ls = jit_insn_opndls(insn);
+            vec.num = ls->match_pairs_num + 2;
+            vec._base = &ls->value;
+            vec._stride = sizeof(ls->match_pairs[0]) / sizeof(*vec._base);
+            break;
+    }
+
+    return vec;
+}
+
+unsigned
+jit_insn_opnd_first_use(JitInsn *insn)
+{
+    return insn_opnd_first_use[insn->opcode];
+}
+
+JitBasicBlock *
+jit_basic_block_new(JitReg label, int n)
+{
+    JitBasicBlock *block = jit_insn_new_PHI(label, n);
+
+    if (!block)
+        return NULL;
+
+    block->prev = block->next = block;
+    return block;
+}
+
+void
+jit_basic_block_delete(JitBasicBlock *block)
+{
+    JitInsn *insn, *next_insn, *end;
+
+    if (!block)
+        return;
+
+    insn = jit_basic_block_first_insn(block);
+    end = jit_basic_block_end_insn(block);
+
+    for (; insn != end; insn = next_insn) {
+        next_insn = insn->next;
+        jit_insn_delete(insn);
+    }
+
+    jit_insn_delete(block);
+}
+
+JitRegVec
+jit_basic_block_preds(JitBasicBlock *block)
+{
+    JitRegVec vec;
+
+    vec.num = jit_insn_opndv_num(block) - 1;
+    vec._base = vec.num > 0 ? jit_insn_opndv(block, 1) : NULL;
+    vec._stride = 1;
+
+    return vec;
+}
+
+JitRegVec
+jit_basic_block_succs(JitBasicBlock *block)
+{
+    JitInsn *last_insn = jit_basic_block_last_insn(block);
+    JitRegVec vec;
+
+    vec.num = 0;
+    vec._base = NULL;
+    vec._stride = 1;
+
+    switch (last_insn->opcode) {
+        case JIT_OP_JMP:
+            vec.num = 1;
+            vec._base = jit_insn_opnd(last_insn, 0);
+            break;
+
+        case JIT_OP_BEQ:
+        case JIT_OP_BNE:
+        case JIT_OP_BGTS:
+        case JIT_OP_BGES:
+        case JIT_OP_BLTS:
+        case JIT_OP_BLES:
+        case JIT_OP_BGTU:
+        case JIT_OP_BGEU:
+        case JIT_OP_BLTU:
+        case JIT_OP_BLEU:
+            vec.num = 2;
+            vec._base = jit_insn_opnd(last_insn, 1);
+            break;
+
+        case JIT_OP_LOOKUP_SWITCH:
+        {
+            JitOpndLookupSwitch *opnd = jit_insn_opndls(last_insn);
+            vec.num = opnd->match_pairs_num + 1;
+            vec._base = &opnd->default_target;
+            vec._stride = sizeof(opnd->match_pairs[0]) / sizeof(*vec._base);
+            break;
+        }
+
+        default:
+            vec._stride = 0;
+    }
+
+    return vec;
+}
+
+JitCompContext *
+jit_cc_init(JitCompContext *cc, unsigned htab_size)
+{
+    JitBasicBlock *entry_block, *exit_block;
+    unsigned i, num;
+
+    memset(cc, 0, sizeof(*cc));
+    cc->_reference_count = 1;
+    jit_annl_enable_basic_block(cc);
+
+    /* Create entry and exit blocks. They must be the first two
+       blocks respectively.
*/ + if (!(entry_block = jit_cc_new_basic_block(cc, 0)) + || !(exit_block = jit_cc_new_basic_block(cc, 0))) + goto fail; + + if (!(cc->exception_basic_blocks = + jit_calloc(sizeof(JitBasicBlock *) * EXCE_NUM))) + goto fail; + + /* Record the entry and exit labels, whose indexes must be 0 and 1 + respectively. */ + cc->entry_label = jit_basic_block_label(entry_block); + cc->exit_label = jit_basic_block_label(exit_block); + bh_assert(jit_reg_no(cc->entry_label) == 0 + && jit_reg_no(cc->exit_label) == 1); + + cc->hreg_info = jit_codegen_get_hreg_info(); + bh_assert(cc->hreg_info->info[JIT_REG_KIND_I32].num > 3); + + /* Initialize virtual registers for hard registers. */ + for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) { + if ((num = cc->hreg_info->info[i].num)) { + /* Initialize the capacity to be large enough. */ + jit_cc_new_reg(cc, i); + bh_assert(cc->_ann._reg_capacity[i] > num); + cc->_ann._reg_num[i] = num; + } + } + + /* Create registers for frame pointer, exec_env and cmp. */ +#if UINTPTR_MAX == UINT32_MAX + cc->fp_reg = jit_reg_new(JIT_REG_KIND_I32, cc->hreg_info->fp_hreg_index); + cc->exec_env_reg = + jit_reg_new(JIT_REG_KIND_I32, cc->hreg_info->exec_env_hreg_index); +#else + cc->fp_reg = jit_reg_new(JIT_REG_KIND_I64, cc->hreg_info->fp_hreg_index); + cc->exec_env_reg = + jit_reg_new(JIT_REG_KIND_I64, cc->hreg_info->exec_env_hreg_index); +#endif + cc->cmp_reg = jit_reg_new(JIT_REG_KIND_I32, cc->hreg_info->cmp_hreg_index); + + cc->_const_val._hash_table_size = htab_size; + + if (!(cc->_const_val._hash_table = + jit_calloc(htab_size * sizeof(*cc->_const_val._hash_table)))) + goto fail; + + return cc; + +fail: + jit_cc_destroy(cc); + return NULL; +} + +void +jit_cc_destroy(JitCompContext *cc) +{ + unsigned i, end; + JitBasicBlock *block; + + jit_block_stack_destroy(&cc->block_stack); + + jit_free(cc->jit_frame); + + jit_free(cc->_const_val._hash_table); + + /* Release the instruction hash table. */ + jit_cc_disable_insn_hash(cc); + + jit_free(cc->exception_basic_blocks); + + /* Release entry and exit blocks. */ + jit_basic_block_delete(jit_cc_entry_basic_block(cc)); + jit_basic_block_delete(jit_cc_exit_basic_block(cc)); + + /* clang-format off */ + /* Release blocks and instructions. */ + JIT_FOREACH_BLOCK(cc, i, end, block) + { + jit_basic_block_delete(block); + } + /* clang-format on */ + + /* Release constant values. */ + for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) { + jit_free(cc->_const_val._value[i]); + jit_free(cc->_const_val._next[i]); + } + + /* Release storage of annotations. */ +#define ANN_LABEL(TYPE, NAME) jit_annl_disable_##NAME(cc); +#define ANN_INSN(TYPE, NAME) jit_anni_disable_##NAME(cc); +#define ANN_REG(TYPE, NAME) jit_annr_disable_##NAME(cc); +#include "jit_ir.def" +#undef ANN_LABEL +#undef ANN_INSN +#undef ANN_REG +} + +void +jit_cc_delete(JitCompContext *cc) +{ + if (cc && --cc->_reference_count == 0) { + jit_cc_destroy(cc); + jit_free(cc); + } +} + +/* + * Reallocate a memory block with the new_size. + * TODO: replace this with imported jit_realloc when it's available. 
+ */ +static void * +_jit_realloc(void *ptr, unsigned new_size, unsigned old_size) +{ + void *new_ptr = jit_malloc(new_size); + + if (new_ptr) { + bh_assert(new_size > old_size); + + if (ptr) { + memcpy(new_ptr, ptr, old_size); + memset((uint8 *)new_ptr + old_size, 0, new_size - old_size); + jit_free(ptr); + } + else + memset(new_ptr, 0, new_size); + } + + return new_ptr; +} + +static unsigned +hash_of_const(unsigned kind, unsigned size, void *val) +{ + uint8 *p = (uint8 *)val, *end = p + size; + unsigned hash = kind; + + do + hash = ((hash << 5) - hash) + *p++; + while (p != end); + + return hash; +} + +static inline void * +address_of_const(JitCompContext *cc, JitReg reg, unsigned size) +{ + int kind = jit_reg_kind(reg); + unsigned no = jit_reg_no(reg); + unsigned idx = no & ~_JIT_REG_CONST_IDX_FLAG; + + bh_assert(jit_reg_is_const_idx(reg) && idx < cc->_const_val._num[kind]); + + return cc->_const_val._value[kind] + size * idx; +} + +static inline JitReg +next_of_const(JitCompContext *cc, JitReg reg) +{ + int kind = jit_reg_kind(reg); + unsigned no = jit_reg_no(reg); + unsigned idx = no & ~_JIT_REG_CONST_IDX_FLAG; + + bh_assert(jit_reg_is_const_idx(reg) && idx < cc->_const_val._num[kind]); + + return cc->_const_val._next[kind][idx]; +} + +/** + * Put a constant value into the compilation context. + * + * @param cc compilation context + * @param kind register kind + * @param size size of the value + * @param val pointer to value which must be aligned + * + * @return a constant register containing the value + */ +static JitReg +_jit_cc_new_const(JitCompContext *cc, int kind, unsigned size, void *val) +{ + unsigned num = cc->_const_val._num[kind], slot; + unsigned capacity = cc->_const_val._capacity[kind]; + uint8 *new_value; + JitReg r, *new_next; + + bh_assert(num <= capacity); + + /* Find the existing value first. */ + slot = hash_of_const(kind, size, val) % cc->_const_val._hash_table_size; + r = cc->_const_val._hash_table[slot]; + + for (; r; r = next_of_const(cc, r)) + if (jit_reg_kind(r) == kind + && !memcmp(val, address_of_const(cc, r, size), size)) + return r; + + if (num == capacity) { + /* Increase the space of value and next. */ + capacity = capacity > 0 ? 
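+                       /* grow by ~1.5x, starting at 16 entries; the same
+                          growth policy is used for the label/insn/reg
+                          annotation arrays later in this file */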
(capacity + capacity / 2) : 16; + new_value = _jit_realloc(cc->_const_val._value[kind], size * capacity, + size * num); + new_next = + _jit_realloc(cc->_const_val._next[kind], + sizeof(*new_next) * capacity, sizeof(*new_next) * num); + + if (new_value && new_next) { + cc->_const_val._value[kind] = new_value; + cc->_const_val._next[kind] = new_next; + } + else { + jit_free(new_value); + jit_free(new_next); + return 0; + } + + cc->_const_val._capacity[kind] = capacity; + } + + bh_assert(num + 1 < (uint32)_JIT_REG_CONST_IDX_FLAG); + r = jit_reg_new(kind, _JIT_REG_CONST_IDX_FLAG | num); + memcpy(cc->_const_val._value[kind] + size * num, val, size); + cc->_const_val._next[kind][num] = cc->_const_val._hash_table[slot]; + cc->_const_val._hash_table[slot] = r; + cc->_const_val._num[kind] = num + 1; + + return r; +} + +static inline int32 +get_const_val_in_reg(JitReg reg) +{ + int shift = 8 * sizeof(reg) - _JIT_REG_KIND_SHIFT + 1; + return ((int32)(reg << shift)) >> shift; +} + +#define _JIT_CC_NEW_CONST_HELPER(KIND, TYPE, val) \ + do { \ + JitReg reg = jit_reg_new( \ + JIT_REG_KIND_##KIND, \ + (_JIT_REG_CONST_VAL_FLAG | ((JitReg)val & ~_JIT_REG_KIND_MASK))); \ + \ + if ((TYPE)get_const_val_in_reg(reg) == val) \ + return reg; \ + return _jit_cc_new_const(cc, JIT_REG_KIND_##KIND, sizeof(val), &val); \ + } while (0) + +JitReg +jit_cc_new_const_I32_rel(JitCompContext *cc, int32 val, uint32 rel) +{ + uint64 val64 = (uint64)(uint32)val | ((uint64)rel << 32); + _JIT_CC_NEW_CONST_HELPER(I32, uint64, val64); +} + +JitReg +jit_cc_new_const_I64(JitCompContext *cc, int64 val) +{ + _JIT_CC_NEW_CONST_HELPER(I64, int64, val); +} + +JitReg +jit_cc_new_const_F32(JitCompContext *cc, float val) +{ + int32 float_neg_zero = 0x80000000; + + if (!memcmp(&val, &float_neg_zero, sizeof(float))) + /* Create const -0.0f */ + return _jit_cc_new_const(cc, JIT_REG_KIND_F32, sizeof(float), &val); + + _JIT_CC_NEW_CONST_HELPER(F32, float, val); +} + +JitReg +jit_cc_new_const_F64(JitCompContext *cc, double val) +{ + int64 double_neg_zero = 0x8000000000000000ll; + + if (!memcmp(&val, &double_neg_zero, sizeof(double))) + /* Create const -0.0d */ + return _jit_cc_new_const(cc, JIT_REG_KIND_F64, sizeof(double), &val); + + _JIT_CC_NEW_CONST_HELPER(F64, double, val); +} + +#undef _JIT_CC_NEW_CONST_HELPER + +#define _JIT_CC_GET_CONST_HELPER(KIND, TYPE) \ + do { \ + bh_assert(jit_reg_kind(reg) == JIT_REG_KIND_##KIND); \ + bh_assert(jit_reg_is_const(reg)); \ + \ + return (jit_reg_is_const_val(reg) \ + ? 
(TYPE)get_const_val_in_reg(reg) \ + : *(TYPE *)(address_of_const(cc, reg, sizeof(TYPE)))); \ + } while (0) + +static uint64 +jit_cc_get_const_I32_helper(JitCompContext *cc, JitReg reg) +{ + _JIT_CC_GET_CONST_HELPER(I32, uint64); +} + +uint32 +jit_cc_get_const_I32_rel(JitCompContext *cc, JitReg reg) +{ + return (uint32)(jit_cc_get_const_I32_helper(cc, reg) >> 32); +} + +int32 +jit_cc_get_const_I32(JitCompContext *cc, JitReg reg) +{ + return (int32)(jit_cc_get_const_I32_helper(cc, reg)); +} + +int64 +jit_cc_get_const_I64(JitCompContext *cc, JitReg reg) +{ + _JIT_CC_GET_CONST_HELPER(I64, int64); +} + +float +jit_cc_get_const_F32(JitCompContext *cc, JitReg reg) +{ + _JIT_CC_GET_CONST_HELPER(F32, float); +} + +double +jit_cc_get_const_F64(JitCompContext *cc, JitReg reg) +{ + _JIT_CC_GET_CONST_HELPER(F64, double); +} + +#undef _JIT_CC_GET_CONST_HELPER + +#define _JIT_REALLOC_ANN(TYPE, NAME, ANN, POSTFIX) \ + if (successful && cc->_ann._##ANN##_##NAME##_enabled) { \ + TYPE *ptr = _jit_realloc(cc->_ann._##ANN##_##NAME POSTFIX, \ + sizeof(TYPE) * capacity, sizeof(TYPE) * num); \ + if (ptr) \ + cc->_ann._##ANN##_##NAME POSTFIX = ptr; \ + else \ + successful = false; \ + } + +JitReg +jit_cc_new_label(JitCompContext *cc) +{ + unsigned num = cc->_ann._label_num; + unsigned capacity = cc->_ann._label_capacity; + bool successful = true; + + bh_assert(num <= capacity); + + if (num == capacity) { + capacity = capacity > 0 ? (capacity + capacity / 2) : 16; + +#define EMPTY_POSTFIX +#define ANN_LABEL(TYPE, NAME) _JIT_REALLOC_ANN(TYPE, NAME, label, EMPTY_POSTFIX) +#include "jit_ir.def" +#undef ANN_LABEL +#undef EMPTY_POSTFIX + + if (!successful) + return 0; + + cc->_ann._label_capacity = capacity; + } + + cc->_ann._label_num = num + 1; + + return jit_reg_new(JIT_REG_KIND_L32, num); +} + +JitBasicBlock * +jit_cc_new_basic_block(JitCompContext *cc, int n) +{ + JitReg label = jit_cc_new_label(cc); + JitBasicBlock *block = NULL; + + if (label && (block = jit_basic_block_new(label, n))) + /* Void 0 register indicates error in creation. */ + *(jit_annl_basic_block(cc, label)) = block; + + return block; +} + +JitBasicBlock * +jit_cc_resize_basic_block(JitCompContext *cc, JitBasicBlock *block, int n) +{ + JitReg label = jit_basic_block_label(block); + JitInsn *insn = jit_basic_block_first_insn(block); + JitBasicBlock *new_block = jit_basic_block_new(label, n); + + if (!new_block) + return NULL; + + jit_insn_unlink(block); + + if (insn != block) + jit_insn_insert_before(insn, new_block); + + bh_assert(*(jit_annl_basic_block(cc, label)) == block); + *(jit_annl_basic_block(cc, label)) = new_block; + jit_insn_delete(block); + + return new_block; +} + +bool +jit_cc_enable_insn_hash(JitCompContext *cc, unsigned n) +{ + if (jit_anni_is_enabled__hash_link(cc)) + return true; + + if (!jit_anni_enable__hash_link(cc)) + return false; + + /* The table must not exist. */ + bh_assert(!cc->_insn_hash_table._table); + + /* Integer overflow cannot happen because n << 4G (at most several + times of 64K in the most extreme case). 
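+       ("n << 4G" reads as "n is far less than 4G", so the
+       multiplication below cannot overflow the allocation size.)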
 */
+    if (!(cc->_insn_hash_table._table =
+              jit_calloc(n * sizeof(*cc->_insn_hash_table._table)))) {
+        jit_anni_disable__hash_link(cc);
+        return false;
+    }
+
+    cc->_insn_hash_table._size = n;
+    return true;
+}
+
+void
+jit_cc_disable_insn_hash(JitCompContext *cc)
+{
+    jit_anni_disable__hash_link(cc);
+    jit_free(cc->_insn_hash_table._table);
+    cc->_insn_hash_table._table = NULL;
+    cc->_insn_hash_table._size = 0;
+}
+
+void
+jit_cc_reset_insn_hash(JitCompContext *cc)
+{
+    if (jit_anni_is_enabled__hash_link(cc))
+        memset(cc->_insn_hash_table._table, 0,
+               cc->_insn_hash_table._size
+                   * sizeof(*cc->_insn_hash_table._table));
+}
+
+JitInsn *
+jit_cc_set_insn_uid(JitCompContext *cc, JitInsn *insn)
+{
+    if (insn) {
+        unsigned num = cc->_ann._insn_num;
+        unsigned capacity = cc->_ann._insn_capacity;
+        bool successful = true;
+
+        bh_assert(num <= capacity);
+
+        if (num == capacity) {
+            capacity = capacity > 0 ? (capacity + capacity / 2) : 64;
+
+#define EMPTY_POSTFIX
+#define ANN_INSN(TYPE, NAME) _JIT_REALLOC_ANN(TYPE, NAME, insn, EMPTY_POSTFIX)
+#include "jit_ir.def"
+#undef ANN_INSN
+#undef EMPTY_POSTFIX
+
+            if (!successful)
+                return NULL;
+
+            cc->_ann._insn_capacity = capacity;
+        }
+
+        cc->_ann._insn_num = num + 1;
+        insn->uid = num;
+    }
+
+    return insn;
+}
+
+JitInsn *
+_jit_cc_set_insn_uid_for_new_insn(JitCompContext *cc, JitInsn *insn)
+{
+    if (jit_cc_set_insn_uid(cc, insn))
+        return insn;
+
+    jit_insn_delete(insn);
+    return NULL;
+}
+
+static JitReg
+normalize_insn(JitCompContext *cc, JitInsn **pinsn)
+{
+#if 0
+    JitInsn *insn = *pinsn;
+    JitReg opnd1;
+
+    /* TODO: implement the rest. See gen_array_addr. */
+
+    switch (insn->opcode) {
+        case JIT_OP_I32TOI8:
+            opnd1 = *(jit_insn_opnd(insn, 1));
+            if (jit_reg_is_const(opnd1))
+                return jit_cc_new_const_I32(cc, (int8)jit_cc_get_const_I32(cc, opnd1));
+            break;
+
+        case JIT_OP_I32TOU8:
+            opnd1 = *(jit_insn_opnd(insn, 1));
+            if (jit_reg_is_const(opnd1))
+                return jit_cc_new_const_I32(cc, (uint8)jit_cc_get_const_I32(cc, opnd1));
+            break;
+
+        case JIT_OP_I32TOI16:
+            opnd1 = *(jit_insn_opnd(insn, 1));
+            if (jit_reg_is_const(opnd1))
+                return jit_cc_new_const_I32(cc, (int16)jit_cc_get_const_I32(cc, opnd1));
+            break;
+
+        case JIT_OP_I32TOU16:
+            opnd1 = *(jit_insn_opnd(insn, 1));
+            if (jit_reg_is_const(opnd1))
+                return jit_cc_new_const_I32(cc, (uint16)jit_cc_get_const_I32(cc, opnd1));
+            break;
+
+        case JIT_OP_I32TOI64:
+            break;
+
+        case JIT_OP_U32TOI64:
+            break;
+
+        case JIT_OP_I64TOI32:
+            break;
+
+        case JIT_OP_NEG:
+            break;
+
+        case JIT_OP_NOT:
+            break;
+
+        case JIT_OP_ADD:
+            break;
+
+        case JIT_OP_SUB:
+            break;
+
+        case JIT_OP_MUL:
+            break;
+
+        case JIT_OP_DIV:
+            break;
+
+        case JIT_OP_REM:
+            break;
+
+        case JIT_OP_SHL:
+            break;
+
+        case JIT_OP_SHRS:
+            break;
+
+        case JIT_OP_SHRU:
+            break;
+
+        case JIT_OP_OR:
+            break;
+
+        case JIT_OP_XOR:
+            break;
+
+        case JIT_OP_AND:
+            break;
+
+        case JIT_OP_CMP:
+            break;
+    }
+#endif
+
+    return 0;
+}
+
+JitInsn *
+_jit_cc_new_insn_norm(JitCompContext *cc, JitReg *result, JitInsn *insn)
+{
+    if (!insn)
+    /* Creation of insn failed (due to OOM). */
+    {
+        *result = 0;
+        return NULL;
+    }
+
+    /* Try to normalize the instruction first. */
+    if ((*result = normalize_insn(cc, &insn)))
+    /* It's folded to a constant, don't add the insn to cc. */
+    {
+        bh_assert(jit_reg_is_const(*result));
+        jit_insn_delete(insn);
+        return NULL;
+    }
+
+    /* Try to find an existing equivalent instruction.
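+       This is local common-subexpression elimination: if an identical
+       instruction was already emitted, its result register is reused
+       instead of creating a new instruction.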
*/ + if (jit_anni_is_enabled__hash_link(cc)) { + unsigned slot = jit_insn_hash(insn) % cc->_insn_hash_table._size; + JitInsn **entry = &cc->_insn_hash_table._table[slot]; + JitInsn *insn1; + + for (insn1 = *entry; insn1; insn1 = *(jit_anni__hash_link(cc, insn1))) + if (jit_insn_equal(insn, insn1)) + /* Found, don't add the insn to cc. */ + { + JitRegVec vec = jit_insn_opnd_regs(insn1); + *result = *(jit_reg_vec_at(&vec, 0)); + bh_assert(*result); + jit_insn_delete(insn); + return insn1; + } + + if ((insn = _jit_cc_set_insn_uid_for_new_insn(cc, insn))) + /* Add insn to the linked list of the hash table entry. */ + { + *(jit_anni__hash_link(cc, insn)) = *entry; + *entry = insn; + } + } + else + insn = _jit_cc_set_insn_uid_for_new_insn(cc, insn); + + return insn; +} + +JitInsn * +_gen_insn_norm_1(JitCompContext *cc, JitBasicBlock *block, unsigned kind, + JitReg *result, JitInsn *insn) +{ + if (!*result && insn) { + JitRegVec vec = jit_insn_opnd_regs(insn); + JitReg *lhs = jit_reg_vec_at(&vec, 0); + + if (!*lhs) + *lhs = jit_cc_new_reg(cc, kind); + + *result = *lhs; + jit_basic_block_append_insn(block, insn); + *(jit_annr_def_insn(cc, *lhs)) = insn; + + return insn; + } + + return NULL; +} + +JitReg +jit_cc_new_reg(JitCompContext *cc, unsigned kind) +{ + unsigned num = jit_cc_reg_num(cc, kind); + unsigned capacity = cc->_ann._reg_capacity[kind]; + bool successful = true; + + bh_assert(num <= capacity); + + if (num == capacity) { + capacity = (capacity == 0 + /* Initialize the capacity to be larger than hard + register number. */ + ? cc->hreg_info->info[kind].num + 16 + : capacity + capacity / 2); + +#define ANN_REG(TYPE, NAME) _JIT_REALLOC_ANN(TYPE, NAME, reg, [kind]) +#include "jit_ir.def" +#undef ANN_REG + + if (!successful) + return 0; + + cc->_ann._reg_capacity[kind] = capacity; + } + + cc->_ann._reg_num[kind] = num + 1; + + return jit_reg_new(kind, num); +} + +#undef _JIT_REALLOC_ANN + +#define ANN_LABEL(TYPE, NAME) \ + bool jit_annl_enable_##NAME(JitCompContext *cc) \ + { \ + if (cc->_ann._label_##NAME##_enabled) \ + return true; \ + \ + if (cc->_ann._label_capacity > 0 \ + && !(cc->_ann._label_##NAME = \ + jit_calloc(cc->_ann._label_capacity * sizeof(TYPE)))) \ + return false; \ + \ + cc->_ann._label_##NAME##_enabled = 1; \ + return true; \ + } +#define ANN_INSN(TYPE, NAME) \ + bool jit_anni_enable_##NAME(JitCompContext *cc) \ + { \ + if (cc->_ann._insn_##NAME##_enabled) \ + return true; \ + \ + if (cc->_ann._insn_capacity > 0 \ + && !(cc->_ann._insn_##NAME = \ + jit_calloc(cc->_ann._insn_capacity * sizeof(TYPE)))) \ + return false; \ + \ + cc->_ann._insn_##NAME##_enabled = 1; \ + return true; \ + } +#define ANN_REG(TYPE, NAME) \ + bool jit_annr_enable_##NAME(JitCompContext *cc) \ + { \ + unsigned k; \ + \ + if (cc->_ann._reg_##NAME##_enabled) \ + return true; \ + \ + for (k = JIT_REG_KIND_VOID; k < JIT_REG_KIND_L32; k++) \ + if (cc->_ann._reg_capacity[k] > 0 \ + && !(cc->_ann._reg_##NAME[k] = jit_calloc( \ + cc->_ann._reg_capacity[k] * sizeof(TYPE)))) { \ + jit_annr_disable_##NAME(cc); \ + return false; \ + } \ + \ + cc->_ann._reg_##NAME##_enabled = 1; \ + return true; \ + } +#include "jit_ir.def" +#undef ANN_LABEL +#undef ANN_INSN +#undef ANN_REG + +#define ANN_LABEL(TYPE, NAME) \ + void jit_annl_disable_##NAME(JitCompContext *cc) \ + { \ + jit_free(cc->_ann._label_##NAME); \ + cc->_ann._label_##NAME = NULL; \ + cc->_ann._label_##NAME##_enabled = 0; \ + } +#define ANN_INSN(TYPE, NAME) \ + void jit_anni_disable_##NAME(JitCompContext *cc) \ + { \ + jit_free(cc->_ann._insn_##NAME); \ + 
cc->_ann._insn_##NAME = NULL; \ + cc->_ann._insn_##NAME##_enabled = 0; \ + } +#define ANN_REG(TYPE, NAME) \ + void jit_annr_disable_##NAME(JitCompContext *cc) \ + { \ + unsigned k; \ + \ + for (k = JIT_REG_KIND_VOID; k < JIT_REG_KIND_L32; k++) { \ + jit_free(cc->_ann._reg_##NAME[k]); \ + cc->_ann._reg_##NAME[k] = NULL; \ + } \ + \ + cc->_ann._reg_##NAME##_enabled = 0; \ + } +#include "jit_ir.def" +#undef ANN_LABEL +#undef ANN_INSN +#undef ANN_REG + +char * +jit_get_last_error(JitCompContext *cc) +{ + return cc->last_error[0] == '\0' ? "" : cc->last_error; +} + +void +jit_set_last_error_v(JitCompContext *cc, const char *format, ...) +{ + va_list args; + va_start(args, format); + vsnprintf(cc->last_error, sizeof(cc->last_error), format, args); + va_end(args); +} + +void +jit_set_last_error(JitCompContext *cc, const char *error) +{ + if (error) + snprintf(cc->last_error, sizeof(cc->last_error), "Error: %s", error); + else + cc->last_error[0] = '\0'; +} + +bool +jit_cc_update_cfg(JitCompContext *cc) +{ + JitBasicBlock *block; + unsigned block_index, end, succ_index, idx; + JitReg *target; + bool retval = false; + + if (!jit_annl_enable_pred_num(cc)) + return false; + + /* Update pred_num of all blocks. */ + JIT_FOREACH_BLOCK_ENTRY_EXIT(cc, block_index, end, block) + { + JitRegVec succs = jit_basic_block_succs(block); + + JIT_REG_VEC_FOREACH(succs, succ_index, target) + if (jit_reg_is_kind(L32, *target)) + *(jit_annl_pred_num(cc, *target)) += 1; + } + + /* Resize predecessor vectors of body blocks. */ + JIT_FOREACH_BLOCK(cc, block_index, end, block) + { + if (!jit_cc_resize_basic_block( + cc, block, + *(jit_annl_pred_num(cc, jit_basic_block_label(block))))) + goto cleanup_and_return; + } + + /* Fill in predecessor vectors all blocks. */ + JIT_FOREACH_BLOCK_REVERSE_ENTRY_EXIT(cc, block_index, block) + { + JitRegVec succs = jit_basic_block_succs(block), preds; + + JIT_REG_VEC_FOREACH(succs, succ_index, target) + if (jit_reg_is_kind(L32, *target)) { + preds = jit_basic_block_preds(*(jit_annl_basic_block(cc, *target))); + bh_assert(*(jit_annl_pred_num(cc, *target)) > 0); + idx = *(jit_annl_pred_num(cc, *target)) - 1; + *(jit_annl_pred_num(cc, *target)) = idx; + *(jit_reg_vec_at(&preds, idx)) = jit_basic_block_label(block); + } + } + + retval = true; + +cleanup_and_return: + jit_annl_disable_pred_num(cc); + return retval; +} + +void +jit_value_stack_push(JitValueStack *stack, JitValue *value) +{ + if (!stack->value_list_head) + stack->value_list_head = stack->value_list_end = value; + else { + stack->value_list_end->next = value; + value->prev = stack->value_list_end; + stack->value_list_end = value; + } +} + +JitValue * +jit_value_stack_pop(JitValueStack *stack) +{ + JitValue *value = stack->value_list_end; + + bh_assert(stack->value_list_end); + + if (stack->value_list_head == stack->value_list_end) + stack->value_list_head = stack->value_list_end = NULL; + else { + stack->value_list_end = stack->value_list_end->prev; + stack->value_list_end->next = NULL; + value->prev = NULL; + } + + return value; +} + +void +jit_value_stack_destroy(JitValueStack *stack) +{ + JitValue *value = stack->value_list_head, *p; + + while (value) { + p = value->next; + jit_free(value); + value = p; + } +} + +void +jit_block_stack_push(JitBlockStack *stack, JitBlock *block) +{ + if (!stack->block_list_head) + stack->block_list_head = stack->block_list_end = block; + else { + stack->block_list_end->next = block; + block->prev = stack->block_list_end; + stack->block_list_end = block; + } +} + +JitBlock * 
+jit_block_stack_pop(JitBlockStack *stack)
+{
+    JitBlock *block = stack->block_list_end;
+
+    bh_assert(stack->block_list_end);
+
+    if (stack->block_list_head == stack->block_list_end)
+        stack->block_list_head = stack->block_list_end = NULL;
+    else {
+        stack->block_list_end = stack->block_list_end->prev;
+        stack->block_list_end->next = NULL;
+        block->prev = NULL;
+    }
+
+    return block;
+}
+
+void
+jit_block_stack_destroy(JitBlockStack *stack)
+{
+    JitBlock *block = stack->block_list_head, *p;
+
+    while (block) {
+        p = block->next;
+        jit_value_stack_destroy(&block->value_stack);
+        jit_block_destroy(block);
+        block = p;
+    }
+}
+
+bool
+jit_block_add_incoming_insn(JitBlock *block, JitInsn *insn)
+{
+    JitIncomingInsn *incoming_insn;
+
+    if (!(incoming_insn = jit_calloc((uint32)sizeof(JitIncomingInsn))))
+        return false;
+
+    incoming_insn->insn = insn;
+    incoming_insn->next = block->incoming_insns_for_end_bb;
+    block->incoming_insns_for_end_bb = incoming_insn;
+    return true;
+}
+
+void
+jit_block_destroy(JitBlock *block)
+{
+    JitIncomingInsn *incoming_insn, *incoming_insn_next;
+
+    jit_value_stack_destroy(&block->value_stack);
+    if (block->param_types)
+        jit_free(block->param_types);
+    if (block->result_types)
+        jit_free(block->result_types);
+
+    incoming_insn = block->incoming_insns_for_end_bb;
+    while (incoming_insn) {
+        incoming_insn_next = incoming_insn->next;
+        jit_free(incoming_insn);
+        incoming_insn = incoming_insn_next;
+    }
+
+    jit_free(block);
+}
+
+static inline uint8
+to_stack_value_type(uint8 type)
+{
+#if WASM_ENABLE_REF_TYPES != 0
+    if (type == VALUE_TYPE_EXTERNREF || type == VALUE_TYPE_FUNCREF)
+        return VALUE_TYPE_I32;
+#endif
+    return type;
+}
+
+bool
+jit_cc_pop_value(JitCompContext *cc, uint8 type, JitReg *p_value)
+{
+    JitValue *jit_value;
+    JitReg value;
+
+    if (!cc->block_stack.block_list_end) {
+        jit_set_last_error(cc, "WASM block stack underflow");
+        return false;
+    }
+    if (!cc->block_stack.block_list_end->value_stack.value_list_end) {
+        jit_set_last_error(cc, "WASM data stack underflow");
+        return false;
+    }
+
+    jit_value =
+        jit_value_stack_pop(&cc->block_stack.block_list_end->value_stack);
+    bh_assert(jit_value);
+
+    if (jit_value->type != to_stack_value_type(type)) {
+        jit_set_last_error(cc, "invalid WASM stack data type");
+        jit_free(jit_value);
+        return false;
+    }
+
+    switch (jit_value->type) {
+        case VALUE_TYPE_I32:
+            value = pop_i32(cc->jit_frame);
+            break;
+        case VALUE_TYPE_I64:
+            value = pop_i64(cc->jit_frame);
+            break;
+        case VALUE_TYPE_F32:
+            value = pop_f32(cc->jit_frame);
+            break;
+        case VALUE_TYPE_F64:
+            value = pop_f64(cc->jit_frame);
+            break;
+        default:
+            bh_assert(0);
+            break;
+    }
+
+    bh_assert(value == jit_value->value);
+    *p_value = value;
+    jit_free(jit_value);
+    return true;
+}
+
+bool
+jit_cc_push_value(JitCompContext *cc, uint8 type, JitReg value)
+{
+    JitValue *jit_value;
+
+    if (!cc->block_stack.block_list_end) {
+        jit_set_last_error(cc, "WASM block stack underflow");
+        return false;
+    }
+
+    if (!(jit_value = jit_calloc(sizeof(JitValue)))) {
+        jit_set_last_error(cc, "allocate memory failed");
+        return false;
+    }
+
+    jit_value->type = to_stack_value_type(type);
+    jit_value->value = value;
+    jit_value_stack_push(&cc->block_stack.block_list_end->value_stack,
+                         jit_value);
+
+    switch (jit_value->type) {
+        case VALUE_TYPE_I32:
+            push_i32(cc->jit_frame, value);
+            break;
+        case VALUE_TYPE_I64:
+            push_i64(cc->jit_frame, value);
+            break;
+        case VALUE_TYPE_F32:
+            push_f32(cc->jit_frame, value);
+            break;
+        case VALUE_TYPE_F64:
+            push_f64(cc->jit_frame, value);
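+            /* Note: like jit_cc_pop_value above, this keeps two stacks in
+               sync: the typed JitValue stack used to check Wasm operand
+               types, and the JitFrame slot stack used by the push and pop
+               helpers (64-bit values occupy two slots; see push_i64 in
+               jit_frontend.h). */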
+            break;
+        default:
+            /* to_stack_value_type only returns the four types above,
+               so this should be unreachable */
+            bh_assert(0);
+            break;
+    }
+
+    return true;
+}
+
+bool
+_jit_insn_check_opnd_access_Reg(const JitInsn *insn, unsigned n)
+{
+    unsigned opcode = insn->opcode;
+    return (insn_opnd_kind[opcode] == JIT_OPND_KIND_Reg
+            && n < insn_opnd_num[opcode]);
+}
+
+bool
+_jit_insn_check_opnd_access_VReg(const JitInsn *insn, unsigned n)
+{
+    unsigned opcode = insn->opcode;
+    return (insn_opnd_kind[opcode] == JIT_OPND_KIND_VReg
+            && n < insn->_opnd._opnd_VReg._reg_num);
+}
+
+bool
+_jit_insn_check_opnd_access_TableSwitch(const JitInsn *insn)
+{
+    unsigned opcode = insn->opcode;
+    return (insn_opnd_kind[opcode] == JIT_OPND_KIND_TableSwitch);
+}
+
+bool
+_jit_insn_check_opnd_access_LookupSwitch(const JitInsn *insn)
+{
+    unsigned opcode = insn->opcode;
+    return (insn_opnd_kind[opcode] == JIT_OPND_KIND_LookupSwitch);
+}
diff --git a/core/iwasm/fast-jit/jit_ir.def b/core/iwasm/fast-jit/jit_ir.def
new file mode 100644
index 000000000..8afffbbf7
--- /dev/null
+++ b/core/iwasm/fast-jit/jit_ir.def
@@ -0,0 +1,457 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+/**
+ * @file jit-ir.def
+ *
+ * @brief Definition of JIT IR instructions and annotations.
+ */
+
+/**
+ * @def INSN (NAME, OPND_KIND, OPND_NUM, FIRST_USE)
+ *
+ * Definition of IR instructions
+ *
+ * @param NAME name of the opcode
+ * @param OPND_KIND kind of the operand(s)
+ * @param OPND_NUM number of the operand(s)
+ * @param FIRST_USE index of the first use register
+ *
+ * @p OPND_KIND and @p OPND_NUM together determine the format of an
+ * instruction. There are four kinds of formats:
+ *
+ * 1) Reg: fixed-number register operands, @p OPND_NUM specifies the
+ * number of operands;
+ *
+ * 2) VReg: variable-number register operands, @p OPND_NUM specifies
+ * the number of fixed register operands;
+ *
+ * 3) TableSwitch: tableswitch instruction's format, @p OPND_NUM must
+ * be 1;
+ *
+ * 4) LookupSwitch: lookupswitch instruction's format, @p OPND_NUM
+ * must be 1.
+ *
+ * Instruction operands are all registers and they are organized in an
+ * order that all registers defined by the instruction, if any, appear
+ * before the registers used by the instruction. The @p FIRST_USE is
+ * the index of the first use register in the register vector sorted
+ * in this order. Use @c jit_insn_opnd_regs to get the register
+ * vector in this order and use @c jit_insn_opnd_first_use to get the
+ * index of the first use register.
+ *
+ * Every instruction with name @p NAME has the following definitions:
+ *
+ * @c JIT_OP_NAME: the enum opcode of insn NAME
+ * @c jit_insn_new_NAME (...): creates a new instance of insn NAME
+ *
+ * An instruction is deleted by function:
+ *
+ * @c jit_insn_delete (@p insn)
+ *
+ * In the scope of this IR's terminology, operand and argument have
+ * different meanings. The operand is a general notation, which
+ * denotes every raw operand of an instruction, while the argument
+ * only denotes the variable part of operands of instructions of VReg
+ * kind. For example, a VReg instruction phi node "r0 = phi(r1, r2)"
+ * has three operands opnd[0]: r0, opnd[1]: r1 and opnd[2]: r2, but
+ * only two arguments arg[0]: r1 and arg[1]: r2.
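+ *
+ * As an illustrative sketch (not part of the original patch), the
+ * arguments of such a phi node can be visited with the VReg
+ * accessors below, where index 0 is the defined result register:
+ *
+ *   unsigned i;
+ *   for (i = 1; i < jit_insn_opndv_num(insn); i++) {
+ *       JitReg *arg = jit_insn_opndv(insn, i);
+ *       ... use *arg here ...
+ *   }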
+ * Operands or arguments of instructions with various formats can be
+ * accessed through the following APIs:
+ *
+ * @c jit_insn_opnd (@p insn, @p n): for Reg_N formats
+ * @c jit_insn_opndv (@p insn, @p n): for VReg_N formats
+ * @c jit_insn_opndv_num (@p insn): for VReg_N formats
+ * @c jit_insn_opndts (@p insn): for TableSwitch_1 format
+ * @c jit_insn_opndls (@p insn): for LookupSwitch_1 format
+ */
+
+#ifndef INSN
+#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE)
+#endif
+
+/* Comparison instructions */
+INSN(I32_EQZ, Reg, 3, 1)
+INSN(I32_EQ, Reg, 3, 1)
+INSN(I32_NE, Reg, 3, 1)
+INSN(I32_LT_S, Reg, 3, 1)
+INSN(I32_LT_U, Reg, 3, 1)
+INSN(I32_GT_S, Reg, 3, 1)
+INSN(I32_GT_U, Reg, 3, 1)
+INSN(I32_LE_S, Reg, 3, 1)
+INSN(I32_LE_U, Reg, 3, 1)
+INSN(I32_GE_S, Reg, 3, 1)
+INSN(I32_GE_U, Reg, 3, 1)
+
+INSN(I64_EQZ, Reg, 3, 1)
+INSN(I64_EQ, Reg, 3, 1)
+INSN(I64_NE, Reg, 3, 1)
+INSN(I64_LT_S, Reg, 3, 1)
+INSN(I64_LT_U, Reg, 3, 1)
+INSN(I64_GT_S, Reg, 3, 1)
+INSN(I64_GT_U, Reg, 3, 1)
+INSN(I64_LE_S, Reg, 3, 1)
+INSN(I64_LE_U, Reg, 3, 1)
+INSN(I64_GE_S, Reg, 3, 1)
+INSN(I64_GE_U, Reg, 3, 1)
+
+INSN(F32_EQ, Reg, 3, 1)
+INSN(F32_NE, Reg, 3, 1)
+INSN(F32_LT, Reg, 3, 1)
+INSN(F32_GT, Reg, 3, 1)
+INSN(F32_LE, Reg, 3, 1)
+INSN(F32_GE, Reg, 3, 1)
+
+INSN(F64_EQ, Reg, 3, 1)
+INSN(F64_NE, Reg, 3, 1)
+INSN(F64_LT, Reg, 3, 1)
+INSN(F64_GT, Reg, 3, 1)
+INSN(F64_LE, Reg, 3, 1)
+INSN(F64_GE, Reg, 3, 1)
+
+/* Select instruction */
+INSN(SELECT, Reg, 4, 1)
+
+/* Control instructions */
+INSN(JMP, Reg, 1, 0)
+INSN(BEQ, Reg, 3, 0)
+INSN(BNE, Reg, 3, 0)
+INSN(BGTS, Reg, 3, 0)
+INSN(BGES, Reg, 3, 0)
+INSN(BLTS, Reg, 3, 0)
+INSN(BLES, Reg, 3, 0)
+INSN(BGTU, Reg, 3, 0)
+INSN(BGEU, Reg, 3, 0)
+INSN(BLTU, Reg, 3, 0)
+INSN(BLEU, Reg, 3, 0)
+INSN(TABLE_SWITCH, TableSwitch, 1, 0)
+INSN(LOOKUP_SWITCH, LookupSwitch, 1, 0)
+
+/* check zero divisor */
+INSN(CHECK_DIV_ZERO, Reg, 3, 0)
+/* check stack overflow */
+INSN(CHECK_SOE, Reg, 3, 0)
+
+/* Call and return instructions */
+INSN(CALLNATIVE, VReg, 2, 1)
+INSN(CALLBC, Reg, 3, 0)
+INSN(RETURN, Reg, 1, 0)
+
+/* Move and conversion instructions that transfer values among
+   registers of the same kind (move) or different kinds (convert) */
+INSN(MOV, Reg, 2, 1)
+INSN(PHI, VReg, 1, 1)
+
+INSN(I32TOI8, Reg, 2, 1)
+INSN(I32TOU8, Reg, 2, 1)
+INSN(I32TOI16, Reg, 2, 1)
+INSN(I32TOU16, Reg, 2, 1)
+INSN(I32TOI64, Reg, 2, 1)
+INSN(I32TOF32, Reg, 2, 1)
+INSN(I32TOF64, Reg, 2, 1)
+INSN(U32TOI64, Reg, 2, 1)
+INSN(U32TOF32, Reg, 2, 1)
+INSN(U32TOF64, Reg, 2, 1)
+INSN(I64TOI32, Reg, 2, 1)
+INSN(I64TOF32, Reg, 2, 1)
+INSN(I64TOF64, Reg, 2, 1)
+INSN(F32TOI32, Reg, 2, 1)
+INSN(F32TOI64, Reg, 2, 1)
+INSN(F32TOF64, Reg, 2, 1)
+INSN(F64TOI32, Reg, 2, 1)
+INSN(F64TOI64, Reg, 2, 1)
+INSN(F64TOF32, Reg, 2, 1)
+
+/* Arithmetic and bitwise instructions: */
+INSN(NEG, Reg, 2, 1)
+INSN(NOT, Reg, 2, 1)
+INSN(ADD, Reg, 3, 1)
+INSN(SUB, Reg, 3, 1)
+INSN(MUL, Reg, 3, 1)
+INSN(DIV, Reg, 3, 1)
+INSN(REM, Reg, 3, 1)
+INSN(SHL, Reg, 3, 1)
+INSN(SHRS, Reg, 3, 1)
+INSN(SHRU, Reg, 3, 1)
+INSN(OR, Reg, 3, 1)
+INSN(XOR, Reg, 3, 1)
+INSN(AND, Reg, 3, 1)
+INSN(CMP, Reg, 3, 1)
+
+/* Select instruction: */
+INSN(SELECTEQ, Reg, 4, 1)
+INSN(SELECTNE, Reg, 4, 1)
+INSN(SELECTGTS, Reg, 4, 1)
+INSN(SELECTGES, Reg, 4, 1)
+INSN(SELECTLTS, Reg, 4, 1)
+INSN(SELECTLES, Reg, 4, 1)
+INSN(SELECTGTU, Reg, 4, 1)
+INSN(SELECTGEU, Reg, 4, 1)
+INSN(SELECTLTU, Reg, 4, 1)
+INSN(SELECTLEU, Reg, 4, 1)
+
+/* Memory access instructions: */
+INSN(LDSELF, Reg, 1, 1)
+INSN(LDJITINFO, Reg, 1, 1)
+INSN(LDI8, Reg, 3, 1)
+INSN(LDU8, Reg, 3, 1)
+INSN(LDI16, Reg, 3, 1) +INSN(LDU16, Reg, 3, 1) +INSN(LDI32, Reg, 3, 1) +INSN(LDU32, Reg, 3, 1) +INSN(LDI64, Reg, 3, 1) +INSN(LDU64, Reg, 3, 1) +INSN(LDF32, Reg, 3, 1) +INSN(LDF64, Reg, 3, 1) +INSN(LDV64, Reg, 3, 1) +INSN(LDV128, Reg, 3, 1) +INSN(LDV256, Reg, 3, 1) +INSN(STI8, Reg, 3, 0) +INSN(STI16, Reg, 3, 0) +INSN(STI32, Reg, 3, 0) +INSN(STI64, Reg, 3, 0) +INSN(STF32, Reg, 3, 0) +INSN(STF64, Reg, 3, 0) +INSN(STV64, Reg, 3, 1) +INSN(STV128, Reg, 3, 1) +INSN(STV256, Reg, 3, 1) + +#if 0 +/* Memory instructions */ +INSN(I32_LOAD, Reg, 2, 1) +INSN(I64_LOAD, Reg, 2, 1) +INSN(F32_LOAD, Reg, 2, 1) +INSN(F64_LOAD, Reg, 2, 1) +INSN(I32_LOAD8_S, Reg, 2, 1) +INSN(I32_LOAD8_U, Reg, 2, 1) +INSN(I32_LOAD16_S, Reg, 2, 1) +INSN(I32_LOAD16_U, Reg, 2, 1) +INSN(I64_LOAD8_S, Reg, 2, 1) +INSN(I64_LOAD8_U, Reg, 2, 1) +INSN(I64_LOAD16_S, Reg, 2, 1) +INSN(I64_LOAD16_U, Reg, 2, 1) +INSN(I64_LOAD32_S, Reg, 2, 1) +INSN(I64_LOAD32_U, Reg, 2, 1) +INSN(I32_STORE, Reg, 2, 0) +INSN(I64_STORE, Reg, 2, 0) +INSN(F32_STORE, Reg, 2, 0) +INSN(F64_STORE, Reg, 2, 0) +INSN(I32_STORE8, Reg, 2, 0) +INSN(I32_STORE16, Reg, 2, 0) +INSN(I64_STORE8, Reg, 2, 0) +INSN(I64_STORE16, Reg, 2, 0) +INSN(I64_STORE32, Reg, 2, 0) + +/* Numeric operators */ +INSN(I32_CLZ, Reg, 2, 1) +INSN(I32_CTZ, Reg, 2, 1) +INSN(I32_POPCNT, Reg, 2, 1) +INSN(I32_ADD, Reg, 3, 1) +INSN(I32_SUB, Reg, 3, 1) +INSN(I32_MUL, Reg, 3, 1) +INSN(I32_DIV_S, Reg, 3, 1) +INSN(I32_DIV_U, Reg, 3, 1) +INSN(I32_REM_S, Reg, 3, 1) +INSN(I32_REM_U, Reg, 3, 1) +INSN(I32_AND, Reg, 3, 1) +INSN(I32_OR, Reg, 3, 1) +INSN(I32_XOR, Reg, 3, 1) +INSN(I32_SHL, Reg, 3, 1) +INSN(I32_SHR_S, Reg, 3, 1) +INSN(I32_SHR_U, Reg, 3, 1) +INSN(I32_ROTL, Reg, 3, 1) +INSN(I32_ROTR, Reg, 3, 1) + +INSN(I64_CLZ, Reg, 2, 1) +INSN(I64_CTZ, Reg, 2, 1) +INSN(I64_POPCNT, Reg, 2, 1) +INSN(I64_ADD, Reg, 3, 1) +INSN(I64_SUB, Reg, 3, 1) +INSN(I64_MUL, Reg, 3, 1) +INSN(I64_DIV_S, Reg, 3, 1) +INSN(I64_DIV_U, Reg, 3, 1) +INSN(I64_REM_S, Reg, 3, 1) +INSN(I64_REM_U, Reg, 3, 1) +INSN(I64_AND, Reg, 3, 1) +INSN(I64_OR, Reg, 3, 1) +INSN(I64_XOR, Reg, 3, 1) +INSN(I64_SHL, Reg, 3, 1) +INSN(I64_SHR_S, Reg, 3, 1) +INSN(I64_SHR_U, Reg, 3, 1) +INSN(I64_ROTL, Reg, 3, 1) +INSN(I64_ROTR, Reg, 3, 1) + +INSN(F32_ABS, Reg, 2, 1) +INSN(F32_NEG, Reg, 2, 1) +INSN(F32_CEIL, Reg, 2, 1) +INSN(F32_FLOOR, Reg, 2, 1) +INSN(F32_TRUNC, Reg, 2, 1) +INSN(F32_NEAREST, Reg, 2, 1) +INSN(F32_SQRT, Reg, 2, 1) +INSN(F32_ADD, Reg, 3, 1) +INSN(F32_SUB, Reg, 3, 1) +INSN(F32_MUL, Reg, 3, 1) +INSN(F32_DIV, Reg, 3, 1) +INSN(F32_MIN, Reg, 3, 1) +INSN(F32_MAX, Reg, 3, 1) +INSN(F32_COPYSIGN, Reg, 3, 1) + +INSN(F64_ABS, Reg, 2, 1) +INSN(F64_NEG, Reg, 2, 1) +INSN(F64_CEIL, Reg, 2, 1) +INSN(F64_FLOOR, Reg, 2, 1) +INSN(F64_TRUNC, Reg, 2, 1) +INSN(F64_NEAREST, Reg, 2, 1) +INSN(F64_SQRT, Reg, 2, 1) +INSN(F64_ADD, Reg, 3, 1) +INSN(F64_SUB, Reg, 3, 1) +INSN(F64_MUL, Reg, 3, 1) +INSN(F64_DIV, Reg, 3, 1) +INSN(F64_MIN, Reg, 3, 1) +INSN(F64_MAX, Reg, 3, 1) +INSN(F64_COPYSIGN, Reg, 3, 1) + +/* Convert instructions */ +INSN(I32_WRAP_I64, Reg, 2, 1) +INSN(I32_TRUNC_S_F32, Reg, 2, 1) +INSN(I32_TRUNC_U_F32, Reg, 2, 1) +INSN(I32_TRUNC_S_F64, Reg, 2, 1) +INSN(I32_TRUNC_U_F64, Reg, 2, 1) + +INSN(I64_EXTEND_S_I32, Reg, 2, 1) +INSN(I64_EXTEND_U_I32, Reg, 2, 1) +INSN(I64_TRUNC_S_F32, Reg, 2, 1) +INSN(I64_TRUNC_U_F32, Reg, 2, 1) +INSN(I64_TRUNC_S_F64, Reg, 2, 1) +INSN(I64_TRUNC_U_F64, Reg, 2, 1) + +INSN(F32_CONVERT_S_I32, Reg, 2, 1) +INSN(F32_CONVERT_U_I32, Reg, 2, 1) +INSN(F32_CONVERT_S_I64, Reg, 2, 1) +INSN(F32_CONVERT_U_I64, Reg, 2, 1) +INSN(F32_DEMOTE_F64, Reg, 2, 1) + 
+INSN(F64_CONVERT_S_I32, Reg, 2, 1) +INSN(F64_CONVERT_U_I32, Reg, 2, 1) +INSN(F64_CONVERT_S_I64, Reg, 2, 1) +INSN(F64_CONVERT_U_I64, Reg, 2, 1) +INSN(F64_PROMOTE_F32, Reg, 2, 1) + +INSN(I32_EXTEND8_S, Reg, 2, 1) +INSN(I32_EXTEND16_S, Reg, 2, 1) +INSN(I64_EXTEND8_S, Reg, 2, 1) +INSN(I64_EXTEND16_S, Reg, 2, 1) +INSN(I64_EXTEND32_S, Reg, 2, 1) + +INSN(I32_TRUNC_SAT_S_F32, Reg, 2, 1) +INSN(I32_TRUNC_SAT_U_F32, Reg, 2, 1) +INSN(I32_TRUNC_SAT_S_F64, Reg, 2, 1) +INSN(I32_TRUNC_SAT_U_F64, Reg, 2, 1) +INSN(I64_TRUNC_SAT_S_F32, Reg, 2, 1) +INSN(I64_TRUNC_SAT_U_F32, Reg, 2, 1) +INSN(I64_TRUNC_SAT_S_F64, Reg, 2, 1) +INSN(I64_TRUNC_SAT_U_F64, Reg, 2, 1) +#endif + +#undef INSN + +/** + * @def ANN_LABEL (TYPE, NAME) + * + * Definition of label annotations. + * + * @param TYPE type of the annotation + * @param NAME name of the annotation + * + * Each defined annotation with name NAME has the following APIs: + * + * @c jit_annl_NAME (cc, label): accesses the annotation NAME of + * label @p label + * @c jit_annl_enable_NAME (cc): enables the annotation NAME + * @c jit_annl_disable_NAME (cc): disables the annotation NAME + * @c jit_annl_is_enabled_NAME (cc): check whether the annotation NAME + * is enabled + */ + +#ifndef ANN_LABEL +#define ANN_LABEL(TYPE, NAME) +#endif + +/* Basic Block of a label. */ +ANN_LABEL(JitBasicBlock *, basic_block) +/* Predecessor number of the block that is only used in + jit_cc_update_cfg for updating the CFG. */ +ANN_LABEL(uint16, pred_num) +/* Execution frequency of a block. We can split critical edges with + empty blocks so we don't need to store frequencies of edges. */ +ANN_LABEL(uint16, freq) +/* Begin bytecode instruction pointer of the block. */ +ANN_LABEL(uint8 *, begin_bcip) +/* End bytecode instruction pointer of the block. */ +ANN_LABEL(uint8 *, end_bcip) +/* Stack pointer offset at the end of the block. */ +ANN_LABEL(uint16, end_sp) +/* The label of the next physically adjacent block. */ +ANN_LABEL(JitReg, next_label) +/* Compiled code address of the block. */ +ANN_LABEL(void *, jitted_addr) + +#undef ANN_LABEL + +/** + * @def ANN_INSN (TYPE, NAME) + * + * Definition of instruction annotations. + * + * @param TYPE type of the annotation + * @param NAME name of the annotation + * + * Each defined annotation with name NAME has the following APIs: + * + * @c jit_anni_NAME (cc, insn): accesses the annotation NAME of + * instruction @p insn + * @c jit_anni_enable_NAME (cc): enables the annotation NAME + * @c jit_anni_disable_NAME (cc): disables the annotation NAME + * @c jit_anni_is_enabled_NAME (cc): check whether the annotation NAME + * is enabled + */ + +#ifndef ANN_INSN +#define ANN_INSN(TYPE, NAME) +#endif + +/* A private annotation for linking instructions with the same hash + value, which is only used by the compilation context's hash table + of instructions. */ +ANN_INSN(JitInsn *, _hash_link) + +#undef ANN_INSN + +/** + * @def ANN_REG (TYPE, NAME) + * + * Definition of register annotations. + * + * @param TYPE type of the annotation + * @param NAME name of the annotation + * + * Each defined annotation with name NAME has the following APIs: + * + * @c jit_annr_NAME (cc, reg): accesses the annotation NAME of + * register @p reg + * @c jit_annr_enable_NAME (cc): enables the annotation NAME + * @c jit_annr_disable_NAME (cc): disables the annotation NAME + * @c jit_annr_is_enabled_NAME (cc): check whether the annotation NAME + * is enabled + */ + +#ifndef ANN_REG +#define ANN_REG(TYPE, NAME) +#endif + +/* Defining instruction of registers satisfying SSA property. 
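+   That is, for a register in SSA form, def_insn refers to the single
+   instruction that defines it.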
+ */
+ANN_REG(JitInsn *, def_insn)
+
+#undef ANN_REG
diff --git a/core/iwasm/fast-jit/jit_ir.h b/core/iwasm/fast-jit/jit_ir.h
new file mode 100644
index 000000000..111c67d22
--- /dev/null
+++ b/core/iwasm/fast-jit/jit_ir.h
@@ -0,0 +1,1840 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_IR_H_
+#define _JIT_IR_H_
+
+#include "bh_platform.h"
+#include "../interpreter/wasm.h"
+#include "jit_utils.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Register (operand) representation of JIT IR.
+ *
+ * Encoding: [4-bit: kind, 28-bit register no.]
+ *
+ * Registers in JIT IR are classified into different kinds according
+ * to the types of values they can hold. The classification is based on
+ * most processors' hardware register classifications, which include
+ * various sets of integer, floating point and vector registers with
+ * different sizes. These registers can be mapped onto corresponding
+ * kinds of hardware registers by the register allocator. Instructions
+ * can only operate on allowed kinds of registers. For example, an
+ * integer instruction cannot operate on floating point or vector
+ * registers. Some encodings of these kinds of registers also
+ * represent immediate constant values and indexes to constant tables
+ * (see below). In that case, those registers are read-only. Writing
+ * to them is illegal. Reading from an immediate constant value
+ * register always returns the constant value encoded in the register
+ * no. Reading from a constant table index register always returns
+ * the constant value stored at the encoded index of the constant
+ * table of the register's kind. Immediate constant values and values
+ * indexed by constant table indexes can only be loaded into the
+ * corresponding kinds of registers if they must be loaded into
+ * registers. Besides these common kinds of registers, labels of
+ * basic blocks are also treated as registers of a special kind, which
+ * hold code addresses of basic block labels and are read-only. Each
+ * basic block is assigned one unique label register. With this
+ * unification, we can use the same set of load instructions to load
+ * values either from addresses stored in normal registers or from
+ * addresses of labels. Besides these register kinds, the void kind
+ * is a special kind of register used to denote that some error occurred
+ * when a normal register is expected, or as the result operand of call
+ * and invoke instructions to denote no return values. The
+ * variable registers are classified into two sets: the hard registers
+ * whose register numbers are less than the hard register numbers of
+ * their kinds and the virtual registers whose register numbers are
+ * greater than or equal to the hard register numbers. Before
+ * register allocation is done, hard registers may appear in the IR
+ * due to special usages in passes such as frontend (e.g. fp_reg and
+ * exec_env_reg) or lower_cg. In the meantime (including during register
+ * allocation), those hard registers are treated the same as virtual
+ * registers except that they may not be SSA and they can only be
+ * allocated to the hard registers of themselves.
+ *
+ * Classification of registers:
+ *   + void register (kind == JIT_REG_KIND_VOID, no. must be 0)
+ *   + label registers (kind == JIT_REG_KIND_L32)
+ *   + value registers (kind == JIT_REG_KIND_I32/I64/F32/F64/V64/V128/V256)
+ *   | + constants (_JIT_REG_CONST_VAL_FLAG | _JIT_REG_CONST_IDX_FLAG)
+ *   | | + constant values (_JIT_REG_CONST_VAL_FLAG)
+ *   | | + constant indexes (_JIT_REG_CONST_IDX_FLAG)
+ *   | + variables (!(_JIT_REG_CONST_VAL_FLAG | _JIT_REG_CONST_IDX_FLAG))
+ *   | | + hard registers (no. < hard register number)
+ *   | | + virtual registers (no. >= hard register number)
+ */
+typedef uint32 JitReg;
+
+/*
+ * Mask and shift bits of register kind.
+ */
+#define _JIT_REG_KIND_MASK 0xf0000000
+#define _JIT_REG_KIND_SHIFT 28
+
+/*
+ * Mask of register no. which must be the least significant bits.
+ */
+#define _JIT_REG_NO_MASK (~_JIT_REG_KIND_MASK)
+
+/*
+ * Constant value flag (the most significant bit) of register
+ * no. field of integer, floating point and vector registers. If this
+ * flag is set in the register no., the rest bits of register
+ * no. represent a signed (27-bit) integer constant value of the
+ * corresponding type of the register and the register is read-only.
+ */
+#define _JIT_REG_CONST_VAL_FLAG ((_JIT_REG_NO_MASK >> 1) + 1)
+
+/*
+ * Constant index flag of non-constant-value (constant value flag is
+ * not set in register no. field) integer, floating point and vector
+ * registers. If this flag is set, the rest bits of the register
+ * no. represent an index to the constant value table of the
+ * corresponding type of the register and the register is read-only.
+ */
+#define _JIT_REG_CONST_IDX_FLAG (_JIT_REG_CONST_VAL_FLAG >> 1)
+
+/**
+ * Register kinds. Don't change the order of the defined values. The
+ * L32 kind must be after all normal kinds (see _const_val and _reg_ann
+ * of JitCompContext).
+ */
+typedef enum JitRegKind {
+    JIT_REG_KIND_VOID = 0x00, /* void type */
+    JIT_REG_KIND_I32 = 0x01,  /* 32-bit signed or unsigned integer */
+    JIT_REG_KIND_I64 = 0x02,  /* 64-bit signed or unsigned integer */
+    JIT_REG_KIND_F32 = 0x03,  /* 32-bit floating point */
+    JIT_REG_KIND_F64 = 0x04,  /* 64-bit floating point */
+    JIT_REG_KIND_V64 = 0x05,  /* 64-bit vector */
+    JIT_REG_KIND_V128 = 0x06, /* 128-bit vector */
+    JIT_REG_KIND_V256 = 0x07, /* 256-bit vector */
+    JIT_REG_KIND_L32 = 0x08,  /* 32-bit label address */
+    JIT_REG_KIND_NUM          /* number of register kinds */
+} JitRegKind;
+
+/**
+ * Construct a new JIT IR register from the kind and no.
+ *
+ * @param reg_kind register kind
+ * @param reg_no register no.
+ *
+ * @return the new register with the given kind and no.
+ */
+static inline JitReg
+jit_reg_new(unsigned reg_kind, unsigned reg_no)
+{
+    return (JitReg)((reg_kind << _JIT_REG_KIND_SHIFT) | reg_no);
+}
+
+/**
+ * Get the register kind of the given register.
+ *
+ * @param r a JIT IR register
+ *
+ * @return the register kind of register r
+ */
+static inline int
+jit_reg_kind(JitReg r)
+{
+    return (r & _JIT_REG_KIND_MASK) >> _JIT_REG_KIND_SHIFT;
+}
+
+/**
+ * Get the register no. of the given JIT IR register.
+ *
+ * @param r a JIT IR register
+ *
+ * @return the register no. of register r
+ */
+static inline int
+jit_reg_no(JitReg r)
+{
+    return r & _JIT_REG_NO_MASK;
+}
+
+/**
+ * Check whether the given register is a normal value register.
+ *
+ * @param r a JIT IR register
+ *
+ * @return true iff the register is a normal value register
+ */
+static inline bool
+jit_reg_is_value(JitReg r)
+{
+    unsigned kind = jit_reg_kind(r);
+    return kind > JIT_REG_KIND_VOID && kind < JIT_REG_KIND_L32;
+}
+
+/**
+ * Check whether the given register is a constant value.
+ *
+ * @param r a JIT IR register
+ *
+ * @return true iff register r is a constant value
+ */
+static inline bool
+jit_reg_is_const_val(JitReg r)
+{
+    return jit_reg_is_value(r) && (r & _JIT_REG_CONST_VAL_FLAG);
+}
+
+/**
+ * Check whether the given register is a constant table index.
+ *
+ * @param r a JIT IR register
+ *
+ * @return true iff register r is a constant table index
+ */
+static inline bool
+jit_reg_is_const_idx(JitReg r)
+{
+    return (jit_reg_is_value(r) && !jit_reg_is_const_val(r)
+            && (r & _JIT_REG_CONST_IDX_FLAG));
+}
+
+/**
+ * Check whether the given register is a constant.
+ *
+ * @param r a JIT IR register
+ *
+ * @return true iff register r is a constant
+ */
+static inline bool
+jit_reg_is_const(JitReg r)
+{
+    return (jit_reg_is_value(r)
+            && (r & (_JIT_REG_CONST_VAL_FLAG | _JIT_REG_CONST_IDX_FLAG)));
+}
+
+/**
+ * Check whether the given register is a normal variable register.
+ *
+ * @param r a JIT IR register
+ *
+ * @return true iff the register is a normal variable register
+ */
+static inline bool
+jit_reg_is_variable(JitReg r)
+{
+    return (jit_reg_is_value(r)
+            && !(r & (_JIT_REG_CONST_VAL_FLAG | _JIT_REG_CONST_IDX_FLAG)));
+}
+
+/**
+ * Test whether the register is of the given kind.
+ *
+ * @param KIND register kind name
+ * @param R register
+ *
+ * @return true if the register is of the given kind
+ */
+#define jit_reg_is_kind(KIND, R) (jit_reg_kind(R) == JIT_REG_KIND_##KIND)
+
+/**
+ * Construct a zero IR register with the given kind.
+ *
+ * @param kind the kind of the value
+ *
+ * @return a constant register of zero
+ */
+static inline JitReg
+jit_reg_new_zero(unsigned kind)
+{
+    bh_assert(kind != JIT_REG_KIND_VOID && kind < JIT_REG_KIND_L32);
+    return jit_reg_new(kind, _JIT_REG_CONST_VAL_FLAG);
+}
+
+/**
+ * Test whether the register is a zero constant value.
+ *
+ * @param reg an IR register
+ *
+ * @return true iff the register is a constant zero
+ */
+static inline bool
+jit_reg_is_zero(JitReg reg)
+{
+    return (jit_reg_is_value(reg)
+            && jit_reg_no(reg) == _JIT_REG_CONST_VAL_FLAG);
+}
+
+/**
+ * Operand of instructions with fixed-number register operand(s).
+ */
+typedef JitReg JitOpndReg;
+
+/**
+ * Operand of instructions with variable-number register operand(s).
+ */
+typedef struct JitOpndVReg {
+    uint32 _reg_num;
+    JitReg _reg[1];
+} JitOpndVReg;
+
+/**
+ * Operand of tableswitch instruction.
+ */
+typedef struct JitOpndTableSwitch {
+    int32 low_value;  /* lowest value of the table */
+    int32 high_value; /* highest value of the table */
+    /* NOTE: distance between JitReg operands must be the same (see
+       jit_insn_opnd_regs). */
+    JitReg value; /* the value to be compared */
+    /* NOTE: offset between adjacent targets must be sizeof (targets[0])
+       (see implementation of jit_block_succs), so the default_target
+       field must be here. */
+    JitReg default_target; /* default target BB */
+    JitReg targets[1];     /* jump target BBs */
+} JitOpndTableSwitch;
+
+/**
+ * Operand of lookupswitch instruction.
+ */
+typedef struct JitOpndLookupSwitch {
+    /* NOTE: distance between JitReg operands must be the same (see
+       jit_insn_opnd_regs).
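+       As an illustrative note, not part of the original patch: the
+       layout keeps every pair of adjacent JitReg operands (value,
+       default_target, match_pairs[i].target) at one fixed byte
+       stride, so jit_insn_opnd_regs can walk them with a single
+       stride.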
*/ + JitReg value; /* the value to be compared */ + uint32 match_pairs_num; /* match pairs number */ + /* NOTE: offset between adjacent targets must be sizeof + (match_pairs[0]) (see implementation of jit_basic_block_succs), + so the default_target field must be here. */ + JitReg default_target; /* default target BB */ + struct { + int32 value; /* match value of the match pair */ + JitReg target; /* target BB of the match pair */ + } match_pairs[1]; /* match pairs of the instruction */ +} JitOpndLookupSwitch; + +/** + * Instruction of JIT IR. + */ +typedef struct JitInsn { + /* Pointers to the previous and next instructions. */ + struct JitInsn *prev; + struct JitInsn *next; + + /* Opcode of the instruction. */ + uint16 opcode; + + /* Reserved field that may be used by optimizations locally. */ + uint8 flags_u8; + + /* The unique ID of the instruction. */ + uint16 uid; + + /* Operands for different kinds of instructions. */ + union { + /* For instructions with fixed-number register operand(s). */ + JitOpndReg _opnd_Reg[1]; + + /* For instructions with variable-number register operand(s). */ + JitOpndVReg _opnd_VReg; + + /* For tableswitch instruction. */ + JitOpndTableSwitch _opnd_TableSwitch; + + /* For lookupswitch instruction. */ + JitOpndLookupSwitch _opnd_LookupSwitch; + } _opnd; +} JitInsn; + +/** + * Opcodes of IR instructions. + */ +typedef enum JitOpcode { +#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE) JIT_OP_##NAME, +#include "jit_ir.def" +#undef INSN + JIT_OP_OPCODE_NUMBER +} JitOpcode; + +/* + * Helper functions for creating new instructions. Don't call them + * directly. Use jit_insn_new_NAME, such as jit_insn_new_MOV instead. + */ +JitInsn * +_jit_insn_new_Reg_1(JitOpcode opc, JitReg r0); +JitInsn * +_jit_insn_new_Reg_2(JitOpcode opc, JitReg r0, JitReg r1); +JitInsn * +_jit_insn_new_Reg_3(JitOpcode opc, JitReg r0, JitReg r1, JitReg r2); +JitInsn * +_jit_insn_new_Reg_4(JitOpcode opc, JitReg r0, JitReg r1, JitReg r2, JitReg r3); +JitInsn * +_jit_insn_new_Reg_5(JitOpcode opc, JitReg r0, JitReg r1, JitReg r2, JitReg r3, + JitReg r4); +JitInsn * +_jit_insn_new_VReg_1(JitOpcode opc, JitReg r0, int n); +JitInsn * +_jit_insn_new_VReg_2(JitOpcode opc, JitReg r0, JitReg r1, int n); +JitInsn * +_jit_insn_new_TableSwitch_1(JitOpcode opc, JitReg value, int32 low, int32 high); +JitInsn * +_jit_insn_new_LookupSwitch_1(JitOpcode opc, JitReg value, uint32 num); + +/* + * Instruction creation functions jit_insn_new_NAME, where NAME is the + * name of the instruction defined in jit_ir.def. 
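+ *
+ * An illustrative example, not part of the original patch: MOV is
+ * defined as INSN(MOV, Reg, 2, 1), so a move of a hypothetical
+ * register src into dst can be created with:
+ *
+ *   JitInsn *insn = jit_insn_new_MOV(dst, src);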
+ */
+#define ARG_DECL_Reg_1 JitReg r0
+#define ARG_LIST_Reg_1 r0
+#define ARG_DECL_Reg_2 JitReg r0, JitReg r1
+#define ARG_LIST_Reg_2 r0, r1
+#define ARG_DECL_Reg_3 JitReg r0, JitReg r1, JitReg r2
+#define ARG_LIST_Reg_3 r0, r1, r2
+#define ARG_DECL_Reg_4 JitReg r0, JitReg r1, JitReg r2, JitReg r3
+#define ARG_LIST_Reg_4 r0, r1, r2, r3
+#define ARG_DECL_Reg_5 JitReg r0, JitReg r1, JitReg r2, JitReg r3, JitReg r4
+#define ARG_LIST_Reg_5 r0, r1, r2, r3, r4
+#define ARG_DECL_VReg_1 JitReg r0, int n
+#define ARG_LIST_VReg_1 r0, n
+#define ARG_DECL_VReg_2 JitReg r0, JitReg r1, int n
+#define ARG_LIST_VReg_2 r0, r1, n
+#define ARG_DECL_TableSwitch_1 JitReg value, int32 low, int32 high
+#define ARG_LIST_TableSwitch_1 value, low, high
+#define ARG_DECL_LookupSwitch_1 JitReg value, uint32 num
+#define ARG_LIST_LookupSwitch_1 value, num
+#define INSN(NAME, OPND_KIND, OPND_NUM, FIRST_USE)          \
+    static inline JitInsn *jit_insn_new_##NAME(             \
+        ARG_DECL_##OPND_KIND##_##OPND_NUM)                  \
+    {                                                       \
+        return _jit_insn_new_##OPND_KIND##_##OPND_NUM(      \
+            JIT_OP_##NAME, ARG_LIST_##OPND_KIND##_##OPND_NUM); \
+    }
+#include "jit_ir.def"
+#undef INSN
+#undef ARG_DECL_Reg_1
+#undef ARG_LIST_Reg_1
+#undef ARG_DECL_Reg_2
+#undef ARG_LIST_Reg_2
+#undef ARG_DECL_Reg_3
+#undef ARG_LIST_Reg_3
+#undef ARG_DECL_Reg_4
+#undef ARG_LIST_Reg_4
+#undef ARG_DECL_Reg_5
+#undef ARG_LIST_Reg_5
+#undef ARG_DECL_VReg_1
+#undef ARG_LIST_VReg_1
+#undef ARG_DECL_VReg_2
+#undef ARG_LIST_VReg_2
+#undef ARG_DECL_TableSwitch_1
+#undef ARG_LIST_TableSwitch_1
+#undef ARG_DECL_LookupSwitch_1
+#undef ARG_LIST_LookupSwitch_1
+
+/**
+ * Delete an instruction
+ *
+ * @param insn an instruction to be deleted
+ */
+static inline void
+jit_insn_delete(JitInsn *insn)
+{
+    jit_free(insn);
+}
+
+/*
+ * Runtime type check functions that check whether accessing the n-th
+ * operand is legal. They are only used in self-verification
+ * mode.
+ *
+ * @param insn any JIT IR instruction
+ * @param n index of the operand to access
+ *
+ * @return true if the access is legal
+ */
+bool
+_jit_insn_check_opnd_access_Reg(const JitInsn *insn, unsigned n);
+bool
+_jit_insn_check_opnd_access_VReg(const JitInsn *insn, unsigned n);
+bool
+_jit_insn_check_opnd_access_TableSwitch(const JitInsn *insn);
+bool
+_jit_insn_check_opnd_access_LookupSwitch(const JitInsn *insn);
+
+/**
+ * Get the pointer to the n-th register operand of the given
+ * instruction. The instruction format must be Reg.
+ *
+ * @param insn a Reg format instruction
+ * @param n index of the operand to get
+ *
+ * @return pointer to the n-th operand
+ */
+static inline JitReg *
+jit_insn_opnd(JitInsn *insn, int n)
+{
+    bh_assert(_jit_insn_check_opnd_access_Reg(insn, n));
+    return &insn->_opnd._opnd_Reg[n];
+}
+
+/**
+ * Get the pointer to the n-th register operand of the given
+ * instruction. The instruction format must be VReg.
+ *
+ * @param insn a VReg format instruction
+ * @param n index of the operand to get
+ *
+ * @return pointer to the n-th operand
+ */
+static inline JitReg *
+jit_insn_opndv(JitInsn *insn, int n)
+{
+    bh_assert(_jit_insn_check_opnd_access_VReg(insn, n));
+    return &insn->_opnd._opnd_VReg._reg[n];
+}
+
+/**
+ * Get the operand number of the given instruction. The instruction
+ * format must be VReg.
+ *
+ * @param insn a VReg format instruction
+ *
+ * @return operand number of the instruction
+ */
+static inline unsigned
+jit_insn_opndv_num(const JitInsn *insn)
+{
+    bh_assert(_jit_insn_check_opnd_access_VReg(insn, 0));
+    return insn->_opnd._opnd_VReg._reg_num;
+}
+
+/**
+ * Get the pointer to the TableSwitch operand of the given
+ * instruction. The instruction format must be TableSwitch.
+ *
+ * @param insn a TableSwitch format instruction
+ *
+ * @return pointer to the operand
+ */
+static inline JitOpndTableSwitch *
+jit_insn_opndts(JitInsn *insn)
+{
+    bh_assert(_jit_insn_check_opnd_access_TableSwitch(insn));
+    return &insn->_opnd._opnd_TableSwitch;
+}
+
+/**
+ * Get the pointer to the LookupSwitch operand of the given
+ * instruction. The instruction format must be LookupSwitch.
+ *
+ * @param insn a LookupSwitch format instruction
+ *
+ * @return pointer to the operand
+ */
+static inline JitOpndLookupSwitch *
+jit_insn_opndls(JitInsn *insn)
+{
+    bh_assert(_jit_insn_check_opnd_access_LookupSwitch(insn));
+    return &insn->_opnd._opnd_LookupSwitch;
+}
+
+/**
+ * Insert instruction @p insn2 before instruction @p insn1.
+ *
+ * @param insn1 any instruction
+ * @param insn2 any instruction
+ */
+void
+jit_insn_insert_before(JitInsn *insn1, JitInsn *insn2);
+
+/**
+ * Insert instruction @p insn2 after instruction @p insn1.
+ *
+ * @param insn1 any instruction
+ * @param insn2 any instruction
+ */
+void
+jit_insn_insert_after(JitInsn *insn1, JitInsn *insn2);
+
+/**
+ * Unlink the instruction @p insn from the containing list.
+ *
+ * @param insn an instruction
+ */
+void
+jit_insn_unlink(JitInsn *insn);
+
+/**
+ * Get the hash value of the comparable instruction (pure functions
+ * and exception check instructions).
+ *
+ * @param insn an instruction
+ *
+ * @return hash value of the instruction
+ */
+unsigned
+jit_insn_hash(JitInsn *insn);
+
+/**
+ * Compare whether the two comparable instructions are the same.
+ *
+ * @param insn1 the first instruction
+ * @param insn2 the second instruction
+ *
+ * @return true if the two instructions are the same
+ */
+bool
+jit_insn_equal(JitInsn *insn1, JitInsn *insn2);
+
+/**
+ * Register vector for accessing predecessors and successors of a
+ * basic block.
+ */
+typedef struct JitRegVec {
+    JitReg *_base; /* points to the first register */
+    int32 _stride; /* stride to the next register */
+    uint32 num;    /* number of registers */
+} JitRegVec;
+
+/**
+ * Get the address of the i-th register in the register vector.
+ *
+ * @param vec a register vector
+ * @param i index to the register vector
+ *
+ * @return the address of the i-th register in the vector
+ */
+static inline JitReg *
+jit_reg_vec_at(const JitRegVec *vec, unsigned i)
+{
+    bh_assert(i < vec->num);
+    return vec->_base + vec->_stride * i;
+}
+
+/**
+ * Visit each element in a register vector.
+ *
+ * @param V (JitRegVec) the register vector
+ * @param I (unsigned) index variable in the vector
+ * @param R (JitReg *) register pointer variable
+ */
+#define JIT_REG_VEC_FOREACH(V, I, R) \
+    for ((I) = 0, (R) = (V)._base; (I) < (V).num; (I)++, (R) += (V)._stride)
+
+/**
+ * Visit each register defined by an instruction.
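+ * The defined registers are the entries before index @p F of the
+ * vector returned by jit_insn_opnd_regs.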
+ *
+ * @param V (JitRegVec) register vector of the instruction
+ * @param I (unsigned) index variable in the vector
+ * @param R (JitReg *) register pointer variable
+ * @param F index of the first used register
+ */
+#define JIT_REG_VEC_FOREACH_DEF(V, I, R, F) \
+    for ((I) = 0, (R) = (V)._base; (I) < (F); (I)++, (R) += (V)._stride)
+
+/**
+ * Visit each register used by an instruction.
+ *
+ * @param V (JitRegVec) register vector of the instruction
+ * @param I (unsigned) index variable in the vector
+ * @param R (JitReg *) register pointer variable
+ * @param F index of the first used register
+ */
+#define JIT_REG_VEC_FOREACH_USE(V, I, R, F)                              \
+    for ((I) = (F), (R) = (V)._base + (F) * (V)._stride; (I) < (V).num; \
+         (I)++, (R) += (V)._stride)
+
+/**
+ * Get a generic register vector that contains all register operands.
+ * The registers defined by the instruction, if any, appear before the
+ * registers used by the instruction.
+ *
+ * @param insn an instruction
+ *
+ * @return a register vector containing register operands
+ */
+JitRegVec
+jit_insn_opnd_regs(JitInsn *insn);
+
+/**
+ * Get the index of the first use register in the register vector
+ * returned by jit_insn_opnd_regs.
+ *
+ * @param insn an instruction
+ *
+ * @return the index of the first use register in the register vector
+ */
+unsigned
+jit_insn_opnd_first_use(JitInsn *insn);
+
+/**
+ * Basic Block of JIT IR. It is a basic block only if the IR is not in
+ * non-BB form. The block is represented by a special phi node, whose
+ * result and arguments are label registers. The result label is the
+ * containing block's label. The arguments are labels of predecessors
+ * of the block. Successor labels are stored in the last instruction,
+ * which must be a control flow instruction. Instructions of a block
+ * are linked in a circular linked list with the block phi node as the
+ * end of the list. The next and prev field of the block phi node
+ * point to the first and last instructions of the block.
+ */
+typedef JitInsn JitBasicBlock;
+
+/**
+ * Create a new basic block instance.
+ *
+ * @param label the label of the new basic block
+ * @param n number of predecessors
+ *
+ * @return the created new basic block instance
+ */
+JitBasicBlock *
+jit_basic_block_new(JitReg label, int n);
+
+/**
+ * Delete a basic block instance and all instructions in it.
+ *
+ * @param block the basic block to be deleted
+ */
+void
+jit_basic_block_delete(JitBasicBlock *block);
+
+/**
+ * Get the label of the basic block.
+ *
+ * @param block a basic block instance
+ *
+ * @return the label of the basic block
+ */
+static inline JitReg
+jit_basic_block_label(JitBasicBlock *block)
+{
+    return *(jit_insn_opndv(block, 0));
+}
+
+/**
+ * Get the first instruction of the basic block.
+ *
+ * @param block a basic block instance
+ *
+ * @return the first instruction of the basic block
+ */
+static inline JitInsn *
+jit_basic_block_first_insn(JitBasicBlock *block)
+{
+    return block->next;
+}
+
+/**
+ * Get the last instruction of the basic block.
+ *
+ * @param block a basic block instance
+ *
+ * @return the last instruction of the basic block
+ */
+static inline JitInsn *
+jit_basic_block_last_insn(JitBasicBlock *block)
+{
+    return block->prev;
+}
+
+/**
+ * Get the end of instruction list of the basic block (which is always
+ * the block itself).
+ * + * @param block a basic block instance + * + * @return the end of instruction list of the basic block + */ +static inline JitInsn * +jit_basic_block_end_insn(JitBasicBlock *block) +{ + return block; +} + +/** + * Visit each instruction in the block from the first to the last. In + * the code block, the instruction pointer @p I must be a valid + * pointer to an instruction in the block. That means if the + * instruction may be deleted, @p I must point to the previous or next + * valid instruction before the next iteration. + * + * @param B (JitBasicBlock *) the block + * @param I (JitInsn *) instruction visited + */ +#define JIT_FOREACH_INSN(B, I) \ + for (I = jit_basic_block_first_insn(B); I != jit_basic_block_end_insn(B); \ + I = I->next) + +/** + * Visit each instruction in the block from the last to the first. In + * the code block, the instruction pointer @p I must be a valid + * pointer to an instruction in the block. That means if the + * instruction may be deleted, @p I must point to the previous or next + * valid instruction before the next iteration. + * + * @param B (JitBasicBlock *) the block + * @param I (JitInsn *) instruction visited + */ +#define JIT_FOREACH_INSN_REVERSE(B, I) \ + for (I = jit_basic_block_last_insn(B); I != jit_basic_block_end_insn(B); \ + I = I->prev) + +/** + * Prepend an instruction in the front of the block. The position is + * just after the block phi node (the block instance itself). + * + * @param block a block + * @param insn an instruction to be prepended + */ +static inline void +jit_basic_block_prepend_insn(JitBasicBlock *block, JitInsn *insn) +{ + jit_insn_insert_after(block, insn); +} + +/** + * Append an instruction to the end of the basic block. + * + * @param block a basic block + * @param insn an instruction to be appended + */ +static inline void +jit_basic_block_append_insn(JitBasicBlock *block, JitInsn *insn) +{ + jit_insn_insert_before(block, insn); +} + +/** + * Get the register vector of predecessors of a basic block. + * + * @param block a JIT IR block + * + * @return register vector of the predecessors + */ +JitRegVec +jit_basic_block_preds(JitBasicBlock *block); + +/** + * Get the register vector of successors of a basic block. + * + * @param block a JIT IR basic block + * + * @return register vector of the successors + */ +JitRegVec +jit_basic_block_succs(JitBasicBlock *block); + +/** + * Hard register information of one kind. + */ +typedef struct JitHardRegInfo { + struct { + /* Hard register number of this kind. */ + uint32 num; + + /* Whether each register is fixed. */ + const uint8 *fixed; + + /* Whether each register is caller-saved in the native ABI. */ + const uint8 *caller_saved_native; + + /* Whether each register is caller-saved in the JITed ABI. */ + const uint8 *caller_saved_jitted; + } info[JIT_REG_KIND_L32]; + + /* The indexes of hard registers of frame pointer, exec_env and cmp. 
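+       They correspond to the fp_reg, exec_env_reg and cmp_reg
+       registers of JitCompContext.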
+     */
+    uint32 fp_hreg_index;
+    uint32 exec_env_hreg_index;
+    uint32 cmp_hreg_index;
+} JitHardRegInfo;
+
+struct JitBlock;
+struct JitCompContext;
+
+/**
+ * Value in the WASM operation stack, each stack element
+ * is a JIT register
+ */
+typedef struct JitValue {
+    struct JitValue *next;
+    struct JitValue *prev;
+    JitReg value;
+    /* VALUE_TYPE_I32/I64/F32/F64/VOID */
+    uint8 type;
+} JitValue;
+
+/**
+ * Value stack, represents stack elements in a WASM block
+ */
+typedef struct JitValueStack {
+    JitValue *value_list_head;
+    JitValue *value_list_end;
+} JitValueStack;
+
+/* Record information of a value slot of local variable or stack
+   during translation. */
+typedef struct JitValueSlot {
+    /* The virtual register that holds the value of the slot if the
+       value of the slot is in register. */
+    JitReg reg;
+
+    /* The dirty bit of the value slot. It's set if the value in
+       register is newer than the value in memory. */
+    uint32 dirty : 1;
+
+    /* Whether the new value in register is a reference, which is valid
+       only when the dirty bit is set. */
+    uint32 ref : 1;
+
+    /* Committed reference flag. 0: unknown, 1: not-reference, 2:
+       reference. */
+    uint32 committed_ref : 2;
+} JitValueSlot;
+
+/* Frame information for translation */
+typedef struct JitFrame {
+    /* The current wasm module */
+    WASMModule *cur_wasm_module;
+    /* The current wasm function */
+    WASMFunction *cur_wasm_func;
+    /* The current wasm function index */
+    uint32 cur_wasm_func_idx;
+    /* The current compilation context */
+    struct JitCompContext *cc;
+
+    /* Max local slot number. */
+    uint32 max_locals;
+
+    /* Max operand stack slot number. */
+    uint32 max_stacks;
+
+    /* Stack top pointer */
+    JitValueSlot *sp;
+
+    /* Committed stack top pointer */
+    JitValueSlot *committed_sp;
+
+    /* Local variables */
+    JitValueSlot lp[1];
+} JitFrame;
+
+typedef struct JitIncomingInsn {
+    struct JitIncomingInsn *next;
+    JitInsn *insn;
+} JitIncomingInsn, *JitIncomingInsnList;
+
+typedef struct JitBlock {
+    struct JitBlock *next;
+    struct JitBlock *prev;
+
+    /* The containing compilation context */
+    struct JitCompContext *cc;
+
+    /* LABEL_TYPE_BLOCK/LOOP/IF/FUNCTION */
+    uint32 label_type;
+
+    /* code of else opcode of this block, if it is an IF block */
+    uint8 *wasm_code_else;
+    /* code of end opcode of this block */
+    uint8 *wasm_code_end;
+
+    /* JIT label points to code begin */
+    JitBasicBlock *basic_block_entry;
+    /* JIT label points to code else */
+    JitBasicBlock *basic_block_else;
+    /* JIT label points to code end */
+    JitBasicBlock *basic_block_end;
+
+    /* Incoming INSN for basic_block_else */
+    JitInsn *incoming_insn_for_else_bb;
+    /* Incoming INSNs for basic_block_end */
+    JitIncomingInsnList incoming_insns_for_end_bb;
+
+    /* WASM operation stack */
+    JitValueStack value_stack;
+
+    /* Param count/types/PHIs of this block */
+    uint32 param_count;
+    uint8 *param_types;
+
+    /* Result count/types/PHIs of this block */
+    uint32 result_count;
+    uint8 *result_types;
+
+    /* The begin frame stack pointer of this block */
+    JitValueSlot *frame_sp_begin;
+} JitBlock;
+
+/**
+ * Block stack, represents WASM block stack elements
+ */
+typedef struct JitBlockStack {
+    JitBlock *block_list_head;
+    JitBlock *block_list_end;
+} JitBlockStack;
+
+/**
+ * The JIT compilation context for one compilation process of a
+ * compilation unit.
+ */
+typedef struct JitCompContext {
+    /* Hard register information of each kind. */
+    const JitHardRegInfo *hreg_info;
+
+    /* No. of the pass to be applied. */
+    uint8 cur_pass_no;
+
+    /* The current wasm module */
+    WASMModule *cur_wasm_module;
+    /* The current wasm function */
+    WASMFunction *cur_wasm_func;
+    /* The current wasm function index */
+    uint32 cur_wasm_func_idx;
+    /* The block stack */
+    JitBlockStack block_stack;
+
+    bool mem_space_unchanged;
+
+    /* Entry and exit labels of the compilation unit, whose numbers must
+       be 0 and 1 respectively (see JIT_FOREACH_BLOCK). */
+    JitReg entry_label;
+    JitReg exit_label;
+    JitBasicBlock *exception_basic_blocks;
+
+    /* The current basic block to generate instructions */
+    JitBasicBlock *cur_basic_block;
+
+    /* Registers of frame pointer, exec_env and CMP result. */
+    JitReg fp_reg;
+    JitReg exec_env_reg;
+    JitReg cmp_reg;
+
+    /* Current frame information for translation */
+    JitFrame *jit_frame;
+
+    /* The total frame size of current function */
+    uint32 total_frame_size;
+
+    /* The offset of jitted_return_address in the frame, which is set by
+       the pass frontend and used by the pass codegen. */
+    uint32 jitted_return_address_offset;
+
+    /* Begin and end addresses of the jitted code produced by the pass
+       codegen and consumed by the region registration after codegen and
+       the pass dump. */
+    void *jitted_addr_begin;
+    void *jitted_addr_end;
+
+    char last_error[128];
+
+    /* Below fields are all private. Don't access them directly. */
+
+    /* Reference count of the compilation context. */
+    uint16 _reference_count;
+
+    /* Constant values. */
+    struct {
+        /* Number of constant values of each kind. */
+        uint32 _num[JIT_REG_KIND_L32];
+
+        /* Capacity of constant values of each kind. */
+        uint32 _capacity[JIT_REG_KIND_L32];
+
+        /* Constant values of each kind. */
+        uint8 *_value[JIT_REG_KIND_L32];
+
+        /* Next element on the list of values with the same hash code. */
+        JitReg *_next[JIT_REG_KIND_L32];
+
+        /* Size of the hash table. */
+        uint32 _hash_table_size;
+
+        /* Map values to JIT register. */
+        JitReg *_hash_table;
+    } _const_val;
+
+    /* Annotations of labels, registers and instructions. */
+    struct {
+        /* Number of all ever created labels. */
+        uint32 _label_num;
+
+        /* Capacity of label annotations. */
+        uint32 _label_capacity;
+
+        /* Number of all ever created instructions. */
+        uint32 _insn_num;
+
+        /* Capacity of instruction annotations. */
+        uint32 _insn_capacity;
+
+        /* Number of ever created registers of each kind. */
+        uint32 _reg_num[JIT_REG_KIND_L32];
+
+        /* Capacity of register annotations of each kind. */
+        uint32 _reg_capacity[JIT_REG_KIND_L32];
+
+        /* Storage of annotations. */
+#define ANN_LABEL(TYPE, NAME) TYPE *_label_##NAME;
+#define ANN_INSN(TYPE, NAME) TYPE *_insn_##NAME;
+#define ANN_REG(TYPE, NAME) TYPE *_reg_##NAME[JIT_REG_KIND_L32];
+#include "jit_ir.def"
+#undef ANN_LABEL
+#undef ANN_INSN
+#undef ANN_REG
+
+        /* Flags of annotations. */
+#define ANN_LABEL(TYPE, NAME) uint32 _label_##NAME##_enabled : 1;
+#define ANN_INSN(TYPE, NAME) uint32 _insn_##NAME##_enabled : 1;
+#define ANN_REG(TYPE, NAME) uint32 _reg_##NAME##_enabled : 1;
+#include "jit_ir.def"
+#undef ANN_LABEL
+#undef ANN_INSN
+#undef ANN_REG
+    } _ann;
+
+    /* Instruction hash table. */
+    struct {
+        /* Size of the hash table. */
+        uint32 _size;
+
+        /* The hash table. */
+        JitInsn **_table;
+    } _insn_hash_table;
+} JitCompContext;
+
+/*
+ * Annotation accessing functions jit_annl_NAME, jit_anni_NAME and
+ * jit_annr_NAME.
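+ *
+ * An illustrative example, not part of the original patch: with the
+ * basic_block label annotation enabled, the block bound to a label
+ * can be read via
+ *
+ *   JitBasicBlock *block = *jit_annl_basic_block(cc, label);
+ *
+ * where cc and label are hypothetical values in scope.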
+ */
+#define ANN_LABEL(TYPE, NAME)                                             \
+    static inline TYPE *jit_annl_##NAME(JitCompContext *cc, JitReg label) \
+    {                                                                     \
+        unsigned idx = jit_reg_no(label);                                 \
+        bh_assert(jit_reg_kind(label) == JIT_REG_KIND_L32);               \
+        bh_assert(idx < cc->_ann._label_num);                             \
+        bh_assert(cc->_ann._label_##NAME##_enabled);                      \
+        return &cc->_ann._label_##NAME[idx];                              \
+    }
+#define ANN_INSN(TYPE, NAME)                                              \
+    static inline TYPE *jit_anni_##NAME(JitCompContext *cc, JitInsn *insn) \
+    {                                                                     \
+        unsigned uid = insn->uid;                                         \
+        bh_assert(uid < cc->_ann._insn_num);                              \
+        bh_assert(cc->_ann._insn_##NAME##_enabled);                       \
+        return &cc->_ann._insn_##NAME[uid];                               \
+    }
+#define ANN_REG(TYPE, NAME)                                               \
+    static inline TYPE *jit_annr_##NAME(JitCompContext *cc, JitReg reg)   \
+    {                                                                     \
+        unsigned kind = jit_reg_kind(reg);                                \
+        unsigned no = jit_reg_no(reg);                                    \
+        bh_assert(kind < JIT_REG_KIND_L32);                               \
+        bh_assert(no < cc->_ann._reg_num[kind]);                          \
+        bh_assert(cc->_ann._reg_##NAME##_enabled);                        \
+        return &cc->_ann._reg_##NAME[kind][no];                           \
+    }
+#include "jit_ir.def"
+#undef ANN_LABEL
+#undef ANN_INSN
+#undef ANN_REG
+
+/*
+ * Annotation enabling functions jit_annl_enable_NAME,
+ * jit_anni_enable_NAME and jit_annr_enable_NAME, which allocate
+ * sufficient memory for the annotations.
+ */
+#define ANN_LABEL(TYPE, NAME) bool jit_annl_enable_##NAME(JitCompContext *cc);
+#define ANN_INSN(TYPE, NAME) bool jit_anni_enable_##NAME(JitCompContext *cc);
+#define ANN_REG(TYPE, NAME) bool jit_annr_enable_##NAME(JitCompContext *cc);
+#include "jit_ir.def"
+#undef ANN_LABEL
+#undef ANN_INSN
+#undef ANN_REG
+
+/*
+ * Annotation disabling functions jit_annl_disable_NAME,
+ * jit_anni_disable_NAME and jit_annr_disable_NAME, which release
+ * memory of the annotations. Before calling these functions,
+ * resources owned by the annotations must be explicitly released.
+ */
+#define ANN_LABEL(TYPE, NAME) void jit_annl_disable_##NAME(JitCompContext *cc);
+#define ANN_INSN(TYPE, NAME) void jit_anni_disable_##NAME(JitCompContext *cc);
+#define ANN_REG(TYPE, NAME) void jit_annr_disable_##NAME(JitCompContext *cc);
+#include "jit_ir.def"
+#undef ANN_LABEL
+#undef ANN_INSN
+#undef ANN_REG
+
+/*
+ * Functions jit_annl_is_enabled_NAME, jit_anni_is_enabled_NAME and
+ * jit_annr_is_enabled_NAME for checking whether an annotation is
+ * enabled.
+ */
+#define ANN_LABEL(TYPE, NAME)                                          \
+    static inline bool jit_annl_is_enabled_##NAME(JitCompContext *cc) \
+    {                                                                  \
+        return !!cc->_ann._label_##NAME##_enabled;                     \
+    }
+#define ANN_INSN(TYPE, NAME)                                           \
+    static inline bool jit_anni_is_enabled_##NAME(JitCompContext *cc) \
+    {                                                                  \
+        return !!cc->_ann._insn_##NAME##_enabled;                      \
+    }
+#define ANN_REG(TYPE, NAME)                                            \
+    static inline bool jit_annr_is_enabled_##NAME(JitCompContext *cc) \
+    {                                                                  \
+        return !!cc->_ann._reg_##NAME##_enabled;                       \
+    }
+#include "jit_ir.def"
+#undef ANN_LABEL
+#undef ANN_INSN
+#undef ANN_REG
+
+/**
+ * Initialize a compilation context.
+ *
+ * @param cc the compilation context
+ * @param htab_size the initial hash table size of constant pool
+ *
+ * @return cc if succeeds, NULL otherwise
+ */
+JitCompContext *
+jit_cc_init(JitCompContext *cc, unsigned htab_size);
+
+/**
+ * Release all resources of a compilation context, which doesn't
+ * include the compilation context itself.
+ *
+ * @param cc the compilation context
+ */
+void
+jit_cc_destroy(JitCompContext *cc);
+
+/**
+ * Increase the reference count of the compilation context.
+ *
+ * @param cc the compilation context
+ */
+static inline void
+jit_cc_inc_ref(JitCompContext *cc)
+{
+    cc->_reference_count++;
+}
+
+/**
+ * Decrease the reference count, and destroy and free the compilation
+ * context if the reference count drops to zero.
+ *
+ * @param cc the compilation context
+ */
+void
+jit_cc_delete(JitCompContext *cc);
+
+/**
+ * Create an I32 constant value with relocation info into the
+ * compilation context. A constant value that has relocation info
+ * cannot be constant-folded as normal constants because its value
+ * depends on runtime context and may be different in different
+ * executions.
+ *
+ * @param cc compilation context
+ * @param val an I32 value
+ * @param rel relocation information
+ *
+ * @return a constant register containing the value
+ */
+JitReg
+jit_cc_new_const_I32_rel(JitCompContext *cc, int32 val, uint32 rel);
+
+/**
+ * Create an I32 constant value without relocation info (0) into the
+ * compilation context.
+ *
+ * @param cc compilation context
+ * @param val an I32 value
+ *
+ * @return a constant register containing the value
+ */
+static inline JitReg
+jit_cc_new_const_I32(JitCompContext *cc, int32 val)
+{
+    return jit_cc_new_const_I32_rel(cc, val, 0);
+}
+
+/**
+ * Create an I64 constant value into the compilation context.
+ *
+ * @param cc compilation context
+ * @param val an I64 value
+ *
+ * @return a constant register containing the value
+ */
+JitReg
+jit_cc_new_const_I64(JitCompContext *cc, int64 val);
+
+/**
+ * Create an F32 constant value into the compilation context.
+ *
+ * @param cc compilation context
+ * @param val an F32 value
+ *
+ * @return a constant register containing the value
+ */
+JitReg
+jit_cc_new_const_F32(JitCompContext *cc, float val);
+
+/**
+ * Create an F64 constant value into the compilation context.
+ *
+ * @param cc compilation context
+ * @param val an F64 value
+ *
+ * @return a constant register containing the value
+ */
+JitReg
+jit_cc_new_const_F64(JitCompContext *cc, double val);
+
+/**
+ * Get the relocation info of an I32 constant register.
+ *
+ * @param cc compilation context
+ * @param reg constant register
+ *
+ * @return the relocation info of the constant
+ */
+uint32
+jit_cc_get_const_I32_rel(JitCompContext *cc, JitReg reg);
+
+/**
+ * Get the constant value of an I32 constant register.
+ *
+ * @param cc compilation context
+ * @param reg constant register
+ *
+ * @return the constant value
+ */
+int32
+jit_cc_get_const_I32(JitCompContext *cc, JitReg reg);
+
+/**
+ * Get the constant value of an I64 constant register.
+ *
+ * @param cc compilation context
+ * @param reg constant register
+ *
+ * @return the constant value
+ */
+int64
+jit_cc_get_const_I64(JitCompContext *cc, JitReg reg);
+
+/**
+ * Get the constant value of an F32 constant register.
+ *
+ * @param cc compilation context
+ * @param reg constant register
+ *
+ * @return the constant value
+ */
+float
+jit_cc_get_const_F32(JitCompContext *cc, JitReg reg);
+
+/**
+ * Get the constant value of an F64 constant register.
+ *
+ * @param cc compilation context
+ * @param reg constant register
+ *
+ * @return the constant value
+ */
+double
+jit_cc_get_const_F64(JitCompContext *cc, JitReg reg);
+
+/**
+ * Get the number of total created labels.
+ *
+ * @param cc the compilation context
+ *
+ * @return the number of total created labels
+ */
+static inline unsigned
+jit_cc_label_num(JitCompContext *cc)
+{
+    return cc->_ann._label_num;
+}
+
+/**
+ * Get the number of total created instructions.
+ *
+ * @param cc the compilation context
+ *
+ * @return the number of total created instructions
+ */
+static inline unsigned
+jit_cc_insn_num(JitCompContext *cc)
+{
+    return cc->_ann._insn_num;
+}
+
+/**
+ * Get the number of total created registers.
+ *
+ * @param cc the compilation context
+ * @param kind the register kind
+ *
+ * @return the number of total created registers
+ */
+static inline unsigned
+jit_cc_reg_num(JitCompContext *cc, unsigned kind)
+{
+    bh_assert(kind < JIT_REG_KIND_L32);
+    return cc->_ann._reg_num[kind];
+}
+
+/**
+ * Create a new label in the compilation context.
+ *
+ * @param cc the compilation context
+ *
+ * @return a new label in the compilation context
+ */
+JitReg
+jit_cc_new_label(JitCompContext *cc);
+
+/**
+ * Create a new block with a new label in the compilation context.
+ *
+ * @param cc the compilation context
+ * @param n number of predecessors
+ *
+ * @return a new block with a new label in the compilation context
+ */
+JitBasicBlock *
+jit_cc_new_basic_block(JitCompContext *cc, int n);
+
+/**
+ * Resize the predecessor number of a block.
+ *
+ * @param cc the containing compilation context
+ * @param block block to be resized
+ * @param n new number of predecessors
+ *
+ * @return the new block if succeeds, NULL otherwise
+ */
+JitBasicBlock *
+jit_cc_resize_basic_block(JitCompContext *cc, JitBasicBlock *block, int n);
+
+/**
+ * Initialize the instruction hash table to the given size and enable
+ * the instruction's _hash_link annotation.
+ *
+ * @param cc the containing compilation context
+ * @param n size of the hash table
+ *
+ * @return true if succeeds, false otherwise
+ */
+bool
+jit_cc_enable_insn_hash(JitCompContext *cc, unsigned n);
+
+/**
+ * Destroy the instruction hash table and disable the instruction's
+ * _hash_link annotation.
+ *
+ * @param cc the containing compilation context
+ */
+void
+jit_cc_disable_insn_hash(JitCompContext *cc);
+
+/**
+ * Reset the hash table entries.
+ *
+ * @param cc the containing compilation context
+ */
+void
+jit_cc_reset_insn_hash(JitCompContext *cc);
+
+/**
+ * Allocate a new instruction ID in the compilation context and set it
+ * to the given instruction.
+ *
+ * @param cc the compilation context
+ * @param insn IR instruction
+ *
+ * @return the insn with uid being set
+ */
+JitInsn *
+jit_cc_set_insn_uid(JitCompContext *cc, JitInsn *insn);
+
+/*
+ * Similar to jit_cc_set_insn_uid except that if setting uid failed,
+ * delete the insn. Only used by jit_cc_new_insn.
+ */
+JitInsn *
+_jit_cc_set_insn_uid_for_new_insn(JitCompContext *cc, JitInsn *insn);
+
+/**
+ * Create a new instruction in the compilation context.
+ *
+ * @param cc the compilation context
+ * @param NAME instruction name
+ *
+ * @return a new instruction in the compilation context
+ */
+#define jit_cc_new_insn(cc, NAME, ...) \
+    _jit_cc_set_insn_uid_for_new_insn(cc, jit_insn_new_##NAME(__VA_ARGS__))
+
+/*
+ * Helper function for jit_cc_new_insn_norm.
+ */
+JitInsn *
+_jit_cc_new_insn_norm(JitCompContext *cc, JitReg *result, JitInsn *insn);
+
+/**
+ * Create a new instruction in the compilation context and normalize
+ * the instruction (constant folding and simplification etc.). If the
+ * instruction hashing is enabled (anni__hash_link is enabled), try to
+ * find the existing equivalent instruction first before adding a new
+ * one to the compilation context.
+ *
+ * @param cc the compilation context
+ * @param result returned result of the instruction.
If the value is + * non-zero, it is the result of the constant-folding or an exsiting + * equivalent instruction, in which case no instruction is added into + * the compilation context. Otherwise, a new normalized instruction + * has been added into the compilation context. + * @param NAME instruction name + * + * @return a new or existing instruction in the compilation context + */ +#define jit_cc_new_insn_norm(cc, result, NAME, ...) \ + _jit_cc_new_insn_norm(cc, result, jit_insn_new_##NAME(__VA_ARGS__)) + +/** + * Helper function for GEN_INSN + * + * @param cc compilation context + * @param block the current block + * @param insn the new instruction + * + * @return the new instruction if inserted, NULL otherwise + */ +static inline JitInsn * +_gen_insn(JitCompContext *cc, JitInsn *insn) +{ + if (insn) + jit_basic_block_append_insn(cc->cur_basic_block, insn); + + return insn; +} + +/** + * Generate and append an instruction to the current block. + */ +#define GEN_INSN(...) _gen_insn(cc, jit_cc_new_insn(cc, __VA_ARGS__)) + +/** + * Helper function for GEN_INSN_NORM_1 + * + * @param cc compilation context + * @param block the current block + * @param kind kind fo the result register + * @param result points to the returned result register + * @param insn the instruction + * + * @return the new instruction if inserted, NULL otherwise + */ +JitInsn * +_gen_insn_norm_1(JitCompContext *cc, JitBasicBlock *block, unsigned kind, + JitReg *result, JitInsn *insn); + +/** + * Helper macro for GEN_INSN_NORM + */ +#define GEN_INSN_NORM_1(Kind, result, ...) \ + _gen_insn_norm_1(cc, cc->cur_basic_block, Kind, &result, \ + jit_cc_new_insn_norm(cc, &result, __VA_ARGS__)) + +/** + * Generate and append a normalized instruction to the current block. + * + * @param Type type of the result + * @param result result of the normalized instruction + */ +#define GEN_INSN_NORM(Type, result, ...) \ + GEN_INSN_NORM_1(JIT_REG_KIND_##Type, result, __VA_ARGS__) + +/** + * Create a constant register without relocation info. + * + * @param Type type of the register + * @param val the constant value + * + * @return the constant register if succeeds, 0 otherwise + */ +#define NEW_CONST(Type, val) jit_cc_new_const_##Type(cc, val) + +/** + * Create a new virtual register in the compilation context. + * + * @param cc the compilation context + * @param kind kind of the register + * + * @return a new label in the compilation context + */ +JitReg +jit_cc_new_reg(JitCompContext *cc, unsigned kind); + +/* + * Create virtual registers with specific types in the compilation + * context. They are more convenient than the above one. 
+/**
+ * Create a new virtual register in the compilation context.
+ *
+ * @param cc the compilation context
+ * @param kind kind of the register
+ *
+ * @return a new virtual register in the compilation context
+ */
+JitReg
+jit_cc_new_reg(JitCompContext *cc, unsigned kind);
+
+/*
+ * Create virtual registers with specific types in the compilation
+ * context. They are more convenient than the above function.
+ */
+
+static inline JitReg
+jit_cc_new_reg_I32(JitCompContext *cc)
+{
+    return jit_cc_new_reg(cc, JIT_REG_KIND_I32);
+}
+
+static inline JitReg
+jit_cc_new_reg_I64(JitCompContext *cc)
+{
+    return jit_cc_new_reg(cc, JIT_REG_KIND_I64);
+}
+
+static inline JitReg
+jit_cc_new_reg_F32(JitCompContext *cc)
+{
+    return jit_cc_new_reg(cc, JIT_REG_KIND_F32);
+}
+
+static inline JitReg
+jit_cc_new_reg_F64(JitCompContext *cc)
+{
+    return jit_cc_new_reg(cc, JIT_REG_KIND_F64);
+}
+
+static inline JitReg
+jit_cc_new_reg_V64(JitCompContext *cc)
+{
+    return jit_cc_new_reg(cc, JIT_REG_KIND_V64);
+}
+
+static inline JitReg
+jit_cc_new_reg_V128(JitCompContext *cc)
+{
+    return jit_cc_new_reg(cc, JIT_REG_KIND_V128);
+}
+
+static inline JitReg
+jit_cc_new_reg_V256(JitCompContext *cc)
+{
+    return jit_cc_new_reg(cc, JIT_REG_KIND_V256);
+}
+
+/**
+ * Get the number of hard registers of the given kind.
+ *
+ * @param cc the compilation context
+ * @param kind the register kind
+ *
+ * @return number of hard registers of the given kind
+ */
+static inline unsigned
+jit_cc_hreg_num(JitCompContext *cc, unsigned kind)
+{
+    bh_assert(kind < JIT_REG_KIND_L32);
+    return cc->hreg_info->info[kind].num;
+}
+
+/**
+ * Check whether a given register is a hard register.
+ *
+ * @param cc the compilation context
+ * @param reg the register, which must be a variable
+ *
+ * @return true if the register is a hard register
+ */
+static inline bool
+jit_cc_is_hreg(JitCompContext *cc, JitReg reg)
+{
+    unsigned kind = jit_reg_kind(reg);
+    unsigned no = jit_reg_no(reg);
+    bh_assert(jit_reg_is_variable(reg));
+    return no < cc->hreg_info->info[kind].num;
+}
+
+/**
+ * Check whether the given hard register is fixed.
+ *
+ * @param cc the compilation context
+ * @param reg the hard register
+ *
+ * @return true if the hard register is fixed
+ */
+static inline bool
+jit_cc_is_hreg_fixed(JitCompContext *cc, JitReg reg)
+{
+    unsigned kind = jit_reg_kind(reg);
+    unsigned no = jit_reg_no(reg);
+    bh_assert(jit_cc_is_hreg(cc, reg));
+    return !!cc->hreg_info->info[kind].fixed[no];
+}
+
+/**
+ * Check whether the given hard register is caller-saved-native.
+ *
+ * @param cc the compilation context
+ * @param reg the hard register
+ *
+ * @return true if the hard register is caller-saved-native
+ */
+static inline bool
+jit_cc_is_hreg_caller_saved_native(JitCompContext *cc, JitReg reg)
+{
+    unsigned kind = jit_reg_kind(reg);
+    unsigned no = jit_reg_no(reg);
+    bh_assert(jit_cc_is_hreg(cc, reg));
+    return !!cc->hreg_info->info[kind].caller_saved_native[no];
+}
+
+/**
+ * Check whether the given hard register is caller-saved-jitted.
+ *
+ * @param cc the compilation context
+ * @param reg the hard register
+ *
+ * @return true if the hard register is caller-saved-jitted
+ */
+static inline bool
+jit_cc_is_hreg_caller_saved_jitted(JitCompContext *cc, JitReg reg)
+{
+    unsigned kind = jit_reg_kind(reg);
+    unsigned no = jit_reg_no(reg);
+    bh_assert(jit_cc_is_hreg(cc, reg));
+    return !!cc->hreg_info->info[kind].caller_saved_jitted[no];
+}
+
+/**
+ * Return the entry block of the compilation context.
+ *
+ * @param cc the compilation context
+ *
+ * @return the entry block of the compilation context
+ */
+static inline JitBasicBlock *
+jit_cc_entry_basic_block(JitCompContext *cc)
+{
+    return *(jit_annl_basic_block(cc, cc->entry_label));
+}
+/**
+ * Return the exit block of the compilation context.
+ *
+ * @param cc the compilation context
+ *
+ * @return the exit block of the compilation context
+ */
+static inline JitBasicBlock *
+jit_cc_exit_basic_block(JitCompContext *cc)
+{
+    return *(jit_annl_basic_block(cc, cc->exit_label));
+}
+
+char *
+jit_get_last_error(JitCompContext *cc);
+
+void
+jit_set_last_error(JitCompContext *cc, const char *error);
+
+void
+jit_set_last_error_v(JitCompContext *cc, const char *format, ...);
+
+void
+jit_value_stack_push(JitValueStack *stack, JitValue *value);
+
+JitValue *
+jit_value_stack_pop(JitValueStack *stack);
+
+void
+jit_value_stack_destroy(JitValueStack *stack);
+
+void
+jit_block_stack_push(JitBlockStack *stack, JitBlock *block);
+
+JitBlock *
+jit_block_stack_pop(JitBlockStack *stack);
+
+void
+jit_block_stack_destroy(JitBlockStack *stack);
+
+bool
+jit_block_add_incoming_insn(JitBlock *block, JitInsn *insn);
+
+void
+jit_block_destroy(JitBlock *block);
+
+bool
+jit_cc_push_value(JitCompContext *cc, uint8 type, JitReg value);
+
+bool
+jit_cc_pop_value(JitCompContext *cc, uint8 type, JitReg *p_value);
+
+/**
+ * Update the control flow graph after successors of blocks are
+ * changed so that the predecessor vector of each block represents the
+ * updated status. The predecessors may not be required by all
+ * passes, so we don't need to keep them updated at all times.
+ *
+ * @param cc the compilation context
+ *
+ * @return true if succeeds, false otherwise
+ */
+bool
+jit_cc_update_cfg(JitCompContext *cc);
+
+/**
+ * Visit each normal block (which is neither the entry nor the exit
+ * block) in a compilation context. New blocks can be added in the
+ * loop body, but they won't be visited. Blocks can also be removed
+ * safely (by setting the label's block annotation to NULL) in the
+ * loop body.
+ *
+ * @param CC (JitCompContext *) the compilation context
+ * @param I (unsigned) index variable of the block (label no)
+ * @param E (unsigned) end index variable of block (last index + 1)
+ * @param B (JitBasicBlock *) block pointer variable
+ */
+#define JIT_FOREACH_BLOCK(CC, I, E, B)                           \
+    for ((I) = 2, (E) = (CC)->_ann._label_num; (I) < (E); (I)++) \
+        if (((B) = (CC)->_ann._label_basic_block[(I)]))
+
+/**
+ * The version that includes the entry and exit blocks.
+ */
+#define JIT_FOREACH_BLOCK_ENTRY_EXIT(CC, I, E, B)                \
+    for ((I) = 0, (E) = (CC)->_ann._label_num; (I) < (E); (I)++) \
+        if (((B) = (CC)->_ann._label_basic_block[(I)]))
+
+/**
+ * Visit each normal block (which is neither the entry nor the exit
+ * block) in a compilation context in reverse order. New blocks can
+ * be added in the loop body, but they won't be visited. Blocks can
+ * also be removed safely (by setting the label's block annotation to
+ * NULL) in the loop body.
+ *
+ * @param CC (JitCompContext *) the compilation context
+ * @param I (unsigned) index of the block (label no)
+ * @param B (JitBasicBlock *) block pointer
+ */
+#define JIT_FOREACH_BLOCK_REVERSE(CC, I, B)               \
+    for ((I) = (CC)->_ann._label_num; (I) > 2; (I)--)     \
+        if (((B) = (CC)->_ann._label_basic_block[(I)-1]))
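A pass over the CFG then takes the following shape (sketch only, not part of the patch; jit_pass_example is a made-up name, but the macro contract is as documented above):

    static bool
    jit_pass_example(JitCompContext *cc)
    {
        unsigned i, end;
        JitBasicBlock *block;

        JIT_FOREACH_BLOCK(cc, i, end, block)
        {
            /* Visit each normal block; deleting a block here by
               setting its label's block annotation to NULL is safe. */
        }
        return true;
    }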
+/**
+ * The version that includes the entry and exit blocks.
+ */
+#define JIT_FOREACH_BLOCK_REVERSE_ENTRY_EXIT(CC, I, B)    \
+    for ((I) = (CC)->_ann._label_num; (I) > 0; (I)--)     \
+        if (((B) = (CC)->_ann._label_basic_block[(I)-1]))
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* end of _JIT_IR_H_ */
diff --git a/core/iwasm/fast-jit/jit_regalloc.c b/core/iwasm/fast-jit/jit_regalloc.c
new file mode 100644
index 000000000..c931f673f
--- /dev/null
+++ b/core/iwasm/fast-jit/jit_regalloc.c
@@ -0,0 +1,781 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_utils.h"
+#include "jit_compiler.h"
+
+/**
+ * A uint16 stack for storing distances of occurrences of virtual
+ * registers.
+ */
+typedef struct UintStack {
+    /* Capacity of the stack. */
+    uint32 capacity;
+
+    /* Top index of the stack. */
+    uint32 top;
+
+    /* Elements of the stack. */
+    uint16 elem[1];
+} UintStack;
+
+static bool
+uint_stack_push(UintStack **stack, unsigned val)
+{
+    unsigned capacity = *stack ? (*stack)->capacity : 0;
+    unsigned top = *stack ? (*stack)->top : 0;
+
+    bh_assert(top <= capacity);
+
+    if (top == capacity) {
+        const unsigned elem_size = sizeof((*stack)->elem[0]);
+        unsigned new_capacity = capacity ? capacity + capacity / 2 : 4;
+        UintStack *new_stack =
+            jit_malloc(offsetof(UintStack, elem) + elem_size * new_capacity);
+
+        if (!new_stack)
+            return false;
+
+        new_stack->capacity = new_capacity;
+        new_stack->top = top;
+
+        if (*stack)
+            memcpy(new_stack->elem, (*stack)->elem, elem_size * top);
+
+        jit_free(*stack);
+        *stack = new_stack;
+    }
+
+    (*stack)->elem[(*stack)->top++] = val;
+
+    return true;
+}
+
+static int
+uint_stack_top(UintStack *stack)
+{
+    return stack->elem[stack->top - 1];
+}
+
+static void
+uint_stack_delete(UintStack **stack)
+{
+    jit_free(*stack);
+    *stack = NULL;
+}
+
+static void
+uint_stack_pop(UintStack **stack)
+{
+    bh_assert((*stack)->top > 0);
+
+    if (--(*stack)->top == 0)
+        uint_stack_delete(stack);
+}
+
+/**
+ * Information of a virtual register.
+ */
+typedef struct VirtualReg {
+    /* The hard register allocated to this virtual register. */
+    JitReg hreg;
+
+    /* The spill slot allocated to this virtual register. */
+    JitReg slot;
+
+    /* The hard register allocated to global virtual registers. It is 0
+       for local registers, whose lifetime is within one basic block. */
+    JitReg global_hreg;
+
+    /* Distances from the beginning of basic block of all occurrences of the
+       virtual register in the basic block. */
+    UintStack *distances;
+} VirtualReg;
+
+/**
+ * Information of a hard register.
+ */
+typedef struct HardReg {
+    /* The virtual register this hard register is allocated to. */
+    JitReg vreg;
+} HardReg;
+
+/**
+ * Information of a spill slot.
+ */
+typedef struct SpillSlot {
+    /* The virtual register this spill slot is allocated to. */
+    JitReg vreg;
+} SpillSlot;
+
+typedef struct RegallocContext {
+    /* The compilation context. */
+    JitCompContext *cc;
+
+    /* Information of virtual registers. The register allocation must
+       not increase the virtual register number during the allocation
+       process. */
+    VirtualReg *vregs[JIT_REG_KIND_L32];
+
+    /* Information of hard registers. */
+    HardReg *hregs[JIT_REG_KIND_L32];
+
+    /* Number of elements in the spill_slots array. */
+    uint32 spill_slot_num;
+
+    /* Information of spill slots. */
+    SpillSlot *spill_slots;
+
+    /* The last define-released hard register. */
+    JitReg last_def_released_hreg;
+} RegallocContext;
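The growth-and-free discipline of UintStack can be seen in a small sketch (illustrative, not part of the patch; the pushed values are arbitrary):

    UintStack *distances = NULL; /* NULL doubles as the empty stack */

    if (uint_stack_push(&distances, 5) && uint_stack_push(&distances, 9)) {
        bh_assert(uint_stack_top(distances) == 9); /* most recent on top */
        uint_stack_pop(&distances);                /* drops 9 */
        uint_stack_pop(&distances); /* drops 5 and frees: distances == NULL */
    }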
+/**
+ * Get the VirtualReg structure of the given virtual register.
+ *
+ * @param rc the regalloc context
+ * @param vreg the virtual register
+ *
+ * @return the VirtualReg structure of the given virtual register
+ */
+static VirtualReg *
+rc_get_vr(RegallocContext *rc, JitReg vreg)
+{
+    unsigned kind = jit_reg_kind(vreg);
+    unsigned no = jit_reg_no(vreg);
+
+    bh_assert(jit_reg_is_variable(vreg));
+
+    return &rc->vregs[kind][no];
+}
+
+/**
+ * Get the HardReg structure of the given hard register.
+ *
+ * @param rc the regalloc context
+ * @param hreg the hard register
+ *
+ * @return the HardReg structure of the given hard register
+ */
+static HardReg *
+rc_get_hr(RegallocContext *rc, JitReg hreg)
+{
+    unsigned kind = jit_reg_kind(hreg);
+    unsigned no = jit_reg_no(hreg);
+
+    bh_assert(jit_reg_is_variable(hreg) && jit_cc_is_hreg(rc->cc, hreg));
+
+    return &rc->hregs[kind][no];
+}
+
+/**
+ * Get the SpillSlot structure of the given slot.
+ *
+ * @param rc the regalloc context
+ * @param slot the constant register representing the slot index
+ *
+ * @return the SpillSlot of the given slot
+ */
+static SpillSlot *
+rc_get_spill_slot(RegallocContext *rc, JitReg slot)
+{
+    unsigned index = jit_cc_get_const_I32(rc->cc, slot);
+
+    bh_assert(index < rc->spill_slot_num);
+
+    return &rc->spill_slots[index];
+}
+
+/**
+ * Get the stride in the spill slots of the register.
+ *
+ * @param reg a virtual register
+ *
+ * @return stride in the spill slots
+ */
+static unsigned
+get_reg_stride(JitReg reg)
+{
+    static const uint8 strides[] = { 0, 1, 2, 1, 2, 2, 4, 8, 0 };
+    return strides[jit_reg_kind(reg)];
+}
+
+/**
+ * Allocate a spill slot for the given virtual register.
+ *
+ * @param rc the regalloc context
+ * @param vreg the virtual register
+ *
+ * @return the spill slot encoded in a constant register
+ */
+static JitReg
+rc_alloc_spill_slot(RegallocContext *rc, JitReg vreg)
+{
+    const unsigned stride = get_reg_stride(vreg);
+    unsigned mask, new_num, i, j;
+    SpillSlot *slots;
+
+    bh_assert(stride > 0);
+
+    for (i = 0; i < rc->spill_slot_num; i += stride)
+        for (j = i;; j++) {
+            if (j == i + stride)
+                /* Found a free slot for vreg. */
+                goto found;
+
+            if (rc->spill_slots[j].vreg)
+                break;
+        }
+
+    /* No free slot, increase the slot number. */
+    mask = stride - 1;
+    /* Align the slot index. */
+    i = (rc->spill_slot_num + mask) & ~mask;
+    new_num = i == 0 ? 32 : i + i / 2;
+
+    if (!(slots = jit_calloc(sizeof(*slots) * new_num)))
+        return 0;
+
+    if (rc->spill_slots)
+        memcpy(slots, rc->spill_slots, sizeof(*slots) * rc->spill_slot_num);
+
+    jit_free(rc->spill_slots);
+    rc->spill_slots = slots;
+    rc->spill_slot_num = new_num;
+
+found:
+    /* Now, i is the first slot for vreg. */
+#if 0 /* TODO: check the spill */
+    if (rc->cc->interp_frame_size + (i + stride) * 4
+        > rc->cc->total_frame_size)
+        /* No frame space for the spill area. */
+        return 0;
+#endif
+
+    /* Allocate the slot(s) to vreg. */
+    for (j = i; j < i + stride; j++)
+        rc->spill_slots[j].vreg = vreg;
+
+    return jit_cc_new_const_I32(rc->cc, i);
+}
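A worked instance of the alignment arithmetic above (illustrative; the numbers are made up): spilling an I64 register (stride 2) when spill_slot_num is 5 gives

    unsigned stride = 2, mask = stride - 1;      /* mask = 1 */
    unsigned i = (5 + mask) & ~mask;             /* align 5 up to 6 */
    unsigned new_num = i == 0 ? 32 : i + i / 2;  /* grow to 9 slots */

so slots 6 and 7 are handed to the vreg and the array grows to 9 entries.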
+/**
+ * Free a spill slot.
+ *
+ * @param rc the regalloc context
+ * @param slot_reg the constant register representing the slot index
+ */
+static void
+rc_free_spill_slot(RegallocContext *rc, JitReg slot_reg)
+{
+    if (slot_reg) {
+        SpillSlot *slot = rc_get_spill_slot(rc, slot_reg);
+        const JitReg vreg = slot->vreg;
+        const unsigned stride = get_reg_stride(vreg);
+        unsigned i;
+
+        for (i = 0; i < stride; i++)
+            slot[i].vreg = 0;
+    }
+}
+
+static void
+rc_destroy(RegallocContext *rc)
+{
+    unsigned i, j;
+
+    for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) {
+        const unsigned vreg_num = jit_cc_reg_num(rc->cc, i);
+
+        if (rc->vregs[i])
+            for (j = 0; j < vreg_num; j++)
+                uint_stack_delete(&rc->vregs[i][j].distances);
+
+        jit_free(rc->vregs[i]);
+        jit_free(rc->hregs[i]);
+    }
+
+    jit_free(rc->spill_slots);
+}
+
+static bool
+rc_init(RegallocContext *rc, JitCompContext *cc)
+{
+    unsigned i, j;
+
+    memset(rc, 0, sizeof(*rc));
+    rc->cc = cc;
+
+    for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) {
+        const unsigned vreg_num = jit_cc_reg_num(cc, i);
+        const unsigned hreg_num = jit_cc_hreg_num(cc, i);
+
+        if (!(rc->vregs[i] = jit_calloc(sizeof(VirtualReg) * vreg_num))
+            || !(rc->hregs[i] = jit_calloc(sizeof(HardReg) * hreg_num)))
+            goto fail;
+
+        /* Hard registers can only be allocated to themselves. */
+        for (j = 0; j < hreg_num; j++)
+            rc->vregs[i][j].global_hreg = jit_reg_new(i, j);
+    }
+
+    return true;
+
+fail:
+    rc_destroy(rc);
+
+    return false;
+}
+
+/**
+ * Check whether the given register is an allocation candidate, which
+ * must be a variable register that is not a fixed hard register.
+ *
+ * @param cc the compilation context
+ * @param reg the register
+ *
+ * @return true if the register is an allocation candidate
+ */
+static bool
+is_alloc_candidate(JitCompContext *cc, JitReg reg)
+{
+    return (jit_reg_is_variable(reg)
+            && (!jit_cc_is_hreg(cc, reg) || !jit_cc_is_hreg_fixed(cc, reg)));
+}
+
+/**
+ * Collect distances from the beginning of the basic block of all
+ * occurrences of each virtual register.
+ *
+ * @param rc the regalloc context
+ * @param basic_block the basic block
+ *
+ * @return distance of the end instruction if succeeds, -1 otherwise
+ */
+static int
+collect_distances(RegallocContext *rc, JitBasicBlock *basic_block)
+{
+    JitInsn *insn;
+    int distance = 1;
+
+    JIT_FOREACH_INSN(basic_block, insn)
+    {
+        JitRegVec regvec = jit_insn_opnd_regs(insn);
+        unsigned i;
+        JitReg *regp;
+
+        /* NOTE: the distance may be pushed more than once if the
+           virtual register occurs multiple times in the
+           instruction. */
+        JIT_REG_VEC_FOREACH(regvec, i, regp)
+            if (is_alloc_candidate(rc->cc, *regp))
+                if (!uint_stack_push(&(rc_get_vr(rc, *regp))->distances,
+                                     distance))
+                    return -1;
+
+        distance++;
+    }
+
+    return distance;
+}
+
+static JitReg
+offset_of_spill_slot(JitCompContext *cc, JitReg slot)
+{
+    /* TODO: check the spill */
+    return jit_cc_new_const_I32(
+        cc, /*cc->interp_frame_size + jit_cc_get_const_I32 (cc, slot) * 4*/ 0);
+}
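Taken together, collect_distances and the UintStack give each vreg a stack of its use positions; a sketch of the expected state (illustrative, assuming a vreg that occurs in the 1st and 3rd instructions of the block):

    int end_distance = collect_distances(rc, basic_block);
    if (end_distance < 0)
        return false; /* OOM while growing a distance stack */
    /* That vreg's stack is now [1, 3]: uint_stack_top() yields 3,
       and the reverse walk below pops entries as it passes them. */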
+/**
+ * Reload the virtual register from memory. The reload instruction
+ * will be inserted after the given instruction.
+ *
+ * @param rc the regalloc context
+ * @param vreg the virtual register to be reloaded
+ * @param cur_insn the current instruction after which the reload
+ * instruction will be inserted
+ *
+ * @return the reload instruction if succeeds, NULL otherwise
+ */
+static JitInsn *
+reload_vreg(RegallocContext *rc, JitReg vreg, JitInsn *cur_insn)
+{
+    VirtualReg *vr = rc_get_vr(rc, vreg);
+    HardReg *hr = rc_get_hr(rc, vr->hreg);
+    JitInsn *insn = NULL;
+
+    if (vreg == rc->cc->exec_env_reg)
+        /* Reload exec_env_reg with LDSELF. */
+        insn = jit_cc_new_insn(rc->cc, LDSELF, vr->hreg);
+    else
+        /* Allocate a spill slot if there is none yet and reload from
+           there. */
+    {
+        JitReg fp_reg = rc->cc->fp_reg, offset;
+
+        if (!vr->slot && !(vr->slot = rc_alloc_spill_slot(rc, vreg)))
+            /* Cannot allocate a spill slot (due to OOM or frame size
+               limit). */
+            return NULL;
+
+        offset = offset_of_spill_slot(rc->cc, vr->slot);
+
+        switch (jit_reg_kind(vreg)) {
+            case JIT_REG_KIND_I32:
+                insn = jit_cc_new_insn(rc->cc, LDI32, vr->hreg, fp_reg, offset);
+                break;
+            case JIT_REG_KIND_I64:
+                insn = jit_cc_new_insn(rc->cc, LDI64, vr->hreg, fp_reg, offset);
+                break;
+            case JIT_REG_KIND_F32:
+                insn = jit_cc_new_insn(rc->cc, LDF32, vr->hreg, fp_reg, offset);
+                break;
+            case JIT_REG_KIND_F64:
+                insn = jit_cc_new_insn(rc->cc, LDF64, vr->hreg, fp_reg, offset);
+                break;
+            case JIT_REG_KIND_V64:
+                insn = jit_cc_new_insn(rc->cc, LDV64, vr->hreg, fp_reg, offset);
+                break;
+            case JIT_REG_KIND_V128:
+                insn =
+                    jit_cc_new_insn(rc->cc, LDV128, vr->hreg, fp_reg, offset);
+                break;
+            case JIT_REG_KIND_V256:
+                insn =
+                    jit_cc_new_insn(rc->cc, LDV256, vr->hreg, fp_reg, offset);
+                break;
+            default:
+                bh_assert(0);
+        }
+    }
+
+    if (insn)
+        jit_insn_insert_after(cur_insn, insn);
+
+    bh_assert(hr->vreg == vreg);
+    hr->vreg = vr->hreg = 0;
+
+    return insn;
+}
+
+/**
+ * Spill the virtual register (which cannot be exec_env_reg) to memory.
+ * The spill instruction will be inserted after the given instruction.
+ *
+ * @param rc the regalloc context
+ * @param vreg the virtual register to be spilled
+ * @param cur_insn the current instruction after which the spill
+ * instruction will be inserted
+ *
+ * @return the spill instruction if succeeds, NULL otherwise
+ */
+static JitInsn *
+spill_vreg(RegallocContext *rc, JitReg vreg, JitInsn *cur_insn)
+{
+    VirtualReg *vr = rc_get_vr(rc, vreg);
+    JitReg fp_reg = rc->cc->fp_reg, offset;
+    JitInsn *insn;
+
+    /* There is no chance to spill exec_env_reg. */
+    bh_assert(vreg != rc->cc->exec_env_reg);
+    bh_assert(vr->hreg && vr->slot);
+    offset = offset_of_spill_slot(rc->cc, vr->slot);
+
+    switch (jit_reg_kind(vreg)) {
+        case JIT_REG_KIND_I32:
+            insn = jit_cc_new_insn(rc->cc, STI32, vr->hreg, fp_reg, offset);
+            break;
+        case JIT_REG_KIND_I64:
+            insn = jit_cc_new_insn(rc->cc, STI64, vr->hreg, fp_reg, offset);
+            break;
+        case JIT_REG_KIND_F32:
+            insn = jit_cc_new_insn(rc->cc, STF32, vr->hreg, fp_reg, offset);
+            break;
+        case JIT_REG_KIND_F64:
+            insn = jit_cc_new_insn(rc->cc, STF64, vr->hreg, fp_reg, offset);
+            break;
+        case JIT_REG_KIND_V64:
+            insn = jit_cc_new_insn(rc->cc, STV64, vr->hreg, fp_reg, offset);
+            break;
+        case JIT_REG_KIND_V128:
+            insn = jit_cc_new_insn(rc->cc, STV128, vr->hreg, fp_reg, offset);
+            break;
+        case JIT_REG_KIND_V256:
+            insn = jit_cc_new_insn(rc->cc, STV256, vr->hreg, fp_reg, offset);
+            break;
+        default:
+            bh_assert(0);
+    }
+
+    if (insn)
+        jit_insn_insert_after(cur_insn, insn);
+
+    return insn;
+}
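The two helpers are used as a pair during allocation; roughly (a sketch, not part of the patch, with hypothetical def_insn/use_insn variables):

    /* After the defining insn: store the hard register to the slot. */
    if (vr->slot && !spill_vreg(rc, vreg, def_insn))
        return false;
    /* After an earlier insn: load it back and release the hard reg. */
    if (!reload_vreg(rc, vreg, use_insn))
        return false;

Because the block is processed bottom-up, "inserting after" an earlier instruction places the reload before the use in program order.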
+/**
+ * Allocate a hard register for the virtual register. Necessary
+ * reload instructions will be inserted after the given instruction.
+ *
+ * @param rc the regalloc context
+ * @param vreg the virtual register
+ * @param insn the instruction after which any reload instruction
+ * will be inserted
+ * @param distance the distance of the current instruction
+ *
+ * @return the hard register allocated if succeeds, 0 otherwise
+ */
+static JitReg
+allocate_hreg(RegallocContext *rc, JitReg vreg, JitInsn *insn, int distance)
+{
+    const int kind = jit_reg_kind(vreg);
+    const HardReg *hregs = rc->hregs[kind];
+    const unsigned hreg_num = jit_cc_hreg_num(rc->cc, kind);
+    JitReg hreg, vreg_to_reload = 0;
+    int min_distance = distance, vr_distance;
+    VirtualReg *vr = rc_get_vr(rc, vreg);
+    unsigned i;
+
+    if (hreg_num == 0)
+        /* Unsupported hard register kind. */
+    {
+        /* TODO: how to set error */
+        /*jit_set_error (JIT_ERROR_UNSUPPORTED_HREG);*/
+        return 0;
+    }
+
+    if (vr->global_hreg)
+        /* It has a globally allocated register; we can only use it. */
+    {
+        if ((vreg_to_reload = (rc_get_hr(rc, vr->global_hreg))->vreg))
+            if (!reload_vreg(rc, vreg_to_reload, insn))
+                return 0;
+
+        return vr->global_hreg;
+    }
+
+    /* Use the last define-released register if its kind is correct and
+       it's free so as to optimize for two-operand instructions. */
+    if (jit_reg_kind(rc->last_def_released_hreg) == kind
+        && (rc_get_hr(rc, rc->last_def_released_hreg))->vreg == 0)
+        return rc->last_def_released_hreg;
+
+    /* No hint given, just try to pick any free register. */
+    for (i = 0; i < hreg_num; i++) {
+        hreg = jit_reg_new(kind, i);
+
+        if (jit_cc_is_hreg_fixed(rc->cc, hreg))
+            continue;
+
+        if (hregs[i].vreg == 0)
+            /* Found a free one, return it. */
+            return hreg;
+    }
+
+    /* No free registers, need to spill and reload one. */
+    for (i = 0; i < hreg_num; i++) {
+        if (jit_cc_is_hreg_fixed(rc->cc, jit_reg_new(kind, i)))
+            continue;
+
+        vr = rc_get_vr(rc, hregs[i].vreg);
+        vr_distance = vr->distances ? uint_stack_top(vr->distances) : 0;
+
+        if (vr_distance < min_distance) {
+            min_distance = vr_distance;
+            vreg_to_reload = hregs[i].vreg;
+            hreg = jit_reg_new(kind, i);
+        }
+    }
+
+    bh_assert(min_distance < distance);
+
+    if (!reload_vreg(rc, vreg_to_reload, insn))
+        return 0;
+
+    return hreg;
+}
+
+/**
+ * Allocate a hard register for the virtual register if not allocated
+ * yet. Necessary spill and reload instructions will be inserted
+ * after the given instruction. This operation will convert the
+ * virtual register's state from 1 or 3 to 2.
+ *
+ * @param rc the regalloc context
+ * @param vreg the virtual register
+ * @param insn the instruction after which the spill and reload
+ * insertions will be inserted
+ * @param distance the distance of the current instruction
+ *
+ * @return the hard register allocated to the virtual register if
+ * succeeds, 0 otherwise
+ */
+static JitReg
+allocate_for_vreg(RegallocContext *rc, JitReg vreg, JitInsn *insn, int distance)
+{
+    VirtualReg *vr = rc_get_vr(rc, vreg);
+
+    if (vr->hreg)
+        /* It already has a hard register; reuse it. */
+        return vr->hreg;
+
+    /* Not allocated yet. */
+    if ((vr->hreg = allocate_hreg(rc, vreg, insn, distance)))
+        (rc_get_hr(rc, vr->hreg))->vreg = vreg;
+
+    return vr->hreg;
+}
+
+/**
+ * Clobber live registers.
+ *
+ * @param rc the regalloc context
+ * @param is_native whether it's the native ABI or the JITed ABI
+ * @param insn the instruction after which the reload instructions
+ * will be inserted
+ *
+ * @return true if succeeds, false otherwise
+ */
+static bool
+clobber_live_regs(RegallocContext *rc, bool is_native, JitInsn *insn)
+{
+    unsigned i, j;
+
+    for (i = JIT_REG_KIND_VOID; i < JIT_REG_KIND_L32; i++) {
+        const unsigned hreg_num = jit_cc_hreg_num(rc->cc, i);
+
+        for (j = 0; j < hreg_num; j++) {
+            JitReg hreg = jit_reg_new(i, j);
+            bool caller_saved =
+                (is_native ? jit_cc_is_hreg_caller_saved_native(rc->cc, hreg)
+                           : jit_cc_is_hreg_caller_saved_jitted(rc->cc, hreg));
+
+            if (caller_saved && rc->hregs[i][j].vreg)
+                if (!reload_vreg(rc, rc->hregs[i][j].vreg, insn))
+                    return false;
+        }
+    }
+
+    return true;
+}
+
+/**
+ * Do local register allocation for the given basic block.
+ *
+ * @param rc the regalloc context
+ * @param basic_block the basic block
+ * @param distance the distance of the last instruction of the basic block
+ *
+ * @return true if succeeds, false otherwise
+ */
+static bool
+allocate_for_basic_block(RegallocContext *rc, JitBasicBlock *basic_block,
+                         int distance)
+{
+    JitInsn *insn;
+
+    JIT_FOREACH_INSN_REVERSE(basic_block, insn)
+    {
+        JitRegVec regvec = jit_insn_opnd_regs(insn);
+        unsigned first_use = jit_insn_opnd_first_use(insn);
+        unsigned i;
+        JitReg *regp;
+
+        distance--;
+
+        JIT_REG_VEC_FOREACH_DEF(regvec, i, regp, first_use)
+            if (is_alloc_candidate(rc->cc, *regp)) {
+                const JitReg vreg = *regp;
+                VirtualReg *vr = rc_get_vr(rc, vreg);
+
+                if (!(*regp = allocate_for_vreg(rc, vreg, insn, distance)))
+                    return false;
+
+                /* Spill the register if required. */
+                if (vr->slot && !spill_vreg(rc, vreg, insn))
+                    return false;
+
+                bh_assert(uint_stack_top(vr->distances) == distance);
+                uint_stack_pop(&vr->distances);
+                /* Record the define-released hard register. */
+                rc->last_def_released_hreg = vr->hreg;
+                /* Release the hreg and spill slot. */
+                rc_free_spill_slot(rc, vr->slot);
+                (rc_get_hr(rc, vr->hreg))->vreg = 0;
+                vr->hreg = vr->slot = 0;
+            }
+
+        if (insn->opcode == JIT_OP_CALLBC) {
+            if (!clobber_live_regs(rc, false, insn))
+                return false;
+
+            /* The exec_env_reg is implicitly used by the callee. */
+            if (!allocate_for_vreg(rc, rc->cc->exec_env_reg, insn, distance))
+                return false;
+        }
+        else if (insn->opcode == JIT_OP_CALLNATIVE) {
+            if (!clobber_live_regs(rc, true, insn))
+                return false;
+        }
+
+        JIT_REG_VEC_FOREACH_USE(regvec, i, regp, first_use)
+            if (is_alloc_candidate(rc->cc, *regp)) {
+                if (!allocate_for_vreg(rc, *regp, insn, distance))
+                    return false;
+            }
+
+        JIT_REG_VEC_FOREACH_USE(regvec, i, regp, first_use)
+            if (is_alloc_candidate(rc->cc, *regp)) {
+                VirtualReg *vr = rc_get_vr(rc, *regp);
+                bh_assert(uint_stack_top(vr->distances) == distance);
+                uint_stack_pop(&vr->distances);
+                *regp = vr->hreg;
+            }
+    }
+
+    return true;
+}
+
+bool
+jit_pass_regalloc(JitCompContext *cc)
+{
+    RegallocContext rc;
+    unsigned label_index, end_label_index;
+    JitBasicBlock *basic_block;
+    VirtualReg *self_vr;
+    bool retval = false;
+
+    if (!rc_init(&rc, cc))
+        return false;
+
+    /* NOTE: don't allocate new virtual registers during allocation
+       because the rc->vregs array is of fixed size. */
+
+    /* TODO: allocate hard registers for global virtual registers here.
+       Currently, exec_env_reg is the only global virtual register. */
+    self_vr = rc_get_vr(&rc, cc->exec_env_reg);
+
+    JIT_FOREACH_BLOCK_ENTRY_EXIT(cc, label_index, end_label_index, basic_block)
+    {
+        int distance;
+
+        /* TODO: initialize hreg for live-out registers. */
+        self_vr->hreg = self_vr->global_hreg;
+        (rc_get_hr(&rc, cc->exec_env_reg))->vreg = cc->exec_env_reg;
+
+        if ((distance = collect_distances(&rc, basic_block)) < 0)
+            goto cleanup_and_return;
+
+        if (!allocate_for_basic_block(&rc, basic_block, distance))
+            goto cleanup_and_return;
+
+        /* TODO: generate necessary spills for live-in registers. */
+    }
+
+    retval = true;
+
+cleanup_and_return:
+    rc_destroy(&rc);
+
+    return retval;
+}
diff --git a/core/iwasm/fast-jit/jit_utils.c b/core/iwasm/fast-jit/jit_utils.c
new file mode 100644
index 000000000..57a3e8f67
--- /dev/null
+++ b/core/iwasm/fast-jit/jit_utils.c
@@ -0,0 +1,19 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#include "jit_utils.h"
+
+JitBitmap *
+jit_bitmap_new(uintptr_t begin_index, unsigned bitnum)
+{
+    JitBitmap *bitmap;
+
+    if ((bitmap = jit_calloc(offsetof(JitBitmap, map) + (bitnum + 7) / 8))) {
+        bitmap->begin_index = begin_index;
+        bitmap->end_index = begin_index + bitnum;
+    }
+
+    return bitmap;
+}
diff --git a/core/iwasm/fast-jit/jit_utils.h b/core/iwasm/fast-jit/jit_utils.h
new file mode 100644
index 000000000..c165b7a3c
--- /dev/null
+++ b/core/iwasm/fast-jit/jit_utils.h
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2021 Intel Corporation. All rights reserved.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ */
+
+#ifndef _JIT_UTILS_H_
+#define _JIT_UTILS_H_
+
+#include "bh_platform.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * A simple fixed-size bitmap.
+ */
+typedef struct JitBitmap {
+    /* The first valid bit index. */
+    uintptr_t begin_index;
+
+    /* The last valid bit index plus one. */
+    uintptr_t end_index;
+
+    /* The bitmap. */
+    uint8 map[1];
+} JitBitmap;
+
+static inline void *
+jit_malloc(unsigned int size)
+{
+    return wasm_runtime_malloc(size);
+}
+
+static inline void *
+jit_calloc(unsigned int size)
+{
+    void *ret = wasm_runtime_malloc(size);
+    if (ret) {
+        memset(ret, 0, size);
+    }
+    return ret;
+}
+
+static inline void
+jit_free(void *ptr)
+{
+    if (ptr)
+        wasm_runtime_free(ptr);
+}
+
+/**
+ * Create a new bitmap.
+ *
+ * @param begin_index the first valid bit index
+ * @param bitnum maximal bit number of the bitmap
+ *
+ * @return the new bitmap if succeeds, NULL otherwise
+ */
+JitBitmap *
+jit_bitmap_new(uintptr_t begin_index, unsigned bitnum);
+
+/**
+ * Delete a bitmap.
+ *
+ * @param bitmap the bitmap to be deleted
+ */
+static inline void
+jit_bitmap_delete(JitBitmap *bitmap)
+{
+    jit_free(bitmap);
+}
+
+/**
+ * Check whether the given index is in the range of the bitmap.
+ *
+ * @param bitmap the bitmap
+ * @param n the bit index
+ *
+ * @return true if the index is in range, false otherwise
+ */
+static inline bool
+jit_bitmap_is_in_range(JitBitmap *bitmap, unsigned n)
+{
+    return n >= bitmap->begin_index && n < bitmap->end_index;
+}
+
+/**
+ * Get a bit in the bitmap.
+ *
+ * @param bitmap the bitmap
+ * @param n the n-th bit to get
+ *
+ * @return value of the bit
+ */
+static inline int
+jit_bitmap_get_bit(JitBitmap *bitmap, unsigned n)
+{
+    unsigned idx = n - bitmap->begin_index;
+    bh_assert(n >= bitmap->begin_index && n < bitmap->end_index);
+    return (bitmap->map[idx / 8] >> (idx % 8)) & 1;
+}
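A minimal usage sketch of the bitmap API (illustrative, not part of the patch; the set/clear helpers follow below):

    JitBitmap *visited = jit_bitmap_new(2, 100); /* valid bits 2..101 */

    if (visited) {
        if (jit_bitmap_is_in_range(visited, 42))
            jit_bitmap_set_bit(visited, 42);
        bh_assert(jit_bitmap_get_bit(visited, 42) == 1);
        jit_bitmap_delete(visited);
    }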
+/**
+ * Set a bit in the bitmap.
+ *
+ * @param bitmap the bitmap
+ * @param n the n-th bit to be set
+ */
+static inline void
+jit_bitmap_set_bit(JitBitmap *bitmap, unsigned n)
+{
+    unsigned idx = n - bitmap->begin_index;
+    bh_assert(n >= bitmap->begin_index && n < bitmap->end_index);
+    bitmap->map[idx / 8] |= 1 << (idx % 8);
+}
+
+/**
+ * Clear a bit in the bitmap.
+ *
+ * @param bitmap the bitmap
+ * @param n the n-th bit to be cleared
+ */
+static inline void
+jit_bitmap_clear_bit(JitBitmap *bitmap, unsigned n)
+{
+    unsigned idx = n - bitmap->begin_index;
+    bh_assert(n >= bitmap->begin_index && n < bitmap->end_index);
+    bitmap->map[idx / 8] &= ~(1 << (idx % 8));
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/core/iwasm/interpreter/wasm.h b/core/iwasm/interpreter/wasm.h
index 9160350be..2e6250778 100644
--- a/core/iwasm/interpreter/wasm.h
+++ b/core/iwasm/interpreter/wasm.h
@@ -254,6 +254,9 @@ struct WASMFunction {
     uint8 *consts;
     uint32 const_cell_num;
 #endif
+#if WASM_ENABLE_FAST_JIT != 0
+    void *jitted_code;
+#endif
 };
 
 struct WASMGlobal {
@@ -323,6 +326,7 @@ typedef struct WASMFastOPCodeNode {
     uint8 orig_op;
 } WASMFastOPCodeNode;
 #endif
+
 struct WASMModule {
     /* Module type, for module loaded from WASM bytecode binary,
        this field is Wasm_Module_Bytecode;
@@ -414,9 +418,12 @@ struct WASMModule {
 #if WASM_ENABLE_DEBUG_INTERP != 0 || WASM_ENABLE_DEBUG_AOT != 0
     bh_list fast_opcode_list;
     uint8 *buf_code;
+    uint64 buf_code_size;
+#endif
+#if WASM_ENABLE_DEBUG_INTERP != 0 || WASM_ENABLE_DEBUG_AOT != 0 \
+    || WASM_ENABLE_FAST_JIT != 0
     uint8 *load_addr;
     uint64 load_size;
-    uint64 buf_code_size;
 #endif
 
 #if WASM_ENABLE_DEBUG_INTERP != 0
@@ -437,6 +444,11 @@ struct WASMModule {
     const uint8 *name_section_buf;
     const uint8 *name_section_buf_end;
 #endif
+
+#if WASM_ENABLE_FAST_JIT != 0
+    /* pointers to the JITed functions */
+    void **func_ptrs;
+#endif
 };
 
 typedef struct BlockType {
diff --git a/core/iwasm/interpreter/wasm_interp.h b/core/iwasm/interpreter/wasm_interp.h
index 4ac36edae..ecd8abe6a 100644
--- a/core/iwasm/interpreter/wasm_interp.h
+++ b/core/iwasm/interpreter/wasm_interp.h
@@ -26,6 +26,11 @@ typedef struct WASMInterpFrame {
     /* Instruction pointer of the bytecode array. */
     uint8 *ip;
 
+#if WASM_ENABLE_FAST_JIT != 0
+    uint8 *jitted_return_addr;
+    uint32 spill_cache[16];
+#endif
+
 #if WASM_ENABLE_PERF_PROFILING != 0
     uint64 time_started;
 #endif
diff --git a/core/iwasm/interpreter/wasm_interp_classic.c b/core/iwasm/interpreter/wasm_interp_classic.c
index 0bfa02fcc..0d6745f9a 100644
--- a/core/iwasm/interpreter/wasm_interp_classic.c
+++ b/core/iwasm/interpreter/wasm_interp_classic.c
@@ -15,6 +15,9 @@
 #if WASM_ENABLE_THREAD_MGR != 0 && WASM_ENABLE_DEBUG_INTERP != 0
 #include "../libraries/thread-mgr/thread_manager.h"
 #endif
+#if WASM_ENABLE_FAST_JIT != 0
+#include "../fast-jit/jit_compiler.h"
+#endif
 
 typedef int32 CellType_I32;
 typedef int64 CellType_I64;
@@ -3761,7 +3764,12 @@ wasm_interp_call_wasm(WASMModuleInstance *module_inst, WASMExecEnv *exec_env,
         }
     }
     else {
+#if WASM_ENABLE_FAST_JIT == 0
         wasm_interp_call_func_bytecode(module_inst, exec_env, function, frame);
+#else
+        jit_interp_switch_to_jitted(exec_env, frame, function,
+                                    function->u.func->jitted_code);
+#endif
     }
 
     /* Output the return value to the caller */
diff --git a/core/iwasm/interpreter/wasm_loader.c b/core/iwasm/interpreter/wasm_loader.c
index 51327d6cc..c8bd9e6c2 100644
--- a/core/iwasm/interpreter/wasm_loader.c
+++ b/core/iwasm/interpreter/wasm_loader.c
@@ -13,6 +13,9 @@
 #if WASM_ENABLE_DEBUG_INTERP != 0
 #include "../libraries/debug-engine/debug_engine.h"
 #endif
+#if WASM_ENABLE_FAST_JIT != 0
+#include "../fast-jit/jit_compiler.h"
+#endif
 
 /* Read a value of given type from the address pointed to by the given
    pointer and increase the pointer to the position just after the
@@ -3226,6 +3229,13 @@ load_from_sections(WASMModule *module, WASMSection *sections,
 #endif
     }
 
+#if WASM_ENABLE_FAST_JIT != 0
+    if (!jit_compiler_compile_all(module)) {
+        set_error_buf(error_buf, error_buf_size, "fast jit compilation failed");
+        return false;
+    }
+#endif
+
 #if WASM_ENABLE_MEMORY_TRACING != 0
     wasm_runtime_dump_module_mem_consumption((WASMModuleCommon *)module);
 #endif
@@ -3589,7 +3599,7 @@ wasm_loader_load(const uint8 *buf, uint32 size,
         return NULL;
     }
 
-#if WASM_ENABLE_DEBUG_INTERP != 0
+#if WASM_ENABLE_DEBUG_INTERP != 0 || WASM_ENABLE_FAST_JIT != 0
     module->load_addr = (uint8 *)buf;
     module->load_size = size;
 #endif
@@ -7309,7 +7319,8 @@ re_scan:
                 operand_offset = local_offset;
                 PUSH_OFFSET_TYPE(local_type);
 #else
-#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
+#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
+    && (WASM_ENABLE_FAST_JIT == 0)
                 if (local_offset < 0x80) {
 #if WASM_ENABLE_DEBUG_INTERP != 0
                     record_fast_op(module, p_org, *p_org);
@@ -7384,7 +7395,8 @@ re_scan:
                     POP_OFFSET_TYPE(local_type);
                 }
 #else
-#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
+#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
+    && (WASM_ENABLE_FAST_JIT == 0)
                 if (local_offset < 0x80) {
 #if WASM_ENABLE_DEBUG_INTERP != 0
                     record_fast_op(module, p_org, *p_org);
@@ -7455,7 +7467,8 @@ re_scan:
                     *(loader_ctx->frame_offset
                       - wasm_value_type_cell_num(local_type)));
 #else
-#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
+#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0) \
+    && (WASM_ENABLE_FAST_JIT == 0)
                 if (local_offset < 0x80) {
 #if WASM_ENABLE_DEBUG_INTERP != 0
                     record_fast_op(module, p_org, *p_org);
@@ -7505,7 +7518,6 @@ re_scan:
                 PUSH_TYPE(global_type);
 
 #if WASM_ENABLE_FAST_INTERP == 0
-#if (WASM_ENABLE_WAMR_COMPILER == 0) && (WASM_ENABLE_JIT == 0)
                 if (global_type == VALUE_TYPE_I64
                     || global_type == VALUE_TYPE_F64) {
 #if WASM_ENABLE_DEBUG_INTERP != 0
@@ -7513,7 +7525,6 @@ re_scan:
 #endif
                     *p_org = WASM_OP_GET_GLOBAL_64;
                 }
-#endif
 #else /* else of WASM_ENABLE_FAST_INTERP */
                 if (global_type == VALUE_TYPE_I64
                     || global_type == VALUE_TYPE_F64) {
diff --git a/core/iwasm/interpreter/wasm_mini_loader.c b/core/iwasm/interpreter/wasm_mini_loader.c
index c3ccd55ae..023942d46 100644
--- a/core/iwasm/interpreter/wasm_mini_loader.c
+++ b/core/iwasm/interpreter/wasm_mini_loader.c
@@ -10,6 +10,9 @@
 #include "wasm_opcode.h"
 #include "wasm_runtime.h"
 #include "../common/wasm_native.h"
+#if WASM_ENABLE_FAST_JIT != 0
+#include "../fast-jit/jit_compiler.h"
+#endif
 
 /* Read a value of given type from the address pointed to by the given
    pointer and increase the pointer to the position just after the
@@ -2137,6 +2140,13 @@ load_from_sections(WASMModule *module, WASMSection *sections,
         }
     }
 
+#if WASM_ENABLE_FAST_JIT != 0
+    if (!jit_compiler_compile_all(module)) {
+        set_error_buf(error_buf, error_buf_size, "fast jit compilation failed");
+        return false;
+    }
+#endif
+
 #if WASM_ENABLE_MEMORY_TRACING != 0
     wasm_runtime_dump_module_mem_consumption(module);
 #endif
diff --git a/product-mini/platforms/linux/CMakeLists.txt b/product-mini/platforms/linux/CMakeLists.txt
index c0b6f6a8c..b0b347792 100644
--- a/product-mini/platforms/linux/CMakeLists.txt
+++ b/product-mini/platforms/linux/CMakeLists.txt
@@ -50,6 +50,11 @@ if (NOT DEFINED WAMR_BUILD_JIT)
     set (WAMR_BUILD_JIT 0)
 endif ()
 
+if (NOT DEFINED WAMR_BUILD_FAST_JIT)
+    # Disable Fast JIT by default
+    set (WAMR_BUILD_FAST_JIT 0)
+endif ()
+
 if (NOT DEFINED WAMR_BUILD_LIBC_BUILTIN)
     # Enable libc builtin support by default
     set (WAMR_BUILD_LIBC_BUILTIN 1)
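For completeness, a minimal way to exercise the new flag from this platform directory (a sketch; assumes a typical out-of-tree CMake build on a Linux host):

    cd product-mini/platforms/linux
    mkdir build && cd build
    cmake .. -DWAMR_BUILD_FAST_JIT=1
    make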