mirror of
https://github.com/bytecodealliance/wasm-micro-runtime.git
synced 2025-05-09 13:16:26 +00:00
Refine llvm pass order (#948)
Put Vectorize passes before GVN/LICM passes, as normally the former gains more performance improvement and the latter might break the optimizations for the former. Can improve performance of several sightglass cases. And don't check exception throw after calling an AOT function if it is a recursive call, similar to the handling of the Spec tail call opcode.
This commit is contained in:
parent
6bcf048523
commit
30cb05f223
|
@ -560,6 +560,10 @@ aot_compile_op_call(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
|
|||
goto fail;
|
||||
}
|
||||
else {
|
||||
bool recursive_call =
|
||||
(func_ctx == func_ctxes[func_idx - import_func_count]) ? true
|
||||
: false;
|
||||
|
||||
if (comp_ctx->is_indirect_mode) {
|
||||
LLVMTypeRef func_ptr_type;
|
||||
|
||||
|
@ -603,7 +607,8 @@ aot_compile_op_call(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx,
|
|||
|
||||
/* Check whether there was exception thrown when executing
|
||||
the function */
|
||||
if (!tail_call && !check_exception_thrown(comp_ctx, func_ctx))
|
||||
if (!tail_call && !recursive_call
|
||||
&& !check_exception_thrown(comp_ctx, func_ctx))
|
||||
goto fail;
|
||||
}
|
||||
|
||||
|
|
|
@ -500,7 +500,7 @@ create_cur_exception(AOTCompContext *comp_ctx, AOTFuncContext *func_ctx)
|
|||
|
||||
offset = I32_CONST(offsetof(AOTModuleInstance, cur_exception));
|
||||
func_ctx->cur_exception = LLVMBuildInBoundsGEP(
|
||||
comp_ctx->builder, func_ctx->aot_inst, &offset, 1, "cur_execption");
|
||||
comp_ctx->builder, func_ctx->aot_inst, &offset, 1, "cur_exception");
|
||||
if (!func_ctx->cur_exception) {
|
||||
aot_set_last_error("llvm build in bounds gep failed.");
|
||||
return false;
|
||||
|
@ -1877,6 +1877,8 @@ aot_create_comp_context(AOTCompData *comp_data, aot_comp_option_t option)
|
|||
aot_set_last_error("create LLVM target machine failed.");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
LLVMSetTarget(comp_ctx->module, triple_norm);
|
||||
}
|
||||
|
||||
if (option->enable_simd && strcmp(comp_ctx->target_arch, "x86_64") != 0
|
||||
|
@ -1935,6 +1937,11 @@ aot_create_comp_context(AOTCompData *comp_data, aot_comp_option_t option)
|
|||
LLVMAddIndVarSimplifyPass(comp_ctx->pass_mgr);
|
||||
|
||||
if (!option->is_jit_mode) {
|
||||
/* Put Vectorize passes before GVN/LICM passes as the former
|
||||
might gain more performance improvement and the latter might
|
||||
break the optimizations for the former */
|
||||
LLVMAddLoopVectorizePass(comp_ctx->pass_mgr);
|
||||
LLVMAddSLPVectorizePass(comp_ctx->pass_mgr);
|
||||
LLVMAddLoopRotatePass(comp_ctx->pass_mgr);
|
||||
LLVMAddLoopUnswitchPass(comp_ctx->pass_mgr);
|
||||
LLVMAddInstructionCombiningPass(comp_ctx->pass_mgr);
|
||||
|
@ -1944,11 +1951,9 @@ aot_create_comp_context(AOTCompData *comp_data, aot_comp_option_t option)
|
|||
disable them when building as multi-thread mode */
|
||||
LLVMAddGVNPass(comp_ctx->pass_mgr);
|
||||
LLVMAddLICMPass(comp_ctx->pass_mgr);
|
||||
LLVMAddInstructionCombiningPass(comp_ctx->pass_mgr);
|
||||
LLVMAddCFGSimplificationPass(comp_ctx->pass_mgr);
|
||||
}
|
||||
LLVMAddLoopVectorizePass(comp_ctx->pass_mgr);
|
||||
LLVMAddSLPVectorizePass(comp_ctx->pass_mgr);
|
||||
LLVMAddInstructionCombiningPass(comp_ctx->pass_mgr);
|
||||
LLVMAddCFGSimplificationPass(comp_ctx->pass_mgr);
|
||||
}
|
||||
|
||||
/* Create metadata for llvm float experimental constrained intrinsics */
|
||||
|
@ -2608,4 +2613,4 @@ aot_load_const_from_table(AOTCompContext *comp_ctx, LLVMValueRef base,
|
|||
}
|
||||
|
||||
return const_value;
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue
Block a user