From 1d255ce0b56f3309fe964c798e1c43ae71471d97 Mon Sep 17 00:00:00 2001
From: Huang Qi
Date: Mon, 24 Nov 2025 19:19:18 +0800
Subject: [PATCH] refactor: Improve debug experience with CFI directives and
 proper label naming in Thumb invokeNative (#4719)

This change improves the debugging experience and the quality of the
Thumb invokeNative assembly:

1. Add CFI (Call Frame Information) directives (.cfi_startproc,
   .cfi_def_cfa_offset, .cfi_offset, .cfi_endproc) so that debuggers have
   proper call frame information for stack unwinding.
2. Improve label naming by using the dot-prefixed local-label convention
   (.Lreturn, .Lcall_func, etc.), following common assembly practice.

These changes improve debuggability and maintainability; the behavior of
the function is unchanged.

Signed-off-by: Huang Qi
---
 core/iwasm/common/arch/invokeNative_thumb.s | 32 +++++++++++++--------
 1 file changed, 20 insertions(+), 12 deletions(-)

diff --git a/core/iwasm/common/arch/invokeNative_thumb.s b/core/iwasm/common/arch/invokeNative_thumb.s
index 8087c63b8..9a3f651b4 100644
--- a/core/iwasm/common/arch/invokeNative_thumb.s
+++ b/core/iwasm/common/arch/invokeNative_thumb.s
@@ -12,7 +12,7 @@ invokeNative:
     .globl _invokeNative
 _invokeNative:
 #endif /* end of BH_PLATFORM_DARWIN */
-
+    .cfi_startproc
 /*
  * Arguments passed in:
  *
@@ -24,34 +24,40 @@ _invokeNative:
     push {r4, r5, r6, r7}
     push {lr}
     sub sp, sp, #4 /* make sp 8 byte aligned */
+    .cfi_def_cfa_offset 24
+    .cfi_offset lr, -20
+    .cfi_offset r4, -16
+    .cfi_offset r5, -12
+    .cfi_offset r6, -8
+    .cfi_offset r7, -4

     mov ip, r0 /* ip = function ptr */
     mov r4, r1 /* r4 = argv */
     mov r5, r2 /* r5 = argc */

     cmp r5, #1 /* at least one argument required: exec_env */
-    blt return
+    blt .Lreturn

     mov r6, #0 /* increased stack size */

     ldr r0, [r4] /* r0 = argv[0] = exec_env */
     add r4, r4, #4 /* r4 += 4 */
     cmp r5, #1
-    beq call_func
+    beq .Lcall_func

     ldr r1, [r4] /* r1 = argv[1] */
     add r4, r4, #4
     cmp r5, #2
-    beq call_func
+    beq .Lcall_func

     ldr r2, [r4] /* r2 = argv[2] */
     add r4, r4, #4
     cmp r5, #3
-    beq call_func
+    beq .Lcall_func

     ldr r3, [r4] /* r3 = argv[3] */
     add r4, r4, #4
     cmp r5, #4
-    beq call_func
+    beq .Lcall_func

     sub r5, r5, #4 /* argc -= 4, now we have r0 ~ r3 */
@@ -66,29 +72,31 @@ _invokeNative:
     mov sp, r7
     mov lr, r2 /* save r2 */
-loop_args: /* copy left arguments to stack */
+
+.Lloop_args: /* copy left arguments to stack */
     cmp r5, #0
-    beq call_func1
+    beq .Lcall_func1
     ldr r2, [r4]
     add r4, r4, #4
     str r2, [r7]
     add r7, r7, #4
     sub r5, r5, #1
-    b loop_args
+    b .Lloop_args

-call_func1:
+.Lcall_func1:
     mov r2, lr /* restore r2 */

-call_func:
+.Lcall_func:
     blx ip
     add sp, sp, r6 /* restore sp */

-return:
+.Lreturn:
     add sp, sp, #4 /* make sp 8 byte aligned */
     pop {r3}
     pop {r4, r5, r6, r7}
     mov lr, r3
     bx lr
+    .cfi_endproc

 #if defined(__linux__) && defined(__ELF__)
 .section .note.GNU-stack,"",%progbits
 #endif
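
Note on the CFI values: the offsets follow directly from the prologue. push {r4, r5, r6, r7}
lowers sp by 16 and stores r4..r7 at CFA-16..CFA-4 (lowest register at the lowest address),
push {lr} stores lr at CFA-20, and the extra sub sp, sp, #4 brings the distance between the
current sp and the canonical frame address (the sp value at function entry) to 24, hence
.cfi_def_cfa_offset 24. The sketch below is not part of the patch; it is a minimal,
stand-alone GNU-assembler example (the function name demo_cfi is made up) that applies the
same annotation pattern to a trivial Thumb function, for readers who want to experiment with
the directives in isolation:

        .text
        .syntax unified
        .thumb
        .align  2
        .globl  demo_cfi
        .type   demo_cfi, %function
        .thumb_func
    demo_cfi:
        .cfi_startproc
        push {r4, r5, r6, r7}    /* sp -= 16; r4..r7 saved at CFA-16..CFA-4 */
        push {lr}                /* sp -= 4;  lr saved at CFA-20 */
        sub sp, sp, #4           /* sp -= 4;  keep sp 8-byte aligned */
        .cfi_def_cfa_offset 24   /* CFA = current sp + 24 = sp at function entry */
        .cfi_offset lr, -20
        .cfi_offset r4, -16
        .cfi_offset r5, -12
        .cfi_offset r6, -8
        .cfi_offset r7, -4

        movs r0, #0              /* placeholder body */

        add sp, sp, #4           /* epilogue mirrors the patch: no CFI restore */
        pop {r3}                 /* r3 = saved lr */
        pop {r4, r5, r6, r7}
        mov lr, r3
        bx lr
        .cfi_endproc
        .size demo_cfi, . - demo_cfi

Assembling this (for example with arm-none-eabi-gcc -c) and dumping the object with
readelf --debug-dump=frames should show an FDE whose unwind rules match the directives
above; the same check can be run on the assembled invokeNative_thumb.s to confirm the patch.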
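
Note on the .L prefix: the GNU assembler treats names that begin with .L as local labels.
They can still be used as branch targets within the file, but they are not written to the
object file's symbol table, so they cannot shadow or split up the invokeNative symbol in
debugger backtraces, disassembly, or profiler output. The snippet below is not from the
patch; it is a small stand-alone illustration (the names label_demo, old_style_target and
.Lnew_style_target are made up), assuming the GNU ARM assembler:

        .text
        .syntax unified
        .thumb
        .align  2
        .globl  label_demo
        .type   label_demo, %function
        .thumb_func
    label_demo:
        movs r0, #0
    old_style_target:            /* plain label: emitted as a local symbol */
        adds r0, r0, #1
        cmp  r0, #3
        bne  old_style_target
    .Lnew_style_target:          /* .L label: resolved at assembly time, not emitted */
        adds r0, r0, #1
        cmp  r0, #6
        bne  .Lnew_style_target
        bx   lr
        .size label_demo, . - label_demo

Running nm on the assembled object should list old_style_target as a local text symbol but
not .Lnew_style_target, which is why address-to-symbol tools keep attributing the whole body
to label_demo in the .L case.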